diff --git "a/4337.jsonl" "b/4337.jsonl" new file mode 100644--- /dev/null +++ "b/4337.jsonl" @@ -0,0 +1,617 @@ +{"seq_id":"125969024","text":"from datetime import timedelta, datetime\n\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import viewsets, mixins, exceptions\nfrom rest_framework.response import Response\n\nfrom api.exceptions import NotExistsGithubUser, RateLimitGithubAPI\nfrom api.githubs.serializers import OrganizationSerializer, RepositorySerializer, LanguageSerializer, \\\n GithubUserListSerializer, GithubUserSerializer\nfrom api.paginations import IdOrderingPagination, TierOrderingPagination, TotalScorePagination, DescIdOrderingPagination\nfrom api.ranks.serializers import TierSerializer\nfrom apps.githubs.models import GithubUser, Organization, Repository, Language\nfrom utils.exceptions import GitHubUserDoesNotExist, RateLimit\nfrom core.github_service import GithubInformationService\n\n\nclass GithubUserViewSet(mixins.UpdateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n endpoint : githubs/users/:username\n \"\"\"\n\n queryset = GithubUser.objects.prefetch_related('organization', 'repository', 'language').all()\n serializer_class = GithubUserListSerializer\n pagination_class = TotalScorePagination\n lookup_url_kwarg = 'username'\n\n def get_queryset(self):\n data = self.request.GET\n queryset = self.queryset\n\n if data.get('company'):\n queryset = queryset.filter(company__icontains=data.get('company'))\n\n if data.get('username'):\n queryset = queryset.filter(username__icontains=data.get('username'))\n\n if data.get('tier'):\n queryset = queryset.filter(tier=data.get('tier'))\n\n return queryset\n\n def retrieve(self, request, *args, **kwargs):\n self.serializer_class = GithubUserSerializer\n username = self.kwargs.get(self.lookup_url_kwarg)\n github_user = self.get_queryset().filter(username=username).first()\n\n if not github_user:\n try:\n github_information_service = GithubInformationService(username)\n github_user = github_information_service.update()\n\n except GitHubUserDoesNotExist:\n raise NotExistsGithubUser()\n\n except RateLimit:\n raise RateLimitGithubAPI()\n\n serializer = self.serializer_class(github_user)\n return Response(serializer.data)\n\n def update(self, request, *args, **kwargs):\n self.serializer_class = GithubUserSerializer\n username = self.kwargs.get(self.lookup_url_kwarg)\n\n try:\n github_user = GithubUser.objects.filter(username=username).get()\n\n # 업데이트 한지 하루가 지나야지 재업데이트\n if github_user.updated + timedelta(1) >= datetime.now():\n response_data = self.serializer_class(github_user).data\n return Response(response_data)\n\n github_information_service = GithubInformationService(username)\n user = github_information_service.update()\n response_data = self.serializer_class(user).data\n\n except GithubUser.DoesNotExist:\n raise exceptions.NotFound\n\n except RateLimit:\n raise RateLimitGithubAPI()\n\n return Response(response_data)\n\n\nclass OrganizationViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n endpoint : githubs/users/:user_pk/organizations/\n \"\"\"\n\n queryset = Organization.objects.all()\n serializer_class = OrganizationSerializer\n lookup_url_kwarg = 'user_pk'\n\n def get_queryset(self):\n user_pk = self.kwargs.get(self.lookup_url_kwarg)\n organizations = Organization.objects.filter(org__github_user_id=user_pk)\n\n return organizations\n\n def list(self, request, *args, **kwargs):\n 
organizations = self.get_queryset()\n serializer = self.serializer_class(organizations, many=True)\n\n return Response(serializer.data)\n\n\nclass RepositoryViewSet(mixins.ListModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n endpoint : githubs/:user_pk/repositories/\n \"\"\"\n\n queryset = Repository.objects.all()\n serializer_class = RepositorySerializer\n pagination_class = DescIdOrderingPagination\n lookup_url_kwarg = 'user_pk'\n\n def get_queryset(self):\n user_pk = self.kwargs.get(self.lookup_url_kwarg)\n repositories = Repository.objects.filter(github_user_id=user_pk)\n\n return repositories\n\n\nclass LanguageViewSet(mixins.ListModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n endpoint : githubs/languages/\n \"\"\"\n\n queryset = Language.objects.all()\n serializer_class = LanguageSerializer\n pagination_class = IdOrderingPagination\n\n\nclass TierRankViewSet(mixins.ListModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n endpoint : githubs/tier/\n todo: 곧 삭제 예정인 API\n \"\"\"\n\n queryset = GithubUser.objects.all()\n serializer_class = TierSerializer\n pagination_class = TierOrderingPagination\n filter_backends = [DjangoFilterBackend]\n filterset_fields = ['tier']\n","sub_path":"opgc/api/githubs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"69620086","text":"from flask import Flask, render_template,request\nimport nltk\nimport numpy as np\nimport os\nimport random\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem.lancaster import LancasterStemmer\nnltk.download('punkt') # first-time use only\nnltk.download('wordnet') # first-time use only\n\n#-------------------------------------------------------------------------------\n# word stemmer\nstemmer = LancasterStemmer()\nlemmer = WordNetLemmatizer()\n\n# Classes of training data\ntraining_data = []\ntraining_data.append({\"class\":\"greeting\", \"sentence\":\"Hello, how are you?\",\"response\":\"Hey! How are you\"})\ntraining_data.append({\"class\":\"greeting\", \"sentence\":\"Hi how is your day?\",\"response\":\"Good day to you\"})\ntraining_data.append({\"class\":\"greeting\", \"sentence\":\"good day\",\"response\":\"Hello there!\"})\ntraining_data.append({\"class\":\"greeting\", \"sentence\":\"how is it going today?\",\"response\":\"I am glad! You are talking to me\"})\n\ntraining_data.append({\"class\":\"goodbye\", \"sentence\":\"have a nice day\"})\ntraining_data.append({\"class\":\"goodbye\", \"sentence\":\"bye, see you later\"})\ntraining_data.append({\"class\":\"goodbye\", \"sentence\":\"goodbye\"})\ntraining_data.append({\"class\":\"goodbye\", \"sentence\":\"talk to you soon\"})\n\ntraining_data.append({\"class\":\"movie\", \"sentence\":\"any suggestion movie?\",\"response\":\"what type of movie do you like?\"})\ntraining_data.append({\"class\":\"movie\", \"sentence\":\"what movie do you recommendation?\",\"response\":\"what genre of movie are you looking for?\"})\ntraining_data.append({\"class\":\"movie\", \"sentence\":\"i am looking for movies\",\"response\":\"what genre are you interest in?\"})\ntraining_data.append({\"class\":\"movie\", \"sentence\":\"please suggest me some interesting movies\",\"response\":\"Romance? Action? Comedy? Horror?\"})\n\ntraining_data.append({\"class\":\"comedy\", \"sentence\":\"what good comedy movie suggest\",\"response\":\"How about Deadpool 1 and 2? 
New feeling of Heroes comedy\"})\ntraining_data.append({\"class\":\"comedy\", \"sentence\":\"any fun movie suggestion\",\"response\":\"I would go for King's Man, a comedy with cool action movie\"})\ntraining_data.append({\"class\":\"comedy\", \"sentence\":\"some happy movie\",\"response\":\"Try Incredibles, you would love the cute and funny characters\"})\ntraining_data.append({\"class\":\"comedy\", \"sentence\":\"i need some movie that make is funny\",\"response\":\"Ted for sure! The teddy bear is amazing\"})\n\ntraining_data.append({\"class\":\"action\", \"sentence\":\"what good action movie suggest\",\"response\":\"Star Wars Series might be your best choice! Huge war fighting in epic space\"})\ntraining_data.append({\"class\":\"action\", \"sentence\":\"any fighting movie suggestion\",\"response\":\"Avengers Series for sure! Cool Heroes fighting!\"})\ntraining_data.append({\"class\":\"action\", \"sentence\":\"some exciting movie\",\"response\":\"How about Jurassic World Series? Best classic!\"})\ntraining_data.append({\"class\":\"action\", \"sentence\":\"i need some adventure movie\",\"response\":\"It must be Mission Impossible Series! Exciting spy scene everywhere\"})\n\ntraining_data.append({\"class\":\"romance\", \"sentence\":\"what good romance movie\",\"response\":\"A Star is Born might be your choice. Romantic story about dreams\"})\ntraining_data.append({\"class\":\"romance\", \"sentence\":\"any love movie suggestion\",\"response\":\"With fantasy and romantic, Beauty and the Beast \"})\ntraining_data.append({\"class\":\"romance\", \"sentence\":\"some lovely movie\",\"response\":\"You should look for Love, Simon. A special story about homosexual love\"})\ntraining_data.append({\"class\":\"romance\", \"sentence\":\"i need some romantic movie\",\"response\":\"A romantic comedy story, Crazy Rich Asians\"})\n\n#list of responses\ngreet = []\nfor i in range(4):\n    greet.append(training_data[i])\n    grt=list(set([b[\"response\"] for b in greet]))\n\ngoodbye = []\nfor index,x in enumerate(training_data,start=4):\n    if index>7:\n        break\n    else:\n        goodbye.append(training_data[index])\n        bye=list(set([b[\"sentence\"] for b in goodbye]))\n\nmovies = []\nfor index,x in enumerate(training_data,start=8):\n    if index>11:\n        break\n    else:\n        movies.append(training_data[index])\n        mov=list(set([b[\"response\"] for b in movies]))\n\ncomedys = []\nfor index,x in enumerate(training_data,start=12):\n    if index>15:\n        break\n    else:\n        comedys.append(training_data[index])\n        comedy=list(set([b[\"response\"] for b in comedys]))\n\nactions = []\nfor index,x in enumerate(training_data,start=16):\n    if index>19:\n        break\n    else:\n        actions.append(training_data[index])\n        action=list(set([b[\"response\"] for b in actions]))\n\nromances = []\nfor index,x in enumerate(training_data,start=20):\n    if index>23:\n        break\n    else:\n        romances.append(training_data[index])\n        romance=list(set([b[\"response\"] for b in romances]))\n\n# capture unique stemmed words in the training corpus\ncorpus_words = {}\nclass_words = {}\n# turn a list into a set (of unique items) and then a list again (this removes duplicates)\nclasses = list(set([a['class'] for a in training_data]))\nfor c in classes:\n    # prepare a list of words within each class\n    class_words[c] = []\n\n# loop through each sentence in our training data\nfor data in training_data:\n    # tokenize each sentence into words\n    for word in nltk.word_tokenize(data['sentence']):\n        # ignore some things\n        if word not in [\"?\", \"'s\"]:\n            # stem and lowercase each word\n            stemmed_word = 
stemmer.stem(word.lower())\n # have we not seen this word already?\n if stemmed_word not in corpus_words:\n corpus_words[stemmed_word] = 1\n else:\n corpus_words[stemmed_word] += 1\n #if lemmed_word not in corpus_words:\n # corpus_words[lemmed_word] = 1\n #else:\n # corpus_words[lemmed_word] += 1\n # add the word to our words in class list\n class_words[data['class']].extend([stemmed_word])\n #class_words[data['class']].extend([lemmed_word])\n\n\n# calculate a score for a given class taking into account word commonality\ndef calculate_class_score_commonality(sentence, class_name, show_details=True):\n score = 0\n # tokenize each word in our new sentence\n for word in nltk.word_tokenize(sentence):\n # check to see if the stem of the word is in any of our classes\n if stemmer.stem(word.lower()) in class_words[class_name]:\n # treat each word with relative weight\n score += (1 / corpus_words[stemmer.stem(word.lower())])\n\n if show_details:\n print (\" match: %s (%s)\" % (stemmer.stem(word.lower()), 1 / corpus_words[stemmer.stem(word.lower())]))\n return score\n\n\ndef classify(sentence):\n high_class = None\n high_score = 0\n # loop through our classes\n for c in class_words.keys():\n # calculate score of sentence for each class\n score = calculate_class_score_commonality(sentence, c, show_details=False)\n # keep track of highest score\n if score > high_score:\n high_class = c\n high_score = score\n return high_class\n#--------------------------------------------------------\n\n\ndef response(user_response):\n flag=True\n while(flag==True):\n user_response=user_response.lower()\n if(classify(user_response)!=None):\n if classify(user_response) == \"greeting\":\n bot_response = random.choice(grt)\n elif classify(user_response) == \"goodbye\":\n flag=False\n bot_response = random.choice(bye)\n elif classify(user_response) == \"movie\":\n bot_response = random.choice(mov)\n elif classify(user_response) == \"comedy\":\n bot_response = random.choice(comedy)\n elif classify(user_response) == \"action\":\n bot_response = random.choice(action)\n elif classify(user_response) == \"romance\":\n bot_response = random.choice(romance)\n else:\n bot_response = \"I am sorry! 
I don't understand you\"\n print(bot_response)\n return bot_response\n\n\n#Main-----------------------------------------------------------------\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/process',methods=['POST'])\ndef process():\n\tuser_response=request.form['user_input']\n\n\tbot_response=response(user_response)\n\tprint(\"Friend: \"+response(user_response))\n\treturn render_template('index.html',user_input=user_response,\n\t\tbot_response=bot_response\n\t\t)\n\nif __name__=='__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"187061823","text":"from turtle import Screen\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\nimport time\n\nSCREEN_WIDTH = 700\nSCREEN_HEIGHT = 700\nX_LIMIT = int(SCREEN_WIDTH / 2 - 5)\nY_LIMIT = int(SCREEN_HEIGHT / 2 - 5)\n\nGAME_MODE = {\"easy\": 0.5, \"normal\": 0.2, \"hard\": 0.1, \"super\": 0.05}\n\n# Create a screen\nscreen = Screen()\nscreen.setup(width=SCREEN_WIDTH, height=SCREEN_HEIGHT)\nscreen.bgcolor(\"black\")\nscreen.tracer(0)\nscreen.title(\"Snake Game\")\n\n# Create a snake and a food\nsnake = Snake()\nfood = Food()\nscore = Scoreboard()\nscreen.update()\n\n# Game mode selection by user\nuser_choice = screen.textinput(title=\"Choose a game mode\", prompt=\"Input 'EASY', 'NORMAL', 'HARD', 'SUPER'\").lower()\nif user_choice in GAME_MODE.keys():\n game_mode = GAME_MODE[user_choice]\nelse:\n game_mode = GAME_MODE[\"normal\"]\n\n# Capture the key to control snake\nscreen.listen()\nscreen.onkey(snake.up, \"Up\")\nscreen.onkey(snake.down, \"Down\")\nscreen.onkey(snake.right, \"Right\")\nscreen.onkey(snake.left, \"Left\")\n\nis_on = True\nwhile is_on:\n screen.update()\n time.sleep(game_mode)\n snake.move()\n\n # Detect collision with food\n if snake.head.distance(food) < 15:\n food.refresh()\n score.increase_score()\n snake.extend()\n\n # Detect collision with all\n if snake.head.xcor() > X_LIMIT or snake.head.xcor() < -X_LIMIT or snake.head.ycor() > X_LIMIT or snake.head.ycor() < -X_LIMIT:\n # Hit the wall\n is_on = False\n score.game_over()\n\n # Detect collision with tail\n\n for seg in snake.segments[1:]:\n if snake.head.distance(seg) < 10:\n # Hit the tail\n is_on = False\n score.game_over()\n\nscreen.exitonclick()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"328691585","text":"from django.shortcuts import render\nfrom apisite.models import *\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import redirect\nfrom .forms import *\nfrom django.contrib.auth import authenticate, login\n\n# Create your views here.\ndef home(request):\n perfil = Profile.objects.all()\n return render(request,'index.html',{'usuarios':perfil})\n\ndef signup(request):\n form = SignUpForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n form.save()\n return redirect(home)\n else:\n form = SignUpForm(request.POST or None)\n return render(request, 'signup.html', {'form': form})\n\ndef perfil(request,id):\n perfil = Profile.objects.filter(id=id)\n print(perfil)\n return render(request, 'perfil.html', 
{'dados':perfil})\n","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"351578960","text":"import sys\nimport socket\n\nipserver = ['192.168.122.33', '192.168.122.10']\n\nfor i in range(2):\n\t# Create a TCP/IP socket\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t# Connect the socket to the port where the server is listening\n\tserver_address = (ipserver[i], 10000)\n\tprint(f\"connecting to {server_address}\")\n\tsock.connect(server_address)\n\n\n\ttry:\n\t\t# Send data\n\t\tsend_filename = 'sendimage.jpg'\n\t\trecv_filename = 'recvimage' + str(i) + '.jpg'\n\n\t\twith open(send_filename, 'rb') as file:\n\t\t sendfile = file.read() \n\t\tsock.sendall(sendfile)\n\t\tprint(f\"sending {send_filename}\")\n\t\t# Look for the response\n\t\tamount_received = 0\n\t\tamount_expected = len(sendfile)\n\t\twith sock,open(recv_filename,'wb') as file:\n\t\t while amount_received < amount_expected:\n\t\t data = sock.recv(16)\n\t\t amount_received += len(data)\n\t\t if not data:\n\t\t break\n\t\t file.write(data)\n\tfinally:\n\t\tprint(f\"{recv_filename} done\")\n\t\tsock.close()\n","sub_path":"progjar1/jawaban/client-image.py","file_name":"client-image.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"204370445","text":"#!/usr/bin/env python\n\n\"\"\"Utility classes and functions for SAMPLing analysis.\"\"\"\n\n\n# =============================================================================\n# GLOBAL IMPORTS\n# =============================================================================\n\nimport os\nimport csv\nimport glob\nimport json\nimport operator\nimport collections\n\nimport numpy as np\nimport pandas as pd\n\nfrom .submission import SamplSubmission\nfrom .stats import mean_confidence_interval\n\n\n# =============================================================================\n# CONSTANTS\n# =============================================================================\n\nN_REPLICATES = 5\n\n# All YANK calculations and all phases have run for the same number of iterations/steps.\nYANK_N_ITERATIONS = 40000\nN_STEPS_PER_ITERATIONS = 500\n\n# Number of states in the alchemical protocol (complex + solvent) for each system.\nYANK_N_STATES = {\n 'CB8-G3': 69 + 62,\n 'OA-G3': 59 + 54,\n 'OA-G6': 55 + 52\n}\n\nDG_KEY = '$\\Delta$G [kcal/mol]'\nDDG_KEY = 'd$\\Delta$G [kcal/mol]'\n\n\n# =============================================================================\n# UTILITY FUNCTIONS\n# =============================================================================\n\ndef export_dictionary(data_dict, file_base_path):\n \"\"\"Export the data in CSV and JSON format.\n\n Parameters\n ----------\n data_dict : dict\n Dictionary system ID -> {key: list of values}.\n file_base_path : str\n The extension-less path to the output files.\n\n \"\"\"\n os.makedirs(os.path.dirname(file_base_path), exist_ok=True)\n\n # Export to JSON as it is.\n with open(file_base_path + '.json', 'w') as f:\n json.dump(data_dict, f, indent=4, sort_keys=True)\n\n # Flatten for CSV format.\n csv_data = []\n for system_id in sorted(data_dict):\n for key, values in data_dict[system_id].items():\n csv_data.append([system_id + '-' + key] + values)\n\n # Export CSV file.\n with open(file_base_path + '.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerows(csv_data)\n\n\ndef 
compute_system_name_mean_free_energies(data, extra_fields=()):\n \"\"\"Compute mean free energies and CI for each system name in data.\"\"\"\n output_data = []\n system_names = data['System name'].unique()\n for system_name in system_names:\n system_name_data = data[data['System name'] == system_name]\n\n # Obtain number of energy evaluations for data point.\n energy_evaluations = system_name_data['N energy evaluations'].unique().tolist()\n tot_energy_evaluations = max(energy_evaluations)\n\n # Add records.\n for idx, n_energy_evaluations in enumerate(energy_evaluations):\n time_point_data = system_name_data[system_name_data['N energy evaluations'] == n_energy_evaluations]\n free_energies = time_point_data[DG_KEY]\n f, ci = mean_confidence_interval(free_energies.values, confidence=0.95)\n std = np.std(free_energies.values)\n\n extra_values = {}\n for field in extra_fields:\n assert len(time_point_data[field].unique()) == 1\n extra_values[field] = time_point_data[field].values[0]\n\n # Keep \"Simulation percentage\" an integer if possible.\n if len(energy_evaluations) == 100:\n simulation_percentage = energy_evaluations.index(n_energy_evaluations) + 1\n else:\n simulation_percentage = n_energy_evaluations * 100 / tot_energy_evaluations\n\n output_data.append({\n 'System name': system_name,\n DG_KEY: f,\n 'std': std,\n '$\\Delta$G CI': ci,\n 'Simulation percentage': simulation_percentage,\n 'N energy evaluations': n_energy_evaluations,\n **extra_values\n })\n columns_order = ['System name', DG_KEY, 'std', '$\\Delta$G CI', 'Simulation percentage',\n 'N energy evaluations'] + list(extra_fields)\n output_data = pd.DataFrame(output_data, columns=columns_order)\n return output_data\n\n\n# =============================================================================\n# CONVERSION FROM ENERGY EVALUATIONS/CPU TIME TO ITERATIONS\n# =============================================================================\n\ndef energy_evaluations_from_iterations(system_name, n_iterations):\n \"\"\"Compute the number of energy evaluations necessary to run N iteration\"\"\"\n n_states = YANK_N_STATES[system_name]\n md_energy_evaluations = n_states * N_STEPS_PER_ITERATIONS\n mc_energy_evaluations = 2 * 2 * n_states # Rotation and translation, initial and final energy.\n energy_matrix_evaluations = n_states * n_states # Technically, we compute only the changed force groups.\n energy_evaluations_per_iteration = md_energy_evaluations + mc_energy_evaluations + energy_matrix_evaluations\n return energy_evaluations_per_iteration * n_iterations\n\n\ndef energy_evaluations_iteration_cutoffs(tot_energy_evaluations, system_name):\n \"\"\"Compute the 100 YANK iterations to use for analysis from the total energy evaluations to consider.\"\"\"\n # Find the number of energy evaluations per iteration.\n energy_evaluations_per_iteration = energy_evaluations_from_iterations(system_name, n_iterations=1)\n # Find total number of iterations.\n last_iteration = tot_energy_evaluations / energy_evaluations_per_iteration\n return get_iteration_cutoffs(last_iteration)\n\n\ndef cpu_time_iteration_cutoffs(tot_time, system_id, yank_cpu_times):\n \"\"\"Compute the 100 YANK iterations to use for analysis from the total wall-clock time to consider.\"\"\"\n # Find average time per iteration.\n yank_time_per_iteration = yank_cpu_times[system_id] / YANK_N_ITERATIONS\n\n # Find total number of iterations.\n last_iteration = tot_time / yank_time_per_iteration\n return get_iteration_cutoffs(last_iteration)\n\n\ndef 
get_iteration_cutoffs(last_iteration):\n    \"\"\"Return 100 equally spaced iterations.\n\n    Parameters\n    ----------\n    last_iteration : float\n        The approximate total number of iterations considered.\n        This will be rounded to the nearest integer.\n\n    Returns\n    -------\n    iteration_cutoffs : list of int\n        A list of 100 equally-spaced iterations.\n\n    \"\"\"\n    # Find all iterations cutoff.\n    first_iteration = last_iteration / 100\n    iteration_cutoffs = np.linspace(first_iteration, last_iteration, num=100, endpoint=True)\n\n    # Convert to list of integers.\n    iteration_cutoffs = np.rint(iteration_cutoffs).astype(int).tolist()\n\n    assert iteration_cutoffs[-1] <= YANK_N_ITERATIONS\n    assert iteration_cutoffs[0] != 0\n    return iteration_cutoffs\n\n\n# =============================================================================\n# SAMPLing HOST-GUEST SUBMISSION\n# =============================================================================\n\nclass SamplingSubmission(SamplSubmission):\n    \"\"\"A submission for the SAMPLing challenge.\n\n    Parameters\n    ----------\n    file_path : str\n        The path to the submission file.\n\n    Raises\n    ------\n    IgnoredSubmission\n        If the submission ID is among the ignored submissions.\n\n    \"\"\"\n    # The D3R challenge IDs that are handled by this class.\n    CHALLENGE_IDS = {975}\n\n    # The IDs of the submissions used for testing the validation.\n    TEST_SUBMISSIONS = {'3rhz6'}\n\n    # Sections of the submission file.\n    SECTIONS = {'Predictions', 'Cost', 'Name', 'Software', 'TechnicalDetails', 'Method'}\n\n    # Sections in CSV format with kwargs to pass to pandas.read_csv().\n    CSV_SECTIONS = {\n        # The predictions table is transposed in __init__.\n        'Predictions': {'header': None, 'index_col': 0},\n        'Cost': {'names': ('System ID', 'N energy evaluations', 'Wall clock time', 'CPU time'),\n                 'index_col': 'System ID'}\n    }\n\n    def __init__(self, file_path, user_map):\n        super().__init__(file_path, user_map)\n\n        file_name = os.path.splitext(os.path.basename(file_path))[0]\n        file_data = file_name.split('-')\n\n        # Check this is a known submission type.\n        self.type = file_data[2]\n        assert self.type in ['relative', 'absolute']\n\n        self.file_name, self.index = file_data[3:]\n        self.index = int(self.index)\n\n        # Load predictions.\n        sections = self._load_sections(file_path) # From parent-class.\n        self.name = sections['Name'][0]\n        self.cost = sections['Cost'] # This is a pandas DataFrame.\n\n        # Reformat predictions to create main data table.\n        predictions = sections['Predictions'] # This is a pandas DataFrame.\n        data = []\n        for system_id, row in predictions.iterrows():\n            row = row.values\n            free_energies = row[list(range(0, 200, 2))].tolist()\n            err_free_energies = row[list(range(1, 200, 2))].tolist()\n\n            tot_energy_evals = self.cost.loc[system_id, 'N energy evaluations']\n            energy_evaluations = np.linspace(tot_energy_evals / 100, tot_energy_evals,\n                                             num=100, endpoint=True)\n            energy_evaluations = np.rint(energy_evaluations).astype(int).tolist()\n\n            if not np.isnan(self.cost.loc[system_id, 'CPU time']):\n                tot_time = self.cost.loc[system_id, 'CPU time']\n            else:\n                tot_time = self.cost.loc[system_id, 'Wall clock time']\n            cpu_times = np.linspace(tot_time / 100, tot_time, num=100, endpoint=True)\n\n            # Data row.\n            for timepoint_idx in range(100):\n                data.append({\n                    'System ID': system_id,\n                    'System name': system_id[:-2], # Remove replicate ID.\n                    'N energy evaluations': energy_evaluations[timepoint_idx],\n                    'CPU time [s]': cpu_times[timepoint_idx],\n                    'CPU time [h]': cpu_times[timepoint_idx] / 3600,\n                    'CPU 
time [d]': cpu_times[timepoint_idx] / 3600 / 24,\n DG_KEY: free_energies[timepoint_idx],\n 'd$\\Delta$G': err_free_energies[timepoint_idx],\n 'Simulation percentage': timepoint_idx + 1,\n })\n self.data = pd.DataFrame(data)\n\n def mean_free_energies(self):\n \"\"\"Return a dataframe with mean free energies and 95% t-based confidence intervals.\"\"\"\n return compute_system_name_mean_free_energies(self.data)\n\n\n# =============================================================================\n# YANK ANALYSIS\n# =============================================================================\n\nclass YankSamplingAnalysis:\n \"\"\"Utility class to easily access the results of the YANK analysis.\"\"\"\n\n def __init__(self, directory_path):\n # Read in analysis files.\n self._yank_free_energies = {}\n pattern_file_path = os.path.join(directory_path, 'yank-*.json')\n for file_path in glob.glob(pattern_file_path):\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n _, system_id = file_name.split('-', 1)\n\n with open(file_path, 'r') as f:\n # Cast string keys to int.\n self._yank_free_energies[system_id] = {int(k): v for k, v in json.load(f).items()}\n assert len(self._yank_free_energies) == 15\n\n yank_cpu_times_file_path = os.path.join(directory_path, 'yank_cpu_times.json')\n with open(yank_cpu_times_file_path, 'r') as f:\n self._yank_cpu_times = json.load(f)\n\n def export(self, file_base_path):\n \"\"\"Export the YANK analysis into CSV and JSON formats.\n\n Parameters\n ----------\n file_base_path : str\n The extension-less path where to save the file.\n \"\"\"\n exported_data = collections.OrderedDict()\n\n # Export data of 5 replicates\n for system_id in sorted(self._yank_free_energies):\n iterations = sorted(self._yank_free_energies[system_id])\n system_id_data = self._free_energies_from_iterations(iterations, [system_id], mean_trajectory=False)\n exported_data[system_id] = collections.OrderedDict([\n ('DG', system_id_data[DG_KEY].values.tolist()),\n ('dDG', system_id_data[DDG_KEY].values.tolist()),\n ('hrex_iterations', system_id_data['HREX iteration'].values.tolist()),\n ('n_energy_evaluations', system_id_data['N energy evaluations'].values.tolist()),\n ('cpu_times', system_id_data['CPU time [s]'].values.tolist()),\n ])\n\n # Export data of mean trajectory and confidence intervals.\n system_names = {system_id[:-2] for system_id in self._yank_free_energies}\n for system_name in system_names:\n system_name_data = self.system_free_energies(system_name, mean_trajectory=True)\n exported_data[system_name + '-mean'] = collections.OrderedDict([\n ('DG', system_name_data[DG_KEY].values.tolist()),\n ('DG_CI', system_name_data['$\\Delta$G CI'].values.tolist()),\n ('hrex_iterations', system_name_data['HREX iteration'].values.tolist()),\n ('n_energy_evaluations', system_name_data['N energy evaluations'].values.tolist()),\n ])\n\n # Export.\n export_dictionary(exported_data, file_base_path)\n\n def system_free_energies(self, system_name, mean_trajectory=False):\n \"\"\"Get all the free energies from the system name as a Dataframe.\"\"\"\n system_ids = sorted([k for k in self._yank_free_energies if k[:-2] == system_name])\n\n # Find all iterations in common among the system_ids.\n system_name_common_iterations = None\n for system_id in system_ids:\n iterations = sorted(self._yank_free_energies[system_id])\n if system_name_common_iterations is None:\n system_name_common_iterations = set(iterations)\n else:\n system_name_common_iterations.intersection_update(set(iterations))\n\n # Retrieve 
the dataframe.\n return self._free_energies_from_iterations(sorted(system_name_common_iterations),\n system_ids, mean_trajectory)\n\n def free_energies_from_total_time(self, tot_time, system_id):\n \"\"\"Get 100 equally-spaced free energies and errors covering tot_time as a DataFrame.\"\"\"\n iterations = cpu_time_iteration_cutoffs(tot_time, system_id, self._yank_cpu_times)\n return self._free_energies_from_iterations(iterations, [system_id])\n\n def free_energies_from_energy_evaluations(self, n_energy_evaluations, system_id=None,\n system_name=None, mean_trajectory=False):\n \"\"\"Get 100 equally-spaced free energies and errors covering n_energy_evaluations as a DataFrame.\"\"\"\n assert operator.xor(system_id is not None, system_name is not None)\n\n if system_name is None:\n system_name = system_id[:-2]\n iterations = energy_evaluations_iteration_cutoffs(n_energy_evaluations, system_name)\n\n if system_id is not None:\n system_ids = [system_id]\n else:\n system_ids = sorted([k for k in self._yank_free_energies if k[:-2] == system_name])\n return self._free_energies_from_iterations(iterations, system_ids, mean_trajectory)\n\n def free_energies_from_iteration(self, final_iteration, system_id=None, mean_trajectory=False):\n \"\"\"Get 100 equally-spaced free energies and errors covering final_iterations as a DataFrame.\"\"\"\n iterations = get_iteration_cutoffs(final_iteration)\n if system_id is None:\n system_ids = []\n else:\n system_ids = [system_id]\n return self._free_energies_from_iterations(iterations, system_ids, mean_trajectory)\n\n def _free_energies_from_iterations(self, iterations, system_ids, mean_trajectory=False):\n # Handle default argument.\n if len(system_ids) == 0:\n # Pick everything.\n system_ids = sorted(self._yank_free_energies.keys())\n\n # Create dataframe.\n dataframe = []\n for system_id in system_ids:\n system_name = system_id[:-2]\n\n for iteration_idx, iteration in enumerate(iterations):\n f, df = self._yank_free_energies[system_id][iteration]\n n_energy_evaluations = energy_evaluations_from_iterations(system_name, iteration)\n cpu_time = self._yank_cpu_times[system_id] / YANK_N_ITERATIONS * iteration\n\n dataframe.append({\n 'System ID': system_id,\n 'System name': system_name,\n DG_KEY: f,\n DDG_KEY: df,\n 'Simulation percentage': iteration_idx + 1,\n 'HREX iteration': iteration,\n 'N energy evaluations': n_energy_evaluations,\n 'CPU time [s]': cpu_time,\n 'CPU time [h]': cpu_time / 3600,\n 'CPU time [d]': cpu_time / 3600 / 24,\n })\n\n # Convert to Pandas DataFrame.\n columns_order = ['System ID', 'System name', 'Simulation percentage',\n DG_KEY, DDG_KEY, 'HREX iteration', 'N energy evaluations',\n 'CPU time [s]', 'CPU time [h]', 'CPU time [d]']\n dataframe = pd.DataFrame(dataframe, columns=columns_order)\n\n # Create average +- CI trajectory if requested.\n if mean_trajectory:\n dataframe = self._compute_mean_trajectory(dataframe)\n return dataframe\n\n def _compute_mean_trajectory(self, data):\n \"\"\"Compute average free energy and t-based CI for each iteration.\"\"\"\n return compute_system_name_mean_free_energies(data, extra_fields=['HREX iteration'])\n","sub_path":"host_guest/Analysis/Scripts/pkganalysis/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":17835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"424549225","text":"import requests\nfrom common.ApiUrlUtil import ApiUrlUtil\nfrom service.CommonService import getToken\n\nclass LineStatusService:\n\n def 
getLineSatusList(self,token):\n        header = {\n            \"Token\":token\n        }\n        response = requests.get(ApiUrlUtil.api_line_status_getList, headers = header)\n        print(\"the getLineSatusList is %s\"%response.json())\n        return response.json()\n\n    def getLineSatusvalidList(self, token):\n        header = {\n            \"Token\":token\n        }\n        response = requests.get(ApiUrlUtil.api_line_status_get_validList, headers = header)\n        print(\"the getLineSatusvalidList is %s\"%response.json())\n        return response.json()\n\n    def orderLineStatus(self, token, lineStatusIdList):\n        header = {\n            \"Token\": token,\n            \"Content-Type\": \"application/json\"\n        }\n        body = lineStatusIdList\n        print(\"the orderLineStatus request is %s\"% body)\n        response = requests.post(ApiUrlUtil.api_line_status_order, headers = header, json=body)\n        print(\"the orderLineStatus response is %s\" % response.json())\n        return response.json()\n\n    def updateLineStatus(self, token, id, lineStatusInfo):\n        header = {\n            \"Token\": token,\n            \"Content-type\": \"application/json\"\n        }\n        body = {\n            \"Id\": id,\n            \"LineStatusName\": lineStatusInfo[\"LineStatusName\"],\n            \"Valid\": lineStatusInfo[\"Valid\"]\n        }\n        print(\"the updateLineStatus request is %s\"% body)\n        response = requests.post(ApiUrlUtil.api_line_status_update, headers = header, json=body)\n        print(\"the updateLineStatus response is %s\"% response.json())\n        return response.json()\n\n\nif __name__ == '__main__':\n    lstS = LineStatusService()\n    token = getToken()\n    lstS.getLineSatusvalidList(token)\n","sub_path":"service/LineStatusService.py","file_name":"LineStatusService.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"362927684","text":"import os\nimport pickle\nimport tensorflow as tf\nimport wolframclient.serializers as wxf\n\nname = 'karras2018iclr-celebahq-1024x1024'\nfile = open(name + '.pkl', 'rb')\nsess = tf.InteractiveSession()\nG, D, Gs = pickle.load(file)\nsaver = tf.train.Saver()\nsave_path = \"./target/\" + name + \"/\"\nmodel_name = 'model'\nif not os.path.exists(save_path):\n    os.makedirs(save_path)\nsave_path_full = os.path.join(save_path, model_name)\nsaver.save(sess, save_path_full)\n\nckpt = tf.train.get_checkpoint_state(save_path)\nreader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)\nall_variables = list(reader.get_variable_to_shape_map().keys())\nnpy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))\nwxf.export(npy, name + '.wxf', target_format='wxf')\n\n# Save as protobuf\nwith tf.Session() as sess:\n    tf.initialize_all_variables().run()\n    output_graph_def = tf.graph_util.convert_variables_to_constants(\n        sess=sess,\n        input_graph_def=sess.graph_def,\n        # output_node_names=['G_paper_1/images_out']\n        output_node_names=['G_paper_1/ToRGB_lod0/add']\n    )\n\n    with tf.gfile.GFile(\"./target/\" + name + \".pb\", \"wb\") as file: # save the model\n        file.write(output_graph_def.SerializeToString()) # serialize the output\n","sub_path":"Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py","file_name":"2-exporter.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"6398086","text":"m=int(input())\r\nline = input()\r\nx = line.split(' ')\r\nn=int(input())\r\nstr = input()\r\ny = str.split(' ')\r\n\r\nmiss=[]\r\ncount=0\r\n\r\nduplicates=list(set(x) & set(y))\r\n\r\nfor i in range(0,len(duplicates)):\r\n    num1=duplicates[i]\r\n    if x.count(num1)!=y.count(num1):\r\n        miss.append(num1)\r\n        
count=count+1\r\n\r\nmiss.sort()\r\n\r\noutput = []\r\nfor x in miss:\r\n if x not in output:\r\n output.append(x)\r\n print(x,end=\" \")","sub_path":"New folder/Missing numbers.py","file_name":"Missing numbers.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"347332281","text":"import pymysql\nimport sys\nimport tkinter.messagebox\nfrom tkinter import *\nimport tkinter.tix\nroot = tkinter.tix.Tk()\nsemployee_id = StringVar()\nsname = StringVar()\nstitle = StringVar()\nsphone = StringVar()\nsSSN = StringVar()\nsadderss = StringVar()\nsinventory_id = StringVar()\nsstore_id = StringVar()\nsproduct_id = StringVar()\nsinventory = StringVar()\ndef turn_save(event):\n # new recode\n try:\n conn = pymysql.connect(host='localhost', user='root', passwd='root', db='wsm01', port=3306,\n charset='utf8')\n except:\n print(\"DATABASE ERROR!\")\n conn.close()\n sys.exit()\n cur = conn.cursor()\n selectSQL = \"Insert into employee values('\" + semployee_id.get() + \"','\" + sname.get() + \"',\" + stitle.get() + \",\" + sphone.get() + \",\" + sadderss.get() + \",'\" + sSSN.get() + \"')\"\n cur.execute(selectSQL)\n conn.commit()\n cur.close()\n conn.close()\n tkinter.messagebox.showinfo('SUCCEED','RECODE SUCCEED!')\ndef turn_save0(event):\n # edit recode\n try:\n conn = pymysql.connect(host='localhost', user='root', passwd='root', db='wsm01', port=3306,\n charset='utf8')\n except:\n print(\"DATABASE ERROR!\")\n conn.close()\n sys.exit()\n cur = conn.cursor()\n selectSQL2 = \"Insert into inventory values('\" + sinventory_id.get() + \"','\" + sstore_id.get() + \"',\" + sproduct_id.get() + \",'\" + sinventory.get() +\"')\"\n cur.execute(selectSQL2)\n conn.commit()\n cur.close()\n conn.close()\n tkinter.messagebox.showinfo('SUCCEED','RECODE SUCCEED!')\ndef turn_property(event):\n getSQLDate()\ndef turn_property0(event):\n getSQLDate0()\ndef turn_property1(event):\n getSQLDate1()\ndef getSQLDate():\n # VIEW TABLE CONTENT\n try:\n conn = pymysql.connect(host='localhost', user='root', passwd='root', db='wsm01', port=3306,\n charset='utf8')\n except:\n print(\"DATABASE ERROR!\")\n conn.close()\n sys.exit()\n cur = conn.cursor()\n selectSQL = 'Select * from employee'\n cur.execute(selectSQL)\n for row in cur.fetchall():\n tree.insert(\"\", 0, text=row[0], values=(row[1], row[2], row[3], row[4],row[5]))\n cur.close()\n conn.close()\ndef getSQLDate0():\n # VIEW TABLE CONTENT\n try:\n conn = pymysql.connect(host='localhost', user='root', passwd='root', db='wsm01', port=3306,\n charset='utf8')\n except:\n print(\"DATABASE ERROR!\")\n conn.close()\n sys.exit()\n cur = conn.cursor()\n selectSQL0 = 'Select * from inventory'\n cur.execute(selectSQL0)\n for row in cur.fetchall():\n tree0.insert(\"\", 0, text=row[0], values=(row[1], row[2], row[3]))\n cur.close()\n conn.close()\n\ndef getSQLDate1():\n # VIEW TABLE CONTENT\n try:\n conn = pymysql.connect(host='localhost', user='root', passwd='root', db='wsm01', port=3306,\n charset='utf8')\n except:\n print(\"DATABASE ERROR!\")\n conn.close()\n sys.exit()\n cur = conn.cursor()\n selectSQL1 = 'Select * from inventory'\n cur.execute(selectSQL1)\n for row in cur.fetchall():\n tree1.insert(\"\", 0, text=row[0], values=(row[1], row[2], row[3]))\n cur.close()\n conn.close()\nimport tkinter as tk\nfrom tkinter.constants import *\nfrom tkinter import ttk\nfrom tkinter.constants import *\nroot.title(\"AW108 Inventory Management System DEMO 
\")\nroot.geometry(\"960x490\")\nn=ttk.Notebook(root)\nf1=ttk.Frame(n,height=1024,width=800)\nf2=ttk.Frame(n,height=1024,width=800)\nf3=ttk.Frame(n,height=1024,width=800)\nn.add(f1, text='Product')\nn.add(f2, text='employee')\nn.add(f3, text='inventory')\nn.pack()\nproduct = tk.Label(f1, text=\"Edit the inventory\")\nproduct.pack()\nemployee = tk.Label(f2, text=\"Add employee\")\nemployee.pack()\ninventory = tk.Label(f3, text=\"View the inventory\")\ninventory.pack()\ntree0 = ttk.Treeview(f1) # TABLE VIEW\ntree0[\"columns\"] = (\"store\", \"product\", \"inventory\")\ntree0.column(\"store\", width=150)\ntree0.column(\"product\", width=150)\ntree0.column(\"inventory\", width=150)\ntree0.heading(\"store\", text=\"Store\")\ntree0.heading(\"product\", text=\"Product\")\ntree0.heading(\"inventory\", text=\"Inventory\")\ntree0.pack(side=\"top\")\nbs0 = tk.Button(f1, text=\"SEARCH...\", width=10)\nbs0.bind(\"\", turn_property0) # bind()MOUSE\nbs0.pack(padx=5,pady=5,side=\"top\")\ntop0 = tkinter.tix.Frame(f1, relief=FLAT, bd=5,height=2,width=2)\ntop0.pack(padx=200,pady=0,side='left')\ntop0.sinventory_id = tkinter.tix.LabelEntry(top0, label=\"ID:\", labelside='top',width='200')\ntop0.sinventory_id.pack(side=\"left\")\ntop0.sinventory_id.entry['textvariable'] = sinventory_id\ntop0.sstore_id = tkinter.tix.LabelEntry(top0, label=\"Store:\", labelside='top', )\ntop0.sstore_id.pack(side=\"left\")\ntop0.sstore_id.entry['textvariable'] = sstore_id\ntop0.sproduct_id = tkinter.tix.LabelEntry(top0, label=\"Product:\", labelside='top', )\ntop0.sproduct_id.pack(side=\"left\")\ntop0.sproduct_id.entry['textvariable'] = sproduct_id\ntop0.sinventory = tkinter.tix.LabelEntry(top0, label=\"Inventory:\", labelside='top', )\ntop0.sinventory.pack(side=\"left\")\ntop0.sinventory.entry['textvariable'] = sinventory\nSavebn0 = tk.Button(top0, text=\"Edit ventory\", width=10)\nSavebn0.bind(\"\", turn_save0)\nSavebn0.pack(padx=50,pady=50,side=\"right\")\ntree = ttk.Treeview(f2) # TABLE VIEW\ntree[\"columns\"] = (\"name\", \"ssn\", \"phone\", \"adderss\",\"title\")\ntree.column(\"name\", width=150)\ntree.column(\"ssn\", width=150)\ntree.column(\"phone\", width=150)\ntree.column(\"adderss\", width=150)\ntree.column(\"title\", width=150)\ntree.heading(\"name\", text=\"NAME\")\ntree.heading(\"ssn\", text=\"SSN\")\ntree.heading(\"phone\", text=\"PHONE\")\ntree.heading(\"adderss\", text=\"ADDERSS\")\ntree.heading(\"title\", text=\"TITLE\")\ntree.pack(side=\"top\")\nbs = tk.Button(f2, text=\"SEARCH...\", width=10)\nbs.bind(\"\", turn_property) # bind()MOUSE\nbs.pack(padx=5,pady=5,side=\"top\")\ntop = tkinter.tix.Frame(f2, relief=FLAT, bd=5,height=2,width=2)\ntop.pack(padx=200,pady=0,side='left')\ntop.semployee_id = tkinter.tix.LabelEntry(top, label=\"ID:\", labelside='top',width='200')\ntop.semployee_id.pack(side=\"left\")\ntop.semployee_id.entry['textvariable'] = semployee_id\ntop.sname = tkinter.tix.LabelEntry(top, label=\"NAME:\", labelside='top', )\ntop.sname.pack(side=\"left\")\ntop.sname.entry['textvariable'] = sname\ntop.title = tkinter.tix.LabelEntry(top, label=\"TITLE:\", labelside='top', )\ntop.title.pack(side=\"left\")\ntop.title.entry['textvariable'] = stitle\ntop.phone = tkinter.tix.LabelEntry(top, label=\"PHONE:\", labelside='top', )\ntop.phone.pack(side=\"left\")\ntop.phone.entry['textvariable'] = sphone\ntop.ssn = tkinter.tix.LabelEntry(top, label=\"SSN:\", labelside='top', )\ntop.ssn.pack(side=\"left\")\ntop.ssn.entry['textvariable'] = sSSN\ntop.adderss = tkinter.tix.LabelEntry(top, label=\"adderss:\", 
labelside='top', )\ntop.adderss.pack(side=\"left\")\ntop.adderss.entry['textvariable'] = sadderss\nSavebn = tk.Button(top, text=\"Add new\", width=10)\nSavebn.bind(\"\", turn_save)\nSavebn.pack(padx=50,pady=50,side=\"right\")\ntree1 = ttk.Treeview(f3) # TABLE VIEW\ntree1[\"columns\"] = (\"store\", \"product\", \"inventory\")\ntree1.column(\"store\", width=150)\ntree1.column(\"product\", width=150)\ntree1.column(\"inventory\", width=150)\ntree1.heading(\"store\", text=\"Store\")\ntree1.heading(\"product\", text=\"Product\")\ntree1.heading(\"inventory\", text=\"Inventory\")\ntree1.pack(side=\"top\")\ncomvalue = tkinter.StringVar()\ncomboxlist=ttk.Combobox(f3,textvariable=comvalue)\ncomboxlist[\"values\"]=(\"ALL\",\"APPLE\",\"HUAWEI\")\ncomboxlist.current(0)\ncomboxlist.pack(padx=15,pady=15)\nbs1 = tk.Button(f3, text=\"SEARCH...\", width=10)\nbs1.bind(\"\", turn_property1) # bind()MOUSE\nbs1.pack(padx=5,pady=5,side=\"top\")\nroot.mainloop()\n","sub_path":"CodeFile/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"50867642","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2014 dlilien \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\ndo some stuff, combine all the surf_n_bed stuff later\n\"\"\"\n\nimport os\nimport sys\nfrom demtools.lib import warplib\nfrom demtools.lib import malib\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\n\n\ndef main():\n if len(sys.argv) == 3:\n dem1_fn = sys.argv[1]\n mask_fn = sys.argv[2]\n masked_smoothing(dem1_fn, mask_fn, filt_size=1)\n elif len(sys.argv) == 4:\n dem1_fn = sys.argv[1]\n mask_fn = sys.argv[2]\n res = float(sys.argv[3])\n masked_smoothing(dem1_fn, mask_fn, cutoff=res)\n\n\ndef masked_smoothing(input_fn, mask_fn, filt_size=1, cutoff=-9999.0):\n input_dr, input_f = os.path.split(input_fn)\n input_pref = os.path.splitext(input_f)[0]\n mask_dr, mask_f = os.path.split(mask_fn)\n mask_pref = os.path.splitext(mask_f)[0]\n mask_raster_fn = mask_dr + '/' + mask_pref + '.tif'\n print('Rasterizing Mask')\n os.system('gdal_rasterize -a id -tr 1000.0 1000.0 -l ' +\n mask_pref + ' ' + mask_fn + ' ' + mask_raster_fn)\n dem_ds_full, mask_ds_full = warplib.memwarp_multi_fn(\n [input_fn, mask_raster_fn], res='first', extent='first')\n mask_full = malib.ds_getma(mask_ds_full)\n dem_ma_full = malib.ds_getma(dem_ds_full)\n\n # make some more masks so we can do some gradual smoothing\n small_mask = mask_full.mask.astype(int)\n small_mask = gaussian_filter(small_mask.astype(float), 3)\n large_mask = mask_full.mask.astype(int)\n large_mask = gaussian_filter(large_mask.astype(float), 3)\n small_mask[small_mask > 0.02] = 1\n small_mask[small_mask != 1] = 0\n large_mask[large_mask < 0.98] = 0\n large_mask[large_mask != 0] = 1\n\n print('Smoothing input image')\n very_smooth = gaussian_filter(dem_ma_full, 3 * filt_size)\n smooth = gaussian_filter(dem_ma_full, 2 * filt_size)\n coarsish = gaussian_filter(dem_ma_full, 1 * filt_size)\n # unsmooth_mask=np.logical_or(dem_ma_full.mask,np.logical_not(large_mask))\n coarsish_mask = np.logical_or(large_mask, np.logical_not(mask_full.mask))\n smooth_mask = np.logical_or(mask_full.mask, np.logical_not(small_mask))\n very_smooth = very_smooth * np.logical_not(small_mask).astype(int)\n smooth = smooth * np.logical_not(smooth_mask).astype(int)\n coarsish = coarsish * np.logical_not(coarsish_mask).astype(int)\n\n if not 
np.any(dem_ma_full.mask):\n        dem_ma_full.mask = np.zeros(dem_ma_full.shape)\n        store_mask = False\n    else:\n        print('Saving original mask')\n        store_mask = np.copy(dem_ma_full.mask)\n    dem_ma_full = dem_ma_full * \\\n        coarsish_mask.astype(int) * mask_full.mask.astype(int)\n    if np.any(store_mask):\n        dem_ma_full.mask = np.copy(store_mask)\n    dem_ma_full.mask[dem_ma_full < cutoff] = True\n    dem_ma_full = smooth + very_smooth + coarsish + dem_ma_full\n    dem_ma_full.mask[dem_ma_full < cutoff] = True\n    dst_fn = input_dr + '/' + input_pref + '_smoothed.tif'\n    print('Writing Smoothed Tiff ' + dst_fn)\n    malib.writeGTiff(dem_ma_full, dst_fn, dem_ds_full, bnum=1)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"modeltools/bin/masked_smoothing.py","file_name":"masked_smoothing.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"343177531","text":"import urllib.request\nimport json\nimport logging\nimport pandas as pd\nimport calendar\nimport time\nimport urllib.error\nfrom modules.parameters import TravelTimeConstants\n\nlogger = logging.getLogger(__name__)\n\napi_key = TravelTimeConstants.GOOGLE_DISTANCE_MATRIX_API_KEY\n\n\nclass TravelTimeCalculator(object):\n    def __init__(self, csv_filename):\n        self.df = pd.read_csv(csv_filename)\n\n        logger.info('loaded %s as pandas dataframe' % csv_filename)\n        logger.info('columns: %s' % self.df.columns)\n        self.clean_bad_values('not specified', 'lat')\n\n    def calculate_travel_time(self, start_lat_col, start_lon_col, end_lat, end_lon, output_col,\n                              departure_time, mode):\n        self.df[output_col] = self.df.apply(lambda x: self._travel_time_function(x[start_lat_col], x[start_lon_col],\n                                                                                 end_lat, end_lon,\n                                                                                 departure_time, mode), axis=1)\n        return self.df\n\n    def _travel_time_function(self, start_lat, start_lon, end_lat, end_lon, departure_time, mode):\n        start_coords = str(start_lat)+','+str(start_lon)\n        end_coords = str(end_lat) + ',' + str(end_lon)\n        depart_time = calendar.timegm(time.strptime(departure_time, '%Y/%m/%d %H:%M:%S'))\n        logger.info('routing between %s and %s, departing at: %s' % (start_coords, end_coords, departure_time))\n        full_url = (\n            \"https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=%s&destinations=%s\"\n            \"&mode=%s&departure_time=%s&key=%s\" %\n            (start_coords, end_coords, mode, depart_time, api_key))\n        logger.info('calling: %s' % full_url)\n        try:\n            result = urllib.request.urlopen(full_url)\n        except (urllib.error.HTTPError, urllib.error.URLError) as e:\n            logger.error(\"ERROR({0}): {1}\".format(e.errno, e.strerror))\n            return 'ERROR'\n        response = json.load(result)\n        if response['status'] == \"OVER_QUERY_LIMIT\":\n            logger.error('OVER QUERY LIMIT')\n        else:\n            try:\n                seconds = response['rows'][0]['elements'][0]['duration']['value']\n                minutes = round(seconds / 60, 0)\n                logger.info('travel time calculated: %s minutes' % minutes)\n                return minutes\n            except KeyError:\n                logger.info('could not route! 
%s, %s to %s, %s' % (start_lat, start_lon, end_lat, end_lon))\n return None\n\n def clean_bad_values(self, bad_value_string, column):\n logger.info('removing rows from column %s where values equals: %s' % (column, bad_value_string))\n rows_before = len(self.df)\n self.df = self.df[self.df[column] != bad_value_string]\n rows_after = len(self.df)\n logger.info('%s results removed' % (rows_before - rows_after))\n\n def save_output(self, output_csvname):\n self.df.to_csv(output_csvname, index=False)\n","sub_path":"modules/traveltime.py","file_name":"traveltime.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"232419645","text":"import os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# The GPU id to use, usually either \"0\" or \"1\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n\nimport numpy as np\nfrom modules.DataGenerator import DataGenerator\nfrom keras.layers import Dense, Flatten\nfrom keras.models import Model\n\nfrom keras.applications.densenet import DenseNet121\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping, TensorBoard\nfrom modules.utils import extract_frames, read_data, read_labels, data_generator_from_labels, read_from_file\n\nif __name__ == '__main__':\n \"\"\"\n Train DenseNet121\n \"\"\"\n\n model_name = 'DenseNet_1'\n\n lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6, mode='auto')\n early_stopper = EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=10)\n tensor_board = TensorBoard('log/' + model_name)\n\n # path to the directory storing data and labels\n # at 1fps sample rate:\n # data size: 14.4GB\n # number of frames in total: 184578\n local_path = '/Users/seanxiang/data/cholec80/'\n local_train_path = '/Users/seanxiang/data/cholec80/train_frames/'\n local_vali_path = '/Users/seanxiang/data/cholec80/vali_frames/'\n local_label_path = '/Users/seanxiang/data/cholec80/phase_annotations/'\n local_train_pair = '/Users/seanxiang/data/cholec80/train_labels/labels.txt'\n local_vali_pair = '/Users/seanxiang/data/cholec80/vali_labels/labels.txt'\n\n remote_path = '/home/cxia8134/data/'\n remote_train_path = '/home/cxia8134/data/train_frames/'\n remote_vali_path = '/home/cxia8134/data/vali_frames/'\n remote_label_path = '/home/cxia8134/data/phase_annotations/'\n remote_train_pair = '/home/cxia8134/data/old_labels/1-41.txt'\n remote_vali_pair = '/home/cxia8134/data/old_labels/41-51.txt'\n\n train_folder = 'train_frames'\n vali_folder = 'vali_frames'\n\n # extracting frames from videos at 1fps\n fps = 25\n # extract_frames(data_path, 25)\n\n batch_size = 32\n nb_classes = 7\n nb_epoch = 200\n input_height, input_width = 224, 224\n input_channels = 3\n n_train = 86344\n n_vali = 21108\n\n # train_pair = read_from_file(local_train_pair)\n # vali_pair = read_from_file(local_vali_pair)\n train_pair = read_from_file(remote_train_pair)\n vali_pair = read_from_file(remote_vali_pair)\n\n # testx, testy = data_generator_test(train_pair, nb_classes, batch_size)\n\n # unordered data generator\n train_idx = np.array(len(train_pair))\n vali_idx = np.array(len(vali_pair))\n train_generator = data_generator_from_labels(train_pair, train_idx, nb_classes, batch_size)\n vali_generator = data_generator_from_labels(vali_pair, vali_idx, nb_classes, batch_size)\n\n # ordered data generator\n # train_generator = DataGenerator(train_pair, nb_classes, batch_size)\n # vali_generator = DataGenerator(vali_pair, nb_classes, batch_size)\n\n # 
define model structure\n # output feature vector of length 2048 for each frame\n model = DenseNet121(include_top=False,\n weights='imagenet',\n input_shape=(input_height, input_width, input_channels),\n pooling='avg')\n\n # adding classification layer\n last = model.output\n x = Dense(units=nb_classes, activation='softmax', name='fc1')(last)\n\n fine_tuned_model = Model(model.input, x)\n\n fine_tuned_model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n fine_tuned_model.summary()\n\n # create custom generator\n fine_tuned_model.fit_generator(generator=train_generator,\n steps_per_epoch=(n_train // batch_size),\n epochs=nb_epoch,\n validation_data=vali_generator,\n validation_steps=(n_vali // batch_size),\n verbose=1,\n use_multiprocessing=True,\n workers=6,\n max_queue_size=16,\n callbacks=[lr_reducer, early_stopper, tensor_board])\n\n fine_tuned_model.save('trained/' + model_name + '.h5')\n","sub_path":"denseNet.py","file_name":"denseNet.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"343641667","text":"\nimport os\n\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.mysql_operator import MySqlOperator\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.operators.python_operator import PythonOperator, BranchPythonOperator\nfrom airflow.hooks.mysql_hook import MySqlHook\n\nfrom datetime import datetime, timedelta\n\n\nclass ReturningSingleMySqlOperator(MySqlOperator):\n def execute(self, context):\n self.log.info('Executing: %s', self.sql)\n hook = MySqlHook(mysql_conn_id=self.mysql_conn_id, schema=self.database)\n return hook.get_first(self.sql, parameters=self.parameters)\n\n\nclass ReturningMultipleMySqlOperator(MySqlOperator):\n def execute(self, context):\n self.log.info('Executing: %s', self.sql)\n hook = MySqlHook(mysql_conn_id=self.mysql_conn_id, schema=self.database)\n return hook.get_records(self.sql, parameters=self.parameters)\n\n\nclass CSVFilesPresentSensor(BaseSensorOperator):\n def __init__(self, *args, local_path=\"\", **kwargs):\n super(CSVFilesPresentSensor, self).__init__(*args, soft_fail=True, **kwargs)\n self.local_path = local_path\n\n def poke(self, context):\n print(\"Checking local path \" + self.local_path)\n return len(select_files_from_fs(self.local_path)) > 0\n\n\ndef select_files_from_fs(local_path, **kwargs):\n files = [os.path.join(local_path, f) for f in os.listdir(local_path) if f.endswith(\".csv\")]\n\n print(\"Selected files \" + str(files))\n return files\n\n\ndef backup_files_on_fs(backup_path, **kwargs):\n selected_files = kwargs['ti'].xcom_pull(task_ids='select_files')\n for selected_file in selected_files:\n filename = os.path.basename(selected_file)\n os.rename(selected_file, os.path.join(backup_path, filename))\n\n\ndef bulk_load_sql(table_name, **kwargs):\n\n selected_files = kwargs['ti'].xcom_pull(task_ids='select_files')\n conn = MySqlHook(mysql_conn_id='telmetry_mysql')\n\n import pandas\n for selected_file in selected_files:\n df = pandas.read_csv(selected_file, sep=\",\", decimal=\".\", encoding='utf-8')\n\n df['wheel'] = df['wheel'].str[2:4]\n df['action'] = df['action'].str[2:4]\n\n connection = conn.get_conn()\n try:\n cursor = connection.cursor()\n sql = \"insert into \" + table_name + \" (\" + \",\".join([str(f) for f in df]) + \") values (\" + \",\".join([\"%s\"] * len(df.columns)) + \")\"\n print(\"SQL 
statement is \" + sql)\n for index, row in df.iterrows():\n values = [row[c] for c in df]\n print(\"inserting values \" + str(values))\n cursor.execute(sql, values)\n connection.commit()\n finally:\n connection.close()\n\n return table_name\n\n\ndef remove_dull_wheel_data(wheel_name, **kwargs):\n selected_wheel_records = kwargs['ti'].xcom_pull(task_ids='select_new_wheel_' + wheel_name + '_data')\n print(str(selected_wheel_records))\n # and now iterate over records and select those we don't want so we can remove them...\n\n\ndef select_unprocessed_wheel_data(wheel_name):\n return \"SELECT * FROM wheel_steer WHERE wheel = '\" + wheel_name + \"' AND timestamp >= (SELECT IFNULL(MAX(end_timestamp), 0) FROM wheel_steer_interest)\"\n\n\ndefault_args = {\n 'owner': 'daniel',\n 'start_date': datetime.combine(datetime.today() - timedelta(1), datetime.min.time()),\n 'depends_on_past': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5)\n}\n\nwith DAG('telemtry-csv-to-mysql', default_args=default_args, schedule_interval=\"@daily\") as dag:\n\n check_files_present = CSVFilesPresentSensor(\n task_id='check_files_present',\n local_path=\"/home/jovyan/telemetry-logs/\"\n )\n\n select_files = PythonOperator(\n task_id='select_files',\n python_callable=select_files_from_fs,\n provide_context=True,\n op_kwargs={'local_path': \"/home/jovyan/telemetry-logs/\"}\n )\n\n process_files = PythonOperator(\n task_id='process_files',\n python_callable=bulk_load_sql,\n provide_context=True,\n op_kwargs={'table_name': \"wheel_steer\"}\n )\n\n backup_files = PythonOperator(\n task_id='backup_files',\n python_callable=backup_files_on_fs,\n provide_context=True,\n op_kwargs={'backup_path': \"/home/jovyan/telemetry-logs/backup/\"}\n )\n\n check_files_present >> select_files >> process_files >> backup_files\n\n for wheel_name in ['fl', 'fr', 'br', 'bl']:\n select_new_wheel_data = ReturningMultipleMySqlOperator(\n task_id='select_new_wheel_' + wheel_name + '_data',\n sql=select_unprocessed_wheel_data(wheel_name),\n mysql_conn_id='telmetry_mysql'\n )\n process_files >> select_new_wheel_data\n\n remove_wheel_data = PythonOperator(\n task_id='remove_dull_wheel_' + wheel_name + '_data',\n python_callable=remove_dull_wheel_data,\n provide_context=True,\n op_kwargs={'wheel_name': wheel_name}\n )\n select_new_wheel_data >> remove_wheel_data\n","sub_path":"apache-airflow/dags/telemtry-csv-to-mysql.py","file_name":"telemtry-csv-to-mysql.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"111978236","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/common_v38_0\"\n\n\n@dataclass\nclass TypeNonAirReservationRef7:\n class Meta:\n name = \"typeNonAirReservationRef\"\n\n locator_code: None | str = field(\n default=None,\n metadata={\n \"name\": \"LocatorCode\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_length\": 5,\n \"max_length\": 8,\n }\n )\n","sub_path":"travelport/models/type_non_air_reservation_ref_7.py","file_name":"type_non_air_reservation_ref_7.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"406855109","text":"import pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--data\", action=\"store\", default=\"./data/supplemental.yml\", help=\"which supplemental data file to use\"\n )\n parser.addoption(\n 
\"--release\", action=\"store\", default=\"stable\", help=\"which release to test ('stable', 'beta', or 'all')\"\n )\n parser.addoption(\"--app\", action=\"store\", help=\"test only a single app\")\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"133119342","text":"import psycopg2\r\nimport psycopg2.errorcodes\r\nimport csv\r\nimport datetime\r\nimport itertools\r\n\r\n\r\ndb_name = \"postgres\"\r\ndb_host = \"127.0.0.1\"\r\ndb_port = \"5432\"\r\nuser_name = \"postgres\"\r\npassword_text = \"12zx34\"\r\n\r\n\r\ndef input_config():\r\n dbname = input(\"Input database name: \")\r\n dbhost = input(\"Input database host: \")\r\n dbport = input(\"Input database port: \")\r\n user = input(\"Input user name: \")\r\n pswd = input(\"Input password: \")\r\n return dbname, dbhost, dbport, user, pswd\r\n\r\n\r\ndef connect_db():\r\n conn = psycopg2.connect(database=db_name, user=user_name, password=password_text,\r\n host=db_host, port=db_port)\r\n cur = conn.cursor()\r\n return conn, cur\r\n\r\n\r\ndef reconnect_db(log):\r\n print(\"Падіння бази даних. Очікування відновлення.\")\r\n log.write(str(datetime.datetime.now()) + \" - З'єднання перервано\\n\")\r\n connection_restored = False\r\n while not connection_restored:\r\n try:\r\n conn, cur = connect_db()\r\n log.write(str(datetime.datetime.now()) + \" - З'єднання відновлено\\n\")\r\n connection_restored = True\r\n except psycopg2.OperationalError as e:\r\n pass\r\n print(\"З'єднання відновлено.\")\r\n return conn, cur\r\n\r\n\r\ndef create_table(filename, conn, cur):\r\n row = []\r\n with open(filename, \"r\", encoding=\"cp1251\") as csv_file:\r\n header = csv_file.readline().split(';')\r\n row = [s.strip('\"') for s in header]\r\n row[-1] = row[-1].rstrip('\"\\n')\r\n csv_file.close()\r\n # формуємо запит для створення колонок таблиці\r\n columns = \"\\n\\tYear INT,\"\r\n for s in row:\r\n # тип поля 'рік народження' - ціле число\r\n if s == 'Birth':\r\n columns += '\\n\\t' + s + ' INT,'\r\n # тип поля з оцінками - дійсне число\r\n elif 'Ball' in s:\r\n columns += '\\n\\t' + s + ' REAL,'\r\n # поле 'outid' головний ключ таблиці\r\n elif s == 'OUTID':\r\n columns += '\\n\\t' + s + ' VARCHAR(40) PRIMARY KEY,'\r\n # всі інші поля створюємо текстовими\r\n else:\r\n columns += '\\n\\t' + s + ' VARCHAR(255),'\r\n # сам запит на створення таблиці\r\n create_table_query = '''CREATE TABLE IF NOT EXISTS ZNO_table (''' + columns.rstrip(',') + '\\n);'\r\n cur.execute(create_table_query)\r\n conn.commit()\r\n return conn, cur, row\r\n\r\n# запис даних у таблицю\r\ndef insert_from_file(filename, header, year, conn, cur, log):\r\n start_time = datetime.datetime.now()\r\n log.write(str(start_time) + \" - Відкрито файл \" + filename + '\\n')\r\n with open(filename, \"r\", encoding=\"cp1251\") as csv_file:\r\n print(\"Зчитування файлу \" + filename)\r\n csv_reader = csv.DictReader(csv_file, delimiter=';')\r\n lines = 0\r\n max_lines = 100\r\n inserted_all = False\r\n while not inserted_all:\r\n try:\r\n query = '''INSERT INTO ZNO_table (year, ''' + ', '.join(header) + ') VALUES '\r\n count = 0\r\n for row in csv_reader:\r\n count += 1\r\n for s in row:\r\n if row[s] == 'null':\r\n pass\r\n elif 'ball' in s.lower():\r\n row[s] = row[s].replace(',', '.')\r\n elif ('ball' not in s.lower()) and (s.lower() != 'birth'):\r\n row[s] = \"'\" + row[s].replace(\"'\", \"''\") + \"'\"\r\n query += '\\n\\t(' + str(year) + ', ' + 
','.join(row.values()) + '),'\r\n if count == max_lines:\r\n count = 0\r\n query = query.rstrip(',') + ';'\r\n cur.execute(query)\r\n conn.commit()\r\n lines += 1\r\n query = '''INSERT INTO ZNO_table (year, ''' + ', '.join(header) + ') VALUES '\r\n if count != 0:\r\n query = query.rstrip(',') + ';'\r\n cur.execute(query)\r\n conn.commit()\r\n inserted_all = True\r\n # if the connection is lost\r\n except psycopg2.OperationalError as e:\r\n if e.pgcode == psycopg2.errorcodes.ADMIN_SHUTDOWN:\r\n conn, cur = reconnect_db(log)\r\n csv_file.seek(0, 0)\r\n csv_reader = itertools.islice(csv.DictReader(csv_file, delimiter=';'),\r\n lines * max_lines, None)\r\n end_time = datetime.datetime.now()\r\n log.write(str(end_time) + \" - File read\\n\")\r\n log.write('Elapsed time - ' + str(end_time - start_time) + '\\n\\n')\r\n return conn, cur\r\n\r\n\r\ndef write_result(result_file, conn, cur):\r\n print(\"Executing query\")\r\n query = '''\r\n SELECT REGNAME, Year, min(histBall100) \r\n FROM ZNO_table\r\n WHERE histTestStatus = 'Зараховано' \r\n GROUP BY REGNAME, Year;'''\r\n cur.execute(query)\r\n with open(result_file, 'w', newline='', encoding=\"cp1251\") as csv_file:\r\n csv_writer = csv.writer(csv_file)\r\n # Save the headers\r\n csv_writer.writerow(['Region', 'Year', 'Worst score in History of Ukraine'])\r\n # Save the query results\r\n for row in cur:\r\n csv_writer.writerow(row)\r\n return conn, cur\r\n\r\n\r\nlogs_file = open('logs.txt', 'w')\r\ndb_name, db_host, db_port, user_name, password_text = input_config()\r\nconnect, cursor = connect_db()\r\ncursor.execute('DROP TABLE IF EXISTS ZNO_table;')\r\nconnect.commit()\r\n# Create the table\r\nconnect, cursor, headline = create_table('Odata2020File.csv', connect, cursor)\r\n# Read data from the files and write it into the table\r\nconnect, cursor = insert_from_file(\"Odata2019File.csv\", headline, 2019, connect, cursor, logs_file)\r\nconnect, cursor = insert_from_file(\"Odata2020File.csv\", headline, 2020, connect, cursor, logs_file)\r\n# Run the query and save the result to a file\r\nconnect, cursor = write_result('result.csv', connect, cursor)\r\n# Close the connection\r\ncursor.close()\r\nconnect.close()\r\nlogs_file.close()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"512811861","text":"# File download\n\nimport os\nfrom selenium import webdriver\n\n'''firefox\nSetting browser.download.folderList to 0 downloads files to the browser's default download path; setting it to 2 downloads them to a specified directory\nbrowser.download.dir specifies the directory for downloaded files. os.getcwd() returns the current file's location, i.e. the directory where downloaded files are saved.\nSpecify the file type to download, i.e. the Content-type value\n\"binary/octet-stream\" denotes a binary file\n'''\nfp = webdriver.FirefoxProfile()\nfp.set_preference(\"browser.download.folderList\", 2)\nfp.set_preference(\"browser.download.manager.showWhenStarting\", False)\nfp.set_preference(\"browser.download.dir\", os.getcwd())\nfp.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \"application/octet-stream\")\nbrowser = webdriver.Firefox(firefox_profile=fp)\nbrowser.get(\"http://pypi.python.org/pypi/selenium\")\nbrowser.find_element_by_partial_link_text(\"selenium-2\").click()\n\n'''chrome\ndownload.default_directory sets the file download directory\n'''\noptions = webdriver.ChromeOptions()\nprefs = {'profile.default_content_settings.popups': 0,\n 'download.default_directory': os.getcwd()}\noptions.add_experimental_option('prefs', prefs)\n\ndriver = 
webdriver.Chrome(chrome_options=options)\ndriver.get(\"https://pypi.org/project/selenium/#files\")\ndriver.find_element_by_partial_link_text(\"selenium-3.141.0.tar.gz\").click()\n","sub_path":"edu_selenium/test017_文件下载.py","file_name":"test017_文件下载.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"467126736","text":"from django.test import TestCase\nfrom home.views import get_index\nfrom django.core.urlresolvers import resolve\nfrom django.shortcuts import render_to_response\nfrom .models import Subject\n\n\nclass HomePageTest(TestCase):\n def test_home_page_resolves(self):\n home_page = resolve('/')\n self.assertEqual(home_page.func, get_index)\n\n def test_check_content_is_correct(self):\n home_page = self.client.get('/')\n self.assertTemplateUsed(home_page, \"index.html\")\n home_page_template_output = render_to_response(\"../home/templates/../templates/index.html\").content\n self.assertEqual(home_page.content, home_page_template_output)\n\n\nclass SubjectPageTest(TestCase):\n\n def test_check_content_is_correct(self):\n subject_page = self.client.get('/forum/')\n self.assertTemplateUsed(subject_page, \"patients.html\")\n subject_page_template_output = render_to_response(\"patients.html\",\n {'subjects':\n Subject.objects.all()}).content\n self.assertEqual(subject_page.content, subject_page_template_output)","sub_path":"threads/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"628537156","text":"import tkinter\n\nfrom thonny.plugins.paren_matcher import ParenMatcher\n\nTEST_STR1 = \"\"\"age = int(input(\"Enter age: \"))\nif age > 18:\n l = [\"H\", \"I\"]\n print(l)\nelse:\n print(\"Hello!\", end='')\n print(\"What's your name?\")\n\"\"\"\n\n\ndef test_regular_closed():\n insert_pos_groups = ((\"1.9\", \"1.10\", \"1.13\", \"1.31\"),\n (\"1.30\", \"1.29\", \"1.25\", \"1.15\"))\n expected_indices = ((\"1.9\", \"1.30\"),\n (\"1.15\", \"1.29\"))\n\n text_widget = tkinter.Text()\n text_widget.insert(\"end\", TEST_STR1)\n\n matcher = ParenMatcher(text_widget)\n matcher.text = text_widget\n for i, group in enumerate(insert_pos_groups):\n for insert_pos in group:\n text_widget.mark_set(\"insert\", insert_pos)\n\n actual = matcher.find_surrounding(\"1.0\", \"end\")\n expected = expected_indices[i]\n\n assert actual == expected, \"\\nExpected: %s\\nGot: %s\" % (expected, actual)\n print(\"\\rPassed %d of %d\" % (i+1, len(insert_pos_groups)), end=\"\")\n\n\ndef run_tests():\n test_regular_closed()\n\nif __name__ == \"__main__\":\n print(\"Test input: \")\n print(TEST_STR1)\n run_tests()\n","sub_path":"tests/plugin_tests/paren_matcher_tests.py","file_name":"paren_matcher_tests.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"151397014","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.constants as constant\npi = constant.pi\ndef gauss(x,a,b):\n return (1.0/np.sqrt(2.0*pi)/b)*np.exp(-0.5*((x-a)/b)**2)\n\ndef score(x):\n score = gauss(x,2.0,1.0)/gauss(2.0,2.0,1.0)\n score = score*100.0\n if score< 1.0:\n score = 1.0\n return score\nt,t1 = [0.0,10.0]\ny_bad = []\ny_bad_1 = []\ny_bad_2 = []\ny_bad_3 = []\ny_good = []\nrandomx = []\nmu_bad = 3.0\nsigma_bad = 1.5\nmu_good = 2.0\nsigma_good = 0.5\nfor i in range(0,100):\n x_bad = 
np.random.normal(mu_bad, sigma_bad, 30)\n x_bad_1 = np.random.normal(mu_bad, sigma_bad, 30)\n x_bad_2 = np.random.normal(mu_bad, sigma_bad, 30)\n x_bad_3 = np.random.normal(mu_bad, sigma_bad, 30)\n x_good = np.random.normal(mu_good, sigma_good, 30)\n #randomx.append(np.mean(x_bad))\n y_good.append(score(np.mean(x_good)))\n y_bad.append(score(np.mean(x_bad)))\n y_bad_1.append(score(np.mean(x_bad_1)))\n y_bad_2.append(score(np.mean(x_bad_2)))\n y_bad_3.append(score(np.mean(x_bad_3)))\n#plt.hist(y,bins=20)\nnumbins = np.linspace(0.0,100.0,20)\n#plt.hist(y_good,bins=numbins,color='blue',alpha=0.5)\nplt.hist(y_bad,bins=numbins,color='red',alpha=0.5)\nplt.hist(y_bad_1,bins=numbins,color='blue',alpha=0.5)\nplt.hist(y_bad_2,bins=numbins,color='green',alpha=0.5)\nplt.hist(y_bad_3,bins=numbins,color='yellow',alpha=0.5)\nplt.show()\n\ny_bad = np.sort(y_bad)\ny_bad_1 = np.sort(y_bad_1)\ny_bad_2 = np.sort(y_bad_2)\ny_bad_3 = np.sort(y_bad_3)\ndifferences = []\ndifferences1 = []\ndifferences2 = []\nfor i in range(0,len(y_bad)):\n differences.append(abs(y_bad[i]-y_bad_1[i]))\n differences1.append(abs(y_bad[i]-y_bad_2[i]))\n differences2.append(abs(y_bad[i]-y_bad_3[i]))\n \nplt.hist(differences,alpha=0.5)\nplt.hist(differences1,color='red',alpha=0.5)\nplt.hist(differences2,color='green',alpha=0.5)\nplt.show()","sub_path":"code/test_scoring.py","file_name":"test_scoring.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"301021391","text":"import tkinter\n\nclass GeeksBro:\n \n def __init__(self, window):\n\n self.text_btn = tkinter.Button(window, text='Click Me!', command=self.say_hi)\n self.text_btn.pack()\n\n self.close_btn = tkinter.Button(window, text='Exit', command=self.close)\n self.close_btn.pack()\n\n def say_hi(self):\n tkinter.Label(window, text='Welcome', font='', fg='green').pack()\n\n def close(self):\n exit()\n\nwindow = tkinter.Tk()\nwindow.title(\"GUI-Classes\")\nwindow.geometry('800x600')\n\nwe_geek = GeeksBro(window)\n\n\nwindow.mainloop()","sub_path":"GUI/GUI_classes.py","file_name":"GUI_classes.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"72128984","text":"import datetime\nimport hashlib\nimport copy\nimport httplib, urllib, urlparse\nimport collections\n\n__author__ = 'peter.liu@xiaoi.com'\n\n\nclass AskParams:\n def __init__(self, platform=\"\", user_id=\"\", url=\"\", response_format=\"json\"):\n self.platform = platform\n self.user_id = user_id\n self.url = url\n self.response_format = response_format\n\n def __str__(self):\n return \"platform:\" + self.platform + \"\\n\" + \\\n \"user_id:\" + self.user_id + \"\\n\" + \\\n \"url:\" + self.url + \"\\n\" + \\\n \"format:\" + self.response_format\n\n\nclass AskSession:\n def __init__(self, signature, params):\n if not isinstance(signature, IBotSignature):\n raise TypeError(\"signature should be IBotSignature\")\n\n if not isinstance(params, AskParams):\n raise TypeError(\"params should be AskParams\")\n\n self.signature = copy.copy(signature)\n self.params = copy.copy(params)\n\n def get_answer(self, question):\n http_params = urllib.urlencode({'question': question,\n 'format': self.params.response_format,\n 'platform': self.params.platform,\n 'userId': self.params.user_id})\n\n xauth = self.signature.get_http_header_xauth()\n\n http_headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"text/plain\",\n 
xauth.keys()[0]: xauth.values()[0]}\n\n url = urlparse.urlparse(self.params.url)\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"POST\", url.path, http_params, http_headers)\n response = conn.getresponse()\n\n ret = collections.namedtuple(\"get_answer_return\", \"http_status http_body\")\n\n ret.http_body = response.read()\n ret.http_status = response.status\n\n conn.close()\n\n return ret\n\n\nclass RegParams:\n def __init__(self, url=\"\"):\n self.aue = \"\"\n self.txe = \"\"\n self.auf = \"\"\n self.url = url\n\n self.setup_for_speex_wb()\n\n def setup_for_speex_wb(self):\n self.aue = \"speex-wb;7\"\n self.txe = \"utf-8\"\n self.auf = \"audio/L16;rate=16000\"\n\n def setup_for_speex_nb(self):\n self.aue = \"speex-nb;7\"\n self.txe = \"utf-8\"\n self.auf = \"audio/L16;rate=16000\"\n\n def __str__(self):\n return \"aue:\" + self.aue + \"\\n\" + \\\n \"txe:\" + self.txe + \"\\n\" + \\\n \"auf:\" + self.auf + \"\\n\" + \\\n \"url:\" + self.url\n\n\nclass RegSession:\n def __init__(self, signature, params):\n if not isinstance(signature, IBotSignature):\n raise TypeError(\"signature should be IBotSignature\")\n\n if not isinstance(params, RegParams):\n raise TypeError(\"params should be RegParams\")\n\n self.signature = copy.copy(signature)\n self.params = copy.copy(params)\n\n def get_reg_result(self, speex_data):\n xauth = self.signature.get_http_header_xauth()\n\n http_headers = {\"Content-type\": \"application/audio\",\n \"Accept\": \"text/plain\",\n \"X-AUE\": self.params.aue,\n \"X-TXE\": self.params.txe,\n \"X-AUF\": self.params.auf,\n xauth.keys()[0]: xauth.values()[0]}\n\n url = urlparse.urlparse(self.params.url)\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"POST\", url.path, speex_data, http_headers)\n response = conn.getresponse()\n\n ret = collections.namedtuple(\"get_reg_result_return\", \"http_status http_body\")\n\n ret.http_body = response.read()\n ret.http_status = response.status\n\n conn.close()\n\n return ret\n\n\nclass TTSParams(RegParams):\n def __init__(self, url=\"\"):\n RegParams.__init__(self, url)\n\n\nclass TTSSession:\n def __init__(self, signature, params):\n if not isinstance(signature, IBotSignature):\n raise TypeError(\"signature should be IBotSignature\")\n\n if not isinstance(params, TTSParams):\n raise TypeError(\"params should be TTSParams\")\n\n self.signature = copy.copy(signature)\n self.params = copy.copy(params)\n\n def get_tts_result(self, tts_string):\n xauth = self.signature.get_http_header_xauth()\n\n http_headers = {\"Content-type\": \"text/plain\",\n \"X-AUE\": self.params.aue,\n \"X-TXE\": self.params.txe,\n \"X-AUF\": self.params.auf,\n xauth.keys()[0]: xauth.values()[0]}\n\n url = urlparse.urlparse(self.params.url)\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"POST\", url.path, tts_string, http_headers)\n response = conn.getresponse()\n\n ret = collections.namedtuple(\"get_tts_result_return\", \"http_status http_body\")\n\n ret.http_body = response.read()\n ret.http_status = response.status\n\n conn.close()\n\n return ret\n\n\nclass IBotSignature:\n \"\"\"\n It's about iBotCloud signature stuff\n \"\"\"\n\n def __init__(self, app_key, app_sec, uri, http_method=\"POST\", realm=\"xiaoi.com\"):\n self.app_key = app_key\n self.app_sec = app_sec\n self.uri = uri\n self.http_method = http_method.upper()\n self.realm = realm\n\n def get_signature(self):\n time_str = str(datetime.datetime.now())\n nonce = hashlib.sha1(time_str).hexdigest()\n\n HA1 = \"{0}:{1}:{2}\".format(self.app_key, self.realm, 
self.app_sec)\n HA1 = hashlib.sha1(HA1).hexdigest()\n\n HA2 = \"{0}:{1}\".format(self.http_method, self.uri)\n HA2 = hashlib.sha1(HA2).hexdigest()\n\n signature = \"{0}:{1}:{2}\".format(HA1, nonce, HA2)\n signature = hashlib.sha1(signature).hexdigest()\n\n # print \"signature:\" + signature\n # print \"nonce:\" + nonce\n ret = collections.namedtuple(\"get_signature_reture\", \"signature nonce\")\n\n ret.signature = signature\n ret.nonce = nonce\n\n return ret\n\n def get_http_header_xauth(self):\n ret_vals = self.get_signature()\n\n ret = {'X-Auth': \"app_key=\\\"{0}\\\",nonce=\\\"{1}\\\",signature=\\\"{2}\\\"\".format(self.app_key,\n ret_vals.nonce,\n ret_vals.signature)}\n\n return ret\n\n\n\n\n","sub_path":"xiaoi/ibotcloud.py","file_name":"ibotcloud.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"190360786","text":"\"\"\" Advent of code 2017 day 19/1 \"\"\"\nfrom argparse import ArgumentParser\n\nclass Maze(object):\n \"\"\" Maze representation \"\"\"\n def __init__(self, data):\n self.data = self.read_data(data)\n self.p_x = self.data[0].index('|')\n self.p_y = 0\n self.width = len(self.data[0])\n self.height = len(self.data)\n self.directions = {\n 's': (1, 0),\n 'n': (-1, 0),\n 'e': (0, 1),\n 'w': (0, -1)\n }\n self.turns = {\n 's': ['e', 'w'],\n 'n': ['e', 'w'],\n 'e': ['n', 's'],\n 'w': ['n', 's']\n }\n self.dir = 's'\n self.turn = False\n\n @staticmethod\n def read_data(data):\n \"\"\" REad the data from the input \"\"\"\n return data.split('\\n')\n\n def move(self):\n \"\"\" Move the pointer forward and return the next character \"\"\"\n if not self.turn:\n n_y, n_x = self.directions[self.dir]\n self.p_y += n_y\n self.p_x += n_x\n if self.p_y < self.height and self.p_x < self.width and self.p_y >= 0 and self.p_x >= 0:\n next_char = self.data[self.p_y][self.p_x]\n if next_char == '+':\n self.turn = True\n elif next_char == ' ':\n next_char = None\n else:\n next_char = None\n else:\n self.turn = False\n next_char, self.dir = self.check_turn()\n n_y, n_x = self.directions[self.dir]\n self.p_y += n_y\n self.p_x += n_x\n return next_char\n\n def check_turn(self):\n \"\"\" Check which direction can be used to turn to \"\"\"\n directions = self.turns[self.dir]\n next_char = None\n for next_dir in directions:\n condition, next_char = self.check_dir(next_dir)\n if condition:\n break\n else:\n next_dir = None\n return next_char, next_dir\n\n def check_dir(self, direction):\n \"\"\" Check if the direction is valid \"\"\"\n n_y, n_x = self.directions[direction]\n p_y = self.p_y + n_y\n p_x = self.p_x + n_x\n in_boundaries = p_y < self.height and p_x < self.width and p_y >= 0 and p_x >= 0\n elem = ' '\n if in_boundaries:\n elem = self.data[p_y][p_x]\n return (in_boundaries and elem != ' ', elem)\n\n def walk(self):\n \"\"\" Walk through the line until the end \"\"\"\n condition = True\n letters = []\n while condition:\n next_char = self.move()\n #print(next_char)\n if next_char is None:\n condition = False\n elif next_char.isalpha():\n letters.append(next_char)\n return ''.join(letters)\n\ndef solution(data):\n \"\"\" Solution to the problem \"\"\"\n parser = Maze(data)\n return parser.walk()\n\nif __name__ == \"__main__\":\n PARSER = ArgumentParser()\n PARSER.add_argument(\"--input\", dest='input', action='store_true')\n PARSER.add_argument(\"--test\")\n ARGS = PARSER.parse_args()\n if ARGS.input:\n with(open('input.txt', 'r')) as input_file:\n print(solution(input_file.read()))\n elif 
ARGS.test:\n print(solution(str(ARGS.test)))\n else:\n DEBUG = \"\"\" | \n | +--+ \n A | C \n F---|----E|--+ \n | | | D \n +B-+ +--+ \"\"\"\n print(solution(DEBUG))\n","sub_path":"2017/19_1/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"623831668","text":"# -*- coding: UTF-8 -*-\nimport os\n\nfrom SimpleGladeApp import SimpleGladeApp\nfrom vars import app_name, app_version, app_web, DOMAIN_NAME, ICON_PATH, GLADE_DIR\n\n\nclass Wabout(SimpleGladeApp):\n def __init__(self, path=\"gnome-connection-manager.glade\",\n root=\"wAbout\",\n domain=DOMAIN_NAME, **kwargs):\n path = os.path.join(GLADE_DIR, path)\n SimpleGladeApp.__init__(self, path, root, domain, **kwargs)\n self.wAbout.set_icon_from_file(ICON_PATH)\n\n def new(self):\n self.wAbout.set_name(app_name)\n self.wAbout.set_version(app_version)\n self.wAbout.set_website(app_web)\n\n # Write your own methods here\n\n def on_wAbout_close(self, widget, *args):\n self.wAbout.destroy()\n","sub_path":"gcm/wabout.py","file_name":"wabout.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38587914","text":"# Copyright 2016 Nexenta Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nUnit tests for OpenStack Cinder volume driver\n\"\"\"\n\nimport mock\nfrom mock import patch\n\nfrom cinder import context\nfrom cinder import db\nfrom cinder import test\nfrom cinder.tests.unit import fake_constants as fake\nfrom cinder.tests.unit.fake_volume import fake_volume_obj\nfrom cinder.volume import configuration as conf\nfrom cinder.volume.drivers.nexenta.ns5 import jsonrpc\nfrom cinder.volume.drivers.nexenta.ns5 import nfs\n\n\nclass TestNexentaNfsDriver(test.TestCase):\n TEST_SHARE = 'host1:/pool/share'\n TEST_SHARE2_OPTIONS = '-o intr'\n TEST_FILE_NAME = 'test.txt'\n TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf'\n TEST_SNAPSHOT_NAME = 'snapshot1'\n TEST_VOLUME_NAME = 'volume1'\n TEST_VOLUME_NAME2 = 'volume2'\n\n TEST_VOLUME = fake_volume_obj(None, **{\n 'name': TEST_VOLUME_NAME,\n 'id': fake.VOLUME_ID,\n 'size': 1,\n 'status': 'available',\n 'provider_location': TEST_SHARE\n })\n\n TEST_VOLUME2 = fake_volume_obj(None, **{\n 'name': TEST_VOLUME_NAME2,\n 'size': 2,\n 'id': fake.VOLUME2_ID,\n 'status': 'in-use'\n })\n\n TEST_SNAPSHOT = {\n 'name': TEST_SNAPSHOT_NAME,\n 'volume_name': TEST_VOLUME_NAME,\n 'volume_size': 1,\n 'volume_id': fake.VOLUME_ID\n }\n\n TEST_SHARE_SVC = 'svc:/network/nfs/server:default'\n\n def setUp(self):\n super(TestNexentaNfsDriver, self).setUp()\n self.ctxt = context.get_admin_context()\n self.cfg = mock.Mock(spec=conf.Configuration)\n self.cfg.nexenta_dataset_description = ''\n self.cfg.nexenta_mount_point_base = '$state_path/mnt'\n self.cfg.nexenta_sparsed_volumes = True\n self.cfg.nexenta_dataset_compression = 'on'\n self.cfg.nexenta_dataset_dedup = 'off'\n self.cfg.nfs_mount_point_base = '/mnt/test'\n self.cfg.nfs_mount_attempts = 3\n self.cfg.nfs_mount_options = None\n self.cfg.nas_mount_options = 'vers=4'\n self.cfg.reserved_percentage = 20\n self.cfg.nexenta_use_https = False\n self.cfg.nexenta_rest_port = 0\n self.cfg.nexenta_user = 'user'\n self.cfg.nexenta_password = 'pass'\n self.cfg.max_over_subscription_ratio = 20.0\n self.cfg.nas_host = '1.1.1.1'\n self.cfg.nas_share_path = 'pool/share'\n self.nef_mock = mock.Mock()\n self.stubs.Set(jsonrpc, 'NexentaJSONProxy',\n lambda *_, **__: self.nef_mock)\n self.drv = nfs.NexentaNfsDriver(configuration=self.cfg)\n self.drv.db = db\n self.drv.do_setup(self.ctxt)\n\n def _create_volume_db_entry(self):\n vol = {\n 'id': fake.VOLUME_ID,\n 'size': 1,\n 'status': 'available',\n 'provider_location': self.TEST_SHARE\n }\n return db.volume_create(self.ctxt, vol)['id']\n\n def test_check_for_setup_error(self):\n self.nef_mock.get.return_value = {'data': []}\n self.assertRaises(\n LookupError, lambda: self.drv.check_for_setup_error())\n\n def test_initialize_connection(self):\n data = {\n 'export': self.TEST_VOLUME['provider_location'], 'name': 'volume'}\n self.assertEqual({\n 'driver_volume_type': self.drv.driver_volume_type,\n 'data': data\n }, self.drv.initialize_connection(self.TEST_VOLUME, None))\n\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver._create_regular_file')\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver._create_sparsed_file')\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver._ensure_share_mounted')\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver._share_folder')\n def test_do_create_volume(self, share, ensure, sparsed, regular):\n ensure.return_value = True\n share.return_value = 
True\n self.nef_mock.get.return_value = 'on'\n self.drv._do_create_volume(self.TEST_VOLUME)\n\n url = 'storage/pools/pool/filesystems'\n data = {\n 'name': 'share/volume-' + fake.VOLUME_ID,\n 'compressionMode': 'on',\n 'dedupMode': 'off',\n }\n self.nef_mock.post.assert_called_with(url, data)\n\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver._ensure_share_mounted')\n def test_delete_volume(self, ensure):\n self._create_volume_db_entry()\n self.nef_mock.get.return_value = {}\n self.drv.delete_volume(self.TEST_VOLUME)\n self.nef_mock.delete.assert_called_with(\n 'storage/pools/pool/filesystems/share%2Fvolume-' +\n fake.VOLUME_ID + '?snapshots=true')\n\n def test_create_snapshot(self):\n self._create_volume_db_entry()\n self.drv.create_snapshot(self.TEST_SNAPSHOT)\n url = ('storage/pools/pool/filesystems/share%2Fvolume-' +\n fake.VOLUME_ID + '/snapshots')\n data = {'name': self.TEST_SNAPSHOT['name']}\n self.nef_mock.post.assert_called_with(url, data)\n\n def test_delete_snapshot(self):\n self._create_volume_db_entry()\n self.drv.delete_snapshot(self.TEST_SNAPSHOT)\n url = ('storage/pools/pool/filesystems/share%2Fvolume-' +\n fake.VOLUME_ID + '/snapshots/snapshot1')\n self.drv.delete_snapshot(self.TEST_SNAPSHOT)\n self.nef_mock.delete.assert_called_with(url)\n\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver.extend_volume')\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver.local_path')\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver._share_folder')\n def test_create_volume_from_snapshot(self, share, path, extend):\n self._create_volume_db_entry()\n url = ('storage/pools/%(pool)s/'\n 'filesystems/%(fs)s/snapshots/%(snap)s/clone') % {\n 'pool': 'pool',\n 'fs': '%2F'.join(['share', 'volume-' + fake.VOLUME_ID]),\n 'snap': self.TEST_SNAPSHOT['name']\n }\n path = '/'.join(['pool/share', self.TEST_VOLUME2['name']])\n data = {'targetPath': path}\n self.drv.create_volume_from_snapshot(\n self.TEST_VOLUME2, self.TEST_SNAPSHOT)\n self.nef_mock.post.assert_called_with(url, data)\n\n # make sure the volume get extended!\n extend.assert_called_once_with(self.TEST_VOLUME2, 2)\n\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver.local_path')\n @patch('oslo_concurrency.processutils.execute')\n def test_extend_volume_sparsed(self, _execute, path):\n self._create_volume_db_entry()\n path.return_value = 'path'\n\n self.drv.extend_volume(self.TEST_VOLUME, 2)\n\n _execute.assert_called_with(\n 'truncate', '-s', '2G',\n 'path',\n root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf',\n run_as_root=True)\n\n @patch('cinder.volume.drivers.nexenta.ns5.nfs.'\n 'NexentaNfsDriver.local_path')\n @patch('oslo_concurrency.processutils.execute')\n def test_extend_volume_nonsparsed(self, _execute, path):\n self._create_volume_db_entry()\n path.return_value = 'path'\n with mock.patch.object(self.drv,\n 'sparsed_volumes',\n False):\n\n self.drv.extend_volume(self.TEST_VOLUME, 2)\n\n _execute.assert_called_with(\n 'dd', 'if=/dev/zero', 'seek=1073741824',\n 'of=path',\n 'bs=1M', 'count=1024',\n root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf',\n run_as_root=True)\n\n def test_get_capacity_info(self):\n self.nef_mock.get.return_value = {\n 'bytesAvailable': 1000,\n 'bytesUsed': 100}\n self.assertEqual(\n (1000, 900, 100), 
self.drv._get_capacity_info('pool/share'))\n","sub_path":"cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py","file_name":"test_nexenta5_nfs.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"379026695","text":"\n\n\n\nimport os\n#os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\n# Parameters\nlearning_rate = 0.0001\ntraining_epochs = 10\nbatch_size = 50 #100\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"mnist\", one_hot=True,reshape=False,validation_size=0)\n\n\nsess = tf.Session()\n# init = tf.global_variables_initializer()\n# sess.run(init)\nsaver = tf.train.import_meta_graph('./tmp_b/mnist.ckpt.meta')\nsaver.restore(sess,tf.train.latest_checkpoint('./tmp_b'))\nprint(\"Model Restore: \")\ngraph = tf.get_default_graph()\nX = graph.get_tensor_by_name(\"X_input:0\")\nresult = graph.get_tensor_by_name(\"yhat_output:0\")\n\n\n\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimgnew = Image.open('mnist.jpg')\nim = np.asarray(imgnew)\nim = np.expand_dims(im, axis=0)\nim = im.reshape(1,28,28,1)\nprint(im.shape)\nclassification = sess.run(tf.argmax(result, 1), {X: im})\nprint('predicted', classification[0])\nplt.imshow(im.reshape(28, 28), cmap=plt.cm.binary)\nplt.show()","sub_path":"exercises/Module_4_CNN/module4_1B_cnn_mnist_delpoy.py","file_name":"module4_1B_cnn_mnist_delpoy.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"543309635","text":"# -*- coding:utf-8 -*-\nimport os\nimport uuid\nimport shutil\nimport sublime\nimport tempfile\nfrom . 
import SETTINGS_USER_FILE\n\n\ndef get_7za_bin():\n settings = sublime.load_settings(SETTINGS_USER_FILE)\n zip_bin = None\n\n if settings.get('7za_path') and os.path.exists(settings.get('7za_path')):\n zip_bin = settings.get('7za_path')\n elif shutil.which('7z'):\n zip_bin = shutil.which('7z')\n elif shutil.which('7za'):\n zip_bin = shutil.which('7za')\n elif os.name == 'nt':\n if os.path.exists(os.path.join(os.environ.get('ProgramFiles'), '7-Zip', '7z.exe')):\n zip_bin = os.path.join(os.environ.get('ProgramFiles'), '7-Zip', '7z.exe')\n if os.path.exists(os.path.join(os.environ.get('ProgramFiles(x86)'), '7-Zip', '7z.exe')):\n zip_bin = os.path.join(os.environ.get('ProgramFiles(x86)'), '7-Zip', '7z.exe')\n\n return zip_bin\n\n\ndef generate_temp_filename():\n return os.path.join(tempfile.gettempdir(), 'sublime-sync_%s.zip' % str(uuid.uuid4()))\n","sub_path":"sublimall/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"514353873","text":"\n\nfrom xai.brain.wordbase.nouns._confusion import _CONFUSION\n\n#class header\nclass _CONFUSIONS(_CONFUSION, ):\n\tdef __init__(self,): \n\t\t_CONFUSION.__init__(self)\n\t\tself.name = \"CONFUSIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"confusion\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_confusions.py","file_name":"_confusions.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"228816262","text":"\"\"\"\nSun discipline for CADRE: Sun LOS component.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nfrom six.moves import range\n\nimport numpy as np\n\nfrom openmdao.core.explicitcomponent import ExplicitComponent\n\n\ndef cross_deriv(v):\n \"\"\"\n Compute derivative across a cross product of two 3 dimensional vectors.\n\n Given c = a x b, dc_da = cross_deriv(b), and dc_db = -cross_deriv(a)\n \"\"\"\n m = np.array([[0.0, -v[2], v[1]],\n [v[2], 0.0, -v[0]],\n [-v[1], v[0], 0.0]])\n return m\n\n\nclass SunLOSComp(ExplicitComponent):\n \"\"\"\n Compute the Satellite to sun line of sight.\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', types=(int, ),\n desc=\"Number of time points.\")\n self.options.declare('Re', types=(float, ), default=6378.137,\n desc=\"Radius of the Earth (km).\")\n self.options.declare('alpha', types=(float, ), default=0.85,\n desc=\"LOS smoothing factor.\")\n\n def setup(self):\n nn = self.options['num_nodes']\n\n self.add_input('r_e2b_I', np.zeros((nn, 3)), units='km',\n desc='Position vector from '\n 'Earth to satellite in Earth-centered '\n 'inertial frame over time.')\n\n self.add_input('r_e2s_I', np.zeros((nn, 3)), units='km',\n desc='Position vector from Earth to sun in Earth-centered '\n 'inertial frame over time.')\n\n self.add_output('LOS', np.zeros((nn, )), units=None,\n desc='Satellite to sun line of sight over time')\n\n rows = np.tile(np.repeat(0, 3), nn) + np.repeat(np.arange(nn), 3)\n cols = np.arange(nn*3)\n\n self.declare_partials('LOS', 'r_e2b_I', rows=rows, cols=cols)\n self.declare_partials('LOS', 'r_e2s_I', rows=rows, cols=cols)\n\n def compute(self, inputs, outputs):\n \"\"\"\n Calculate outputs.\n \"\"\"\n nn = self.options['num_nodes']\n r2 = self.options['Re']\n r1 = r2 * self.options['alpha']\n\n r_e2b_I = inputs['r_e2b_I']\n r_e2s_I = inputs['r_e2s_I']\n LOS = outputs['LOS']\n\n for i in range(nn):\n r_b = 
r_e2b_I[i, :]\n r_s = r_e2s_I[i, :]\n dot = np.dot(r_b, r_s)\n cross = np.cross(r_b, r_s)\n dist = np.sqrt(cross.dot(cross))\n\n if dot >= 0.0:\n LOS[i] = 1.0\n elif dist <= r1:\n LOS[i] = 0.0\n elif dist >= r2:\n LOS[i] = 1.0\n else:\n x = (dist - r1) / (r2 - r1)\n LOS[i] = 3*x**2 - 2*x**3\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Calculate and save derivatives. (i.e., Jacobian)\n \"\"\"\n nn = self.options['num_nodes']\n nj = 3 * nn\n r2 = self.options['Re']\n r1 = r2 * self.options['alpha']\n\n r_e2b_I = inputs['r_e2b_I']\n r_e2s_I = inputs['r_e2s_I']\n\n Jab = np.zeros(shape=(nj, ), dtype=r_e2b_I.dtype)\n Jas = np.zeros(shape=(nj, ), dtype=r_e2b_I.dtype)\n\n for i in range(nn):\n r_b = r_e2b_I[i, :]\n r_s = r_e2s_I[i, :]\n dot = np.dot(r_b, r_s)\n\n if dot >= 0.0:\n continue\n\n cross = np.cross(r_b, r_s)\n dist = np.sqrt(np.dot(cross, cross))\n\n if dist <= r1 or dist >= r2:\n continue\n\n else:\n x = (dist - r1)/(r2 - r1)\n # LOS = 3*x**2 - 2*x**3\n ddist_dcross = cross / dist\n dcross_drb = cross_deriv(-r_s)\n dcross_drs = cross_deriv(r_b)\n dx_ddist = 1.0/(r2 - r1)\n dLOS_dx = 6*x - 6*x**2\n dLOS_drb = dLOS_dx * dx_ddist * np.dot(ddist_dcross, dcross_drb)\n dLOS_drs = dLOS_dx * dx_ddist * np.dot(ddist_dcross, dcross_drs)\n\n Jab[i*3:i*3+3] = dLOS_drb\n Jas[i*3:i*3+3] = dLOS_drs\n\n partials['LOS', 'r_e2b_I'] = Jab\n partials['LOS', 'r_e2s_I'] = Jas\n\n","sub_path":"CADRE/sun_dymos/sun_los_comp.py","file_name":"sun_los_comp.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"534186395","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/chisubmit/client/course.py\n# Compiled at: 2020-01-15 11:47:40\n# Size of source mod 2**32: 18472 bytes\nimport chisubmit.client.users, chisubmit.client.assignment, chisubmit.client.team\nfrom chisubmit.client.types import ChisubmitAPIObject, Attribute, AttributeType, APIStringType, APIIntegerType, Relationship, APIObjectType, APIBooleanType\nfrom chisubmit.client.users import User\nimport datetime\n\nclass Course(ChisubmitAPIObject):\n _api_attributes = {'course_id': Attribute(name='course_id', attrtype=APIStringType, editable=True), \n \n 'name': Attribute(name='name', attrtype=APIStringType, editable=True), \n \n 'archived': Attribute(name='archived', attrtype=APIBooleanType, editable=True), \n \n 'git_server_connstr': Attribute(name='git_server_connstr', attrtype=APIStringType, editable=True), \n \n 'git_staging_connstr': Attribute(name='git_staging_connstr', attrtype=APIStringType, editable=True), \n \n 'git_usernames': Attribute(name='git_usernames', attrtype=APIStringType, editable=True), \n \n 'git_staging_usernames': Attribute(name='git_staging_usernames', attrtype=APIStringType, editable=True), \n \n 'extension_policy': Attribute(name='extension_policy', attrtype=APIStringType, editable=True), \n \n 'default_extensions': Attribute(name='default_extensions', attrtype=APIIntegerType, editable=True), \n \n 'gradescope_id': Attribute(name='gradescope_id', attrtype=APIIntegerType, editable=True)}\n _api_relationships = {'instructors': Relationship(name='instructors', reltype=APIObjectType('chisubmit.client.users.Instructor')), \n \n 'graders': Relationship(name='graders', reltype=APIObjectType('chisubmit.client.users.Grader')), \n \n 'students': Relationship(name='students', 
reltype=APIObjectType('chisubmit.client.users.Student')), \n \n 'assignments': Relationship(name='assignments', reltype=APIObjectType('chisubmit.client.assignment.Assignment')), \n \n 'teams': Relationship(name='teams', reltype=APIObjectType('chisubmit.client.team.Team'))}\n\n def get_instructors(self):\n \"\"\"\n :calls: GET /courses/:course/instructors/\n :rtype: List of :class:`chisubmit.client.users.Instructor`\n \"\"\"\n instructors = self.get_related('instructors')\n return instructors\n\n def get_instructor(self, username):\n \"\"\"\n :calls: GET /courses/:course/instructors/:instructor\n :rtype: :class:`chisubmit.client.users.Instructor`\n \"\"\"\n headers, data = self._api_client._requester.request('GET', '/courses/' + self.course_id + '/instructors/' + username)\n return chisubmit.client.users.Instructor(self._api_client, headers, data)\n\n def add_instructor(self, user_or_username, git_username=None, git_staging_username=None):\n \"\"\"\n :calls: POST /courses/:course/instructors/\n :rtype: :class:`chisubmit.client.users.Instructor`\n \"\"\"\n if not isinstance(user_or_username, (str, str)):\n assert isinstance(user_or_username, User)\n if isinstance(user_or_username, (str, str)):\n username = user_or_username\n elif isinstance(user_or_username, User):\n username = user_or_username.username\n post_data = {'username': username}\n if git_username is not None:\n post_data['git_username'] = git_username\n if git_staging_username is not None:\n post_data['git_staging_username'] = git_staging_username\n headers, data = self._api_client._requester.request('POST', '/courses/' + self.course_id + '/instructors/', data=post_data)\n return chisubmit.client.users.Instructor(self._api_client, headers, data)\n\n def remove_instructor(self, user_or_username):\n \"\"\"\n :calls: DELETE /courses/:course/instructors/:username\n :rtype: None\n \"\"\"\n if not (isinstance(user_or_username, (str, str)) or isinstance(user_or_username, User)):\n assert isinstance(user_or_username, chisubmit.client.users.Instructor)\n if isinstance(user_or_username, (str, str)):\n username = user_or_username\n else:\n if isinstance(user_or_username, User):\n username = user_or_username.username\n elif isinstance(user_or_username, chisubmit.client.users.Instructor):\n username = user_or_username.user.username\n _ = self._api_client._requester.request('DELETE', '/courses/' + self.course_id + '/instructors/' + username)\n\n def get_graders(self):\n \"\"\"\n :calls: GET /courses/:course/graders/\n :rtype: List of :class:`chisubmit.client.users.Grader`\n \"\"\"\n graders = self.get_related('graders')\n return graders\n\n def get_grader(self, username):\n \"\"\"\n :calls: GET /courses/:course/graders/:grader\n :rtype: :class:`chisubmit.client.users.Grader`\n \"\"\"\n headers, data = self._api_client._requester.request('GET', '/courses/' + self.course_id + '/graders/' + username)\n return chisubmit.client.users.Grader(self._api_client, headers, data)\n\n def add_grader(self, user_or_username, git_username=None, git_staging_username=None):\n \"\"\"\n :calls: POST /courses/:course/graders/\n :rtype: :class:`chisubmit.client.users.Grader`\n \"\"\"\n if not isinstance(user_or_username, (str, str)):\n assert isinstance(user_or_username, User)\n if isinstance(user_or_username, (str, str)):\n username = user_or_username\n elif isinstance(user_or_username, User):\n username = user_or_username.username\n post_data = {'username': username}\n if git_username is not None:\n post_data['git_username'] = git_username\n if 
git_staging_username is not None:\n post_data['git_staging_username'] = git_staging_username\n headers, data = self._api_client._requester.request('POST', '/courses/' + self.course_id + '/graders/', data=post_data)\n return chisubmit.client.users.Grader(self._api_client, headers, data)\n\n def remove_grader(self, user_or_username):\n \"\"\"\n :calls: DELETE /courses/:course/graders/:username\n :rtype: None\n \"\"\"\n if not (isinstance(user_or_username, (str, str)) or isinstance(user_or_username, User)):\n assert isinstance(user_or_username, chisubmit.client.users.Grader)\n if isinstance(user_or_username, (str, str)):\n username = user_or_username\n else:\n if isinstance(user_or_username, User):\n username = user_or_username.username\n elif isinstance(user_or_username, chisubmit.client.users.Grader):\n username = user_or_username.user.username\n _ = self._api_client._requester.request('DELETE', '/courses/' + self.course_id + '/graders/' + username)\n\n def get_students(self):\n \"\"\"\n :calls: GET /courses/:course/students/\n :rtype: List of :class:`chisubmit.client.users.Student`\n \"\"\"\n students = self.get_related('students')\n return students\n\n def get_student(self, username):\n \"\"\"\n :calls: GET /courses/:course/students/:grader\n :rtype: :class:`chisubmit.client.users.Student`\n \"\"\"\n headers, data = self._api_client._requester.request('GET', '/courses/' + self.course_id + '/students/' + username)\n return chisubmit.client.users.Student(self._api_client, headers, data)\n\n def add_student(self, user_or_username, git_username=None, extensions=None, dropped=None):\n \"\"\"\n :calls: POST /courses/:course/students/\n :rtype: :class:`chisubmit.client.users.Student`\n \"\"\"\n if not isinstance(user_or_username, (str, str)):\n assert isinstance(user_or_username, User)\n if isinstance(user_or_username, (str, str)):\n username = user_or_username\n elif isinstance(user_or_username, User):\n username = user_or_username.username\n post_data = {'username': username}\n if git_username is not None:\n post_data['git_username'] = git_username\n if extensions is not None:\n post_data['extensions'] = extensions\n if dropped is not None:\n post_data['dropped'] = dropped\n headers, data = self._api_client._requester.request('POST', '/courses/' + self.course_id + '/students/', data=post_data)\n return chisubmit.client.users.Student(self._api_client, headers, data)\n\n def remove_student(self, user_or_username):\n \"\"\"\n :calls: DELETE /courses/:course/students/:username\n :rtype: None\n \"\"\"\n if not (isinstance(user_or_username, (str, str)) or isinstance(user_or_username, User)):\n assert isinstance(user_or_username, chisubmit.client.users.Student)\n if isinstance(user_or_username, (str, str)):\n username = user_or_username\n else:\n if isinstance(user_or_username, User):\n username = user_or_username.username\n elif isinstance(user_or_username, chisubmit.client.users.Student):\n username = user_or_username.user.username\n _ = self._api_client._requester.request('DELETE', '/courses/' + self.course_id + '/students/' + username)\n\n def get_assignments(self, include_rubric=False):\n \"\"\"\n :calls: GET /courses/:course/assignments/\n :rtype: List of :class:`chisubmit.client.assignment.Assignment`\n \"\"\"\n include = []\n if include_rubric:\n include.append('rubric')\n if len(include) > 0:\n params = {'include': include}\n else:\n params = None\n assignments = self.get_related('assignments', params=params)\n return assignments\n\n def get_assignment(self, assignment_id, 
include_rubric=False):\n \"\"\"\n :calls: GET /courses/:course/assignments/:assignment/\n :rtype: List of :class:`chisubmit.client.assignment.Assignment`\n \"\"\"\n include = []\n if include_rubric:\n include.append('rubric')\n if len(include) > 0:\n params = {'include': include}\n else:\n params = None\n headers, data = self._api_client._requester.request('GET', '/courses/' + self.course_id + '/assignments/' + assignment_id, params=params)\n return chisubmit.client.assignment.Assignment(self._api_client, headers, data)\n\n def create_assignment(self, assignment_id, name, deadline, min_students=None, max_students=None):\n \"\"\"\n :calls: POST /courses/:course/assignments/\n :param assignment_id: string\n :param name: string\n :param deadline: string\n :param min_students: int\n :param max_students: int\n :rtype: :class:`chisubmit.client.assignment.Assignment`\n \"\"\"\n assert isinstance(assignment_id, (str, str)), assignment_id\n if not isinstance(deadline, (str, str)):\n assert isinstance(deadline, datetime.datetime), deadline\n if isinstance(deadline, (str, str)):\n deadline_str = deadline\n elif isinstance(deadline, datetime.datetime):\n deadline_str = deadline.isoformat(sep=' ')\n post_data = {'assignment_id': assignment_id, \n 'name': name, \n 'deadline': deadline_str}\n if min_students is not None:\n post_data['min_students'] = min_students\n if max_students is not None:\n post_data['max_students'] = max_students\n headers, data = self._api_client._requester.request('POST', '/courses/' + self.course_id + '/assignments/', data=post_data)\n return chisubmit.client.assignment.Assignment(self._api_client, headers, data)\n\n def get_teams(self, include_students=False, include_assignments=False, include_grades=False):\n \"\"\"\n :calls: GET /courses/:course/teams/\n :rtype: List of :class:`chisubmit.client.team.Team`\n \"\"\"\n include = []\n if include_students:\n include.append('students')\n if include_assignments:\n include.append('assignments')\n if include_grades:\n include.append('assignments__grades')\n if len(include) > 0:\n params = {'include': include}\n else:\n params = None\n teams = self.get_related('teams', params=params)\n return teams\n\n def get_team(self, team_id, include_students=False, include_assignments=False, include_grades=False):\n \"\"\"\n :calls: GET /courses/:course/teams/\n :rtype: :class:`chisubmit.client.team.Team`\n \"\"\"\n assert isinstance(team_id, (str, str)), team_id\n include = []\n if include_students:\n include.append('students')\n if include_assignments:\n include.append('assignments')\n if include_grades:\n include.append('assignments__grades')\n if len(include) > 0:\n params = {'include': include}\n else:\n params = None\n headers, data = self._api_client._requester.request('GET', self.teams_url + team_id, params=params)\n return chisubmit.client.team.Team(self._api_client, headers, data)\n\n def create_team(self, team_id, extensions=None, active=None):\n \"\"\"\n :calls: POST /courses/:course/teams/\n :param name: string\n :param extensions: int\n :param active: bool\n :rtype: :class:`chisubmit.client.team.Team`\n \"\"\"\n assert isinstance(team_id, (str, str)), team_id\n post_data = {'team_id': team_id}\n if extensions is not None:\n post_data['extensions'] = extensions\n if active is not None:\n post_data['active'] = active\n headers, data = self._api_client._requester.request('POST', self.teams_url, data=post_data)\n return chisubmit.client.team.Team(self._api_client, headers, 
data)","sub_path":"pycfiles/chisubmit-2.1.0-py3.5/course.cpython-35.py","file_name":"course.cpython-35.py","file_ext":"py","file_size_in_byte":14335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"445085965","text":"#!/usr/bin/python\n\nimport ecdsa\nimport sha3\n\ndef cksum(s):\n h = sha3.keccak_256(s.encode('utf-8')).hexdigest()\n c = ''\n for i,k in zip(s,h):\n if int(k, 16) <= 7:\n c += i.lower()\n else:\n c += i.upper()\n return '0x' + c\n\ndef gen_keys():\n priv_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)\n pub_key = priv_key.get_verifying_key().to_string()\n\n pub_key_hash = sha3.keccak_256(pub_key).hexdigest()\n address = pub_key_hash[24:]\n address = cksum(address)\n priv_key = priv_key.to_string().hex()\n return priv_key, address\n\ndef main():\n priv_key, address = gen_keys()\n print(\"Ethereum Address:\", address)\n print(\"Private Key:\", priv_key)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ethereum wallet gen.py","file_name":"ethereum wallet gen.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"529153251","text":"# Import statements\nimport pandas\n\n# Initialize dataframe lists...\nall_names = [\"Tom\", \"Bill\", \"Jack\", \"James\", \"Ed\"]\nall_tickets = [7.5, 10.5, 10.5, 10.5, 6.5]\n\npopcorn = []\nmms = []\npita_chips = []\nwater = []\norange_juice = []\n\nsnack_lists = [popcorn, mms, pita_chips, water, orange_juice]\n\nsnack_data_dict = {\n \"Name\": all_names,\n \"Ticket\": all_tickets,\n \"Popcorn\": popcorn,\n \"Water\": water,\n \"Pita Chips\": pita_chips,\n \"M&Ms\": mms,\n \"Orange Juice\": orange_juice\n}\n\n# cost of each snack item\nprice_dict = {\n \"Popcorn\": 2.5,\n \"Water\": 2,\n \"Pita Chips\": 4.5,\n \"M&Ms\": 3,\n \"Orange Juice\": 3.25\n}\n\ntest_data = [\n [[2, 'Popcorn'], [1, 'Pita Chips'], [1, 'Orange Juice']],\n [[]],\n [[1, 'Water']],\n [[1, 'Popcorn'], [1, 'Orange Juice']],\n [[1, \"M&Ms\"], [1, 'Pita Chips'], [3, 'Orange Juice']]\n]\n\ncount = 0\nfor client_order in test_data:\n\n # Assume no snacks have been brought..\n for item in snack_lists:\n item.append(0)\n\n # print (snack_lists)\n\n # get order (hard coded for easy testing)...\n snack_order = test_data[count]\n count += 1\n\n for item in snack_order:\n if len(item) > 0:\n to_find = (item[1])\n amount = (item[0])\n add_list = snack_data_dict[to_find]\n add_list[-1] = amount\n\nprint()\nprint(\"Popcorn: \", snack_lists[0])\nprint(\"M&Ms: \", snack_lists[1])\nprint(\"Pita Chips: \", snack_lists[2])\nprint(\"Water: \", snack_lists[3])\nprint(\"Orange Juice: \", snack_lists[4])\nprint()\n\n# Print details...\nmovie_frame = pandas.DataFrame(snack_data_dict)\nmovie_frame = movie_frame.set_index(\"Name\")\n\n# Create column called \"Sub Total\"\n# Fill it with price for snacks and ticket\n\nmovie_frame[\"Sub Total\"] = \\\n movie_frame[\"Ticket\"] + \\\n movie_frame[\"Popcorn\"] * price_dict[\"Popcorn\"] + \\\n movie_frame[\"Water\"] * price_dict[\"Water\"] + \\\n movie_frame[\"Pita Chips\"] * price_dict[\"Pita Chips\"] + \\\n movie_frame[\"M&Ms\"] * price_dict[\"M&Ms\"] + \\\n movie_frame[\"Orange Juice\"] * price_dict[\"Orange Juice\"]\n\nmovie_frame = movie_frame.rename(columns={\"Orange Juice\": \"OJ\",\n \"Pita Chips\": 
\"Chips\"})\n\nprint(movie_frame)\n","sub_path":"08_snack_lists_V3.py","file_name":"08_snack_lists_V3.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"539666881","text":"import sys\n\nfrom .base import *\n\n#\n# SENTRY\n#\nINSTALLED_APPS += [\n 'raven.contrib.django.raven_compat',\n]\n\n#\n# LOGGING\n#\n# Production logging facility.\nLOGGING['loggers'].update({\n 'brouwers': {\n 'handlers': ['project'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n 'django': {\n 'handlers': ['django'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n})\n\ntry:\n from .secrets import *\nexcept ImportError:\n sys.stderr.write(\"Create your secrets.py file with the secret settings.\")\n\n#\n# STATICFILES\n#\nSTATICFILES_STORAGE = 'systemjs.storage.SystemJSManifestStaticFilesStorage'\n\n#\n# SESSION\n#\nSESSION_COOKIE_DOMAIN = '.modelbrouwers.nl'\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n\n#\n# CACHE\n#\nCACHES['default']['KEY_PREFIX'] = 'production'\n\n#\n# EMAIL\n#\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nSERVER_EMAIL = 'beheer@modelbrouwers.nl'\n\n#\n# SECURITY\n#\nALLOWED_HOSTS = ['.modelbrouwers.nl']\n\n#\n# COMPRESS\n#\nCOMPRESS_ENABLED = True\n\n#\n# TEMPLATES\n#\nTEMPLATES[0]['APP_DIRS'] = False # conflicts with explicitly specifying the loaders\nTEMPLATES[0]['OPTIONS']['loaders'] = [\n ('django.template.loaders.cached.Loader', RAW_TEMPLATE_LOADERS),\n]\n\n#\n# SECURITY\n#\nSECURE_SSL_REDIRECT = True\nSECURE_HSTS_SECONDS = 0 # start really low\n\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n","sub_path":"src/conf/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"54393696","text":"from OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\n\r\nMS_PER_FRAME = 10\r\nmillis = 0\r\n\r\nwidth, height = 600, 600\r\nmouse = [0,0]\r\n\r\ndef draw_axis():\r\n\tglBegin(GL_LINES)\r\n\tfor v in [(1,0,0),(0,1,0),(0,0,1)]:\r\n\t\tglColor3fv(v)\r\n\t\tglVertex3f(0,0,0)\r\n\t\tglVertex3fv(v)\r\n\tglEnd()\r\n\r\ndef draw_cube():\r\n\tglPushMatrix()\r\n\tglColor3f(1,1,1)\r\n\tglutWireCube(1)\r\n\tglPopMatrix()\r\n\r\ndef display():\r\n\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n\tglLoadIdentity()\r\n\ty = 5.0*(1.0*height-mouse[1])/height\r\n\tgluLookAt(0,y,5.0*mouse[0]/width,0,y,0,0,1,0)\t\r\n\r\n\tdraw_axis()\r\n\tdraw_cube()\r\n\r\n\tglFlush()\r\n\r\ndef init():\r\n\tglClearColor(0,0,0,0)\r\n\tglEnable(GL_DEPTH_TEST)\r\n\r\n\tglMatrixMode(GL_PROJECTION)\r\n\tglLoadIdentity()\r\n\tgluPerspective(60,1,0.1,10)\r\n\tglMatrixMode(GL_MODELVIEW)\r\n\r\ndef resize(w,h):\r\n\tglobal width, height\r\n\twidth, height = w,h\r\n\tglViewport(0,0,w,h)\r\n\tglutPostRedisplay()\r\n\r\ndef motion(x,y):\r\n\tglobal mouse\r\n\tmouse = x, y\r\n\tglutPostRedisplay()\r\n\r\ndef timer(i):\r\n\tglobal millis\r\n\tmillis += MS_PER_FRAME\r\n\r\n\tglutPostRedisplay()\r\n\tglutTimerFunc(MS_PER_FRAME,timer,0)\r\n\r\nif 
__name__==\"__main__\":\r\n\tglutInit()\r\n\tglutInitWindowSize(width,height)\r\n\tglutInitDisplayMode(GLUT_DEPTH)\r\n\tglutCreateWindow(\"cube\")\r\n\tinit()\r\n\tglutDisplayFunc(display)\r\n\tglutReshapeFunc(resize)\r\n\tglutPassiveMotionFunc(motion)\r\n\tglutTimerFunc(MS_PER_FRAME,timer,0)\r\n\tglutMainLoop()\r\n\r\n","sub_path":"opengl/python/l11/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"148477042","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport csv\nfrom django.core.management.base import BaseCommand\nfrom corehq.apps.locations.models import SQLLocation\nfrom custom.enikshay.integrations.bets.repeaters import BETSLocationRepeater\nfrom custom.enikshay.integrations.bets.utils import get_bets_location_json\nfrom six.moves import map\n\n\nclass Command(BaseCommand):\n field_names = [\n 'domain',\n 'parent_site_code',\n 'is_archived',\n 'last_modified',\n 'location_id',\n 'location_type',\n 'location_type_code',\n 'lineage',\n 'doc_type',\n 'name',\n 'site_code',\n 'longitude',\n 'ancestors_by_type.ctd',\n 'ancestors_by_type.sto',\n 'ancestors_by_type.dto',\n 'ancestors_by_type.cto',\n 'latitude',\n '_id',\n 'external_id',\n 'metadata.is_test',\n 'metadata.tests_available',\n 'metadata.private_sector_org_id',\n 'metadata.nikshay_code',\n 'metadata.enikshay_enabled',\n ]\n\n def add_arguments(self, parser):\n parser.add_argument('domain')\n\n def handle(self, domain, **options):\n self.domain = domain\n filename = 'eNikshay_locations.csv'\n with open(filename, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(self.field_names)\n loc_types = BETSLocationRepeater.location_types_to_forward\n for loc in (SQLLocation.active_objects\n .filter(domain=domain, location_type__code__in=loc_types)\n .prefetch_related('parent', 'location_type')):\n if loc.metadata.get('is_test') != \"yes\":\n self.add_loc(loc, writer)\n print(\"Wrote to {}\".format(filename))\n\n def add_loc(self, location, writer):\n loc_data = get_bets_location_json(location)\n\n def get_field(field):\n if field == 'lineage':\n return ''\n elif '.' 
in field:\n obj, key = field.split('.')\n return loc_data[obj].get(key, '')\n return loc_data[field]\n\n writer.writerow(list(map(get_field, self.field_names)))\n","sub_path":"custom/enikshay/management/commands/get_locations_for_bets.py","file_name":"get_locations_for_bets.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"26159224","text":"import discord\nimport asyncio\nimport urllib.request\nimport requests\nfrom discord.ext import commands\n\nclass Shorten:\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def ouo(self, url):\n\n url2 = (\"http://ouo.io/api/B3Nm9g8L?s=\" + url)\n handle = urllib.request.Request(url2, headers={'User-Agent': 'Mozilla/5.0'})\n html = urllib.request.urlopen(handle).read().decode('utf-8')\n await self.bot.say(\"Shortened: \" + html)\n await asyncio.sleep(1)\n\ndef setup(bot):\n bot.add_cog(Shorten(bot))\n","sub_path":"ouo.py","file_name":"ouo.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"461109084","text":"import sys\r\n\r\n\r\ndef error(info, exit=True, code=-1):\r\n \"\"\"Base fatal error handling, prints info,\r\n exit if @param exit, returning @param code.\"\"\"\r\n print(info)\r\n if exit:\r\n raw_input(\"Press anything to exit\")\r\n sys.exit(code)\r\n\r\n\r\ndef address_handling():\r\n \"\"\"Does read the address from command line arguments, returns it,\r\n defaults to DEFAULT_ADRESS, defined inside.\"\"\"\r\n\r\n DEFAULT_ADRESS = \"0.0.0.0:8080\"\r\n\r\n try:\r\n address = sys.argv[1]\r\n except Exception:\r\n address = DEFAULT_ADRESS\r\n\r\n try:\r\n address = tuple(address.split(':'))\r\n address = (address[0], int(address[1]))\r\n except Exception:\r\n error(\"Can't parse address\")\r\n finally:\r\n print(\"Launching with address {}:{}\".format(address[0], address[1]))\r\n\r\n return address\r\n","sub_path":"server_client_utils.py","file_name":"server_client_utils.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"624946737","text":"import schedule, time\nfrom web import app\nfrom flask import Flask\nfrom flask_mail import Message, Mail\nfrom web.utilities import sendConfirmationEmailToUser\n\n# initiate mail with app config\nmail = Mail()\napp = Flask(__name__)\nmail.init_app(app)\n\ndef sendEmailToASM(entry):\n msg = Message(\"ArtBot Agar Art Submission on Behalf of %s\", entry.email)\n\n msg.recipients = [entry.email]\n msg.add_recipient(\"somebodyelse@asm.com\")\n\n msg.html = \"
Attached is %s's agar art submission!\" % entry.email\n\n    image = Image.frombytes(\"RGBX\", (616, 414), entry.completed_picture)\n    msg.attach(\"image.png\", \"image/png\", image)\n\n    mail.send(msg)\n\ndef sendEmailToUser(entry):\n    msg = Message(\"ArtBot is done making your art!\",\n        recipients=[entry.email])\n\n    msg.html = \"Attached is a picture of your completed agar art and the original pixel art for comparison!
\"\n\n image = Image.frombytes(\"RGBX\", (616, 414), entry.completed_picture)\n imageTwo = Image.frombytes(\"RGBX\", (616, 414), entry.picture)\n msg.attach(\"completed_art.png\", \"image/png\", image)\n msg.attach(\"original_pixel_art.png\", \"image/png\", imageTwo)\n\n mail.send(msg)\n\ndef getCompletedArt():\n #environment vars should be removed when implementing - they are already set at app config\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.abspath(os.path.join(basedir, os.pardir, 'ARTBot.db'))\n SQL_ENGINE = db.create_engine(SQLALCHEMY_DATABASE_URI)\n\n query = f\"\"\"SELECT picture FROM artpieces\n WHERE picture IS NOT NULL AND status <> 'Completed'\n \"\"\"\n entry = pd.read_sql(query, SQL_ENGINE).iloc[0]\n return entry\n\ndef getArtSendEmail():\n entry = getCompletedArt()\n sendEmailToASM(entry)\n sendEmailToUser(entry)\n\nschedule.every().day.at(\"7am\").do(getArtSendEmail)\nschedule.every().day.at(\"2pm\").do(getArtSendEmail)\nschedule.every().day.at(\"11pm\").do(getArtSendEmail)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n","sub_path":"asm_emailer/asm_emailer.py","file_name":"asm_emailer.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"141080141","text":"import struct\nimport unittest\nfrom typing import List\n\nimport pyparcel\n\nDATA: List[int] = [\n -1 << 31,\n -1000,\n -57,\n -26,\n -20,\n -5,\n -2,\n -1,\n 0,\n 1,\n 2,\n 5,\n 20,\n 57,\n 1000,\n (1 << 31) - 1,\n]\n\n\nclass MyTestCase(unittest.TestCase):\n def test_pack(self):\n for i in DATA:\n self.assertEqual(pyparcel.pack(i), struct.pack(\"i\", i))\n\n def test_pack_unpack(self):\n for i in DATA:\n self.assertEqual(i, pyparcel.unpack(pyparcel.pack(i), int()))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_int.py","file_name":"test_int.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"409480647","text":"import numpy as np\n\nOFILE_SCATTERING = \"SCATTERING_RATE\"\n\n#def initialize_output(\n# natoms, nelements, cell,\n# mesh, qpoints, f2s):\n# ofs = open(OFILE, \"a\")\n# ofs.write(\"# ----------------------------------------------\\n\")\n# ofs.write(\"# Phonon scattering rate due to an impurity\\n\")\n# ofs.write(\"# calculated by phtmat\\n\")\n# ofs.write(\"# ----------------------------------------------\\n\")\n# ofs.write(\"## GENERAL information\\n\")\n# ofs.write(\"#SYSTEM\\n\")\n# ofs.write(\"{:d} {:d}\\n\",format(natoms, nelements))\n# ofs.write(\"#END SYSTEM\\n\")\n# \n# ofs.write(\"#KPOINT\\n\")\n# ofs.write(\"{:d} {:d} {:d}\\n\".format(mesh[0], mesh[1], mesh[2]))\n# ofs.write(\"{:d}\\n\".format(len(irr_qpoints)))\n# \n# for iq, q in enumerate(qpoints):\n# ofs.write(\"{:4d}: \".format(iq))\n# for j in range(3):\n# ofs.write(\"{:13.7e} \".format(q[j]))\n# ofs.write(\"\\n\".format())\n# ofs.write(\"#END KPOINT\\n\")\n# \n# #ofs.write(\"#SMEARING\\n\".format())\n# #ofs.write(\"#END SMEARING\\n\".format())\n# \n# ofs.write(\"##END GENERAL information\\n\")\n# \n# # --- phonon\n# frequencies = np.sqrt(abs(f2s)) * np.sign(f2s)\n# ofs.write(\"##Phonon Frequency\\n\")\n# ofs.write(\"#K-point (irreducible), Branch, Omega (THz)\\n\")\n# for iq, q in enumerate(qpoints):\n# ofs.write(\"{:4d} \".format(iq))\n# for im, freq in enumerate(frequencies[iq]):\n# ofs.write(\"{:4d} {:4d} {:15.7f}\\n\".format(\n# iq+1, im+1, freq))\n# ofs.write(\"##END Phonon Frequency\\n\")\n# \n# 
ofs.close()\n\n\ndef initialize_scattering_file(mesh, nq, OFILE=OFILE_SCATTERING):\n \"\"\"Make file of scattering rates\n Parameters\n ------------\n mesh : array, integer, shape=(3)\n # of q-points\n nq : integer\n # of irreducible q-points\n \"\"\"\n ofs = open(OFILE, \"w\")\n ofs.write(\"# ------------------------------------------\\n\")\n ofs.write(\"# Phonon scattering rate due to an impurity\\n\")\n ofs.write(\"# calculated by phtmat\\n\")\n ofs.write(\"# ------------------------------------------\\n\")\n ofs.write(\"#\\n\")\n ofs.write(\"# Units\\n\")\n ofs.write(\"# frequency, scattering rate [THz]\\n\")\n ofs.write(\"#\\n\")\n ofs.write(\"# q-mesh : \")\n for j in range(3):\n ofs.write(\"{:d} \".format(mesh[j]))\n ofs.write(\"\\n\")\n ofs.write(\"# irreducible q-points : {:d}\\n\".format(nq))\n ofs.write(\"#\\n\")\n ofs.close()\n\ndef dump_scattering_rate(iq, qpoint, im, f2, rscat, OFILE=OFILE_SCATTERING):\n \"\"\"Make file of scattering rates\n Parameters\n ------------\n qs : ndarray, float, shape=(nq, 3)\n q-points\n f2s : ndarray, float, shape=(nq, nmodes)\n Squared frequencies\n rscat : ndarray, float, shape=(nq, nmodes)\n Scattering rate\n OFILE : straing\n Output file name\n \"\"\"\n freq = np.sqrt(abs(f2)) * np.sign(f2)\n ofs = open(OFILE, \"a\")\n if im == 0:\n if iq != 0:\n ofs.write(\"\\n\")\n ofs.write(\"# {:2d} qpoint : \".format(iq))\n for j in range(3):\n ofs.write(\"{:13.7e} \".format(qpoint[j]))\n ofs.write(\"\\n\")\n \n ofs.write(\"{:2d} {:13.3f} \".format(im, freq))\n if rscat is None:\n ofs.write(\"None\\n\")\n else:\n ofs.write(\"{:18.5e}\\n\".format(rscat))\n ofs.close()\n\ndef output_scattering_rates(qs, f2s, rscat, OFILE=OFILE_SCATTERING):\n \"\"\"Make file of scattering rates\n Parameters\n ------------\n ns : integer\n # of qpoints\n qs : ndarray, float, shape=(ns, 3)\n q-points\n f2s : ndarray, float, shape=(ns, nmodes)\n Squared frequencies\n rscat : ndarray, float, shape=(ns, nmodes)\n Scattering rate\n OFILE : string\n Output file name\n \"\"\"\n ns = len(qs)\n nmodes = len(f2s[0])\n freqs = np.sqrt(abs(f2s)) * np.sign(f2s)\n \n ofs = open(OFILE, \"w\")\n ofs.write(\"# ------------------------------------------\\n\")\n ofs.write(\"# Phonon scattering rate due to an impurity\\n\")\n ofs.write(\"# calculated by pyscat\\n\")\n ofs.write(\"# ------------------------------------------\\n\")\n ofs.write(\"#\\n\")\n\n for iq, qq in enumerate(qs):\n ofs.write(\"# {:2d} qpoint : {:13.7f} {:13.7f} {:13.7f}\\n\".format(\n iq, qq[0], qq[1], qq[2]))\n for im, freq in enumerate(freqs[iq]):\n ofs.write(\"{:2d} {:13.3f} \".format(im, freq))\n if rscat[iq,im] is None:\n ofs.write(\"None\\n\")\n else:\n ofs.write(\"{:18.5e}\\n\".format(rscat[iq,im]))\n if iq != ns-1:\n ofs.write(\"\\n\")\n ofs.close()\n print(\" Output: \", OFILE)\n\n\n","sub_path":"pyscat/utils/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"150653831","text":"import numpy as np\nimport tensorflow as tf\nfrom flask import Flask, jsonify, render_template, request\nimport time\n\nfrom mnist import model\n\nX1 = tf.placeholder(\"float\", [None, 784])\nX2 = tf.placeholder(tf.float32, [None, 28, 28, 1])\nsess = tf.Session()\n\n# restore trained data\nwith tf.variable_scope(\"regression\"):\n Y1, variables = model.regression(X1)\nsaver = tf.train.Saver(variables)\nsaver.restore(sess, \"mnist/data/regression.ckpt\")\nprint(\"Regression model restored.\")\n\n\nwith 
tf.variable_scope(\"convolutional\"):\n pkeep = tf.placeholder(tf.float32)\n Y2, Ylogits, variables = model.convolutional(X2, pkeep)\nsaver = tf.train.Saver(variables)\nsaver.restore(sess, \"mnist/data/convolutional.ckpt\")\nprint(\"Convolutional model restored.\")\n\n\ndef regression(input):\n return sess.run(Y1, feed_dict={X1: input}).flatten().tolist()\n\n\ndef convolutional(input):\n return sess.run(Y2, feed_dict={X2: input, pkeep: 1.0}).flatten().tolist()\n\n\n# webapp\napp = Flask(__name__)\n\n\n@app.route('/api/mnist', methods=['POST'])\ndef mnist():\n\n startReg = time.time()\n input1 = ((255 - np.array(request.json, dtype=np.uint8)) / 255.0).reshape(1, 784)\n output1 = regression(input1)\n endReg = time.time()\n\n startConv = time.time()\n input2 = ((255 - np.array(request.json, dtype=np.uint8)) / 255.0).reshape(1, 28, 28, 1)\n output2 = convolutional(input2)\n endConv = time.time()\n\n diffRef = endReg - startReg\n diffConv = endConv - startConv\n\n return jsonify(results=[output1, output2], times=[diffRef, diffConv])\n\n\n@app.route('/')\ndef main():\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"196472732","text":"#!/usr/bin/env python3\nimport os\nimport unittest\nimport tempfile\nimport server as proto3\nfrom flask_migrate import upgrade as db_upgrade\nfrom models.database import ProtocolType, Protocol, DefaultTOP, TOP, Document, DecisionDocument, TodoState, Todo, Decision, MeetingReminder, Error, TodoMail, OldTodo, DefaultMeta, Meta\n\nimport sqlite3\n\ndef _create_db(sql_script, database_file):\n connection = sqlite3.connect(database_file)\n with open(sql_script, \"r\") as script_file:\n connection.executescript(script_file.read())\n connection.close()\n\ndef _upgrade_db(program_dir):\n migrations_path = os.path.join(program_dir, \"migrations\")\n db_upgrade(directory=migrations_path)\n\nclass GeneralTestCase(unittest.TestCase):\n def _general_setup(self):\n self.program_dir = os.getcwd()\n proto3.app.config[\"TESTING\"] = True\n self.app = proto3.app.test_client()\n\n def _create_tempdir(self):\n self.tempdir = tempfile.TemporaryDirectory()\n os.chdir(self.tempdir.name)\n\n def _create_db(self, script_name):\n self.database_file = \"{}/{}\".format(self.tempdir.name, \"test-db.sqlite\")\n _create_db(os.path.join(self.program_dir, \"dbdumps\", \"{}.sql\".format(script_name)), self.database_file)\n proto3.app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///{}\".format(self.database_file)\n with proto3.app.app_context():\n _upgrade_db(self.program_dir)\n \n def _general_teardown(self):\n self.tempdir.cleanup()\n os.chdir(self.program_dir)\n \n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\nSTATUS_OK = 200\nSTATUS_REDIRECT = 302\nSTATUS_METHOD = 405\n\nclass FullDBAnonymousTestCase(GeneralTestCase):\n EXPECTED_SIMPLE_RESULTS = {\n \"/\": (STATUS_OK, STATUS_METHOD),\n \"/documentation\": (STATUS_REDIRECT, STATUS_METHOD),\n \"/types/list\": (STATUS_REDIRECT, STATUS_METHOD),\n \"/protocols/list\": (STATUS_OK, STATUS_METHOD),\n \"/todos/list\": (STATUS_REDIRECT, STATUS_METHOD),\n \"/decisions/list\": (STATUS_OK, STATUS_METHOD),\n \"/errors/list\": (STATUS_REDIRECT, STATUS_METHOD),\n \"/todomails/list\": (STATUS_REDIRECT, STATUS_METHOD),\n \"/login\": (STATUS_OK, STATUS_OK),\n \"/logout\": (STATUS_REDIRECT, STATUS_METHOD),\n }\n\n def 
setUp(self):\n self._general_setup()\n self._create_tempdir()\n self._create_db(\"full\")\n\n def tearDown(self):\n self._general_teardown()\n\n def test_simple_status(self):\n for route in self.EXPECTED_SIMPLE_RESULTS:\n get_result = self.app.get(route)\n post_result = self.app.post(route)\n expected_get, expected_post = self.EXPECTED_SIMPLE_RESULTS[route]\n assert get_result.status_code == expected_get\n assert post_result.status_code == expected_post\n\n def test_protocoltypes(self):\n with proto3.app.app_context():\n new_route = \"/type/new\"\n assert self.app.get(new_route).status_code == STATUS_REDIRECT\n assert self.app.post(new_route).status_code == STATUS_REDIRECT\n protocoltypes = ProtocolType.query.all()\n for protocoltype in protocoltypes:\n show_route = \"/type/show/{}\".format(protocoltype.id)\n assert self.app.get(show_route).status_code == STATUS_REDIRECT\n assert self.app.post(show_route).status_code == STATUS_METHOD\n edit_route = \"/type/edit/{}\".format(protocoltype.id)\n assert self.app.get(edit_route).status_code == STATUS_REDIRECT\n assert self.app.post(edit_route).status_code == STATUS_REDIRECT\n delete_route = \"/type/delete/{}\".format(protocoltype.id)\n assert self.app.get(delete_route).status_code == STATUS_REDIRECT\n assert self.app.post(delete_route).status_code == STATUS_METHOD\n new_reminder_route = \"/type/reminders/new/{}\".format(protocoltype.id)\n assert self.app.get(new_reminder_route).status_code == STATUS_REDIRECT\n assert self.app.post(new_reminder_route).status_code == STATUS_REDIRECT\n new_top_route = \"/type/tops/new/{}\".format(protocoltype.id)\n assert self.app.get(new_top_route).status_code == STATUS_REDIRECT\n assert self.app.post(new_top_route).status_code == STATUS_REDIRECT\n new_meta_route = \"/defaultmeta/new/{}\".format(protocoltype.id)\n assert self.app.get(new_meta_route).status_code == STATUS_REDIRECT\n assert self.app.post(new_meta_route).status_code == STATUS_REDIRECT\n\n def test_protocols(self):\n with proto3.app.app_context():\n new_route = \"/protocol/new\"\n assert self.app.get(new_route).status_code == STATUS_REDIRECT\n assert self.app.post(new_route).status_code == STATUS_REDIRECT\n protocols = Protocol.query.all()\n for protocol in protocols:\n visible = protocol.protocoltype.has_public_view_right(None, check_networks=False)\n state_ok_or_redirect = STATUS_OK if visible else STATUS_REDIRECT\n show_route = \"/protocol/show/{}\".format(protocol.id)\n assert self.app.get(show_route).status_code == state_ok_or_redirect\n assert self.app.post(show_route).status_code == STATUS_METHOD\n \n \n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"37400003","text":"import json\nfrom asgiref.sync import async_to_sync, sync_to_async\nfrom channels.generic.websocket import JsonWebsocketConsumer, AsyncJsonWebsocketConsumer\nfrom django.contrib.auth import authenticate\nfrom django.http import HttpRequest\nfrom django.contrib.gis.geos import Point\n\nfrom demo.api.utils import getWebsocketResponseDict, getFactoryChannelGroupName\nfrom demo.api.models import Factory\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass FactoryConsumer(AsyncJsonWebsocketConsumer):\n groups = [\"factory\"]\n factory = None\n\n async def connect(self):\n query_string = self.scope[\"query_string\"]\n\n query_string_arr = 
query_string.decode(\"utf-8\").split(\"=\")\n\n if query_string_arr and query_string_arr[0] == \"distance\":\n self.distance = float(query_string_arr[1])\n\n if \"factory_id\" in self.scope[\"url_route\"][\"kwargs\"]:\n self.factory_id = self.scope[\"url_route\"][\"kwargs\"][\"factory_id\"]\n\n self.factory = await sync_to_async(Factory.objects.get)(pk=self.factory_id)\n\n await self.accept()\n\n async def disconnect(self, code):\n pass\n\n # Receive message from WebSocket\n async def receive_json(self, content):\n if not content:\n return\n\n print(\"content\", content)\n\n if content.get(\"type\", None) == \"location_update\":\n\n latitude = content[\"latitude\"]\n\n longitude = content[\"longitude\"]\n\n user_id = content[\"user_id\"]\n\n # Send message to room group\n await self.channel_layer.send(\n self.channel_name,\n {\n \"type\": \"location_update\",\n \"latitude\": latitude,\n \"longitude\": longitude,\n \"user_id\": user_id,\n },\n )\n else:\n await self.send_json((\"_\", \"Unknown type provided\", False))\n\n # Receive message from room group\n async def location_update(self, event):\n message = {}\n\n print(\"event\", event)\n\n latitude = event[\"latitude\"]\n longitude = event[\"longitude\"]\n user_id = event[\"user_id\"]\n\n is_inside = False\n\n point = Point(latitude, longitude, srid=4326)\n\n if hasattr(self, \"factory\"):\n if not hasattr(self, \"distance\"):\n if self.factory.geofence.contains(point):\n is_inside = True\n else:\n transformedFence = self.factory.geofence.transform(900913, clone=True)\n transformedPoint = point.transform(900913, clone=True)\n if transformedFence.distance(transformedPoint) <= self.distance:\n is_inside = True\n\n # Send message to WebSocket\n await self.send_json(\n (\n \"location_update\",\n {\n \"latitude\": latitude,\n \"longitude\": longitude,\n \"user_id\": user_id,\n \"is_inside\": is_inside,\n },\n True,\n )\n )\n\n async def decode_json(self, text_data):\n is_authenticated = True\n if not getattr(self, \"user\", None):\n is_authenticated = False\n\n try:\n return json.loads(text_data)\n except json.decoder.JSONDecodeError as e:\n logger.error(e)\n self.send_json((\"_\", \"Bad json\", False))\n except Exception as e:\n logger.error(e)\n self.send_json((\"_\", \"Internal Error\", False))\n\n async def encode_json(self, content, **kwargs):\n is_authenticated = True\n if not getattr(self, \"user\", None):\n is_authenticated = False\n\n (type_name, content_data, is_success) = content\n\n return json.dumps(\n getWebsocketResponseDict(\n type_name, content_data, is_success, is_authenticated\n )\n )\n","sub_path":"demo/api/consumers/factory_consumer.py","file_name":"factory_consumer.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"363346","text":"from Environment.MapHandler import MapHandler\nfrom Environment.StateUpdate import StateUpdateHandler\nfrom Environment.KeyboardController import KeyboardController\nimport numpy as np\nimport torch\nimport time\nfrom multiprocessing import Process\nfrom threading import Thread\nfrom GameConnection import GameConnection\n\nclass DungeonEnv:\n\tdef __init__(self, waitingTime):\n\t\tself.done = False\n\t\tself.Hp = 0\n\t\tself.PlayerClass = \"\"\n\t\tself.PlayerRace = \"\"\n\t\tself.God = \"\"\n\t\tself.Dexterity = 0\n\t\tself.Intelligence = 0\n\t\tself.Strength = 0\n\t\tself.HaveOrb = False\n\t\tself.Hunger = 0\n\t\tself.Turns = 0\n\t\tself.Where = \"\"\n\t\tself.LevelProgress = 0\n\t\tself.Level = 
0\n\t\tself.ExploringDone = False\n\t\tself.Map = MapHandler()\n\t\tself.Keyboard = KeyboardController()\n\t\tself.actionCount = 0\n\t\tself.ValidMoves = 0\n\t\tself.InvalidMoves = 0\n\t\tself.MessagesReceived = 0\n\t\tself.WaitingTime = waitingTime\n\t\tself.GameConn = None\n\t\tself.MessageConn = None\n\t\tself.FoodKey = ''\n\t\tself.StateUpdate = StateUpdateHandler(self)\n\t\tself.ResetCount = 0\n\t\tpass\n\n\tdef step(self,action):\n\t\ttmpLevelProgress = self.LevelProgress\n\t\ttmpLevel = self.Level\n\t\ttmpTurns = self.Turns\n\t\ttmpMap = self.Map.GetMapExploration()\n\t\ttmpMapDepth = self.Map.currentLevel\n\t\ttmpHp = self.Hp\n\n\n\t\tself.Keyboard.PressSpace()\n\n\t\tactionKey = self.Keyboard.ExecutAction(action, self.Turns)\n\t\tself.actionCount = self.actionCount + 1\n\t\t\n\t\ttime.sleep(1)\n\t\t\n\t\tstartTime = time.time()\n\t\twhile(self.MessagesReceived == 0):\n\t\t\tif(time.time() - startTime > self.WaitingTime):\n\t\t\t\tself.ResetCount = self.ResetCount + 1\n\t\t\t\tself.GameConn.terminate()\n\t\t\t\tself.GameConn.join()\n\t\t\t\tself.StateUpdate.stop()\n\t\t\t\ttime.sleep(5)\n\t\t\t\tself.Keyboard.UpgradeStats()\n\t\t\t\tself.GameConn = Process(target=GameConnection().start)\n\t\t\t\tself.StateUpdate = StateUpdateHandler(self)\n\t\t\t\tself.MessageConn = Thread(target= self.StateUpdate.start)\n\t\t\t\tself.GameConn.start()\n\t\t\t\tself.MessageConn.start()\n\t\t\t\tbreak\n\t\t\n\t\tif(time.time() - startTime < self.WaitingTime):\n\t\t\tself.ResetCount = 0\n\n\t\tself.MessagesReceived = 0\n\n\t\tif(self.Hunger < 3):\n\t\t\tself.Keyboard.eat(self.FoodKey)\n\n\t\tans = self.GetReward(tmpLevelProgress, tmpLevel, tmpTurns, tmpMap, tmpMapDepth ,tmpHp, actionKey)\n\n\t\tif(self.ExploringDone):\n\t\t\tself.Keyboard.GoDownStairs()\n\n\t\tprint(\"Reward: \" + str(ans))\n\n\t\treturn torch.tensor(ans, dtype = torch.float32)\n\n\tdef reset(self):\n\t\tself.done = False\n\t\tself.Hp = 0\n\t\tself.PlayerClass = \"\"\n\t\tself.PlayerRace = \"\"\n\t\tself.God = \"\"\n\t\tself.Dexterity = 0\n\t\tself.Intelligence = 0\n\t\tself.Strength = 0\n\t\tself.HaveOrb = False\n\t\tself.Hunger = 0\n\t\tself.Turns = 0\n\t\tself.Where = \"\"\n\t\tself.LevelProgress = 0\n\t\tself.Level = 0\n\t\tself.ExploringDone = False\n\t\tself.Map = MapHandler()\n\t\tpass\n\n\tdef getState(self):\n\t\tplayerStats = [self.Hp, self.Dexterity, self.Intelligence, self.Strength, self.Hunger, self.HaveOrb, int(self.Turns), self.LevelProgress, self.Level, self.ExploringDone]\n\t\tmapState = self.Map.GetState(20,20)\n\t\tstate = playerStats + mapState\n\t\tstate = self.ClearState(state)\n\t\tstate = np.array(state)\n\t\tans = torch.from_numpy(state).type(torch.FloatTensor)\n\n\t\treturn ans\n\t\t\n\tdef ClearState(self, state):\n\t\tans = []\n\t\tfor item in state:\n\t\t\tif isinstance(item, int):\n\t\t\t\tans.append(item)\n\t\t\telif isinstance(item, list):\n\t\t\t\tfor subitem in item:\n\t\t\t\t\tif isinstance(subitem, str):\n\t\t\t\t\t\tans.append(ord(subitem[0]))\n\t\t\t\tpass\n\n\t\treturn ans\n\n\tdef GetReward(self,tmpLevelProgress, tmpLevel, tmpTurns, tmpMap, tmpMapDepth, tmpHp, actionKey):\n\n\t\t#Consider player xp level\n\t\tans = self.LevelProgress - tmpLevelProgress\n\t\tif(self.Level != tmpLevel):\n\t\t\tans = (100+self.LevelProgress) - tmpLevelProgress\n\n\t\t#Consider player hp\n\t\tans = ans - (tmpHp - self.Hp)\n\n\t\tif(actionKey == 'e'):\n\t\t\tif(self.Hunger > 5):\n\t\t\t\treturn -5\n\n\t\t#Consider if the player made or not a valid mode in the game\n\t\tif ans==0 and self.Turns == tmpTurns:\n\t\t\tans 
= -2\n\t\t\tself.InvalidMoves = self.InvalidMoves + 1\n\t\telse:\n\t\t\tself.ValidMoves = self.ValidMoves + 1\n\n\t\t#Consider if the player explored the map or not, and if it was went deeper into the dungeon\n\t\tif tmpMapDepth != self.Map.currentLevel:\n\t\t\tans = ans + 10\n\t\telse:\n\t\t\tans = ans + (self.Map.GetMapExploration() - tmpMap)\n\t\t\tif(self.Map.GetMapExploration() - tmpMap !=0):\n\t\t\t\tprint(\"Map exploration: \" + str(self.Map.GetMapExploration() - tmpMap))\n\t\t\telse:\n\t\t\t\tans = ans - 1\n\n\t\treturn ans\n\n\tdef ChooseStatToUpgrade(self):\n\t\tself.Keyboard.UpgradeStats()","sub_path":"src/ReinforcementBot/Environment/DCSSEnv.py","file_name":"DCSSEnv.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"621517511","text":"#!/usr/bin/env python\n\n## Copyright 2009-2020 Intel Corporation\n## SPDX-License-Identifier: Apache-2.0\n\nimport os\nfrom glob import glob\nfrom shutil import which\nimport argparse\n\n# Parse the command-line arguments\nparser = argparse.ArgumentParser(description='Compares images produced by the library with generated baseline images.')\nparser.usage = '\\rIntel(R) Open Image Denoise - Regression Test\\n' + parser.format_usage()\nparser.add_argument('command', type=str, nargs='*', choices=['generate', 'test'], help='tasks to perform')\nparser.add_argument('--filter', '-f', nargs='*', choices=['RT', 'RTLightmap'], default=None, help='filters to test')\nparser.add_argument('--build_dir', '-B', type=str, default='build', help='build directory')\nparser.add_argument('--data_dir', '-D', type=str, default=os.path.join('training', 'data'), help='directory of datasets (e.g. training, validation, test)')\nparser.add_argument('--results_dir', '-R', type=str, default=os.path.join('training', 'results'), help='directory of training results')\nparser.add_argument('--baseline_dir', '-G', type=str, default=os.path.join('training', 'infer'), help='directory of generated baseline images')\nparser.add_argument('--arch', '-a', type=str, nargs='*', choices=['native', 'pnr', 'hsw', 'skx', 'knl'], help='CPU architectures to test')\nparser.add_argument('--log', '-l', type=str, default='regression.log', help='output log file')\ncfg = parser.parse_args()\n\nif not cfg.arch:\n cfg.arch = ['native']\n # Detect whether Intel(R) Software Development Emulator (SDE) is installed\n # See: https://software.intel.com/en-us/articles/intel-software-development-emulator\n if which('sde'):\n cfg.arch += ['pnr', 'hsw', 'skx', 'knl'] # Penryn, Haswell, Skylake-X, Knights Landing\n\n# Runs tests for the specified model\ndef test(result, filter, features, dataset):\n # Generate baseline images\n if 'generate' in cfg.command:\n print('Generate:', result)\n infer_cmd = os.path.join('training', 'infer.py')\n infer_cmd += ' -D \"%s\" -R \"%s\" -O \"%s\" -i %s -r %s -F exr -d cpu' % (cfg.data_dir, cfg.results_dir, cfg.baseline_dir, dataset, result)\n \n os.system('echo \"%s\" >> %s' % (infer_cmd, cfg.log))\n infer_cmd += ' >> %s' % cfg.log\n\n if os.system(infer_cmd) != 0:\n print('Error: inference failed')\n exit(1)\n\n if 'test' in cfg.command:\n main_feature = features[0]\n\n # Gather the list of images\n dataset_dir = os.path.join(cfg.data_dir, dataset)\n image_filenames = sorted(glob(os.path.join(dataset_dir, '**', '*.%s.exr' % main_feature), recursive=True))\n image_names = [os.path.relpath(filename, dataset_dir).rsplit('.', 3)[0] for filename in image_filenames]\n\n # Iterate over 
architectures\n for arch in cfg.arch:\n # Iterate over the images\n for image_name in image_names:\n # Iterate over memory usages (tiling)\n for memory_use in ['himem', 'lomem']:\n print('Test:', result, arch, image_name, memory_use)\n denoise_cmd = os.path.join(cfg.build_dir, 'denoise')\n\n ref_filename = os.path.join(cfg.baseline_dir, dataset, '%s.%s.%s.exr' % (image_name, result, main_feature))\n if not os.path.isfile(ref_filename):\n print('Error: missing baseline image (run with \"generate\" first)')\n exit(1)\n denoise_cmd += ' -f %s -v 2 --ref %s' % (filter, ref_filename)\n\n for feature in features:\n feature_filename = os.path.join(dataset_dir, image_name) + '.%s.exr' % feature\n denoise_cmd += ' --%s %s' % (feature, feature_filename)\n\n if memory_use == 'himem':\n denoise_cmd += ' --maxmem 16384'\n elif memory_use == 'lomem':\n denoise_cmd += ' --maxmem 300'\n\n if arch != 'native':\n denoise_cmd = ('sde -%s -- ' % arch) + denoise_cmd\n\n os.system('echo >> %s' % cfg.log)\n os.system('echo \"%s\" >> %s' % (denoise_cmd, cfg.log))\n denoise_cmd += ' >> %s' % cfg.log\n\n if os.system(denoise_cmd) != 0:\n exit(1)\n\n# Filter: RT\nif not cfg.filter or 'RT' in cfg.filter:\n dataset = 'rt_test'\n test('rt_hdr_alb_nrm', 'RT', ['hdr', 'alb', 'nrm'], dataset)\n test('rt_hdr_alb', 'RT', ['hdr', 'alb'], dataset)\n test('rt_hdr', 'RT', ['hdr'], dataset)\n test('rt_ldr_alb_nrm', 'RT', ['ldr', 'alb', 'nrm'], dataset)\n test('rt_ldr_alb', 'RT', ['ldr', 'alb'], dataset)\n test('rt_ldr', 'RT', ['ldr'], dataset)\n\n# Filter: RTLightmap\nif not cfg.filter or 'RTLightmap' in cfg.filter:\n dataset = 'rtlightmap_test'\n test('rtlightmap_hdr', 'RTLightmap', ['hdr'], dataset)\n\nif 'test' in cfg.command:\n print('Success: all tests passed')","sub_path":"scripts/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"617495504","text":"import zipfile\r\nimport os\r\nfrom pandas import Series\r\nimport io \r\n\r\nimport torch\r\nimport torch.utils.data as data \r\nimport numpy as np \r\nimport pickle\r\nimport contextlib\r\n\r\n\r\nfrom tokenization.tokenization_bert import BertTokenizer\r\n\r\ndef check_file_exist(filename):\r\n if not os.path.isfile(filename):\r\n # raise IOError(msg_tmpl.format(filename))\r\n return False\r\n return True\r\n\r\n\r\nclass TxtReader(object):\r\n def __init__(self):\r\n super(TxtReader, self).__init__()\r\n self.id_context = Series()\r\n\r\n def read(self, txt_file, position, pid):\r\n key_name = txt_file + \"_\" + str(pid)\r\n\r\n if key_name in self.id_context:\r\n self.id_context[key_name].seek(position, os.SEEK_SET)\r\n return self.id_context[key_name].readline()\r\n else:\r\n file_handle = open(txt_file, 'r',encoding='utf-8')\r\n self.id_context[key_name] = file_handle\r\n self.id_context[key_name].seek(position, os.SEEK_SET)\r\n return self.id_context[key_name].readline()\r\n\r\n\r\nclass ZipReader(object):\r\n def __init__(self):\r\n super(ZipReader, self).__init__()\r\n self.id_context = Series()\r\n\r\n def read(self, zip_file, image_name, pid):\r\n key_name = zip_file + \"_\" + str(pid)\r\n\r\n if key_name in self.id_context:\r\n with self.id_context[key_name].open(image_name) as f:\r\n tmp = f.read()\r\n return tmp\r\n else:\r\n file_handle = zipfile.ZipFile(zip_file, 'r', zipfile.ZIP_LZMA)\r\n self.id_context[key_name] = file_handle\r\n return self.id_context[key_name].read(image_name)\r\n\r\nclass 
GoogleDataset(data.Dataset):\r\n \"\"\"\r\n Load Googel conption dataset, include captions and image features.\r\n \"\"\"\r\n def __init__(self,data_path,data_split,token_config=\"bert-base-uncased\"):\r\n super(GoogleDataset,self).__init__()\r\n assert data_split in [\"train\", \"val\"]\r\n\r\n self.data_split = data_split\r\n self.data_path = data_path\r\n \r\n self.zipreader = ZipReader()\r\n self.language_reader = TxtReader()\r\n self.create_language_id2val()\r\n self.language_ids = self.language_ids\r\n\r\n self.tokenizer = BertTokenizer.from_pretrained(token_config)\r\n\r\n \r\n def create_language_id2val(self):\r\n self.language_ids =[]\r\n if self.data_split == \"train\":\r\n if check_file_exist(os.path.join(self.data_path,\"train_language_all_ids.pkl\")):\r\n self.language_ids = pickle.load(open(os.path.join(self.data_path,\"train_language_all_ids.pkl\"),'rb'))\r\n else:\r\n for file in os.listdir(self.data_path):\r\n if \"train_\" in file and file.endswith(\".txt\"):\r\n data_full_path = os.path.join(self.data_path,file)\r\n with open(data_full_path,'r', encoding='utf-8') as f:\r\n file_pos = f.tell()\r\n self.language_ids.extend([file+\"#\"+str(file_pos)])\r\n while f.readline() !=\"\":\r\n file_pos = f.tell()\r\n self.language_ids.extend([file+\"#\"+str(file_pos)])\r\n pickle.dump(self.language_ids,open(os.path.join(self.data_path,\"train_language_all_ids.pkl\"),'wb'))\r\n \r\n elif self.data_split == \"val\":\r\n if check_file_exist(os.path.join(self.data_path,\"val_language_all_ids.pkl\")):\r\n self.language_ids = pickle.load(open(os.path.join(self.data_path,\"val_language_all_ids.pkl\"),'rb'))\r\n else:\r\n for file in os.listdir(self.data_path):\r\n if \"val_\" in file and file.endswith(\".txt\"):\r\n data_full_path = os.path.join(self.data_path,file)\r\n with open(data_full_path,'r', encoding='utf-8') as f:\r\n file_pos = f.tell()\r\n self.language_ids.extend([file+\"#\"+str(file_pos)])\r\n while f.readline() !=\"\":\r\n file_pos = f.tell()\r\n self.language_ids.extend([file+\"#\"+str(file_pos)])\r\n pickle.dump(self.language_ids,open(os.path.join(self.data_path,\"val_language_all_ids.pkl\"),'wb'))\r\n\r\n \r\n def tokenize(self,sentence,max_length=20):\r\n tokens_a = self.tokenizer.tokenize(sentence)\r\n sentence_length = 0 \r\n\r\n if len(tokens_a) > max_length:\r\n tokens_a = tokens_a[:max_length]\r\n sentence_length = max_length\r\n else:\r\n padding = [\"[PAD]\"]*(max_length-len(tokens_a))\r\n sentence_length =len(tokens_a)\r\n tokens_a.extend(padding)\r\n\r\n tokens = self.tokenizer.convert_tokens_to_ids(tokens_a)\r\n return tokens,sentence_length\r\n\r\n def process_line(self,line):\r\n ids,caption,image=line.strip().split(\"\\t\")\r\n id=ids.split(\"id:\")[-1]\r\n cap = caption.split(\"caption:\")[-1]\r\n feature = image.split(\"Image:\")[-1]\r\n return id,cap,feature\r\n\r\n def __getitem__(self,index):\r\n language_id = self.language_ids[index] \r\n txt_file,position = language_id.split(\"#\")\r\n tmp_line = self.language_reader.read(os.path.join(self.data_path,txt_file),int(position),0)\r\n print(\"tmp_line=\",tmp_line)\r\n ids,cap,features_name = self.process_line(tmp_line)\r\n tokens,sentence_length = self.tokenize(cap)\r\n tokens = torch.Tensor(tokens)\r\n sentence_length = torch.Tensor([sentence_length])\r\n feature_name = features_name.split(\".jpg\")[0]+\".npy\"\r\n tmp_image =self.zipreader.read(os.path.join(self.data_path,self.data_split+\"_features.zip\"),features_name,0)\r\n\r\n with contextlib.closing(io.BytesIO(tmp_image)) as f:\r\n img = 
np.load(f, allow_pickle=True).item()\r\n            features, boxes = torch.from_numpy(img['features']), torch.from_numpy(img['boxes'])\r\n\r\n        return features,tokens,index,torch.Tensor([int(ids)])\r\n    \r\n    def __len__(self):\r\n        return len(self.language_ids)\r\n\r\ndef collate_fn(data):\r\n    \"\"\"Build mini-batch tensors from a list of (image, caption) tuples.\r\n    Args:\r\n        data: list of (image, caption) tuple.\r\n            - image: torch tensor of shape (3, 256, 256).\r\n            - caption: torch tensor of shape (?); variable length.\r\n\r\n    Returns:\r\n        images: torch tensor of shape (batch_size, 3, 256, 256).\r\n        targets: torch tensor of shape (batch_size, padded_length).\r\n        lengths: list; valid length for each padded caption.\r\n    \"\"\"\r\n    # Sort a data list by caption length\r\n    data.sort(key=lambda x: len(x[1]), reverse=True)\r\n    images, captions, ids, img_ids = zip(*data)\r\n\r\n    # Merge images (convert tuple of 3D tensor to 4D tensor)\r\n    images = torch.stack(images, 0)\r\n\r\n    # Merge captions (convert tuple of 1D tensor to 2D tensor)\r\n    lengths = [len(cap) for cap in captions]\r\n    targets = torch.zeros(len(captions), max(lengths)).long()\r\n    for i, cap in enumerate(captions):\r\n        end = lengths[i]\r\n        targets[i, :end] = cap[:end]\r\n\r\n    return images, targets, lengths, ids\r\n\r\ndef get_precomp_loader(data_path,data_split,batch_size=128,shuffle=True,num_workers=4):\r\n    dset = GoogleDataset(data_path,data_split)\r\n\r\n    data_loader = torch.utils.data.DataLoader(dataset=dset,batch_size=batch_size,shuffle=shuffle,pin_memory=True,collate_fn=collate_fn)\r\n    return data_loader\r\n\r\n\r\ndef get_loaders(data_path,batch_size,workers):\r\n    train_loader = get_precomp_loader(data_path,\"train\",batch_size)\r\n    val_loader = get_precomp_loader(data_path,\"val\",batch_size)\r\n    \r\n    return train_loader,val_loader\r\n","sub_path":"data_new.py","file_name":"data_new.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"645459282","text":"#*************************************************************************************\r\n#\r\n# FILENAME : Tennis_View.py\r\n#\r\n# SOURCE : Kata Task\r\n#\r\n# COPYRIGHT :(C) GW\r\n#\r\n#************************************************************************************\r\n#\r\n# DESCRIPTION:\r\n#\tTennis_View.py contains all capability which interacts with the user\r\n#\t\r\n#*************************************************************************************\r\n\r\nfrom Tkinter import *\r\nfrom ttk import *\r\nimport threading\r\nimport Tkinter as tk\r\n\r\n# Class which manages all other view objects takes action from the controller\r\nclass View_Capability_Handler(threading.Thread):\r\n\t\r\n\tdef __init__(self, controller):\r\n\t\t\r\n\t\t# Dictionary holding all views\r\n\t\tself.frames = {}\r\n\t\t\r\n\t\t# Attribute to hold controller class instance\r\n\t\tself.controller_instance = controller\r\n\t\t\r\n\t\t# Setup tkinter root object\r\n\t\tself.root = Tk()\r\n\t\tself.root.title(\"Tennis Game\")\r\n\t\tself.root.protocol(\"WM_DELETE_WINDOW\", self.controller_instance.request_view_termination)\r\n\t\tself.root.geometry(\"500x675\")\r\n\t\tself.root.resizable(width=False, height=False)\r\n\t\tself.root.grid_rowconfigure(0, weight=1)\r\n\t\tself.root.grid_columnconfigure(0, weight=1)\r\n\t\r\n\t# Controller to call to run up view\r\n\tdef start_view(self):\r\n\t\tthreading.Thread.__init__(self)\r\n\t\tself.start()\r\n\t\r\n\tdef run(self):\r\n\t\t\r\n\t\tself.main_frame = 
Frame(self.root).grid(row=1, sticky=\"nsew\")\r\n\t\t\r\n\t\t# For each view init instance and assign to attribute dict container\r\n\t\tfor view in (Co_Op_Setup_View, Single_Player_Setup_View, Title_Screen_View, Main_Tennis_View):\r\n\r\n\t\t\tframe = view(self.main_frame, self.controller_instance)\r\n\r\n\t\t\tself.frames[view] = frame\r\n\t\t\t\r\n\t\t\tframe.grid(column=0,row=0, sticky=(N,W,E,S) )\r\n\t\t\r\n\t\t# Load initial frame\r\n\t\tself.controller_instance.request_frame(Title_Screen_View)\r\n\t\t\r\n\t\t# On any key press call move_player method\r\n\t\tself.root.bind(\"\", self.frames[Main_Tennis_View].move_player)\r\n\t\t\r\n\t\tself.root.mainloop()\r\n\t\r\n\t# Method for handler to return all views to controller\r\n\tdef get_all_views(self):\r\n\t\treturn self.frames\r\n\r\n\tdef get_view_root_object(self):\r\n\t\treturn self.root\r\n\r\n\t# Method which adds label with error message to view\r\n\tdef ammend_error_to_frame(self, cont, error_message):\r\n\t\tself.frames[cont].ammend_error(error_message)\r\n\r\n\r\nclass Main_Tennis_View(Frame):\r\n\r\n\tdef move_player(self, event):\r\n\t\t# Get key press\r\n\t\tkey = event.keysym\r\n\t\t# Key has been pressed move ball\r\n\t\tself.move_Ball()\r\n\t\t# Get player object from controller\r\n\t\tplayer_object = self.view_controller.request_player_object(key)\r\n\t\t# Check if player_object is None - This occurs if pressed key is not assoicated with a player\r\n\t\tif player_object is not None:\r\n\t\t\t# Get overlap of player\r\n\t\t\toverlap_player = self.tennis_lawn_canvas.find_overlapping(*self.tennis_lawn_canvas.coords(player_object.get_player_view_object()))\r\n\t\t\t# Get overlap of ball\r\n\t\t\toverlap_tennis_ball = self.tennis_lawn_canvas.find_overlapping(*self.tennis_lawn_canvas.coords(self.tennis_ball))\r\n\t\t\t# Get line overlap\r\n\t\t\tgoal_1_overlap = self.tennis_lawn_canvas.find_overlapping(*self.tennis_lawn_canvas.coords(self.goal_2))\r\n\t\t\t# Get line overlap\r\n\t\t\tgoal_2_overlap = self.tennis_lawn_canvas.find_overlapping(*self.tennis_lawn_canvas.coords(self.goal_1))\r\n\t\t\t# Check if ball on either line\r\n\t\t\tif(overlap_tennis_ball == goal_1_overlap or overlap_tennis_ball == goal_2_overlap):\r\n\t\t\t\t# Check if player has hit ball\r\n\t\t\t\tif(self.last_contact_id is not None):\r\n\t\t\t\t\t# Player has scored request controller process and update model\r\n\t\t\t\t\tself.view_controller.request_player_score(self.last_contact_id)\r\n\t\t\t\t\t# Move ball to original position\r\n\t\t\t\t\tself.reset_ball()\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.reset_ball()\r\n\t\t\t# Check if player hit ball\r\n\t\t\tif(overlap_player == overlap_tennis_ball):\r\n\t\t\t\t# Assign hit player and invert ball projectory\r\n\t\t\t\tself.last_contact_id = player_object.get_player_id\r\n\t\t\t\tself.ball_y = self.ball_y * -1\r\n\t\t\t\tself.ball_x = self.ball_x * -1\r\n\t\t\t# Get player view object\r\n\t\t\tplayer_view_object = player_object.get_player_view_object()\r\n\t\t\t# Move player according to key press\r\n\t\t\tif key == player_object.get_left_movement_key():\r\n\t\t\t\tself.tennis_lawn_canvas.move(player_view_object, -20, 0) \r\n\t\t\telif key == player_object.get_right_movement_key():\r\n\t\t\t\tself.tennis_lawn_canvas.move(player_view_object, 20, 0) \r\n\t\t\telif key == player_object.get_up_movement_key():\r\n\t\t\t\tself.tennis_lawn_canvas.move(player_view_object, 0, -20) \r\n\t\t\telif key == player_object.get_down_movement_key():\r\n\t\t\t\tself.tennis_lawn_canvas.move(player_view_object, 0, 20) 
\r\n\t\r\n\t\r\n\tdef get_object_coordinates(self, object):\r\n\t\treturn self.tennis_lawn_canvas.coords(object)\r\n\t\r\n\tdef move_Ball(self):\r\n\t\tself.tennis_lawn_canvas.move('tennis_ball', self.ball_x, self.ball_y)\r\n\t\r\n\tdef reset_ball(self):\r\n\t\tself.tennis_lawn_canvas.delete(self.tennis_ball)\r\n\t\tself.tennis_ball = self.tennis_lawn_canvas.create_oval(10,10,20,20, fill=\"blue\", tag=('tennis_ball'))\r\n\t\tself.tennis_lawn_canvas.move('tennis_ball', 240, 240)\r\n\t\r\n\tdef draw_pitch(self):\r\n\t\tself.tennis_lawn_canvas = Canvas(self, bg='green', width=500, height=500)\r\n\t\tself.tennis_lawn_canvas.grid(row=1, column=0, pady=(0, 20))\r\n\t\tself.tennis_lawn_canvas.create_rectangle(50, 50, 440, 440, outline='white', width=7)\r\n\t\tself.tennis_lawn_canvas.create_rectangle(50, 50, 440, 245, outline='white', width=7)\r\n\t\tself.tennis_lawn_canvas.create_rectangle(50, 50, 440, 160, outline='white', width=7)\r\n\t\tself.tennis_lawn_canvas.create_rectangle(50, 330, 440, 160, outline='white', width=7)\r\n\t\tself.tennis_lawn_canvas.create_rectangle(245, 245, 440, 160, outline='white', width=7)\r\n\t\tself.tennis_lawn_canvas.create_rectangle(245, 330, 440, 160, outline='white', width=7)\r\n\t\tself.tennis_lawn_canvas.create_rectangle(0, 0, 380, 7, fill='black', tags=('net'))\r\n\t\tself.goal_1 = self.tennis_lawn_canvas.create_rectangle(0, 0, 400, 7, fill='white', tags=('goal_1'))\r\n\t\tself.goal_2 = self.tennis_lawn_canvas.create_rectangle(0, 0, 400, 7, fill='white', tags=('goal_2'))\r\n\t\tself.tennis_lawn_canvas.move('net', 55, 240)\r\n\t\tself.tennis_lawn_canvas.move('goal_1', 55, 50)\r\n\t\tself.tennis_lawn_canvas.move('goal_2', 55, 440)\r\n\t\r\n\tdef create_player(self, player_name, player_index, color=\"red\"):\r\n\t\tif player_index == 0:\r\n\t\t\treturn self.tennis_lawn_canvas.create_oval(50,50,70,70, fill=color, tags=('player_one'))\r\n\t\telse:\r\n\t\t\treturn self.tennis_lawn_canvas.create_oval(50,50,70,70, fill=color, tag=('player_two'))\r\n\t\r\n\tdef set_player_starting_positions(self, player_index):\r\n\t\tif player_index == 0:\r\n\t\t\tself.tennis_lawn_canvas.move('player_one', 20, 20)\r\n\t\telse:\r\n\t\t\tself.tennis_lawn_canvas.move('player_two', 350, 350)\r\n\t\r\n\tdef __init__(self, parent, controller):\r\n\t\tself.ball_x = 2\r\n\t\tself.ball_y = 2\r\n\t\tself.last_contact_id = None\r\n\t\tself.game_on_flag = True\r\n\t\tFrame.__init__(self, parent)\r\n\t\tself.view_controller = controller\r\n\t\tself.draw_pitch()\r\n\t\tself.tennis_ball = self.tennis_lawn_canvas.create_oval(10,10,20,20, fill=\"blue\", tag=('tennis_ball'))\r\n\t\tself.tennis_lawn_canvas.move('tennis_ball', 240, 240)\r\n\t\t\r\n\t\r\nclass Setup_View_Base_View():\r\n\t\r\n\tdef __init__(self):\r\n\t\tself.entry_widget_dict = {\"player_name\" : [], \"up_movement_key\" : [], \"down_movement_key\" : [], \"left_movement_key\" : [], \"right_movement_key\" : [], \"player_color\" : []}\r\n\t\r\n\tdef setup_player_information_view_elements(self, player_numeric):\r\n\t\tif player_numeric == 1:\r\n\t\t\tplayer_string = \"One\"\r\n\t\t\tadditional_elements = 7\r\n\t\telse:\r\n\t\t\tplayer_string = \"\"\r\n\t\t\tadditional_elements = 0\r\n\t\t\t\r\n\t\tLabel(self, text=\"Player \" + player_string, font=(\"Verdana\", 15)).grid(row = 1 + additional_elements, column = 0)\r\n\t\tLabel(self, text=\"Player \" + player_string + \" Name : \", font=(\"Verdana\", 10)).grid(row = 2 + additional_elements, column = 0)\r\n\t\tLabel(self, text=\"Up Key : \", font=(\"Verdana\", 10)).grid(row = 3 + 
additional_elements, column = 0)\r\n\t\tLabel(self, text=\"Left Key : \", font=(\"Verdana\", 10)).grid(row = 4 + additional_elements, column = 0)\r\n\t\tLabel(self, text=\"Right Key : \", font=(\"Verdana\", 10)).grid(row = 5 + additional_elements, column = 0)\r\n\t\tLabel(self, text=\"Down Key : \", font=(\"Verdana\", 10)).grid(row = 6 + additional_elements, column = 0)\r\n\t\tLabel(self, text=\"Player Color : \", font=(\"Verdana\", 10)).grid(row = 7 + additional_elements, column = 0)\r\n\t\t\r\n\t\tplayer_color_entry = Entry(self)\r\n\t\tplayer_color_entry.grid(row = 7 + additional_elements, column = 1)\r\n\t\tself.entry_widget_dict[\"player_color\"].append(player_color_entry)\r\n\t\t\r\n\t\tplayer_name_entry = Entry(self)\r\n\t\tplayer_name_entry.grid(row = 2 + additional_elements, column = 1)\r\n\t\tself.entry_widget_dict[\"player_name\"].append(player_name_entry)\r\n\t\t\r\n\t\tup_movement_key_entry = Entry(self)\r\n\t\tup_movement_key_entry.grid(row = 3 + additional_elements, column = 1)\r\n\t\tself.entry_widget_dict[\"up_movement_key\"].append(up_movement_key_entry)\r\n\t\t\r\n\t\tleft_movement_key_entry = Entry(self)\r\n\t\tleft_movement_key_entry.grid(row = 4 + additional_elements, column = 1)\r\n\t\tself.entry_widget_dict[\"left_movement_key\"].append(left_movement_key_entry)\r\n\t\t\r\n\t\tright_movement_key_entry = Entry(self)\r\n\t\tright_movement_key_entry.grid(row = 5 + additional_elements, column = 1)\r\n\t\tself.entry_widget_dict[\"right_movement_key\"].append(right_movement_key_entry)\r\n\t\t\r\n\t\tdown_movement_key_entry = Entry(self)\r\n\t\tdown_movement_key_entry.grid(row = 6 + additional_elements, column = 1)\r\n\t\tself.entry_widget_dict[\"down_movement_key\"].append(down_movement_key_entry)\r\n\t\t\r\n\t# Method called by handler class to update error message from controller\r\n\tdef ammend_error(self, error_message):\r\n\t\tself.error_label = Label(self, text=error_message, font=(\"Verdana\", 8)).grid(row = 15, column = 0, pady=(10))\t\r\n\t\t\r\n\t\r\nclass Co_Op_Setup_View(Frame, Setup_View_Base_View):\r\n\t\r\n\t# Method to init all Tkinter Objects\r\n\tdef setup_view_elements(self):\r\n\t\tLabel(self, text=\"Co-oP\", font=(\"Verdana\", 20)).grid(row = 0, column = 0)\r\n\t\tself.setup_player_information_view_elements(0)\r\n\t\tself.setup_player_information_view_elements(1)\t\t\r\n\t\tButton(self, text=\"Face Off!\", command=lambda: self.view_controller.request_player_model_population(Co_Op_Setup_View, Main_Tennis_View)).grid(row=15, column =1)\r\n\t\t\r\n\t# Method for controller to get all user input from this view\r\n\tdef get_view_payload(self):\r\n\t\tview_pay_load = {\"player_name\" : [], \"up_movement_key\" : [], \"down_movement_key\" : [], \"left_movement_key\" : [], \"right_movement_key\" : [], \"player_color\" : []}\r\n\t\tfor data_key in self.entry_widget_dict.keys():\r\n\t\t\tfor entry_widget in self.entry_widget_dict[data_key]:\r\n\t\t\t\tview_pay_load[data_key].append(entry_widget.get())\t\t\t\t\r\n\t\treturn view_pay_load \r\n\t\t\t\r\n\tdef __init__(self, parent, controller):\r\n\t\tFrame.__init__(self, parent)\r\n\t\tSetup_View_Base_View.__init__(self)\r\n\t\t\r\n\t\tself.view_controller = controller\r\n\t\t\r\n\t\tself.setup_view_elements()\r\n\t\t\r\n\t\t\r\n\t\t\r\nclass Single_Player_Setup_View(Frame, Setup_View_Base_View):\r\n\r\n\t# Method to init all Tkinter Objects\r\n\tdef setup_view_elements(self):\r\n\t\tsingle_player_label = Label(self, text=\"Single Player\", font=(\"Verdana\", 20)).grid(row = 0, column = 
0)\t\t\r\n\t\tself.setup_player_information_view_elements(0)\r\n\t\tButton(self, text=\"Lets Play!\", command=lambda: self.view_controller.request_player_model_population(Single_Player_Setup_View, Main_Tennis_View)).grid(row=10, column =1)\r\n\t\t\r\n\t# Method for controller to get all user input from this view\r\n\tdef get_view_payload(self):\r\n\t\tview_pay_load = {\"player_name\" : [], \"up_movement_key\" : [], \"down_movement_key\" : [], \"left_movement_key\" : [], \"right_movement_key\" : [], \"player_color\" : []}\r\n\t\t# For each entry box widget\r\n\t\tfor data_key in self.entry_widget_dict.keys():\r\n\t\t\tfor entry_widget in self.entry_widget_dict[data_key]:\r\n\t\t\t\t# Update data with key\r\n\t\t\t\tview_pay_load[data_key].append(entry_widget.get())\t\t\t\t\r\n\t\treturn view_pay_load \r\n\t\r\n\tdef __init__(self, parent, controller):\r\n\t\tFrame.__init__(self, parent)\r\n\t\tSetup_View_Base_View.__init__(self)\r\n\t\t\t\t\r\n\t\tself.view_controller = controller\r\n\t\t\r\n\t\tself.setup_view_elements()\r\n\t\t\r\n\t\t\r\n\t\r\nclass Title_Screen_View(Frame):\r\n\t\t\r\n\t# Method to init all Tkinter objects\r\n\tdef setup_view_elements(self):\r\n\t\ttitle_label = Label(self, text=\"Grand Slam Tennis\", font=(\"Verdana\", 20)).grid(row = 0, column = 0)\t\t\r\n\t\ttennis_lawn_canvas = Canvas(self, bg='green', width=500, height=500)\r\n\t\ttennis_lawn_canvas.grid(row=1, column=0, pady=(0, 20))\r\n\t\ttennis_lawn_canvas.create_rectangle(50, 50, 440, 440, outline='white', width=7)\r\n\t\ttennis_lawn_canvas.create_rectangle(50, 50, 440, 245, outline='white', width=7)\r\n\t\ttennis_lawn_canvas.create_rectangle(50, 50, 440, 160, outline='white', width=7)\r\n\t\ttennis_lawn_canvas.create_rectangle(50, 330, 440, 160, outline='white', width=7)\r\n\t\ttennis_lawn_canvas.create_rectangle(245, 245, 440, 160, outline='white', width=7)\r\n\t\ttennis_lawn_canvas.create_rectangle(245, 330, 440, 160, outline='white', width=7)\r\n\t\ttennis_lawn_canvas.create_rectangle(0, 0, 380, 7, fill='black', tags=('net'))\r\n\t\ttennis_lawn_canvas.move('net', 55, 240)\r\n\t\t\r\n\t\tButton(self, text=\"Quit\", command=lambda: self.view_controller.request_view_termination()).grid(row=4, column =0)\t\r\n\t\tButton(self, text=\"Single Player\", command=lambda: self.view_controller.request_frame(Single_Player_Setup_View)).grid(row=2, column =0)\t\r\n\t\tButton(self, text=\"Co-Op\", command=lambda: self.view_controller.request_frame(Co_Op_Setup_View)).grid(row=3, column =0)\t\r\n\t\r\n\t# Method called by handler class to update error message from controller\r\n\tdef ammend_error(self, error_message):\r\n\t\tself.error_label = Label(self, text=error_message, font=(\"Verdana\", 12)).grid(row = 20, column = 0)\t\r\n\t\r\n\tdef __init__(self, parent, controller):\r\n\r\n\t\tFrame.__init__(self, parent)\r\n\t\t\r\n\t\tself.view_controller = controller\r\n\t\t\r\n\t\tself.setup_view_elements()\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t","sub_path":"Tennis_View.py","file_name":"Tennis_View.py","file_ext":"py","file_size_in_byte":13781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"482874018","text":"import sys\nsys.path.append('.')\nimport pudb as db\nimport Leetcode.utils as ut\nfrom solution_by_day import Solution as SolutionByDay\nfrom solution_over_time import Solution as SolutionOverTime\n\n\ntests = [\n [\n 2,\n [2,4,1],\n 2\n ],\n [\n 2,\n [3,2,6,5,0,3],\n 7,\n ],\n [\n 4,\n [1,2,4,2,5,7,2,4,9,0],\n 15\n ],\n [\n 1,\n [],\n 0\n ],\n [\n 1,\n [4],\n 0\n 
]\n]\n\n\nif __name__ == \"__main__\":\n results = []\n sol_day = SolutionByDay()\n sol_time = SolutionOverTime()\n\n for i, t in enumerate(tests):\n k, prices, exp = t\n\n if sys.argv[1:2] and int(sys.argv[1]) == i + 1:\n db.set_trace()\n\n ans = sol_day.maxProfit(k, prices)\n # ans = sol_time.maxProfit(k, prices)\n results.append(ans == exp)\n ut.print_test_oneline(i, results[-1], ans, exp)\n\n ut.print_test_results(results)\n","sub_path":"Leetcode/best_time_to_buy_and_sell_stock_iv/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"566924625","text":"from colorama import Back, Fore, Style\nfrom pprint import pprint\nfrom utils.better_requests import requests_retry_session\nfrom yaspin import yaspin\nimport sys\n\ndef get_credentials(client_id, client_secret):\n with yaspin(\n side=\"right\",\n text=Fore.YELLOW\n + \" POST https://accounts.spotify.com/api/token\"\n + Style.RESET_ALL,\n color=\"yellow\",\n ) as spinner:\n res = requests_retry_session().post(\n \"https://accounts.spotify.com/api/token\",\n auth=(client_id, client_secret),\n data={\"grant_type\": \"client_credentials\"},\n timeout=60,\n )\n if res.ok:\n spinner.text = (\n Fore.GREEN\n + \" POST https://accounts.spotify.com/api/token\"\n + Style.RESET_ALL\n )\n spinner.color = \"green\"\n spinner.ok(\"✓\")\n\n print(Fore.GREEN + \" → Got credentials!\" + Style.RESET_ALL)\n print()\n return res.json()\n else:\n spinner.text = (\n Fore.RED\n + \" POST https://accounts.spotify.com/api/token\"\n + Style.RESET_ALL\n )\n spinner.color = \"red\"\n spinner.fail(\"✗\")\n\n print()\n print(\n Fore.RED\n + \" [{0}] {1}\".format(res.status_code, res.reason)\n + Style.RESET_ALL\n )\n pprint(res.json())\n print()\n print()\n\n print(Fore.RED + \" Aborting!\" + Style.RESET_ALL)\n sys.exit(1)\n\n","sub_path":"scripts/pitchfork_crawler/src/api/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"406036830","text":"import numpy as np\n\n\ndef levenshtein_swap_weight(char1, char2):\n return 1 if char1 != char2 else 0\n\n\ndef lcs_swap_weight(char1, char2):\n return 2 if char1 != char2 else 0\n\n\ndef get_edit_matrix(elements1, elements2, swap_weight_func=levenshtein_swap_weight):\n size1 = len(elements1) + 1\n size2 = len(elements2) + 1\n edit_matrix = np.empty((size1, size2), dtype=np.uintc)\n edit_matrix[0, :] = np.arange(0, size2)\n edit_matrix[:, 0] = np.arange(0, size1)\n for i in range(1, size1):\n for j in range(1, size2):\n swap_weight = swap_weight_func(elements1[i - 1], elements2[j - 1])\n neigh_min = min(edit_matrix[i - 1, j - 1] + swap_weight, # edit\n edit_matrix[i - 1, j] + 1, # delete\n edit_matrix[i, j - 1] + 1) # add\n edit_matrix[i, j] = neigh_min\n return edit_matrix\n\n\ndef get_di_dj(edit_matrix, i, j):\n di, dj = 1, 1\n curr_min = edit_matrix[i - 1, j - 1]\n if curr_min > edit_matrix[i, j - 1]:\n curr_min = edit_matrix[i, j - 1]\n di, dj = 0, 1\n if curr_min > edit_matrix[i - 1, j]:\n di, dj = 1, 0\n return di, dj\n\n\ndef get_steps(edit_matrix, i=None, j=None):\n if i is None:\n i = np.size(edit_matrix, 0) - 1\n if j is None:\n j = np.size(edit_matrix, 1) - 1\n steps = []\n while (i, j) != (0, 0):\n di, dj = 0, 0\n if i == 0:\n dj = 1\n elif j == 0:\n di = 1\n else:\n di, dj = get_di_dj(edit_matrix, i, j)\n i -= di\n j -= dj\n steps.insert(0, (di, dj))\n return 
steps\n\n\ndef get_edits(edit_matrix, str1, str2):\n    steps = get_steps(edit_matrix)\n\n    prev_word = list(str1)\n    output = [str1, \" -> \", str2, \"\\n\"]\n    i, j, certain = 0, 0, 0\n    for di, dj in steps:\n        if (di, dj) == (1, 0):\n            del prev_word[certain]\n        elif (di, dj) == (0, 1):\n            prev_word.insert(certain, str2[j])\n            certain += 1\n        else:\n            prev_word[certain] = str2[j]\n            certain += 1\n            if str1[i] == str2[j]:\n                i += di\n                j += dj\n                continue\n        i += di\n        j += dj\n        output.append(\"\".join(prev_word))\n        output.append(\"\\n\")\n    del output[-1]  # delete last \\n\n    return \"\".join(output)\n\n\n# print_steps only works for strings\ndef edit_dist(elements1, elements2, swap_weigh_func=levenshtein_swap_weight, print_steps=False):\n    edit_matrix = get_edit_matrix(elements1, elements2, swap_weigh_func)\n    if print_steps:\n        if not (type(elements1) == type(elements2) == str):\n            raise Exception(\"Printing steps only available for strings\")\n        print(get_edits(edit_matrix, elements1, elements2))\n    return edit_matrix[len(elements1), len(elements2)]\n\n\ndef main():\n    str1 = \"przełącz\"\n    str2 = \"wyłącz\"\n\n    dist = edit_dist(str1, str2, print_steps=True)\n    print(dist)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"project/edit_dist.py","file_name":"edit_dist.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"536702660","text":"#!/usr/bin/env python3 \n# -*- coding: utf-8 -*-\n\n''' \nget_daily_data.py: fetch all the daily data for a single stock and concatenate it.\nThe time range runs from January 1, 2008 to April 30, 2019.\nWhere a stock's single-day data is a two-dimensional table, it must be reshaped into one dimension.\nDaily stock data carries a risk of dimension explosion.\nThe daily data also includes some text fields. \n'''\n\n__author__ = 'LongFly'\n\nimport os\nimport sys\n\nsys.path.append('C:\\\\Users\\\\longf.DESKTOP-7QSFE46\\\\GitHub\\\\A-Stock-Prediction-System-with-GAN-and-DRL')\n\nimport tushare as ts\nimport pandas as pd \nimport numpy as np \nfrom bin.base.stock import *\n\nSTARTDATE = 20080101\nENDDATE = 20181231\nTSCODE = '600690.SH'\n\ndef getParameter(ts_code=None):\n    # Split the start/end dates, fetch the data year by year, then concatenate\n    # Returns a list of Parameters objects\n    start = int(STARTDATE / 10000)\n    end = int(ENDDATE / 10000)\n    paralist = []\n    for i in range(start, end + 1):\n        para = Parameters(ts_code=ts_code, \n                        start_date=str(i)+'0101', \n                        end_date=str(i)+'1231')\n        paralist.append(para)\n    return paralist\n    \n    # Fetch the parameters\n\ndef getDailyStock(paralist, save=False):\n    '''\n    Fetch the daily price data along with the daily fundamentals,\n    renaming the columns as the data comes in.\n    '''\n    total = pd.DataFrame()\n    for para in paralist:\n        stockdata = StockData(para)\n        cal = stockdata.getTradeCalender().drop(columns=['exchange','is_open'])\n        daily = stockdata.getDaily().drop(columns='ts_code').rename(columns= lambda x: 'daily_'+x) \n        daily_indicator = stockdata.getDailyIndicator().drop(columns='ts_code').rename(columns= lambda x: 'daily_indicator_'+x) \n        moneyflow = stockdata.getMoneyflow().drop(columns='ts_code').rename(columns= lambda x: 'moneyflow_'+x) \n        \n        daily_total = pd.merge(\n            pd.merge(\n                pd.merge(cal, daily, left_on='cal_date', right_on='daily_trade_date', how='left'),\n                daily_indicator, left_on='cal_date', right_on='daily_indicator_trade_date', how='left'), \n            moneyflow, left_on='cal_date', right_on='moneyflow_trade_date', how='left')\n        # Merge the per-stock daily quote and money-flow data\n        res_qfq = stockdata.getRestoration(adj='qfq').drop(columns='ts_code').rename(columns= lambda x: 'res_qfq_'+x) \n        res_hfq = stockdata.getRestoration(adj='hfq').drop(columns='ts_code').rename(columns= lambda x: 'res_hfq_'+x)\n        \n        restoration = pd.merge(res_hfq,res_qfq, left_on='res_hfq_trade_date', right_on='res_qfq_trade_date')\n        # Merge the price-restoration (adjusted price) data\n\n        df = 
pd.merge(daily_total, restoration, left_on='cal_date', right_on='res_hfq_trade_date', how='left')\n        total = total.append(df.sort_values(by='cal_date', ascending=True), ignore_index=True)\n    print('Get {0} stock market data at {1} dimensions and {2} rows.'.format(TSCODE, total.shape[1], total.shape[0])) \n    if save:\n        total.to_csv('dataset\\\\Dailystock-'+TSCODE+'.csv') \n    return total\n\ndef getDailyFinance(paralist, save=False):\n    '''\n    Fetch the listed company's daily financial-report data.\n    '''\n    total = pd.DataFrame()\n    for para in paralist:\n        stockdata = StockData(para)\n        cal = stockdata.getTradeCalender().drop(columns=['exchange','is_open'])\n        stockfinance = StockFinance(para)\n        income = stockfinance.getIncome().drop(columns=['ts_code',]).rename(columns= lambda x: 'income_'+x) \n        balancesheet = stockfinance.getBalanceSheet().drop(columns='ts_code').rename(columns= lambda x: 'balancesheet_'+x) \n        cashflow = stockfinance.getCashflow().drop(columns='ts_code').rename(columns= lambda x: 'cashflow_'+x) \n        forecast = stockfinance.getForecast().drop(columns=['ts_code', 'type', 'summary', 'change_reason']).rename(columns= lambda x: 'forecast_'+x) \n        express = stockfinance.getExpress().drop(columns='ts_code').rename(columns= lambda x: 'express_'+x) \n        dividend = stockfinance.getDividend().drop(columns=['ts_code', 'div_proc']).rename(columns= lambda x: 'dividend_'+x) \n        financeindicator = stockfinance.getFinacialIndicator().drop(columns='ts_code').rename(columns= lambda x: 'financeindicator_'+x) \n\n        finance_total = pd.merge(cal, income, left_on='cal_date', right_on='income_ann_date', how='left')\n        finance_total = pd.merge(finance_total, financeindicator, left_on='cal_date', right_on='financeindicator_ann_date', how='left')\n        finance_total = pd.merge(finance_total, balancesheet, left_on='cal_date', right_on='balancesheet_ann_date', how='left')\n        finance_total = pd.merge(finance_total, cashflow, left_on='cal_date', right_on='cashflow_ann_date', how='left')\n        finance_total = pd.merge(finance_total, forecast, left_on='cal_date', right_on='forecast_ann_date', how='left')\n        finance_total = pd.merge(finance_total, express, left_on='cal_date', right_on='express_ann_date', how='left')\n        finance_total = pd.merge(finance_total, dividend, left_on='cal_date', right_on='dividend_ann_date', how='left')\n        \n        total = total.append(finance_total.sort_values(by='cal_date', ascending=True), ignore_index=True)\n    print('Get {0} stock finance data at {1} dimensions and {2} rows.'.format(TSCODE, total.shape[1], total.shape[0])) \n    if save:\n        total.to_csv('dataset\\\\DailyFinance-' + TSCODE + '.csv') \n    return total\n\ndef getDailyMarket(paralist, save=False):\n    '''\n    Fetch the basic daily market-wide data.\n    '''\n    total = pd.DataFrame()\n    for para in paralist:\n        stockdata = StockData(para)\n        cal = stockdata.getTradeCalender().drop(columns=['exchange','is_open'])\n        market = Market(para)\n        HSGTflow = market.getMoneyflow_HSGT().rename(columns= lambda x: 'HSGTflow_'+x) \n        margin = market.getMargin().drop(columns='exchange_id').rename(columns= lambda x: 'margin_'+x) \n        if margin.shape[0]:# Aggregate only when rows exist, otherwise the column structure would be lost\n            margin = margin.groupby('margin_trade_date').mean().reset_index()\n        pledge = market.getPledgeState().drop(columns='ts_code').rename(columns= lambda x: 'pledge_'+x) \n        if pledge.shape[0]:\n            pledge = pledge.groupby('pledge_end_date').mean().reset_index()\n        repurchase = market.getRepurchase().drop(columns=['end_date','proc','exp_date']).rename(columns= lambda x: 'repurchase_'+x) \n        if repurchase.shape[0]:\n            repurchase = 
repurchase.groupby('repurchase_ann_date').mean().reset_index()\n        desterilization = market.getDesterilization().drop(columns=['holder_name','share_type']).rename(columns= lambda x: 'desterilization_'+x) \n        if desterilization.shape[0]:\n            desterilization = desterilization.groupby('desterilization_ann_date').mean().reset_index()\n        block = market.getBlockTrade().drop(columns=['buyer','seller']).rename(columns= lambda x: 'block_'+x) \n        if block.shape[0]:\n            block = block.groupby('block_trade_date').sum().reset_index()\n        \n        # Add two columns for the share-unlock and block-trade data so the upcoming merge is easier\n\n        market_total = cal.merge(HSGTflow, \n            left_on='cal_date', right_on='HSGTflow_trade_date', how='left').merge(margin, \n            left_on='cal_date', right_on='margin_trade_date', how='left').merge(pledge, \n            left_on='cal_date', right_on='pledge_end_date', how='left').merge(repurchase, \n            left_on='cal_date', right_on='repurchase_ann_date', how='left').merge(desterilization, \n            left_on='cal_date', right_on='desterilization_ann_date', how='left').merge(block, \n            left_on='cal_date', right_on='block_trade_date', how='left')\n        # print(market_total)\n        total = total.append(market_total.sort_values(by='cal_date', ascending=True), ignore_index=True)\n    print('Get {0} daily market data at {1} dimensions and {2} rows.'.format(TSCODE, total.shape[1], total.shape[0])) \n    if save:\n        total.to_csv('dataset\\\\Dailymarket-'+TSCODE+'.csv') \n    return total\n\ndef getDailyInterest(paralist, save=False):\n    '''\n    Fetch the daily macroeconomic interest-rate data.\n    '''\n    total = pd.DataFrame()\n    for para in paralist:\n        stockdata = StockData(para)\n        cal = stockdata.getTradeCalender().drop(columns=['exchange','is_open'])\n        \n        interest = Interest(para)\n        shibor = interest.getShibor().rename(columns= lambda x: 'shibor_'+x) \n        shiborquote = interest.getShiborQuote().drop(columns='bank').rename(columns= lambda x: 'shiborquote_'+x) \n        if shiborquote.shape[0]:\n            shiborquote = shiborquote.groupby('shiborquote_date').mean().reset_index()\n        shiborLPR = interest.getShibor_LPR().rename(columns= lambda x: 'shiborLPR_'+x) \n        libor = interest.getLibor().drop(columns='curr_type').rename(columns= lambda x: 'libor_'+x) \n        hibor = interest.getHibor().rename(columns= lambda x: 'hibor_'+x) \n        wen = interest.getWenZhouIndex().rename(columns= lambda x: 'wen_'+x) \n\n        interest_total = cal.merge(shibor, \n            left_on='cal_date', right_on='shibor_date', how='left').merge(shiborquote, \n            left_on='cal_date', right_on='shiborquote_date', how='left').merge(shiborLPR, \n            left_on='cal_date', right_on='shiborLPR_date', how='left').merge(libor, \n            left_on='cal_date', right_on='libor_date', how='left').merge(hibor, \n            left_on='cal_date', right_on='hibor_date', how='left').merge(wen, \n            left_on='cal_date', right_on='wen_date', how='left')\n        # print(interest_total)\n        total = total.append(interest_total.sort_values(by='cal_date', ascending=True), ignore_index=True)\n    print('Get {0} interest data at {1} dimensions and {2} rows.'.format(TSCODE, total.shape[1], total.shape[0])) \n    if save:\n        total.to_csv('dataset\\\\Dailyinterest-'+TSCODE+'.csv') \n    return total\n    \n\ndef main():\n    paralist = getParameter(ts_code=TSCODE)\n    stock_total = getDailyStock(paralist, save=True)\n    finance_total = getDailyFinance(paralist, save=True)\n    market_total = getDailyMarket(paralist, save=True)\n    interest_total = getDailyInterest(paralist, save=True) \n\n    total = stock_total.merge(finance_total, \n        on='cal_date', how='left').merge(market_total, \n        on='cal_date', how='left').merge(interest_total,\n        on='cal_date', how='left')\n    print('Get {0} daily total data at {1} dimensions and 
{2} rows.'.format(TSCODE, total.shape[1], total.shape[0])) \n total.to_csv('dataset\\\\DailyTotal-'+TSCODE+'.csv')\n\n\nif __name__ == \"__main__\":\n main()\n \n","sub_path":"bin/base/get_daily_data.py","file_name":"get_daily_data.py","file_ext":"py","file_size_in_byte":10950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"213113786","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef PI(x):\n return np.clip(x, 0, 1)\n\ndef A(x):\n return np.gradient(x)\n\ndef AT(x):\n return -np.gradient(x)\n\nN = 100\n\nplot = False\nbest_coeffs = None\nbest_var = np.inf\n\nfor σq in np.logspace(-3, 3, 20):\n for σd in np.logspace(-3, 3, 20):\n for ε in np.logspace(-3, 3, 10):\n\n #σq = 0.001\n #σd = 0.001\n #ε = 0.01\n q = np.zeros(N)\n G = np.ones(N)\n d = np.random.random(N)\n a = d.copy()\n θ = np.inf\n\n if plot:\n plt.plot(d)\n\n for i in range(1000):\n q = PI((q + σq * G * A(d)) / (1 + σq * ε))\n d = (d + σd * (-G * AT(q) + 1 / θ * a)) / (1 + σd / θ)\n\n print(np.var(d), np.var(np.random.random(N)), σq, σd, ε)\n if np.var(d) < best_var:\n best_var = np.var(d)\n best_coeffs = [σq, σd, ε]\n\n if plot:\n plt.plot(d, '+-')\n\n plt.show()\n\nprint(best_var)\nprint(best_coeffs)\n","sub_path":"Mlib/Sfm/Disparity/dtam1d.py","file_name":"dtam1d.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"635146628","text":"import os\n\nfrom bizfly_two_fa import BizflyTwoFa\n\nif __name__ == '__main__':\n app_secret = os.getenv('TEST_APP_SECRET')\n email = os.getenv('TEST_USER_EMAIL')\n phone = os.getenv('TEST_USER_PHONE')\n\n bizfly_2fa = BizflyTwoFa(\n secret_key=app_secret\n )\n session_uid = bizfly_2fa.generate_session(\n email=email,\n phone=phone\n )\n print(session_uid)\n","sub_path":"tests/gen_session.py","file_name":"gen_session.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"202527396","text":"import config as conf\nfrom parsers import protocolutil as util\nimport queue\nimport threading as thread\nimport time\n\nredmech = conf.rediser\n\ntruck_info_tube = queue.Queue() # the info syncs messaging queue.\n\n\nclass TruckInfoWatchTube(thread.Thread):\n\n def __init__(self, truck_info_tube, tube_name):\n thread.Thread.__init__(self, name='TruckInfoWatchTube')\n self.truck_info_tube = truck_info_tube\n self.tube_name = tube_name\n\n def run(self):\n print('tube_watch:' + self.tube_name)\n while True:\n comeindata = redmech.lpop(self.tube_name)\n if comeindata is not None:\n self.truck_info_tube.put(util.bstr_to_str(comeindata))\n time.sleep(1 / 1000)\n","sub_path":"redtube/tube.py","file_name":"tube.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"467653823","text":"# Local imports\nfrom evennia import Command\nfrom world.combat_loop import CombatLoop\nfrom commands.combat import Helper\nfrom evennia import utils\nfrom typeclasses.npc import Npc\nfrom commands.combatant import Combatant\n\nclass CmdSunder(Command):\n \"\"\"\n Issues a sunder command.\n Usage:\n sunder \n This will calculate an attack score based on your weapon and master of arms level.\n \"\"\"\n\n key = \"sunder\"\n help_category = \"combat\"\n\n def parse(self):\n \"Very trivial parser\"\n self.target = self.args.strip()\n\n def func(self):\n 
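# Wrap the caller in the Combatant helper up front; the victim lookup and the combat messages below go through it.\n        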
combatant = Combatant(self.caller)\n\n        if not self.target:\n            self.caller.msg(\"|430Usage: sunder |n\")\n            return\n\n        # Init combat helper class for logic\n        h = Helper(self.caller)\n\n        if not h.canFight(self.caller):\n            self.caller.msg(\"|400You are too injured to act.|n\")\n            return\n\n        # Check for and error handle designated target\n        target = self.caller.search(self.target)\n\n        victim = combatant.getVictim(self.target)\n\n        if not target.db.bleed_points:\n            combatant.message(f\"{victim.name} |400is dead. You only further mutilate their body.|n\")\n            combatant.broadcast(f\"{combatant.name} |025further mutilates the corpse of|n {victim.name}|025.|n\")\n            return\n\n        loop = CombatLoop(combatant.caller, combatant.target)\n        loop.resolveCommand()\n\n        # Run logic for the sunder command\n        if not self.caller.db.combat_turn:\n            self.msg(\"|430You need to wait until it is your turn before you are able to act.|n\")\n            return\n\n        combat_stats = h.getMeleeCombatStats(self.caller)\n        target_stats = h.getMeleeCombatStats(target)\n\n        # Confirm whether the object carried is a two-handed weapon\n        right_hand_item = combat_stats.get(\"right_slot\", '')\n        rightObject = self.caller.search(right_hand_item)\n        isTwoHanded = True if rightObject.db.twohanded else False\n\n        left_hand_item = combat_stats.get(\"left_slot\", '')\n        sundersRemaining = self.caller.db.sunder\n\n        if not isTwoHanded:\n            self.msg(\"|430Before you attack you must equip a two-handed weapon using the command equip .|n\")\n            return\n\n        if sundersRemaining <= 0:\n            self.caller.msg(\"|400You have 0 sunders remaining or do not have the skill.\\nPlease choose another action.|n\")\n            return\n\n        die_result = h.fayneChecker(combat_stats.get(\"master_of_arms\", 0), combat_stats.get(\"wylding_hand\", 0))\n\n        # Get damage result and damage for weapon type\n        attack_result = (die_result + self.caller.db.weapon_level) - combat_stats.get(\"dmg_penalty\", 0) - combat_stats.get(\"weakness\", 0)\n        damage = 2 if combat_stats.get(\"two_handed\", False) else 1\n        target_av = target.db.av\n        shot_location = h.shotFinder(target.db.targetArray)\n\n        if attack_result >= target.db.av:\n            # Check target left and right slots for items. 
Decrement material value from right and then left.\n            # If no more items, subtract damage as normal.\n            if target_stats.get(\"right_slot\", ''):\n                # Get item and material value for right slot.\n                right_item = self.caller.search(target.db.right_slot[0], location=target)\n                right_mv = right_item.db.material_value\n                # Decrement one from material value.\n                # Check to make sure it won't go below 0.\n                if (right_mv - 1) <= 0:\n                    right_item.db.material_value = 0\n                    right_item.db.broken = True\n                    # If two handed, remove from both slots\n                    if right_item.db.twohanded:\n                        target.db.left_slot.clear()\n                    # Remove right slot\n                    target.db.right_slot.remove(right_item)\n                    self.caller.location.msg_contents(f\"{self.caller.key} |025strikes|n (|020{attack_result}|n) |025with great ferocity and sunders|n {target.key}|025's {right_item.key}|n (|400{target.db.av}|n)|025, breaking it.|n\")\n                else:\n                    right_item.db.material_value -= 1\n                    self.caller.location.msg_contents(f\"{self.caller.key} |025strikes|n (|020{attack_result}|n) |025with great ferocity and damages|n {target.key}|025's {right_item.key}|n (|400{target.db.av}|n)|025.|n\")\n\n            elif target_stats.get(\"left_slot\", ''):\n                # Get item and material value for left slot.\n                left_item = self.caller.search(target.db.left_slot[0], location=target)\n                left_mv = left_item.db.material_value\n                # Decrement one from material value\n                if (left_mv - 1) <= 0:\n                    left_item.db.material_value = 0\n                    left_item.db.broken = True\n                    # if utils.inherits_from(target, Npc):\n                    #     target.db.skip_turn = 1\n                    target.db.left_slot.remove(left_item)\n\n                    self.caller.location.msg_contents(f\"{self.caller.key} |025strikes|n (|020{attack_result}|n) |025with great ferocity and sunders|n {target.key}|025's {left_item.key}|n (|400{target.db.av}|n)|025, breaking it.|n\")\n                else:\n                    left_item.db.material_value -= 1\n                    self.caller.location.msg_contents(f\"{self.caller.key} |025strikes|n (|020{attack_result}|n) |025with great ferocity and damages|n {target.key}|025's {left_item.key}|n (|400{target.db.av}|n)|025.|n\")\n\n            # Do damage resolution block\n            elif target_av:\n                # subtract damage from corresponding target stage (shield_value, armor, tough, body)\n                new_av = h.damageSubtractor(damage, target, self.caller)\n                # Update target av to new av score per damageSubtractor\n                target.db.av = new_av\n                self.caller.location.msg_contents(f\"{self.caller.key} |025strikes with great ferocity|n (|020{attack_result}|n) |025at|n {target.key} |025and hits|n (|400{target_av}|n), |025dealing|n |430{damage}|n |025damage!|n\")\n                target.msg(f\"|430Your new total Armor Value is {new_av}:\\nShield: {target.db.shield}\\nArmor Specialist: {target.db.armor_specialist}\\nArmor: {target.db.armor}\\nTough: {target.db.tough}|n\")\n            else:\n                self.caller.location.msg_contents(f\"{self.caller.key} |025strikes with great ferocity|n (|020{attack_result}|n) |025at|n {target.key}|025's {shot_location} and hits|n (|400{target_av}|n), |025dealing|n |430{damage}|n |025damage!|n\")\n                # First torso shot always takes body to 0. Does not pass excess damage to bleed points.\n                if shot_location == \"torso\" and target.db.body > 0:\n                    target.db.body = 0\n                    self.caller.location.msg_contents(f\"{target.key} |025has been fatally wounded and is now bleeding to death. 
They will soon be unconscious.|n\")\n                else:\n                    h.deathSubtractor(damage, target, self.caller)\n\n            # Decrement the number of remaining sunders in the database\n            self.caller.db.sunder -= 1\n        else:\n            self.caller.location.msg_contents(f\"{self.caller.key} |025strikes a devastating blow at|n {target.key}|025, but misses.|n\")\n        # Clean up\n        # Set self.caller's combat_turn to 0. Can no longer use combat commands.\n        loop.combatTurnOff(self.caller)\n\n        loop.cleanup()\n","sub_path":"eldritchmush/commands/combat_commands/sunder.py","file_name":"sunder.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"70174423","text":"import csv\n\n\nclass CSVOperations:\n    \"\"\"\n    This is a class for reading from a specified CSV file and writing new data to it\n\n    Author:\n        Zhiming Liu\n    \"\"\"\n    file_path = \"\"\n    fieldnames = ['empid', 'gender', 'age', 'sales', 'bmi', 'salary', 'birthday']\n    data = []\n\n    def __init__(self, file_path):\n        \"\"\"\n        This is the constructor of the class\n\n        Args:\n            file_path: the path of the CSV file\n        \"\"\"\n        self.file_path = file_path\n        self.data = self.read_data\n\n    @property\n    def read_data(self):\n        \"\"\"\n        This function returns the content of the CSV file\n\n        Returns:\n            List of the file content\n        \"\"\"\n        data = []\n        with open(self.file_path, newline=\"\") as f:\n            try:\n                reader = csv.DictReader(f, fieldnames=self.fieldnames)\n                data = [dict(row) for row in reader][1:]\n            except csv.Error as e:\n                print(\"Read file error: \" + str(e))\n        return data\n\n    def save_data(self, new_data: list = []):\n        \"\"\"\n        This function saves new data to the CSV file\n\n        Args:\n            new_data: a list of new data in Dictionary with specified keys\n\n        Returns:\n            Boolean value: True on success, False on failure\n        \"\"\"\n        for row in new_data:\n            self.data.append(row)\n        with open(self.file_path, 'w', newline=\"\") as f:\n            try:\n                writer = csv.DictWriter(f, fieldnames=self.fieldnames)\n                writer.writeheader()\n                writer.writerows(self.data)\n            except csv.Error as e:\n                print(\"Write CSV file error: \" + str(e))\n                return False\n        return True\n\n\nop = CSVOperations('staffinfo.csv')\nprint(op.data)\n# new_data_01 = {\"empid\": \"Y411\", \"gender\": \"M\", \"age\": 41, \"sales\": 200,\n#                \"bmi\": \"Obesity\", \"salary\": 450, \"birthday\": \"01-09-1977\"}\n# op.save_data(new_data_01)\n# print(op.data)\n","sub_path":"src/CSVOperations.py","file_name":"CSVOperations.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"513511601","text":"from tkinter import *\nfrom general import calc\n\nclass Application(Frame):\n    \"\"\" GUI application that times your work on projects \"\"\"\n    def __init__(self, master):\n        \"\"\" Initialize Frame. 
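Builds the widget grid by calling create_widgets(). 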
\"\"\"\n super(Application, self).__init__(master)\n self.grid()\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\" Create widgets \"\"\"\n # create question label\n Label(self,\n text = \"What project are you working on ?\\n\"\n ).grid(row = 0, column = 0, columnspan = 2, sticky = W)\n\n\n # create variable for project\n self.project = StringVar()\n self.project.set(None)\n\n # create radio buttons\n\n Radiobutton(self,\n text = \"Python\",\n background = \"light green\",\n indicatoron = 0,\n width = 20,\n variable = self.project,\n #command = create_project_dir(\"Python\"),\n value = 1\n ).grid(row = 2, column = 1, sticky = W)\n\n Radiobutton(self,\n text = \"Lynux\",\n background = \"light green\",\n indicatoron = 0,\n width = 20,\n variable = self.project,\n #command = print(\"lin\"),\n value = 2\n ).grid(row = 2, column = 2, sticky = W)\n\n Radiobutton(self,\n text = \"SQL\",\n background = \"light green\",\n indicatoron = 0,\n width = 20,\n variable = self.project,\n #command = print(\"SQ\"),\n value = 3\n ).grid(row = 2, column = 3, sticky = W)\n\n Radiobutton(self,\n text = \"ISTQB\",\n background = \"light green\",\n indicatoron = 0,\n width = 20,\n variable = self.project,\n #command = print(\"stuff\"),\n value = 4\n ).grid(row = 3, column = 1, sticky = W)\n\n Radiobutton(self,\n text = \"Swift\",\n background = \"light green\",\n indicatoron = 0,\n width = 20,\n variable = self.project,\n #command = function,\n value = 5\n ).grid(row = 3, column = 2, sticky = W)\n\n Radiobutton(self,\n text = \"C#\",\n background = \"light green\",\n indicatoron = 0,\n width = 20,\n variable = self.project,\n command = calc,\n value = 6\n ).grid(row = 3, column = 3, sticky = W)\n\n self.screen = Text(self, width = 75, height = 5, wrap = WORD, background = \"light blue\")\n self.screen.grid(row = 4, column = 0, columnspan = 4)\n\n Button(self,\n text = \"Start\",\n width = 20,\n background = \"orange\"\n #command = pass,\n ).grid(row = 5, column = 0, sticky = W)\n\n Button(self,\n text = \"Stop\",\n width = 20,\n background = \"orange\"\n #command = pass,\n ).grid(row = 5, column = 1, sticky = W)\n\n Button(self,\n text = \"Display\",\n width = 20,\n background = \"orange\"\n #command = pass\n ).grid(row = 5, column = 2, sticky = W)\n\n Button(self,\n text = \"Reset\",\n width = 20,\n background = \"orange\"\n #command = pass,\n ).grid(row = 5, column = 3, sticky = W)\n\n\nroot = Tk()\nroot.title(\"TIMER\")\napp = Application(root)\nroot.mainloop()","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"401147738","text":"'''\nRequired input settings for web service: \n\n'''\nservice_id = \"ws_10\"\nservice_endpoint = \"http://phylo.cs.nmsu.edu:5004/phylotastic_ws/sl/eol\"\n\ninput_settings = [{'method': \"GET\", 'path': \"/get_links\", 'weight': 0.3, 'input_data': {'species': \"Panthera onca\"} }, \n\t\t{'method': \"POST\", 'path': \"/links\" ,'weight': 0.7, 'input_data': {'species': [\"Catopuma badia\",\n\"Catopuma temminckii\"]} }\n\t\t]\n\n","sub_path":"QoS/input_configs/ws10_config.py","file_name":"ws10_config.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"476456545","text":"\"\"\"module qui contient toutes les fonctions pour manipuler du texte\"\"\"\n\ndef decompose (phrase):\n accumule = \"\"\n liste = []\n for lettre in phrase:\n if lettre == 
\" \":\n liste.append(accumule)\n accumule = \"\"\n else:\n accumule = accumule + lettre\n liste.append(accumule)\n return liste\n\ndef epelle (mot):\n accumule = \"\"\n liste = []\n for lettre in mot:\n liste.append(lettre)\n return liste\n\ndef estMajuscule(mot):\n majuscules = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\n liste = epelle (mot)\n if liste[0] in majuscules:\n return 1\n else:\n return 0\n\n\ndef rechercheNom(liste):\n if len(liste) == 1:\n return liste[0]\n else:\n liste[0] = \"tralalala\"\n for element in liste:\n if estMajuscule(element) == 1:\n return element\n return \"l'inconnu\"\n\ndef coder(mot):\n from random import randrange\n alphabet = [\"a\",\"z\",\"e\",\"r\",\"t\",\"y\",\"u\",\"i\",\"o\",\"p\",\"q\",\"s\",\"d\",\"f\",\"g\",\"h\",\"j\",\"k\",\"l\",\"m\",\"w\",\"x\",\"c\",\"v\",\"b\",\"n\"]\n i = 0\n code =[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]\n while i < 26:\n a = randrange(0,len(alphabet))\n code[i] = alphabet[a]\n del alphabet[a]\n i = i + 1\n alpha = {\"a\":0,\"z\":25,\"e\":4,\"r\":24,\"t\":23,\"y\":22,\"u\":21,\"i\":8,\"o\":19,\"p\":20,\"q\":18,\"s\":17,\"d\":3,\"f\":5,\"g\":6,\"h\":7,\"j\":9,\"k\":10,\"l\":11,\"m\":12,\"w\":16,\"x\":15,\"c\":2,\"v\":14,\"b\":1,\"n\":13}\n for lettre in mot:\n if lettre == \" \":\n final += \" \"\n else:\n final += code[alpha[lettre]]\n return (final)\n\ndef mettreMajuscule(mot):\n lettres = []\n for lettre in mot:\n lettres.append(lettre)\n if estMajuscule(lettres[0]) == 1:\n return mot\n else:\n majuscules = {\"a\":\"A\",\"b\":\"B\",\"c\":\"C\",\"d\":\"D\",\"e\":\"E\",\"f\":\"F\",\"g\":\"G\",\"h\":\"H\",\"i\":\"I\",\"j\":\"J\",\"k\":\"K\",\"l\":\"L\",\"m\":\"M\",\"n\":\"N\",\"o\":\"O\",\"p\":\"P\",\"q\":\"Q\",\"r\":\"R\",\"s\":\"S\",\"t\":\"T\",\"u\":\"U\",\"v\":\"V\",\"w\":\"W\",\"x\":\"X\",\"y\":\"Y\",\"z\":\"Z\",\"é\":\"E\",\"è\":\"E\",\"â\":\"A\"}\n lettres[0] = majuscules[lettres[0]]\n mot = \"\"\n for e in lettres:\n mot += e\n return mot\n\ndef enleverMajuscule(mot):\n lettres = []\n for lettre in mot:\n lettres.append(lettre)\n if estMajuscule(lettres[0]) == 0:\n return mot\n else:\n majuscules = {\"A\":\"a\",\"B\":\"b\",\"C\":\"c\",\"D\":\"d\",\"E\":\"e\",\"F\":\"f\",\"G\":\"g\",\"H\":\"h\",\"I\":\"i\",\"J\":\"j\",\"K\":\"k\",\"L\":\"l\",\"M\":\"m\",\"N\":\"n\",\"O\":\"o\",\"P\":\"p\",\"Q\":\"q\",\"R\":\"r\",\"S\":\"s\",\"T\":\"t\",\"U\":\"u\",\"V\":\"v\",\"W\":\"w\",\"X\":\"x\",\"Y\":\"y\",\"Z\":\"z\"}\n lettres[0] = majuscules[lettres[0]]\n mot = \"\"\n for e in lettres:\n mot += e\n return mot\n\ndef accordeGenre(mot,genre):\n if genre == \"masculin\":\n return mot\n else :\n mots = decompose(mot)\n mot = mots[0]\n mots.pop(0)\n exceptions = {\"il\":\"elle\",\"professeur\":\"professeure\",\"un\":\"une\",\"facteur\":\"factrice\",\"ecrivain\":\"ecrivaine\",\"illustrateur\":\"illustratrice\"}\n liste = []\n for lettres in mot:\n liste.append(lettres)\n try:\n mot = exceptions[mot]\n return mot\n except:\n if liste[-1] == \"x\":\n liste[-1] = \"s\"\n elif liste[-1] == \"l\":\n liste.append(\"l\")\n elif liste[-1] == \"c\":\n liste.append(\"h\")\n elif liste[-1] == \"e\":\n liste[-1] = \"\"\n elif liste[-1] == \"n\":\n liste.append(\"n\")\n elif liste[-1] == \"r\":\n if liste[-2] == \"u\" and liste[-3] == \"e\":\n liste[-1] = \"s\"\n mot = \"\"\n for e in liste:\n mot += e\n mot += \"e\"\n for e in mots:\n mot 
+= \" \" + e\n return mot\n\ndef accordeNombre(mot,nombre):\n if nombre == \"singulier\":\n return mot\n else :\n mots = decompose(mot)\n mot = mots[0]\n exceptions = {}\n liste = []\n for lettres in mot:\n liste.append(lettres)\n try:\n mot = exceptions[mot]\n return mot\n except:\n if liste[-1] == \"s\":\n liste[-1] = \"\"\n elif liste[-1] == \"x\":\n return mot\n mot = \"\"\n for e in liste:\n mot += e\n return mot + \"s\"\n\ndef accordePays(mot,pays):\n if pays in [\"Danemark\",\"Pays-Bas\",\"Portugal\",\"Kosovo\",\"Maroc\",\"Senegal\",\"Mali\",\"Congo\",\"Qatar\",\"Koweit\",\"Pakistan\",\"Bangladesh\",\"Viet Nam\",\"Japon\",\"Kazakhstan\",\"Népal\",\"Tibet\",\"Laos\",\"Cambodge\",\"Cameroun\",\"Tchad\",\"Kenya\",\"Botswana\",\"Mozambique\",\"Gabon\",\"Togo\",\"Ghana\",\"Soudan\"]:\n genre = 0\n else:\n genre = 1\n mots = {\"en\":\"au\",\"la\":\"le\"}\n if genre == 0:\n return mots[mot]\n else:\n return mot\n\ndef lireMois(n):\n mois = [\"janvier\",\"fevrier\",\"mars\",\"avril\",\"mai\",\"juin\",\"juillet\",\"aout\",\"septembre\",\"octobre\",\"novembre\",\"decembre\"]\n return mois[n]\n","sub_path":"Python/modules/langue.py","file_name":"langue.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"173692837","text":"#Source from Zybooks 4.6\r\n\r\nfrom Node import Node\r\nfrom LinkedList import LinkedList\r\n\r\nimport random\r\noutfile = open('students.txt', 'w')\r\n\r\nfor x in range(100):\r\n num = random.randint(900000000,900999999)\r\n outfile.write(str(num)+'\\n')\r\n\r\noutfile.close()\r\n \r\nstudents=LinkedList()\r\ninfile = open('students.txt', 'r')\r\n\r\ny = infile.readline()\r\nwhile y!= '':\r\n node_x = Node(y)\r\n students.append(node_x)\r\n y = infile.readline()\r\nstudents.print()\r\n\r\n\r\nattempts = 'A'\r\n\r\nwhile attempts == 'A':\r\n search = input(\"Enter Student ID:\")\r\n found = students.ListSearch(students, search)\r\n if found != None:\r\n print('Student ID:', found, 'Found')\r\n else:\r\n print('Student ID Not Found. Try Again Below')\r\n \r\n attempts = input(\"Enter A to try again:\")\r\n \r\n \r\n","sub_path":"RoyalsLab2 (2).py","file_name":"RoyalsLab2 (2).py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"161577025","text":"import re\nimport rlp\nfrom bitcoin import encode_pubkey\nfrom bitcoin import ecdsa_raw_sign, ecdsa_raw_recover\nfrom utils import big_endian_to_int as decode_int\nfrom utils import int_to_big_endian as encode_int\nfrom utils import sha3, privtoaddr\nimport utils\nimport sys\n\n\nclass Transaction(object):\n\n \"\"\"\n A transaction is stored as:\n [ nonce, value, gasprice, startgas, to, data, v, r, s]\n\n nonce is the number of transactions already sent by that account, encoded\n in binary form (eg. 0 -> '', 7 -> '\\x07', 1000 -> '\\x03\\xd8').\n\n (v,r,s) is the raw Electrum-style signature of the transaction without the\n signature made with the private key corresponding to the sending account,\n with 0 <= v <= 3. From an Electrum-style signature (65 bytes) it is\n possible to extract the public key, and thereby the address, directly.\n\n A valid transaction is one where:\n (i) the signature is well-formed (ie. 
0 <= v <= 3, 0 <= r < P, 0 <= s < N,\n 0 <= r < P - N if v >= 2), and\n (ii) the sending account has enough funds to pay the fee and the value.\n \"\"\"\n\n # nonce,value,gasprice,startgas,to,data\n def __init__(*args):\n self = args[0]\n if len(args) == 2:\n self.parse(args[1])\n else:\n self.nonce = args[1]\n self.value = args[2]\n self.gasprice = args[3]\n self.startgas = args[4]\n self.to = utils.coerce_addr_to_bin(args[5])\n self.data = args[6]\n # includes signature\n if len(args) > 7:\n self.v, self.r, self.s = args[7:10]\n if self.r > 0 and self.s > 0:\n rawhash = sha3(rlp.encode(self.serialize(False)))\n pub = encode_pubkey(\n ecdsa_raw_recover(rawhash, (self.v, self.r, self.s)),\n 'bin')\n self.sender = sha3(pub[1:])[-20:].encode('hex')\n # does not include signature\n else:\n self.v, self.r, self.s = 0, 0, 0\n self.sender = 0\n\n # nonce,value,gasprice,startgas,code\n @classmethod\n def contract(*args):\n sys.stderr.write(\n \"Deprecated method. Use pyethereum.transactions.contract \" +\n \"instead of pyethereum.transactions.Transaction.contract\\n\")\n return contract(*args[1:])\n\n @classmethod\n def parse(cls, data):\n if re.match('^[0-9a-fA-F]*$', data):\n data = data.decode('hex')\n o = rlp.decode(data) + ['', '', '']\n tx = cls(decode_int(o[0]),\n decode_int(o[1]),\n decode_int(o[2]),\n decode_int(o[3]),\n o[4].encode('hex'),\n o[5],\n decode_int(o[6]),\n decode_int(o[7]),\n decode_int(o[8]))\n return tx\n\n def sign(self, key):\n rawhash = sha3(rlp.encode(self.serialize(False)))\n self.v, self.r, self.s = ecdsa_raw_sign(rawhash, key)\n self.sender = privtoaddr(key)\n return self\n\n def coerce_to_hex(self, n):\n return n.encode('hex') if len(n) == 20 else n\n\n def serialize(self, signed=True):\n return rlp.encode([encode_int(self.nonce),\n encode_int(self.value),\n encode_int(self.gasprice),\n encode_int(self.startgas),\n utils.coerce_addr_to_bin(self.to),\n self.data,\n encode_int(self.v),\n encode_int(self.r),\n encode_int(self.s)][:9 if signed else 6])\n\n def hex_serialize(self):\n return self.serialize().encode('hex')\n\n def hash(self):\n return sha3(self.serialize())\n\n\ndef contract(nonce, endowment, gasprice, startgas, code, v=0, r=0, s=0):\n tx = Transaction(nonce, endowment, gasprice, startgas, '', code)\n tx.v, tx.r, tx.s = v, r, s\n return tx\n","sub_path":"pyethereum/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"511784076","text":"#!/usr/bin/python3\n# -*-coding:Latin-1 -*\n#import os\n#print(\"Hello the world !\")\n#input(\"Press enter to close this program ...\")\n#os.system(\"sleep 5\")\n\nannee = input(\"Entrez l\\'année à tester:\")\nannee = int(annee)\n\nif annee % 400 == 0 or (annee % 4 == 0 and annee % 100 != 0):\n print(\"\"\"L'année saisie est bissextille.\"\"\")\nelse:\n print(\"\"\"L'année saisie n'est pas bissextille.\"\"\")\n \n#print(\"année : \", annee)","sub_path":"Books/BookSiteDuZero/Chapter_04/bissextile.py","file_name":"bissextile.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160116202","text":"class Solution:\n # @return a list of lists of length 4, [[val1,val2,val3,val4]]\n def fourSum(self, num, target):\n #1.create the sum for each pair,like[sum,listofpairs]\n #2.sort the pair by key\n #3.for the match condition pairs,and filter them out\n num=sorted(num);\n sum2={}\n for i in 
range(len(num)):\n for j in range(i+1,len(num)):\n sumij=num[i]+num[j];\n if(sumij not in sum2):\n sum2[sumij]=[];\n sum2[sumij].append((i,j));\n sum2 = sorted(sum2.items(), key=lambda x:x[0])\n\n res=set();\n i,j=0,len(sum2)-1;\n while(i<=j):\n total=sum2[i][0]+sum2[j][0];\n if(total==target):\n for k1 in range(len(sum2[i][1])):\n for k2 in range(len(sum2[j][1])):\n a,b=sum2[i][1][k1];\n c,d=sum2[j][1][k2];\n items= set([a,b,c,d]);\n if(len(items)==4):\n newItem=[num[fi] for fi in items];\n newItem=tuple(sorted(newItem));\n res.add((newItem));\n i+=1;\n j-=1;\n elif(total T: \n break\n \n if len(yy) < 4:\n yy.append(yy[-1] + h * f(xx[-1], yy[-1]))\n xx.append(xx[-1] + h)\n else:\n yn = yy[-1] + h * (55 * f(xx[-1], yy[-1]) - 58 * f(xx[-2], yy[-2]) + 37 * f(xx[-3], yy[-3]) - 9 * f(xx[-4], yy[-4])) / 24.0\n yn = yy[-1] + h * (9 * f(xx[-1] + h, yn) + 19 * f(xx[-1], yy[-1]) - 5 * f(xx[-2], yy[-2]) + f(xx[-3], yy[-3])) / 24.0\n \n yy.append(yn)\n xx.append(xx[-1] + h)\n return xx, yy\n \na = 0.2\nb = 0.2\nc = 2.5\nxx, uu = predict_correct(lambda x, uv: np.array([uv[1] - uv[2], uv[0] + a * uv[1], b + uv[2] * (uv[0] - c)]), 0, np.array([1, 1, 1]), 10, 0.010)\nfor i in range(len(xx)):\n print(\"u1(\" + str(xx[i]) + \") = \" + str(uu[i][0]))\nfor i in range(len(xx)):\n print(\"u2(\" + str(xx[i]) + \") = \" + str(uu[i][1]))\nfor i in range(len(xx)):\n print(\"u3(\" + str(xx[i]) + \") = \" + str(uu[i][2]))\n\na = 0.2\nb = 0.2\nc = 5.0\nxx, uu = predict_correct(lambda x, uv: np.array([uv[1] - uv[2], uv[0] + a * uv[1], b + uv[2] * (uv[0] - c)]), 0, np.array([1, 1, 1]), 10, 0.010)\nfor i in range(len(xx)):\n print(\"u1(\" + str(xx[i]) + \") = \" + str(uu[i][0]))\nfor i in range(len(xx)):\n print(\"u2(\" + str(xx[i]) + \") = \" + str(uu[i][1]))\nfor i in range(len(xx)):\n print(\"u3(\" + str(xx[i]) + \") = \" + str(uu[i][2]))","sub_path":"Lab 4.2.py","file_name":"Lab 4.2.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"571338351","text":"\"\"\"\nThis is the deployments module and supports all the ReST actions for the\nLanding Zone folder structure collection\n\"\"\"\nimport json\nimport logging\nfrom pprint import pformat\n\nfrom config import db\nfrom models import LZFolderStructure, LZFolderStructureChild\nfrom tb_houston_service.extendedSchemas import ExtendedLZMetadataFSApplicationSchema\n\nlogger = logging.getLogger(\"tb_houston_service.lzFolderStructure\")\n\n# At time of writing max number of levels is ten\nmax_levels = 10\n\n\ndef add_folders(folders):\n for folder in folders:\n children = folder.get(\"children\")\n if children:\n for child in children:\n fs = (\n db.session.query(LZFolderStructure)\n .filter(LZFolderStructure.id == folder[\"id\"])\n .one_or_none()\n )\n if fs:\n fs.name = folder[\"name\"]\n fs.isActive = folder[\"isActive\"]\n db.session.merge(fs)\n else:\n fs = LZFolderStructure(\n id=folder[\"id\"],\n name=folder[\"name\"],\n isActive=folder[\"isActive\"],\n )\n db.session.add(fs)\n\n fsc = (\n db.session.query(LZFolderStructureChild)\n .filter(\n LZFolderStructureChild.folderId == folder[\"id\"],\n LZFolderStructureChild.childId == child[\"id\"],\n )\n .one_or_none()\n )\n if fsc:\n fsc.folderId = folder[\"id\"]\n fsc.childId = child[\"id\"]\n db.session.merge(fsc)\n else:\n fsc = LZFolderStructureChild(\n folderId=folder[\"id\"], childId=child[\"id\"]\n )\n db.session.add(fsc)\n add_folders(children)\n else:\n fs = (\n db.session.query(LZFolderStructure)\n 
.filter(LZFolderStructure.id == folder[\"id\"])\n .one_or_none()\n )\n if fs:\n fs.name = folder[\"name\"]\n fs.isActive = folder[\"isActive\"]\n db.session.merge(fs)\n else:\n fs = LZFolderStructure(\n id=folder[\"id\"], name=folder[\"name\"], isActive=folder[\"isActive\"]\n )\n db.session.add(fs)\n\n fsc = (\n db.session.query(LZFolderStructureChild)\n .filter(\n LZFolderStructureChild.folderId == folder[\"id\"],\n LZFolderStructureChild.childId is None,\n )\n .one_or_none()\n )\n if fsc:\n fsc.folderId = folder[\"id\"]\n fsc.childId = None\n db.session.merge(fsc)\n else:\n fsc = LZFolderStructureChild(folderId=folder[\"id\"], childId=None)\n db.session.add(fsc)\n\n\ndef read():\n \"\"\"\n This function responds to a request for /api/lzfolderstructure\n with the complete lists of Folder Structure relationships\n\n :return: json string of list of Folder Structure\n \"\"\"\n schema = ExtendedLZMetadataFSApplicationSchema(many=True)\n fss = (\n db.session.query(LZFolderStructure).order_by(LZFolderStructure.id.desc()).all()\n )\n\n children = None\n fs = None\n for fs in fss:\n print(f\"Processing {fs.name} *****\")\n for fs_1 in (\n db.session.query(LZFolderStructure)\n .filter(\n LZFolderStructureChild.folderId == fs.id,\n LZFolderStructure.id == LZFolderStructureChild.childId,\n )\n .order_by(LZFolderStructure.id.desc())\n .all()\n ):\n if children:\n children = [\n {\n \"id\": fs_1.id,\n \"name\": fs_1.name,\n \"isActive\": fs_1.isActive,\n \"children\": children,\n }\n ]\n else:\n children = [\n {\"id\": fs_1.id, \"name\": fs_1.name, \"isActive\": fs_1.isActive}\n ]\n\n print(f\"children: {children}\")\n\n folder_structure = [\n {\"id\": fs.id, \"name\": fs.name, \"isActive\": fs.isActive, \"children\": children}\n ]\n data = schema.dump(folder_structure)\n print(f\"fs: {json.dumps(data, indent=4)}\")\n return data, 200\n\n\ndef create(lzFolderStructureDetails):\n \"\"\"\n This function updates folder structure relationships.\n\n :param folder structure details: folder structure to update\n :return: updated folder structure\n \"\"\"\n logger.debug(\"create_all: %s\", pformat(lzFolderStructureDetails))\n\n try:\n add_folders(lzFolderStructureDetails)\n db.session.commit()\n except BaseException:\n db.session.rollback()\n raise\n finally:\n db.session.close()\n resp = read()\n return resp[0], 201\n","sub_path":"tb_houston_service/lzfolderstructure.py","file_name":"lzfolderstructure.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"444982855","text":"from Jira import Jira\nimport logging\n\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef create_jira_tickets(event, context):\n ticket = Jira()\n logger.info(event)\n\n \"\"\"\n \n headers = {\n 'x-rapidapi-host': \"your_api_host\",\n 'x-rapidapi-key': \"your_api_key\"\n }\n url = \"https://community-open-weather-map.p.rapidapi.com/forecast\"\n\n querystring = {\"q\": \"london,uk\"}\n\n response = requests.request(\"GET\", url, headers=headers,\n params=querystring)\n \n \"\"\"\n\n res = {\"cod\": \"200\", \"message\": 0.0072, \"cnt\": 40, \"list\":\n [{\"dt\": 1566118800,\n \"main\": {\"temp\": 288.36, \"temp_min\": 288.36, \"temp_max\": 288.7,\n \"pressure\": 1004.23, \"sea_level\": 1004.23,\n \"grnd_level\": 999.22, \"humidity\": 78, \"temp_kf\": -0.34},\n \"weather\": [{\"id\": 500, \"main\": \"Rain\",\n \"description\": \"light rain\", \"icon\": \"10d\"}],\n \"clouds\": {\"all\": 99}, 
\"wind\": {\"speed\": 4.8, \"deg\": 236.684},\n \"rain\": {\"3h\": 1.562}, \"sys\": {\"pod\": \"d\"},\n \"dt_txt\": \"2019-08-18 09:00:00\"},\n {\"dt\": 1566129600,\n \"main\": {\"temp\": 292.72, \"temp_min\": 292.72, \"temp_max\": 292.978,\n \"pressure\": 1004.75, \"sea_level\": 1004.75,\n \"grnd_level\": 999.63, \"humidity\": 51, \"temp_kf\": -0.26},\n \"weather\": [{\"id\": 500, \"main\": \"Rain\",\n \"description\": \"light rain\", \"icon\": \"10d\"}],\n \"clouds\": {\"all\": 50}, \"wind\": {\"speed\": 6.9, \"deg\": 248.098},\n \"rain\": {\"3h\": 0.063}, \"sys\": {\"pod\": \"d\"},\n \"dt_txt\": \"2019-08-18 12:00:00\"},\n {\"dt\": 1566140400,\n \"main\": {\"temp\": 293.23, \"temp_min\": 293.23, \"temp_max\": 293.4,\n \"pressure\": 1005.92, \"sea_level\": 1005.92,\n \"grnd_level\": 1000.51, \"humidity\": 52, \"temp_kf\": -0.17},\n \"weather\": [{\"id\": 500, \"main\": \"Rain\",\n \"description\": \"light rain\", \"icon\": \"10d\"}],\n \"clouds\": {\"all\": 50}, \"wind\": {\"speed\": 6.87, \"deg\": 250.52},\n \"rain\": {\"3h\": 0.188}, \"sys\": {\"pod\": \"d\"},\n \"dt_txt\": \"2019-08-18 15:00:00\"}\n ]}\n\n for result in res['list']:\n if result['main']['temp'] < 289.0 and result['weather'][0]['description'] != 'clear sky':\n summary = result['weather'][0]['description']\n id = result['weather'][0]['id']\n if not ticket.exists(id):\n logger.info(\"Preparing to create a Jira ticket ...\")\n summary = 'Alert for ' + str(id) + ' ' + str(summary)\n description = 'Creating Jira ticket....'\n issuetype = 'Task'\n\n logger.info(\"Creating ticket .... \")\n jira_response = ticket.create(summary, description, issuetype)\n\n return jira_response\n\n# create_tickets1()\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"328132672","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nwith open(\"T_40_1.txt\") as f:\r\n data = f.read()\r\n\r\ndata = data.split('\\n')\r\n\r\nx = [row.split(' ')[0] for row in data]\r\ny = [row.split(' ')[1] for row in data]\r\n\r\nfig = plt.figure()\r\n\r\nax1 = fig.add_subplot(111)\r\n\r\nax1.set_title(\"Plot title...\") \r\nax1.set_xlabel('your x label..')\r\nax1.set_ylabel('your y label...')\r\n\r\nax1.plot(x,y, c='r', label='the data')\r\n\r\nleg = ax1.legend()\r\n\r\nplt.show()","sub_path":"Simone Lucky/FirstTry.py","file_name":"FirstTry.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"331709962","text":"from ascii_graph import Pyasciigraph\n\n\ndef barGraph(title, data):\n \"\"\"\n Dado un titulo y una tupla de datos formados por (titulo, numero)\n imprime un gráfico de barra en la terminal\n \"\"\"\n\n print()\n for line in Pyasciigraph().graph(title, data):\n print(line)\n return\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"416214418","text":"from random import randint\nfrom time import sleep, time\nfrom multiprocessing.pool import ThreadPool\n\n\ndef print_names(name):\n sleep(randint(1, 3))\n print('Meu nome é: {}'.format(name))\n\n\nruntime = []\nthreads = []\nnames = ['Adson', 'Gabriel', 'Siqueira', 'Ronaldo', 'Gleilson',\n 'Emerson', 'Joselito', 'Piloto', 'Kleber', 'Mauricio']\n\npool = ThreadPool(processes=4)\nstart = time()\n\nfor name in names:\n 
async_result = pool.map(print_names, (name,))\n    threads.append(async_result)\n\nend = time()\nprint('execution time: {}'.format(end - start))","sub_path":"exemplo5_multip_map.py","file_name":"exemplo5_multip_map.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"513578638","text":"from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom datetime import timedelta\nimport logging\n\ndef check_packages():\n    import sys\n    packages= ['pymongo', 'pytest', 'pandas', 'urllib']\n    logger = logging.getLogger(\"airflow.task\")\n    for package in packages:\n        try:\n            exec(\"from {module} import *\".format(module=package))\n            logger.info(\"%s is installed\", package)\n        except Exception as e:\n            logger.info(\"%s is not installed\", package)\n            logger.error(e)\n\n\ndefault_args = {\n    'owner': 'airflow', # Run as the airflow user\n    'depends_on_past': False,\n    'start_date': days_ago(2), # Start immediately\n    'email': ['joseinn@correo.ugr.es'], # Email address for the failure report\n    'email_on_failure': False,\n    'email_on_retry': False,\n    'retries': 1,\n    'retry_delay': timedelta(minutes=10),\n}\n\n\ndag = DAG(\n    dag_id='aatestpackages',\n    default_args = default_args,\n    description = 'package check',\n    dagrun_timeout=timedelta(minutes=2),\n    schedule_interval=timedelta(days=1),\n)\n\n\nCheckPackages = PythonOperator(\n    task_id='install_packages',\n    python_callable=check_packages,\n    op_kwargs={},\n    provide_context=True,\n    dag=dag)\n    \nCheckPackages","sub_path":"dags/aatestpackages.py","file_name":"aatestpackages.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"116274732","text":"import requests\r\nimport json\r\n\r\ndef location_weather():\r\n    '''\r\n    Check the weather at the platform's location\r\n    '''\r\n    url = r'http://api.ip138.com/weather/'\r\n    data = {'code':'370102',\\\r\n            'type':'1',\\\r\n            'callback':'find',\\\r\n            'token':'57ff2b9f0f415a5d88ec9c0e9b5c3155'\r\n            }\r\n    response = requests.get(url,params=data)\r\n    str1 = response.text\r\n    str1 = str1[5:-1]\r\n    print(str1)\r\n    res = json.loads(str1)\r\n    print(res['ret'])\r\n\r\n    res = res['province']+res['city']+res['area'] + '\\n'\\\r\n          + '===========================\\n'\\\r\n          + '实时温度:'+res['data']['temp']+'摄氏度\\n' \\\r\n          + '实时天气:'+res['data']['weather'] +'\\n'\\\r\n          + '实时风力:'+res['data']['wind']+'\\n'\\\r\n          + '湿度:'+res['data']['humidity']+'\\n'\\\r\n          + 'pm2.5指数:'+res['data']['pm25']+'\\n'\\\r\n          + '===========================\\n'\\\r\n          + res['data']['time'] + ' from user.ip138.com'\r\n    return res\r\n    \r\nif __name__ == '__main__':\r\n    res = location_weather()\r\n    print(res)\r\n","sub_path":"tools/Wweather.py","file_name":"Wweather.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"125289143","text":"import os\nimport json\nimport datetime\n\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['POST'])\ndef webhook():\n    data = request.get_json()\n    usrmg = data['text'].lower()\n    now = datetime.datetime.today()\n    day = now.weekday()\n    tomorrow = now.weekday()\n    if day + 1 > 6:\n        tomorrow = 0\n    else:\n        tomorrow = day + 
1\n    \n    msg = ''\n\t\n    if data['name'] != 'Principles Quiz Bot':\n        if \"quiz\" in usrmg and \"exam\" in usrmg:\n            msg = '{}, please specify if you are talking about a quiz or exam.'.format(data['name'])\n        elif \"quiz\" in usrmg:\n            if \"today\" in usrmg and day == 0:\n                msg = '{}, there are no more quizzes. Just the final.'.format(data['name'])\n            elif \"today\" in usrmg and day != 0:\n                msg = '{}, there are no more quizzes. Just the final.'.format(data['name'])\n            elif \"tomorrow\" in usrmg:\n                if tomorrow == 0:\n                    msg = '{}, there are no more quizzes. Just the final.'.format(data['name'])\n                else:\n                    msg = '{}, there are no more quizzes. Just the final.'.format(data['name'])\n            elif \"when\" in usrmg or \"what day\" in usrmg:\n                msg = '{}, there are no more quizzes. Just the final.'.format(data['name'])\n            elif \"quiz about\" in usrmg or \"cover\" in usrmg or \"over\" in usrmg or \"about\" in usrmg:\n                msg = '{}, there are no more quizzes. Just the final.'.format(data['name'])\n        elif \"final\" in usrmg or \"exam\" in usrmg:\n            if \"when\" in usrmg:\n                msg = '{}, the final exam is on Monday, May 6th at 4:30PM.'.format(data['name'])\n            if \"notes\" in usrmg:\n                msg = '{}, you get 3 pages (front and back) of notes for the final.'.format(data['name'])\n            if \"about\" in usrmg:\n                msg = '{}, the final will be 80 percent past quizzes and 20 percent questions about homeworks and general programming language questions.'.format(data['name'])\n    if len(msg) != 0:\n        send_message(msg)\n\n    return \"ok\", 200\n\n\ndef send_message(msg):\n    url = 'https://api.groupme.com/v3/bots/post'\n\n    data = {\n        'bot_id' : os.getenv('GROUPME_BOT_ID'),\n        'text' : msg,\n    }\n    request = Request(url, urlencode(data).encode())\n    json = urlopen(request).read().decode()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"596321139","text":"#! 
/usr/bin/env python3\n\nfrom sys import argv\n\nfrom parser import *\nfrom lexer import *\nfrom regex_lex_tokens import *\n\ndef main():\n f_data = open(argv[1]).read()\n print(f_data)\n\n l = Lexer()\n for r in regexes:\n l.add(r[0], r[1], r[2])\n\n tokens = l.lex(f_data)\n \n for i in tokens:\n print(i.token, \" \", i.seq, \" \", i.t_type)\n\n p = Parser()\n p.parse(tokens)\n\nmain()\n","sub_path":"compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"552177734","text":"import tensorflow as tf\nfrom tensorflow.python.util import nest\nfrom collections import namedtuple\n\n\ndef create_cache(tensors, cache_size):\n ten_seq = nest.flatten(tensors)\n ret = []\n inits = []\n collections = (tf.GraphKeys.LOCAL_VARIABLES,)\n for ten in ten_seq:\n assert isinstance(ten, tf.Tensor)\n shape = ten.shape.as_list()\n assert shape[0] is None\n shape[0] = cache_size\n init = tf.zeros(shape, dtype=ten.dtype)\n\n var = tf.Variable(initial_value=init,\n trainable=False,\n collections=collections)\n ret.append(var)\n inits.append(var.initializer)\n return nest.pack_sequence_as(tensors, ret), tuple(inits)\n\n\n\ndef create_template_nest(name, types, shapes, names=None):\n\n flat_types = nest.flatten(types)\n flat_shapes = nest.flatten(shapes)\n flat_names = nest.flatten(names) \\\n if names else (None,) * len(flat_types)\n\n tensors = []\n with tf.name_scope(name):\n for t, s, n in zip(flat_types, flat_shapes, flat_names):\n # the batch dim\n s = (None,) + s\n tensor = tf.placeholder(t, s, n)\n tensors.append(tensor)\n return nest.pack_sequence_as(types, tensors)\n\n\ndef write_cache(cache, updates, start):\n batch_size = tf.shape(updates)[0]\n indices = tf.range(start, start + batch_size)\n up_iter = zip(nest.flatten(cache), nest.flatten(updates))\n updates = [tf.scatter_update(x, indices, y) for x, y in up_iter]\n if isinstance(start, tf.Variable):\n with tf.control_dependencies(updates):\n updates.append(start.assign_add(batch_size))\n return tuple(updates)\n\n\ndef gather_nests(indices, nests):\n flat = nest.flatten(nests)\n ret = [tf.gather(s, indices) for s in flat]\n return nest.pack_sequence_as(nests, ret)\n\n\ndef aos_to_soa(aos, nests):\n # TODO: implement\n return nests\n\n\ndef soa_to_aos(soa):\n # TODO: implement\n return []\n\n\nTensorSpec = namedtuple(\n 'TensorSpec', ('dtype', 'shape', 'name'))\n\ndef cache_from_specs(specs, cache_size):\n pass\n\n\ndef create_place_holder(specs, batch_size=None):\n\n if nest.is_sequence(specs):\n iter_specs = nest.flatten(specs)\n else:\n iter_specs = (specs,)\n\n ret = []\n for spec in iter_specs:\n assert isinstance(specs, TensorSpec)\n shape = list(spec.shape)\n shape[0] = batch_size\n\n ph = tf.placeholder(\n name=specs.name, dtype=spec.dtype, shape=shape)\n ret.append(ph)\n\n if nest.is_sequence(specs):\n ret = nest.pack_sequence_as(ret, specs)\n else:\n ret = ret[0]\n return ret\n\ndef create_specs(tensors, partial_specs=None):\n\n if nest.is_sequence(tensors):\n iter_tensors = nest.flatten(tensors)\n else:\n iter_tensors = (tensors,)\n\n specs = []\n if partial_specs:\n partials = nest.flatten(partial_specs)\n else:\n partials = [TensorSpec((None, None, None, None))\n for _ in xrange(len(iter_tensors))]\n\n for i, tensor in enumerate(iter_tensors):\n if partials[i].batch_dim:\n shape = tensor.shape.as_list()\n shape[partials[i].batch_dim] = None\n else:\n shape = tensor.shape\n\n name = partials[i].name or 
tensor.name.split('/')[-1]\n spec = TensorSpec(\n dtype=tensor.dtype, shape=shape,\n batch_dim=partials[i].batch_dim, name=name)\n specs.append(spec)\n\n if nest.is_sequence(tensors):\n ret = nest.pack_sequence_as(specs, tensors)\n else:\n ret = specs[0]\n\n return ret\n","sub_path":"headspace/utils/nest.py","file_name":"nest.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"613705667","text":"import pytest\n\nfrom os.path import isdir\nfrom shutil import rmtree\n\nfrom nequip.utils.config import Config\nfrom nequip.utils.auto_init import dataset_from_config\n\n\ninclude_frames = [0, 1]\n\n\n@pytest.mark.parametrize(\"name\", [\"aspirin\"])\ndef test_simple(name, temp_data, BENCHMARK_ROOT):\n\n config = Config(\n dict(\n dataset=name,\n root=f\"{temp_data}/{name}\",\n extra_fixed_fields={\"r_max\": 3},\n include_frames=include_frames,\n )\n )\n\n if name == \"aspirin\":\n config.dataset_file_name = str(BENCHMARK_ROOT / \"aspirin_ccsd-train.npz\")\n\n a = dataset_from_config(config)\n print(a.data)\n print(a.fixed_fields)\n assert isdir(config.root)\n assert isdir(f\"{config.root}/processed\")\n assert len(a.data.edge_index) == len(include_frames)\n rmtree(f\"{config.root}/processed\")\n","sub_path":"tests/datasets/test_simplesets.py","file_name":"test_simplesets.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"637577415","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom tkinter import *\n\n\nclass inputbox:\n value=''\n def __init__(self, parent, title, message, default):\n self.pencere = Toplevel()\n self.pencere.title(title)\n self.pencere.geometry(\"300x75\")\n\n self.label = Label(self.pencere)\n self.label.config(text=message)\n self.label.pack(fill=X)\n\n self.entry = Entry(self.pencere)\n self.entry.insert(0, default)\n self.entry.focus_set()\n self.entry.select_range(0,END)\n self.entry.pack(fill=X)\n\n self.button = Button(self.pencere)\n self.button.config(text=\"OK\", width=10, command=self.ok)\n self.button.pack(pady=5)\n\n self.pencere.deiconify()\n self.pencere.wait_window()\n\n def ok(self):\n self.value=self.entry.get()\n self.pencere.destroy()\n","sub_path":"016-Python Uygulamalar/Adres Defteri with Tkinter/frm_input.py","file_name":"frm_input.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"248329220","text":"# *_* coding:utf-8 *_*\n\n\nimport numpy as np\nimport os\nfrom skimage import measure, io\ndef get_isolate_character(image,path):\n lb_image = measure.label(image, connectivity=1)\n regions = measure.regionprops(lb_image)\n totallist = []\n for region in regions:\n totallist.append([region.bbox, region.image.astype(np.int32), 0])\n\n\nif __name__ == \"__main__\":\n path = 'D:\\藏文识别\\相关文献\\data\\gt_text_lines'\n path2 = 'D:\\藏文识别\\相关文献\\data\\Sticky_text\\ '\n path3 = 'D:\\藏文识别\\相关文献\\data\\marke_Sticky_text\\ '\n\n for i in os.listdir(path):\n file = os.path.join(path,i)\n if file.endswith('.png'):\n image = io.imread(file)\n image = image / 255\n name,type = os.path.basename(file).split('.')\n # process_and_save_photo(path2 + name,image)\n get_isolate_character(image, 
path2)\n","sub_path":"isolate_class/get_isolate_deep_by_more_character.py","file_name":"get_isolate_deep_by_more_character.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"212107591","text":"\"\"\"database_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom database_project.library_app.views import *\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('show_authors/', show_authors_view),\n path('search_authors/', search_authors_view),\n path('add_author/', add_author_view),\n path('', show_books_view),\n path('search_books/', search_books_view),\n path('add_book/', add_book_view),\n path('show_book_instances/', show_book_instances_view),\n path('search_book_instances/', search_book_instances_view),\n path('add_book_instance/', add_book_instance_view),\n path('show_readers/', show_readers_view),\n path('search_readers/', search_readers_view),\n path('add_reader/', add_reader_view),\n]\n","sub_path":"database_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"346079823","text":"from person_c import Person\n\nclass Batman(Person):\n def __init__(self,deposit,*args,**kwargs):\n super().__init__(*args,**kwargs)\n self.deposit=deposit\n\nif __name__==\"__main__\":\n b=Batman(10000,\"Bad\",100,199)\n b.speak()","sub_path":"batman.py","file_name":"batman.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"202850942","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\komidl\\scraper.py\n# Compiled at: 2019-10-12 01:27:51\n# Size of source mod 2**32: 13113 bytes\n\"\"\"This module contains the Scraper class\"\"\"\nimport os, sys, imghdr, shutil, asyncio\nfrom argparse import Namespace\nfrom typing import List, Tuple\nimport aiofiles, requests\nfrom aiohttp import ClientSession\nfrom bs4 import BeautifulSoup, Tag\nimport komidl.status as status\nimport komidl.constants as constants\nfrom komidl.exceptions import ExtractorFailed, InvalidURL\n\nclass Scraper:\n __doc__ = \"Download all images from a URL with an extractor.\\n\\n The Scraper class given a URL and extractor will work to create\\n directories and download the images, as well as write tags to files.\\n\\n File downloading is heavily IO bound, and thus is performed\\n asynchronously.\\n\\n Scraper also has a session object that is shared among methods, and\\n is used for all non-asynchronous web requests. 
Asynchronous web\\n requests use a 'ClientSession' from aiohttp in context, but it\\n shares the headers of the session object.\\n \"\n\n def __init__(self, extractor=None):\n self.extractor = extractor\n self._session = requests.Session()\n self._session.headers = {'User-Agent':constants.USER_AGENT, 'Accept-encoding':'gzip, deflate'}\n requests.packages.urllib3.disable_warnings()\n self._downloaded = 0\n\n @staticmethod\n def _soup_request(session: requests.Session, url: str) -> Tag:\n \"\"\"Returns a BS4 soup from the URL's response\n\n The request is done using the session.\n \"\"\"\n request = session.get(url, verify=False)\n request.raise_for_status()\n content = request.content\n return BeautifulSoup(content, 'html.parser')\n\n def reset(self) -> None:\n \"\"\"Resets the state of the Scraper after use\"\"\"\n self._downloaded = 0\n\n @staticmethod\n def _get_extension(url: str) -> Tuple[(str, str)]:\n \"\"\"Returns the path and extension of a URL.\n\n Acts exactly like os.path.splitext(), but without the '.' char\n in the extension.\n \"\"\"\n path, ext = os.path.splitext(url)\n return (path, ext[1:])\n\n @staticmethod\n async def _write_image(response: str, path: str) -> None:\n \"\"\"Saves the response of an image request to the path\"\"\"\n async with aiofiles.open(path, 'wb') as img:\n await img.write(await response.read())\n\n @staticmethod\n async def _retry_request(session, path: str, url: str) -> None:\n \"\"\"Retries downloading an image with alternate file extensions\n\n Alternate image formats are defined in constants.COMMON_FORMATS.\n\n Raises an ExtractorFailed exception if all attempts using\n alternate image extensions have been exhausted.\n \"\"\"\n _, current_ext = Scraper._get_extension(url)\n other_exts = (ext for ext in constants.COMMON_FORMATS if ext != current_ext)\n for ext in other_exts:\n new_url = Scraper._change_extension(url, ext)\n response = await session.get(new_url)\n if response.status == 200:\n await Scraper._write_image(response, path)\n break\n else:\n raise ExtractorFailed(f\"Extractor returned an invalid URL: {url}\")\n\n async def _image_request(self, status_bar, path: str, url: str) -> None:\n \"\"\"Downloads an image from the URL and saves it to the path.\n\n Failure to download an image (HTTP 404) may be caused by a wrong\n extension from the extractor. On failure, various other\n extensions are exhaustively tried. 
If the image can't be\n downloaded, then download of the whole gallery is failed.\n\n It may be possible that the file extension in the 'url' or\n 'path' parameters are inaccurate, so after a successful\n download the magic bytes of the image are checked and the image\n may be renamed with the appropriate extension.\n\n Downloading an image increments the counter and updates the\n status bar.\n \"\"\"\n async with ClientSession(headers=(self._session.headers)) as session:\n response = await session.get(url)\n if response.status == 200:\n await self._write_image(response, path)\n else:\n if response.status == 404:\n await self._retry_request(session, path, url)\n else:\n raise InvalidURL(f\"Server error encountered at: {url}\")\n await self._fix_extension(path)\n self._downloaded += 1\n status_bar.update(self._downloaded)\n\n def _download_images(self, status_bar, urls) -> None:\n \"\"\"Asynchronously downloads images from the gallery urls\"\"\"\n futures = [self._image_request(status_bar, path, url) for path, url in urls]\n asyncio.run(asyncio.wait(futures))\n\n @staticmethod\n async def _fix_extension(image: str) -> None:\n \"\"\"Renames the image's file extension based on the magic bytes\n\n If magic bytes could not be found, the image's extension is not\n modified.\n \"\"\"\n _, cur_ext = Scraper._get_extension(image)\n actual_ext = imghdr.what(image)\n if actual_ext is not None:\n if cur_ext != actual_ext:\n new_image = Scraper._change_extension(image, actual_ext)\n os.rename(image, new_image)\n\n @staticmethod\n def _change_extension(url: str, ext: str) -> str:\n \"\"\"Replaces the URL's file extension.\"\"\"\n base_url, _ = os.path.splitext(url)\n return f\"{base_url}.{ext}\"\n\n @staticmethod\n def _create_dir(title: str, root_dir: str, overwrite: bool=False) -> str:\n \"\"\"Creates a directory to hold all downloaded files.\n\n The directory is created in the destination (root_dir)\n directory, which is defined by the --directory argument (by\n default it is the directory the user was in when they ran the\n script).\n\n If the destination directory does not exists, an exception is\n raised.\n\n If 'overwrite' is set, then any existing folder is automatically\n overwritten. Otherwise, a prompt will appear to give the user\n the option to overwrite or create the directory with a new name.\n\n Returns the full path of the newly created directory.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dest = os.path.join(root_dir, title)\n try:\n os.mkdir(dest)\n except FileExistsError:\n if not overwrite:\n prompt_msg = f\"{dest} already exists. 
Overwrite?\"\n overwrite = status.prompt(prompt_msg)\n elif overwrite:\n shutil.rmtree(dest)\n else:\n duplicates = sum((1 for dir_ in os.listdir(root_dir) if title in dir_))\n dest = f\"{dest} ({duplicates})\"\n os.mkdir(dest)\n\n return dest\n\n @staticmethod\n def _append_path(path: str, urls: List[List[str]]) -> Tuple[(str, str)]:\n \"\"\"Appends the path to all image paths in gallery_urls.\"\"\"\n for img, url in urls:\n yield (os.path.join(path, img), url)\n\n @staticmethod\n def _create_subdirs(urls: List[List[str]]) -> None:\n \"\"\"Create all sub-directories from paths in gallery_urls.\"\"\"\n img_paths = (os.path.split(img) for img, _ in urls)\n for sub_dir, _ in img_paths:\n os.makedirs(sub_dir, exist_ok=True)\n\n def scrape(self, url: str, args: Namespace) -> str:\n \"\"\"Scrapes an image gallery at the URL.\n\n Using the extractor set within the object, if the URL given is a\n gallery then it is used to scrape.\n\n Raises an InvalidURL exception if the given URL or scraped image\n URL could not be requested.\n\n Raises an ExtractorFailed exception if an error was encountered\n within the extractor.\n\n Raises a ValueError exception if any argument values are\n incorrect.\n\n Returns the full path of the directory containing all scraped\n images.\n \"\"\"\n soup = self._soup_request(self._session, url)\n if not self.extractor.is_gallery(url):\n raise InvalidURL(f\"'{url}' is not a valid image gallery for the '{self.extractor.name}' extractor\")\n try:\n directory = self.scrape_gallery(soup, url, args)\n finally:\n self.extractor.reset()\n\n return directory\n\n def scrape_gallery(self, soup: Tag, url: str, args: Namespace) -> str:\n \"\"\"Scrape a URL and download all images.\n\n Tags are written to a file if the --tags option is selected.\n\n In the process, the directory to hold all scraped info is\n created.\n \"\"\"\n tags = self.extractor.get_tags(url, soup, args)\n title = self._build_title(tags)\n path = self._create_dir(title, (args.directory), overwrite=(args.yes))\n _, dirname = os.path.split(path)\n size = self.extractor.get_size(url, soup, args)\n msg = f\"Downloading: {dirname}\"\n with status.progress(msg, constants.STATUSBAR_LEN, size) as (status_bar):\n gallery_urls = self.extractor.get_gallery_urls(url, soup, args)\n img_urls = list(Scraper._append_path(path, gallery_urls))\n self._create_subdirs(img_urls)\n self._download_images(status_bar, img_urls)\n if args.tags:\n self._write_tags(tags, path)\n return path\n\n @staticmethod\n def _build_title(tags: dict) -> str:\n \"\"\"Build a name for the directory containing downloaded images\"\"\"\n\n def langs_tostr(langs: str) -> str:\n \"\"\"Return language as an ISO 639-1 abbreviation\"\"\"\n if isinstance(langs, list):\n language, *_ = langs\n else:\n language = langs\n return constants.LANG_TO_ISO[language.title()]\n\n def build_credit_tag(tags: dict) -> str:\n \"\"\"Credit the author/artist/group in the title\n\n Priority of: Authors -> Artists -> Groups -> UNKNOWN\n \"\"\"\n credit = tags.get('Authors', tags.get('Artists', tags.get('Groups', 'Unknown')))\n if isinstance(credit, list):\n return 'x'.join((name.upper() for name in credit))\n return credit.upper()\n\n title = tags.get('Title', 'UNTITLED')\n language = langs_tostr(tags['Languages'])\n credit = build_credit_tag(tags)\n chapters = tags.get('Chapters')\n chapter_tag = f\"[{chapters}] \" if chapters else ' '\n return f\"[{credit}][{language}]{chapter_tag}{title}\"\n\n @staticmethod\n def _write_tags(tags: dict, path: str) -> None:\n \"\"\"Writes tags to 
a text file path\"\"\"\n info_str = Scraper._tags_tostr(tags)\n info_file = os.path.join(path, 'info.txt')\n with open(info_file, 'w') as (file_):\n file_.write(info_str)\n\n @staticmethod\n def _tags_tostr(tags: dict) -> str:\n \"\"\"Returns tags as a string of format: 'KEY:ITEM,ITEM,ITEM'\"\"\"\n valid_keys = [key for key in tags if tags[key]]\n item_strs = ((','.join(tags[key]) if isinstance(tags[key], list) else tags[key]) for key in valid_keys)\n tag_strs = (f\"{key}:{item_str}\\n\" for key, item_str in zip(valid_keys, item_strs))\n return ''.join(tag_strs)","sub_path":"pycfiles/KomiDL-0.7.1-py3.7/scraper.cpython-37.py","file_name":"scraper.cpython-37.py","file_ext":"py","file_size_in_byte":11479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"195532860","text":"import maya.cmds as mc\nimport maya.api.OpenMaya as om\n\ndef maya_useNewAPI():\n pass\n\nclass TateCurveLengthP(om.MPxNode):\n\n TYPE_NAME = \"tate_curveLengthP\"\n TYPE_ID = om.MTypeId(0x77770003)\n\n inputCurve = None\n inputLength = None\n outputParameter = None\n\n def __init__(self):\n super(TateCurveLengthP, self).__init__()\n\n def compute(self, plug, data):\n \n if plug == TateCurveLengthP.outputParameter:\n\n inputCrv = data.inputValue(TateCurveLengthP.inputCurve).asNurbsCurve()\n inputLen = data.inputValue(TateCurveLengthP.inputLength).asDouble()\n if not inputCrv.isNull():\n crvFn = om.MFnNurbsCurve(inputCrv)\n outputPValue = crvFn.findParamFromLength(inputLen)\n\n outputP = data.outputValue(TateCurveLengthP.outputParameter)\n outputP.setDouble(outputPValue)\n\n data.setClean(plug)\n\n @classmethod\n def creator(cls):\n return TateCurveLengthP()\n \n @classmethod\n def initialize(cls):\n mtAttr = om.MFnTypedAttribute()\n nuAttr = om.MFnNumericAttribute()\n\n cls.inputCurve = mtAttr.create(\"inputCurve\", \"inputCurve\", om.MFnData.kNurbsCurve)\n mtAttr.keyable = True\n\n cls.inputLength = nuAttr.create(\"inputLength\", \"inputLength\", om.MFnNumericData.kDouble, 0.0)\n nuAttr.keyable = True\n\n cls.outputParameter = nuAttr.create(\"outputParameter\", \"outputParameter\", om.MFnNumericData.kDouble, 0.0)\n nuAttr.writable = False\n\n cls.addAttribute(cls.inputCurve)\n cls.addAttribute(cls.inputLength)\n cls.addAttribute(cls.outputParameter)\n \n cls.attributeAffects(cls.inputCurve, cls.outputParameter)\n cls.attributeAffects(cls.inputLength, cls.outputParameter)\n\ndef initializePlugin(plugin):\n\n vendor = \"cho wooseoung\"\n version = \"1.0.0\"\n\n pluginFn = om.MFnPlugin(plugin, vendor, version)\n\n try:\n pluginFn.registerNode(TateCurveLengthP.TYPE_NAME,\n TateCurveLengthP.TYPE_ID,\n TateCurveLengthP.creator,\n TateCurveLengthP.initialize,\n om.MPxNode.kDependNode)\n except:\n om.MGlobal.displayError(\"Failed to register node : {0}\".format(TateCurveLengthP.TYPE_NAME))\n\ndef uninitializePlugin(plugin):\n \n pluginFn = om.MFnPlugin(plugin)\n\n try:\n pluginFn.deregisterNode(TateCurveLengthP.TYPE_ID)\n except:\n om.MGlobal.displayError(\"Failed to deregister node : {0}\".format(TateCurveLengthP.TYPE_NAME))\n\n\nif __name__ == \"__main__\":\n \n mc.file(new=True, force=True)\n plugin_name = \"tate_curveLengthP.py\"\n\n mc.evalDeferred(\"if mc.pluginInfo('{0}', q=True, loaded=True): mc.unloadPlugin('{0}')\".format(plugin_name))\n mc.evalDeferred(\"if not mc.pluginInfo('{0}', q=True, loaded=True): mc.loadPlugin('{0}')\".format(plugin_name))\n\n mc.evalDeferred(\"curveLength = mc.createNode('tate_curveLengthP')\")\n mc.evalDeferred(\"pointonCurve = 
mc.createNode('pointOnCurveInfo')\")\n mc.evalDeferred(\"loc = mc.spaceLocator()[0]\")\n\n mc.evalDeferred(\"mc.connectAttr('{0}.outputParameter'.format(curveLength), '{0}.parameter'.format(pointonCurve))\")\n mc.evalDeferred(\"mc.connectAttr('{0}.inputCurve'.format(curveLength), '{0}.inputCurve'.format(pointonCurve))\")\n mc.evalDeferred(\"mc.connectAttr('{0}.result.position'.format(pointonCurve), '{0}.t'.format(loc))\")\n ","sub_path":"plug-ins/tate_curveLengthP.py","file_name":"tate_curveLengthP.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"199514090","text":"import configparser\r\nfrom alice_blue import TransactionType, ProductType\r\nimport datetime\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read(\"keys.conf\")\r\ntry:\r\n section = config[\"KEYS\"]\r\nexcept KeyError:\r\n raise Exception(\"Please configure your keys in keys.conf\")\r\n\r\n# credentials\r\nAPP_ID = config[\"KEYS\"][\"APP_ID\"]\r\nAPP_NAME = config[\"KEYS\"][\"APP_NAME\"]\r\nAPP_SECRET = config[\"KEYS\"][\"APP_SECRET\"]\r\nUSERNAME = config[\"KEYS\"][\"USERNAME\"]\r\nPASSWORD = config[\"KEYS\"][\"PASSWORD\"]\r\nTWO_FA = config[\"KEYS\"][\"TWO_FA\"]\r\n\r\n# data directory\r\nDATA_DIR = \"data\"\r\nTRANS_PATH = \"transactions\"\r\nLOGS_PATH = f\"{TRANS_PATH}/{datetime.datetime.today().strftime('%Y-%m-%d')}-logs.csv\"\r\nTICKERS_PATH = f\"metadata/ind_nifty50list.csv\"\r\nNIFTY50_TICKERS = \"https://www1.nseindia.com/content/indices/ind_nifty50list.csv\"\r\n\r\n# Dictionaries\r\nTRANSACTIONS = {\r\n TransactionType.Buy: 1,\r\n TransactionType.Sell: -1\r\n}\r\n\r\nTRANSACTIONS_INV = {\r\n 1: TransactionType.Buy,\r\n -1: TransactionType.Sell\r\n}\r\n\r\nINTRADAY = {\r\n True: ProductType.Intraday,\r\n False: ProductType.Delivery\r\n}\r\n\r\nTRANSACTIONS_LOGS = {\r\n \"Datetime\": [],\r\n \"Ticker\": [],\r\n \"Order ID\": [],\r\n \"Quantity\": [],\r\n}\r\n\r\n\r\n# current position structure [quantity, oms_order_id, target, sl]\r\nNULL_POS_RESP = [0, '', 0., 0.]\r\n\r\n\r\n# Strategy parameters\r\n# BBANDS PARAMETERS\r\nSTDEV = 2\r\nN_PERIODS = 20\r\nMA_TYPE = 1 # {0 : SMA, 1 : EMA, check the ma_type in talib for others}\r\n\r\n# RSI\r\nRSI_PERIOD = 14\r\nRSI_OVERBOUGHT = 75\r\nRSI_OVERSOLD = 25\r\n\r\n# maximum rows\r\nROW_SIZE = 50\r\n\r\n# Position params\r\nPROFIT_EXIT = 0.5\r\nLOSS_EXIT = 3\r\n\r\n\r\n# max loss per trade\r\nMAX_LOSS_PER_TRADE = 200\r\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"602133055","text":"import unittest\n#O(N*N)\ndef RotateMatrix(nbynarray):\n length = len(nbynarray)\n nlayers = length/2\n for layer in range(nlayers):\n length = len(nbynarray)\n first = layer\n last = length-1-layer\n for i in range(first,last):\n offset=i-first\n tmp=nbynarray[first][i]#top\n nbynarray[first][i] = nbynarray[last-offset][first]\n nbynarray[last-offset][first] = nbynarray[last][last-offset]\n nbynarray[last][last-offset] = nbynarray[i][last]\n nbynarray[i][last] = tmp\n return nbynarray\n\nclass Test(unittest.TestCase):\n '''Test Cases'''\n data = [\n ([[1,2],[3,4]], [[3,1],[4,2]]),\n ([[1,2,3],[4,5,6],[7,8,9]], [[7,4,1],[8,5,2],[9,6,3]]),\n ([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]],[[13,9,5,1],[14,10,6,2],[15,11,7,3],[16,12,8,4]])]\n\n def test_rotate_mat(self):\n for [array, expected] in self.data:\n actual = RotateMatrix(array)\n 
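# RotateMatrix rotates the grid in place and returns the same list, so the comparison below checks the mutated input.\n            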
self.assertEqual(actual, expected)\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","sub_path":"Python/Chapter 1/17RotateMatrix.py","file_name":"17RotateMatrix.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"499933046","text":"import mysql.connector\nfrom cassandra.cluster import Cluster\n\nprint(\"Migrate application table by descending order of AppIndex.\")\ncnx = mysql.connector.connect(user='zhenchang', password='SecureAge123', host='192.168.0.45', database='UniversalAvDb2')\nmysql_cursor = cnx.cursor()\ncluster = Cluster(['192.168.0.42'])\nuav_session = cluster.connect(\"uav\")\nmysql_cursor.execute(\"select appIndex from Application order by appIndex desc limit 1\")\n(largest_index,) = mysql_cursor.fetchone()\nprint(\"Currently largest AppIndex is: \" + str(largest_index))\nprint(\"Input AppIndex to start at: \")\nstart_at_index = int(input(\"-->\"))\nstart_at_index += 1\napp_index = start_at_index\nprint(\"Input AppIndex to stop at: \")\nstop_at_index = input(\"-->\")\nprint(\"Start migration...\")\ntotal = int(start_at_index) - int(stop_at_index)\npercentage = 0\nwhile True:\n mysql_cursor.execute(\n \"select * from Application where appindex < %s and appindex >= \" + stop_at_index +\n \" order by appindex desc limit 10000\", (app_index,))\n count = 0\n for (app_index, filehash, filesize, isscanned, submissiondate, isvirus, binaryfilepath, source, null,\n md5, sha1, sha256, sha512, lastupdate) in mysql_cursor:\n count += 1\n if isscanned == 1:\n isscanned = True\n else:\n isscanned = None\n if isvirus == 1:\n isvirus = True\n else:\n isvirus = None\n uav_session.execute(\n \"INSERT INTO application (filehash , filesize , binaryfilepath , isscanned , isvirus , lastupdate , md5 , \"\n \"sha1 , sha256 , sha512 , source , submissiondate ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\",\n (filehash, filesize, binaryfilepath, isscanned, isvirus, lastupdate, md5, sha1, sha256, sha512, source,\n submissiondate))\n if count == 0:\n print(\"Latest inserted AppIndex: \" + str(app_index))\n print(\"%100.\")\n exit()\n print(\"Latest inserted AppIndex: \" + str(app_index))\n cur = int(start_at_index) - int(app_index)\n cur_percentage = int(cur * 100 / total)\n if cur_percentage > percentage:\n percentage = cur_percentage\n print(\"%\" + str(percentage))\n","sub_path":"uav_data/migrate_application_table.py","file_name":"migrate_application_table.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"639928301","text":"# Copyright 2014 - Rackspace\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nimport testscenarios\n\nfrom solum.api.controllers.v1 import language_pack\nfrom solum.common import exception\nfrom solum.tests import base\nfrom solum.tests import fakes\n\n\nload_tests = testscenarios.load_tests_apply_scenarios\n\n\n@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)\n@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)\n@mock.patch('solum.api.handlers.language_pack_handler.LanguagePackHandler')\nclass TestLanguagePackController(base.BaseTestCase):\n def test_language_pack_get(self, handler_mock, resp_mock, request_mock):\n handler_get = handler_mock.return_value.get\n handler_get.return_value = fakes.FakeLanguagePack()\n language_pack_obj = language_pack.LanguagePackController(\n 'test_id')\n result = language_pack_obj.get()\n self.assertEqual(200, resp_mock.status)\n self.assertIsNotNone(result)\n handler_get.assert_called_once_with('test_id')\n\n def test_lp_get_not_found(self, handler_mock, resp_mock, request_mock):\n handler_get = handler_mock.return_value.get\n handler_get.side_effect = exception.NotFound(name='language_pack',\n id='test_id')\n language_pack_obj = language_pack.LanguagePackController(\n 'test_id')\n language_pack_obj.get()\n self.assertEqual(404, resp_mock.status)\n handler_get.assert_called_once_with('test_id')\n\n\n@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)\n@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)\n@mock.patch('solum.api.handlers.language_pack_handler.LanguagePackHandler')\nclass TestLanguagePacksController(base.BaseTestCase):\n def test_language_packs_get_all(self, handler_mock, resp_mock,\n request_mock):\n language_packs_obj = language_pack.LanguagePacksController()\n response = language_packs_obj.get_all()\n self.assertIsNotNone(response)\n self.assertEqual(200, resp_mock.status)\n","sub_path":"solum/tests/api/v1/test_language_pack.py","file_name":"test_language_pack.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"501854801","text":"import random\n\na = []\nfor i in range(10):\n i = random.randrange(1, 100)\n a.append(i)\nprint(a)\n\nb = a[0]\nfor u in range(len(a)):\n if(b > a[u]):\n b = a[u]\n\nprint(\"minimum\", b)","sub_path":"Basic_grammer/for/for_random_minimum_number.py","file_name":"for_random_minimum_number.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"268180179","text":"import torch\nfrom functools import partial\nfrom torch.utils.data import DataLoader\nfrom torchtext.data import to_map_style_dataset\nfrom torchtext.data.utils import get_tokenizer\nfrom torchtext.vocab import build_vocab_from_iterator\nfrom torchtext.datasets import WikiText2, WikiText103\n\nfrom utils.constants import (\n CBOW_N_WORDS,\n SKIPGRAM_N_WORDS,\n MIN_WORD_FREQUENCY,\n MAX_SEQUENCE_LENGTH,\n)\n\n\ndef get_english_tokenizer():\n \"\"\"\n Documentation:\n https://pytorch.org/text/stable/_modules/torchtext/data/utils.html#get_tokenizer\n \"\"\"\n tokenizer = get_tokenizer(\"basic_english\", language=\"en\")\n return tokenizer\n\n\ndef get_data_iterator(ds_name, ds_type, data_dir):\n if ds_name == \"WikiText2\":\n data_iter = WikiText2(root=data_dir, split=(ds_type))\n elif ds_name == \"WikiText103\":\n data_iter = WikiText103(root=data_dir, 
split=(ds_type))\n else:\n raise ValueError(\"Choose dataset from: WikiText2, WikiText103\")\n data_iter = to_map_style_dataset(data_iter)\n return data_iter\n\n\ndef build_vocab(data_iter, tokenizer):\n \"\"\"Builds vocabulary from iterator\"\"\"\n \n vocab = build_vocab_from_iterator(\n map(tokenizer, data_iter),\n specials=[\"\"],\n min_freq=MIN_WORD_FREQUENCY,\n )\n vocab.set_default_index(vocab[\"\"])\n return vocab\n\n\ndef collate_cbow(batch, text_pipeline):\n \"\"\"\n Collate_fn for CBOW model to be used with Dataloader.\n `batch` is expected to be list of text paragrahs.\n \n Context is represented as N=CBOW_N_WORDS past words \n and N=CBOW_N_WORDS future words.\n \n Long paragraphs will be truncated to contain\n no more that MAX_SEQUENCE_LENGTH tokens.\n \n Each element in `batch_input` is N=CBOW_N_WORDS*2 context words.\n Each element in `batch_output` is a middle word.\n \"\"\"\n batch_input, batch_output = [], []\n for text in batch:\n text_tokens_ids = text_pipeline(text)\n\n if len(text_tokens_ids) < CBOW_N_WORDS * 2 + 1:\n continue\n\n if MAX_SEQUENCE_LENGTH:\n text_tokens_ids = text_tokens_ids[:MAX_SEQUENCE_LENGTH]\n\n for idx in range(len(text_tokens_ids) - CBOW_N_WORDS * 2):\n token_id_sequence = text_tokens_ids[idx : (idx + CBOW_N_WORDS * 2 + 1)]\n output = token_id_sequence.pop(CBOW_N_WORDS)\n input_ = token_id_sequence\n batch_input.append(input_)\n batch_output.append(output)\n\n batch_input = torch.tensor(batch_input, dtype=torch.long)\n batch_output = torch.tensor(batch_output, dtype=torch.long)\n return batch_input, batch_output\n\n\ndef collate_skipgram(batch, text_pipeline):\n \"\"\"\n Collate_fn for Skip-Gram model to be used with Dataloader.\n `batch` is expected to be list of text paragrahs.\n \n Context is represented as N=SKIPGRAM_N_WORDS past words \n and N=SKIPGRAM_N_WORDS future words.\n \n Long paragraphs will be truncated to contain\n no more that MAX_SEQUENCE_LENGTH tokens.\n \n Each element in `batch_input` is a middle word.\n Each element in `batch_output` is a context word.\n \"\"\"\n batch_input, batch_output = [], []\n for text in batch:\n text_tokens_ids = text_pipeline(text)\n\n if len(text_tokens_ids) < SKIPGRAM_N_WORDS * 2 + 1:\n continue\n\n if MAX_SEQUENCE_LENGTH:\n text_tokens_ids = text_tokens_ids[:MAX_SEQUENCE_LENGTH]\n\n for idx in range(len(text_tokens_ids) - SKIPGRAM_N_WORDS * 2):\n token_id_sequence = text_tokens_ids[idx : (idx + SKIPGRAM_N_WORDS * 2 + 1)]\n input_ = token_id_sequence.pop(SKIPGRAM_N_WORDS)\n outputs = token_id_sequence\n\n for output in outputs:\n batch_input.append(input_)\n batch_output.append(output)\n\n batch_input = torch.tensor(batch_input, dtype=torch.long)\n batch_output = torch.tensor(batch_output, dtype=torch.long)\n return batch_input, batch_output\n\n\ndef get_dataloader_and_vocab(\n model_name, ds_name, ds_type, data_dir, batch_size, shuffle, vocab=None\n):\n\n data_iter = get_data_iterator(ds_name, ds_type, data_dir)\n tokenizer = get_english_tokenizer()\n\n if not vocab:\n vocab = build_vocab(data_iter, tokenizer)\n \n text_pipeline = lambda x: vocab(tokenizer(x))\n\n if model_name == \"cbow\":\n collate_fn = collate_cbow\n elif model_name == \"skipgram\":\n collate_fn = collate_skipgram\n else:\n raise ValueError(\"Choose model from: cbow, skipgram\")\n\n dataloader = DataLoader(\n data_iter,\n batch_size=batch_size,\n shuffle=shuffle,\n collate_fn=partial(collate_fn, text_pipeline=text_pipeline),\n )\n return dataloader, vocab\n 
","sub_path":"utils/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"463072499","text":"import json\nfrom django.shortcuts import render\nfrom django.core import serializers\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.conf import settings\nfrom api.models.ec.organization import Organization\nfrom api.models.ec.store import Store\nfrom api.models.ec.product import Product\nfrom api.models.ec.customer import Customer\nfrom api.models.ec.employee import Employee\nfrom api.models.ec.receipt import Receipt\nfrom api.models.ec.wishlist import Wishlist\n\n\ndef front_page(request):\n employee = Employee.objects.get_for_user_id_or_none(request.user.id)\n org = request.organization\n store = request.store\n customer = request.customer\n receipt = request.receipt\n wishlists = request.wishlists\n\n # Fetch all the featured comics throughout all the stores or depending\n # on the organization / store.\n try:\n featured_products = Product.objects.filter(\n is_listed=True,\n store__is_aggregated=True,\n is_sold=False,\n is_featured=True,\n organization__is_listed=True,\n store__is_listed=True,\n )\n \n if org:\n featured_products = featured_products.filter(organization=org).order_by('-price')\n \n if store:\n featured_products = featured_products.filter(store=store).order_by('-price')\n except Product.DoesNotExist:\n featured_products = None\n \n # Fetch all the new comics throghout all the stores or depending on the\n # organization / store.\n try:\n new_products = Product.objects.filter(\n is_listed=True,\n store__is_aggregated=True,\n is_sold=False,\n is_new=True,\n )\n\n if org:\n new_products = new_products.filter(organization=org)\n\n if store:\n new_products = new_products.filter(store=store)\n except Product.DoesNotExist:\n new_products = None\n\n # Display the view with all our model information.\n return render(request, 'store_landpage/index.html',{\n 'page_metadata': 'store_landpage/meta.html',\n 'GOOGLE_ANALYTICS_KEY': settings.GOOGLE_ANALYTICS_KEY,\n 'receipt': receipt,\n 'wishlists': wishlists,\n 'customer': customer,\n 'employee': employee,\n 'featured_products': featured_products,\n 'new_products': new_products,\n 'org': org,\n 'store': store,\n 'page': 'home',\n 'settings': settings,\n })\n\n\ndef tos_page(request):\n employee = Employee.objects.get_for_user_id_or_none(request.user.id)\n org = request.organization\n store = request.store\n customer = request.customer\n receipt = request.receipt\n wishlists = request.wishlists\n\n # Display the view with all our model information.\n return render(request, 'store_landpage/tos.html',{\n 'page_metadata': 'store_landpage/meta.html',\n 'GOOGLE_ANALYTICS_KEY': settings.GOOGLE_ANALYTICS_KEY,\n 'receipt': receipt,\n 'wishlists': wishlists,\n 'customer': customer,\n 'employee': employee,\n 'org': org,\n 'store': store,\n 'page' : 'tos',\n })\n\n\ndef privacy_page(request):\n employee = Employee.objects.get_for_user_id_or_none(request.user.id)\n org = request.organization\n store = request.store\n customer = request.customer\n receipt = request.receipt\n wishlists = request.wishlists\n \n # Display the view with all our model information.\n return render(request, 'store_landpage/privacy.html',{\n 'page_metadata': 'store_landpage/meta.html',\n 'GOOGLE_ANALYTICS_KEY': settings.GOOGLE_ANALYTICS_KEY,\n 'receipt': receipt,\n 'wishlists': wishlists,\n 'customer': customer,\n 'employee': 
employee,\n 'org': org,\n 'store': store,\n 'page' : 'tos',\n })\n\n\n","sub_path":"store_landpage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"112190079","text":"import glob\r\nfrom os import path\r\n\r\nfrom aip import AipOcr\r\n\r\n\r\n# def get_Same_Image(image):\r\n# # size = (489, 113)\r\n# return image.convert('RGB')\r\n#\r\n#\r\n# def store_pic(img, name):\r\n# img.save(name)\r\n#\r\n#\r\n#\r\n# def difference(list1, list2):\r\n# sum1 = 0\r\n# for i in range(len(list1)):\r\n# if list1[i] == list2[i]:\r\n# sum1 += 1\r\n# else:\r\n# # 照公式可获得\r\n# sum1 += 1-(abs(list1[i] - list2[i]) / max(list1[i], list2[i]))\r\n# return sum1 / len(list1)\r\n#\r\n#\r\n# def Get_Similarity(image1, image2):\r\n# # 统一格式\r\n# img1 = get_Same_Image(image1)\r\n# img2 = get_Same_Image(image2)\r\n# # 获得直方图\r\n# list1 = img1.histogram()\r\n# list2 = img2.histogram()\r\n# return difference(list1, list2)\r\n#\r\n#\r\n# t = (383, 138, 470, 165)\r\n# pyautogui.click(x=1277, y=11)\r\n# time.sleep(1)\r\n# n = 5\r\n# pyautogui.click(x=670, y=280)\r\n# while n < 10:\r\n# time.sleep(1)\r\n# img = ImageGrab.grab(t)\r\n# img.save('D:\\PYcode\\old_pic\\\\' + str(n) +'.jpg')\r\n# pyautogui.click(x=600, y=690)\r\n# n = n + 1\r\n# pyautogui.click(x=920, y=300)\r\n#\r\n# a = Image.open(\"D:\\\\PYcode\\\\old_pic\\\\2.jpg\")\r\n# b = Image.open(\"D:\\\\PYcode\\\\old_pic\\\\0.jpg\")\r\n# c = Image.open(\"D:\\\\PYcode\\\\old_pic\\\\3.jpg\")\r\n# d = Image.open(\"D:\\\\PYcode\\\\old_pic\\\\zijin.jpg\")\r\n# # print((Get_Similarity(a, d)))\r\n# # print((Get_Similarity(d, c)))\r\n# # print((Get_Similarity(b, d)))\r\n#\r\n# # string = pytesseract.image_to_string(a, lang='chi_sim')\r\n# # print(string)\r\n# # string = pytesseract.image_to_string(c, lang='chi_sim')\r\n# # print(string)\r\n# # string = pytesseract.image_to_string(d, lang='chi_sim')\r\n# # print(string)\r\n#\r\n#\r\n#\r\n# def matchImg(imgsrc, imgobj, value):#imgsrc=原始图像,imgobj=待查找的图片\r\n# imsrc = ac.imread(imgsrc)\r\n# imobj = ac.imread(imgobj)\r\n#\r\n# match_result = ac.find_template(imsrc, imobj,value) # {'confidence': 0.5435812473297119, 'rectangle': ((394, 384), (394, 416), (450, 384), (450, 416)), 'result': (422.0, 400.0)}\r\n# if match_result is not None:\r\n# match_result['shape']=(imsrc.shape[1], imsrc.shape[0]) # 0为高,1为宽\r\n#\r\n# return match_result\r\n# # 图像识别启动模拟器\r\n# weituo_old = Image.open(\"D:\\\\PYcode\\\\old_pic\\\\moniqi.png\")\r\n# quanping = (0, 0, 1365, 726)\r\n# quanping_img = ImageGrab.grab(quanping)\r\n# t = time.strftime('%Y%m%d_%H%M%S', time.localtime())\r\n# name = 'D:\\\\PYcode\\\\new_pic\\\\' + 'zhuomian_' + t +'.jpg'\r\n# quanping_img.save(name)\r\n# position = matchImg(name, \"D:\\\\PYcode\\\\old_pic\\\\moniqi.png\", 0.01)\r\n# print (position)\r\n# if position != None:\r\n# x, y = position['result']\r\n# print(x, y)\r\n# pyautogui.click(x=x, y=y, clicks=2, interval=0.1)\r\n\r\ndef baiduOCR(outfile):\r\n \"\"\"利用百度api识别文本,并保存提取的文字\r\n picfile: 图片文件名\r\n outfile: 输出文件\r\n \"\"\"\r\n filename = path.basename(picfile)\r\n\r\n APP_ID = '25680730'\r\n API_KEY = 'oEyoYzFPwlUMFiibBcGDBv3l'\r\n SECRET_KEY = '4XbGE36ubU3P1Xit67uW3kr4McoPD0YU'\r\n client = AipOcr(APP_ID, API_KEY,SECRET_KEY)\r\n\r\n i = open(picfile, 'rb')\r\n img = i.read()\r\n print(\"正在识别图片:\\t\" + filename)\r\n # message = client.basicGeneral(img) # 通用文字识别,每天 50 000 次免费\r\n message = client.basicAccurate(img) # 通用文字高精度识别,每天 500 次免费\r\n 
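# The response is assumed successful here; a failed call carries no 'words_result' for the loop below.\r\n    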
print(\"识别成功!\")\r\n i.close()\r\n\r\n with open(outfile, 'a+',encoding='utf-8') as fo:\r\n fo.writelines(\"+\" * 60 + '\\n')\r\n fo.writelines(\"识别图片:\\t\" + filename + \"\\n\" * 2)\r\n fo.writelines(\"文本内容:\\n\")\r\n # 输出文本内容\r\n for text in message.get('words_result'):\r\n fo.writelines(text.get('words') + '\\n')\r\n fo.writelines('\\n' * 2)\r\n print(\"文本导出成功!\")\r\n print()\r\n\r\nif __name__ == \"__main__\":\r\n open('result.txt', 'a+',encoding='utf-8').close()\r\n outfile = 'result.txt'\r\n for picfile in glob.glob(\"C:\\\\Users\\\\lenovo-pc\\\\Desktop\\\\微信图片_20220406164116.jpg\"):\r\n baiduOCR(outfile)\r\n print('图片文本提取结束!文本��出结果位于 %s 文件中。' % outfile)\r\n\r\n# # Hash值对比\r\n#\r\n# def cmpHash(hash1, hash2, shape=(10, 10)):\r\n# n = 0\r\n# # hash长度不同则返回-1代表传参出错\r\n# if len(hash1) != len(hash2):\r\n# return -1\r\n# # 遍历判断\r\n# for i in range(len(hash1)):\r\n# # 相等则n计数+1,n最终为相似度\r\n# if hash1[i] == hash2[i]:\r\n# n = n + 1\r\n# return n/(shape[0]*shape[1])\r\n#\r\n#\r\n# # 均值哈希算法\r\n# def aHash(img, shape=(10, 10)):\r\n# # 缩放为10*10\r\n# img = cv2.resize(img, shape)\r\n# # 转换为灰度图\r\n# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n# # s为像素和初值为0,hash_str为hash值初值为''\r\n# s = 0\r\n# hash_str = ''\r\n# # 遍历累加求像素和\r\n# for i in range(shape[0]):\r\n# for j in range(shape[1]):\r\n# s = s + gray[i, j]\r\n# # 求平均灰度\r\n# avg = s / 100\r\n# # 灰度大于平均值为1相反为0生成图片的hash值\r\n# for i in range(shape[0]):\r\n# for j in range(shape[1]):\r\n# if gray[i, j] > avg:\r\n# hash_str = hash_str + '1'\r\n# else:\r\n# hash_str = hash_str + '0'\r\n# return hash_str\r\n#\r\n#\r\n# def main():\r\n# # t = (383, 138, 470, 165)\r\n# # n = 3\r\n# # img = \"D:\\\\PYcode\\\\old_pic\\\\\" + str(n) + \".jpg\"\r\n# img1 = cv2.imread(\"D:\\\\PYcode\\\\new_pic\\\\keyan_20220318_125123.png\")\r\n# img2 = cv2.imread(\"D:\\\\PYcode\\\\para_pic\\\\keyanxiangmu_20220318_121033.png\")\r\n# img3 = cv2.imread(\"D:\\\\PYcode\\\\old_pic\\\\zhanshuqianwang.png\")\r\n# img_zijin = cv2.imread(\"D:\\\\PYcode\\\\old_pic\\\\zijin.jpg\")\r\n# # img_test = cv2.imread(\"D:\\\\PYcode\\\\new_pic\\\\keyan_20220316_190832.png\")\r\n# # img4 = cv2.imread(\"D:\\\\PYcode\\\\para_pic\\\\zhanshujiaocheng_20220316_190218.png\")\r\n# # 使用两个变量,1记录五个科研任务 2 保存图片的数量 break跳出while循环\r\n# hash1 = aHash(img1)\r\n# hash2 = aHash(img2)\r\n# hash3 = aHash(img3)\r\n# # hash4 = aHash(img4)\r\n# hash_zijin = aHash(img_zijin)\r\n# # hash_test = aHash(img_test)\r\n# n1 = cmpHash(hash1, hash2)\r\n# n2 = cmpHash(hash1, hash3)\r\n# n3 = cmpHash(hash2, hash3)\r\n# # n_test = cmpHash(hash4, hash_test)\r\n# print('均值哈希算法相似度:', n1)\r\n# # print('均值哈希算法相似度:', n2)\r\n# # print('均值哈希算法相似度:', n3)\r\n# # 前往按钮相似度最大为0.76 暂定为0.78\r\n# # 科研类型 0.9\r\n\r\n\r\n# if __name__==\"__main__\":\r\n# main()\r\n\r\n\r\n#\r\n# def get_Same_Image(image):\r\n# # size = (489, 113)\r\n# return image.convert('RGB')\r\n#\r\n#\r\n# def difference(list1, list2):\r\n# sum1 = 0\r\n# for i in range(len(list1)):\r\n# if list1[i] == list2[i]:\r\n# sum1 += 1\r\n# else:\r\n# # 照公式可获得\r\n# sum1 += 1-(abs(list1[i] - list2[i]) / max(list1[i], list2[i]))\r\n# return sum1 / len(list1)\r\n#\r\n#\r\n# def Get_Similarity(image1, image2):\r\n# # 统一格式\r\n# img1 = get_Same_Image(image1)\r\n# img2 = get_Same_Image(image2)\r\n# # 获得直方图\r\n# list1 = img1.histogram()\r\n# list2 = img2.histogram()\r\n# return difference(list1, list2)\r\n\r\n# os.makedirs(\"D:\\\\PYcode\\\\old_pic2\")\r\n# os.rmdir(\"D:\\\\PYcode\\\\old_pic2\")\r\n\r\n\r\n# def zdy(a):\r\n# m = a + 3\r\n# n = 4\r\n# if m < n:\r\n# print(\"wwwww\")\r\n# return\r\n# 
else:\r\n# print(\"wh\")\r\n# print(\"wwwwwxxxxx\")\r\n# print(\"zdydsb\")\r\n#\r\n# zdy(0)\r\n# zdy(4)\r\n","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"568314831","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements the base class for table classes.\n\n.. :copyright: (c) 2014 by Jelte Fennema.\n :license: MIT, see License for more details.\n\"\"\"\n\nfrom . import LatexObject, Environment, Command\nfrom ..utils import dumps_list\nfrom ..package import Package\n\n\nfrom collections import Counter\nimport re\n\n\ndef _get_table_width(table_spec):\n \"\"\"Calculate the width of a table based on its spec.\n\n :param table_spec:\n\n :type table_spec: str\n\n :return:\n :rtype: int\n \"\"\"\n\n column_letters = ['l', 'c', 'r', 'p', 'm', 'b']\n\n # Remove things like {\\bfseries}\n cleaner_spec = re.sub(r'{[^}]*}', '', table_spec)\n spec_counter = Counter(cleaner_spec)\n\n return sum(spec_counter[l] for l in column_letters)\n\n\nclass TabularBase(Environment):\n\n \"\"\"A class that is used as a base for all table classes.\n\n :param table_spec:\n :param data:\n :param pos:\n\n :type table_spec: str\n :type data: list\n :type pos: list\n \"\"\"\n\n def __init__(self, table_spec, data=None, pos=None, **kwargs):\n self.width = _get_table_width(table_spec)\n\n super().__init__(data=data, options=pos,\n arguments=table_spec, **kwargs)\n\n def add_hline(self, start=None, end=None):\n \"\"\"Add a horizontal line to the table.\n\n :param start:\n :param end:\n\n :type start: int\n :type end: int\n \"\"\"\n\n if start is None and end is None:\n self.append(r'\\hline')\n else:\n if start is None:\n start = 1\n elif end is None:\n end = self.width\n\n self.append(Command('cline', str(start) + '-' + str(end)))\n\n def add_empty_row(self):\n \"\"\"Add an empty row to the table.\"\"\"\n\n self.append((self.width - 1) * '&' + r'\\\\')\n\n def add_row(self, cells, escape=False, mapper=None):\n \"\"\"Add a row of cells to the table.\n\n :param cells:\n :param escape:\n\n :type cells: tuple\n :type escape: bool\n \"\"\"\n\n # Propegate packages used in cells\n for c in cells:\n if isinstance(c, LatexObject):\n for p in c.packages:\n self.packages.add(p)\n\n self.append(dumps_list(cells, escape=escape, token='&', mapper=mapper)\n + r'\\\\')\n\n def add_multicolumn(self, size, align, content, cells=None, escape=False):\n \"\"\"Add a multicolumn of width size to the table, with cell content.\n\n :param size:\n :param align:\n :param content:\n :param cells:\n :param escape:\n\n :type size: int\n :type align: str\n :type content: str\n :type cells: tuple\n :type escape: bool\n \"\"\"\n\n self.append(Command('multicolumn', arguments=(size, align, content)))\n\n if cells is not None:\n self.add_row(cells)\n else:\n self.append(r'\\\\')\n\n def add_multirow(self, size, align, content, hlines=True, cells=None,\n escape=False):\n \"\"\"Add a multirow of height size to the table, with cell content.\n\n :param size:\n :param align:\n :param content:\n :param hlines:\n :param cells:\n :param escape:\n\n :type size: int\n :type align: str\n :type content: str\n :type hlines: bool\n :type cells: tuple\n :type escape: bool\n \"\"\"\n\n self.append(Command('multirow', arguments=(size, align, content)))\n self.packages.add(Package('multirow'))\n\n if cells is not None:\n for i, row in enumerate(cells):\n if hlines and i:\n self.add_hline(2)\n\n self.append('&')\n 
self.add_row(row)\n else:\n for i in range(size):\n self.add_empty_row()\n","sub_path":"pylatex/base_classes/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"71196119","text":"import sys\nsys.stdin = open('8-9 input.txt')\n\n\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n data = list(map(int, input().split()))\n M = int(input())\n\n dp = [1000]*(M+1)\n dp[0] = 0\n for i in range(N):\n for j in range(data[i], M+1):\n dp[j] = min(dp[j], dp[j-data[i]]+1)\n\n print(dp[M])\n\n\n\n''' [풀이]\nimport sys\nsys.stdin = open(\"input.txt\", 'r') \nif __name__==\"__main__\":\n n=int(input())\n coin=list(map(int, input().split()))\n m=int(input())\n dy=[1000]*(m+1);\n dy[0]=0\n for i in range(n):\n for j in range(coin[i], m+1):\n dy[j]=min(dy[j], dy[j-coin[i]]+1)\n print(dy[m])\n'''","sub_path":"inflearn/파이썬 알고리즘 문제풀이/8-9 동전교환.py","file_name":"8-9 동전교환.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"449965679","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/23\n# @Author : wangyingqi\n# @Site : 文件管理models\n# @File : models.py\n# @Software: pycharm\n# @Function:\n\nimport datetime as dt\nfrom richdataxweb.database import Column, Model, SurrogatePK, db, reference_col, relationship\n\n\nclass File_Path(SurrogatePK, Model):\n \"\"\"A role for a user.\"\"\"\n\n __tablename__ = 'file_path'\n path = Column(db.String(255), unique=True, nullable=False) # 设置文件路径\n remarks = Column(db.String(255), unique=True, nullable=False) # 路径描述\n create_by = Column(db.String(80), nullable=True) # 创建人\n create_date = Column(db.DateTime, nullable=False, default=dt.datetime.now) # 创建时间\n update_by = Column(db.String(80), nullable=True) # 更新人\n update_date = Column(db.DateTime, nullable=True) # 更新时间\n\n def __init__(self, path, **kwargs):\n \"\"\"Create instance.\"\"\"\n db.Model.__init__(self, path=path, **kwargs)\n\n\nclass File_src_manager(SurrogatePK, Model):\n \"\"\"A role for a user.\"\"\"\n\n __tablename__ = 'file_src_manager'\n name = Column(db.String(255), unique=True, nullable=False) # socket服务器名称\n ip = Column(db.String(255), unique=True, nullable=False) # socket服务器IP地址\n port = Column(db.String(255), unique=True, nullable=False) # socket服务器端口号\n remarks = Column(db.String(255), unique=True, nullable=False) # socket服务器备注\n del_flag = Column(db.String(255), unique=True, nullable=False) # 删除标记: 0-未删除;1-已删除\n create_by = Column(db.String(80), nullable=True) # 创建人\n create_date = Column(db.DateTime, nullable=False, default=dt.datetime.now) # 创建时间\n update_by = Column(db.String(80), nullable=True) # 更新人\n update_date = Column(db.DateTime, nullable=True) # 更新时间\n\n def __init__(self, name, **kwargs):\n \"\"\"Create instance.\"\"\"\n db.Model.__init__(self, name=name, **kwargs)\n","sub_path":"richdataxweb/filemanage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"614509377","text":"\"\"\"\n Author:VolcanicSnow\n Email:liupf2792@gmail.com\n Function:\n Version:1.0\n Date:2020/8/22 9:31\n\"\"\"\n\n\ndef merge(nums, left, mid, right):\n temp = []\n i = left # 左边部分的第一个元素索引\n j = mid + 1 # 右边部分的第一个元素索引\n # 开始对两边进行比较合并\n while i <= mid and j <= right:\n if nums[i] <= nums[j]:\n temp.append(nums[i])\n i += 1\n else:\n 
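# The right-half element is smaller, so copy it into temp first to keep the merge ordered.\n            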
temp.append(nums[j])\n j += 1\n # 当某一边元素合并完了之后,将另一边元素全部加到 temp 中\n while i <= mid:\n temp.append(nums[i])\n i += 1\n while j <= right:\n temp.append(nums[j])\n j += 1\n nums[left:right + 1] = temp # 将合并后的结果替换掉原来\n\n\ndef merge_sort(nums, left, right):\n if left == right: # ,左右指针相遇,列表中只有一个元素\n return\n mid = (left + right) >> 1\n merge_sort(nums, left, mid) # 左分\n merge_sort(nums, mid + 1, right) # 右分\n merge(nums, left, mid, right) # 并\n\n\nnums = [5, 7, 4, 6, 3, 1, 2, 9, 8]\n# print(nums)\n# merge(nums, 0, len(nums) // 2, len(nums))\n# print(nums)\nmerge_sort(nums, 0, len(nums) - 1)\nprint(nums)\n","sub_path":"Week_06/10大排序算法/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"90627763","text":"# This file is part of Tryton. The COPYRIGHT file at the top level of\n# this repository contains the full copyright notices and license terms.\nimport datetime as dt\nimport logging\nimport random\nimport select\nimport signal\nimport time\nfrom multiprocessing import Pool as MPool, cpu_count\n\nfrom sql import Flavor\n\nfrom trytond import backend\nfrom trytond.config import config\nfrom trytond.pool import Pool\nfrom trytond.transaction import Transaction\n\n__all__ = ['work']\nlogger = logging.getLogger(__name__)\nDatabase = backend.get('Database')\nDatabaseOperationalError = backend.get('DatabaseOperationalError')\n\n\nclass Queue(object):\n def __init__(self, pool, mpool):\n self.database = Database(pool.database_name).connect()\n self.connection = self.database.get_connection(autocommit=True)\n self.pool = pool\n self.mpool = mpool\n\n def pull(self, name=None):\n Queue = self.pool.get('ir.queue')\n return Queue.pull(self.database, self.connection, name=name)\n\n def run(self, task_id):\n return self.mpool.apply_async(\n run_task, (self.pool.database_name, task_id))\n\n\nclass TaskList(list):\n def filter(self):\n for t in list(self):\n if t.ready():\n self.remove(t)\n return self\n\n\ndef work(options):\n Flavor.set(Database.flavor)\n if not config.getboolean('queue', 'worker', default=False):\n return\n try:\n processes = options.processes or cpu_count()\n except NotImplementedError:\n processes = 1\n logger.info(\"start %d workers\", processes)\n mpool = MPool(\n processes, initializer, (options,), options.maxtasksperchild)\n queues = [Queue(pool, mpool) for pool in initializer(options, False)]\n\n tasks = TaskList()\n timeout = options.timeout\n try:\n while True:\n while len(tasks.filter()) >= processes:\n time.sleep(0.1)\n for queue in queues:\n task_id, next_ = queue.pull(options.name)\n timeout = min(\n next_ or options.timeout, timeout, options.timeout)\n if task_id:\n tasks.append(queue.run(task_id))\n break\n else:\n connections = [q.connection for q in queues]\n connections, _, _ = select.select(connections, [], [], timeout)\n for connection in connections:\n connection.poll()\n while connection.notifies:\n connection.notifies.pop(0)\n except KeyboardInterrupt:\n mpool.close()\n\n\ndef initializer(options, worker=True):\n if worker:\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n pools = []\n database_list = Pool.database_list()\n for database_name in options.database_names:\n pool = Pool(database_name)\n if database_name not in database_list:\n with Transaction().start(database_name, 0, readonly=True):\n pool.init()\n pools.append(pool)\n return pools\n\n\ndef run_task(pool, task_id):\n if not isinstance(pool, Pool):\n pool = Pool(pool)\n 
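# The pool is resolved above (a plain database name is accepted); fetch the queue model that owns this task.\n    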
Queue = pool.get('ir.queue')\n    logger.info('task \"%d\" started', task_id)\n    try:\n        retry = config.getint('database', 'retry')\n        for count in range(retry, -1, -1):\n            with Transaction().start(pool.database_name, 0) as transaction:\n                try:\n                    Queue(task_id).run()\n                    break\n                except DatabaseOperationalError:\n                    if count:\n                        transaction.rollback()\n                        continue\n                    raise\n        logger.info('task \"%d\" done', task_id)\n    except DatabaseOperationalError:\n        try:\n            with Transaction().start(pool.database_name, 0) as transaction:\n                task = Queue(task_id)\n                scheduled_at = dt.datetime.now()\n                scheduled_at += dt.timedelta(\n                    seconds=random.randint(0, 2 * retry))\n                Queue.push(task.name, task.data, scheduled_at=scheduled_at)\n        except Exception:\n            logger.critical(\n                'rescheduling task \"%d\" failed', task_id, exc_info=True)\n    except Exception:\n        logger.critical('task \"%d\" failed', task_id, exc_info=True)\n","sub_path":"lib/python3.8/site-packages/trytond/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"600533516","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[58]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[118]:\n\n\nmovie = pd.read_csv('data/movie.csv', index_col='movie_title')\nc1 = movie['title_year'] >= 2010\nc2 = movie['title_year'].isnull()\ncriteria = c1 | c2\nmovie_mask = movie.mask(criteria).dropna(how='all')\nmovie_mask.head()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"minggu-12/praktik/src/5_81.py","file_name":"5_81.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"162624487","text":"from django.shortcuts import render, redirect\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.http import HttpResponse , HttpResponseRedirect\nfrom .forms import ContactForm\n\ndef send_email(request):\n\tif request.method =='POST':\n\t\tform= ContactForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tsubject= form.cleaned_data['subject']\n\t\t\tfrom_email=form.cleaned_data['from_email']\n\t\t\tmessage=form.cleaned_data['message']\n\n\t\t\ttry:\n\t\t\t\tsend_mail(subject,message,from_email,['lear@example.com'])\n\n\n\t\t\texcept BadHeaderError:\n\t\t\t\treturn HttpResponse('invalid email')\n\n\t\t\treturn redirect('contact_us:send_success')\n\n\n\n\telse:\n\t\tform = ContactForm()\n\n\n\tcontext={ \n\t\t\t\t'form': form\n\t\t\t}\n\n\n\treturn render(request, 'contact_us/contact_us.html', context)\n\n\ndef send_success(request):\n\treturn HttpResponse('Thanks for contacting us. 
We will get back to you soon.')\n\n","sub_path":"contact_us/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"548444158","text":"\"\"\"\nGiven two binary trees, write a function to check if they are the same or not.\n给两个二叉树,吮吸Ian一个函数可以判断是否他们相同.\nTwo binary trees are considered the same if they are structurally identical and the nodes have the same value.\n二叉树结构和节点 相同被认为是相同的二叉树\nExample 1:\n\nInput: 1 1\n / \\ / \\\n 2 3 2 3\n\n [1,2,3], [1,2,3]\n\nOutput: true\nExample 2:\n\nInput: 1 1\n / \\\n 2 2\n\n [1,2], [1,null,2]\n\nOutput: false\nExample 3:\n\nInput: 1 1\n / \\ / \\\n 2 1 1 2\n\n [1,2,1], [1,1,2]\n\nOutput: false\n\n\n链接:https://leetcode-cn.com/problems/same-tree\n\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:\n ans = []\n\n def walk(node, node1):\n if not node:\n return node1 is None\n if not node1:\n return node is None\n if node.val != node1.val:\n return\n ans.append(node.val)\n walk(node.left, node1.left)\n walk(node.right, node.right)\n\n stack = []\n node1 = p\n node2 = q\n while stack and node1:\n while node1:\n stack.append(node1)\n node2 = node2.left\n node1 = node1.left\n one = stack.pop()\n\n\n\n","sub_path":"code_practice/binary_tree/same.py","file_name":"same.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"145221639","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as pp\nimport pyregion\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfrom matplotlib.transforms import Affine2D\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom scipy.ndimage import gaussian_filter\n\nfrom astropy.visualization import AsinhStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\nfrom matplotlib import rc\nrc('font', family='serif')\n\n_psfresid_pat = 'runJWST/SDSS_z7_{}/mcmc_out_mock_{}_point_source_subtracted.fits'\n_psfresid_pat_F200W = 'runJWST/SDSS_z7_SN/mcmc_out_mock_{}_point_source_subtracted.fits'\n_true_pat = 'data/sci_mock_JWST_{}_{}_onlyHost.fits'\n_mag_zp = 25.9463\n\n_stretch = AsinhStretch()\n#_stretch.a = (0.01 - 0.0001)/2 / (0.01+0.0001)\n#_pnorm = ImageNormalize(vmin=-0.0001, vmax=0.01, stretch=_stretch, clip=True)\n#_xytix = [-3,-2, -1, 0, 1, 2,3] # in arcsec\n_xytix = [-0.3, 0, 0.3] # in arcsec\n_coltix = np.array([26,27,28]) # in mag/arcsec**2\nxy_format = pp.FormatStrFormatter(r'$%0.1f^{\\prime\\prime}$')\n_axis_range = [-0.3,0.3,-0.3,0.3]#[-2.5, 2.5, -2.5, 2.5] # in arcsec\n\ngray_r = pp.cm.cmap_d['Spectral_r']\n\n\ndef mag_to_flux(mag, zp=0.0, scale=(1.0, 1.0)):\n return 10**(-0.4*(mag - zp)) * np.prod(scale)\n\n\ndef plot_models(quasar, filt, save_name=None):\n q_ind = quasar.split('_',1)[1]#+'_host'\n if filt=='F200W':\n psfresid = fits.getdata(_psfresid_pat_F200W.format(quasar))\n else:\n psfresid = fits.getdata(_psfresid_pat.format(filt,quasar))\n trueHost = fits.getdata(_true_pat.format(filt,q_ind))\n #quasardata = fits.getdata(_quasar_pat.format(qdir))\n\n #cent=int(len(quasardata)/2)\n #flux_ratio=psfresid[cent,cent]/quasardata[cent,cent]\n #print('Flux ratio: ',psfresid[95,95]/quasar[95,95])\n #if flux_ratio>0.05:\n # print(flux_ratio,quasar)\n \n \n #psfresid_smooth = 
gaussian_filter(psfresid, (2, 2))\n resid_smooth = gaussian_filter(psfresid, (1, 1))\n true_smooth = gaussian_filter(trueHost, (1, 1))\n\n center = np.array(psfresid.shape)[::-1]/2\n if filt in ['F277W','F356W','F444W']:\n pxscale = 0.063/2 #arcsec\n #_stretch.a = (0.03 - 0.00001)/2 / (0.03+0.00001)\n #_pnorm = ImageNormalize(vmin=-0.00001*2.4, vmax=0.03*2.4, stretch=_stretch, clip=True)\n _stretch.a = (0.05 - 0.00005)/2 / (0.05+0.00005)\n _pnorm = ImageNormalize(vmin=-0.00005, vmax=0.05, stretch=_stretch, clip=True)\n else:\n pxscale = 0.031/2\n _stretch.a = (0.005 - 0.00005)/2 / (0.005+0.00005)\n _pnorm = ImageNormalize(vmin=-0.00005, vmax=0.005, stretch=_stretch, clip=True)\n extents = np.array([-center[0], center[0],\n -center[1], center[1]])*pxscale\n\n for ind,data in enumerate([resid_smooth,true_smooth]):\n #for ind,data in enumerate([psfresid,trueHost]):\n if ind==0:\n grid_ind=ii+(2*len(filters)*nn)\n if nn==0:\n grid[grid_ind].set_title(filt)\n else:\n grid_ind=ii+len(filters)+(2*len(filters)*nn)\n im = grid[grid_ind].imshow(data, extent=extents, origin='lower',\n cmap=gray_r, norm=_pnorm,\n interpolation='nearest')\n grid[grid_ind].axis(_axis_range)\n \n ticks = mag_to_flux(_coltix, zp=_mag_zp, scale=pxscale)\n cbar = pp.colorbar(im, cax=grid.cbar_axes[0], ticks=ticks)\n grid[grid_ind].set_xticks(_xytix)\n grid[grid_ind].set_yticks(_xytix)\n grid[grid_ind].xaxis.set_major_formatter(xy_format)\n grid[grid_ind].yaxis.set_major_formatter(xy_format)\n cbar.set_ticklabels(_coltix)\n grid.cbar_axes[0].set_ylabel('mag arcsec$^{-2}$')\n grid.cbar_axes[0].set_xlabel('mag arcsec$^{-2}$')\n\n #grid[ii].set_title(quasar)\n\n\nif __name__ == '__main__':\n from sys import argv\n # import glob\n to_plot = [2, 3, 6, 7, 8, 9]# 10, 12, 16, 18, 20, 22, 23, 25, 27, 32, 36, 40, 43, 45, 46, 100]\n to_plot = [10, 12, 16, 18, 20]#22, 23, 25, 27, 32, 36, 40, 43, 45, 46, 100]\n filters = ['F115W','F150W','F200W','F277W','F356W','F444W']\n\n if 'test' in argv:\n to_plot = to_plot[0:1]\n\n fig = pp.figure(figsize=(7, 10))\n grid = ImageGrid(fig, 111, nrows_ncols=(2*len(to_plot),len(filters)), axes_pad=0.1,\n label_mode='L',share_all=False,\n cbar_location='right', cbar_mode='single')\n\n for nn,qq in enumerate(to_plot):\n ii=0 \n quasar = 'JWST_SDSS_' + str(qq)\n for filt in filters:\n save_name = 'output_image_{}.pdf'.format(quasar) if 'save' in argv else None\n plot_models(quasar, filt, save_name=save_name)\n ii+=1\n \n for ii in range(0,len(to_plot)):\n grid[0+(2*len(filters)*ii)].set_ylabel('Subtracted')\n grid[0+len(filters)+(2*len(filters)*ii)].set_ylabel('True')\n pp.subplots_adjust(left=0.08, bottom=0.1, right=0.92, top=0.95)\n pp.savefig('residuals_filter_comparison_multiple_filters.pdf'.format(qq))\n pp.show() \n #pp.close(fig)\n","sub_path":"plot_residuals_filters_multiple_quasars.py","file_name":"plot_residuals_filters_multiple_quasars.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"390874710","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom .views import QuestionViewSet, AnswerViewSet, UserViewSet\n\nrouter = routers.DefaultRouter()\n\nrouter.register('user', UserViewSet)\nrouter.register('question', QuestionViewSet)\nrouter.register('answer', AnswerViewSet)\n\nurlpatterns = [\n path('', 
include(router.urls)),\n\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"583030727","text":"\nclass Integration:\n\n    def definite_integral(self, samples, sampling_period=1, TimeFactor=1):\n        FinalResult = 0\n        # Set up the terms of the trapezoidal integration formula\n        upper_limit = samples[-1]\n        #print(samples.index(samples[-1]))\n        lower_limit = samples[0]\n\n        # Separate term for the first and last samples, since the trapezoid\n        # rule does not double them.\n        Formula1 = (TimeFactor*sampling_period/2) * (lower_limit + upper_limit)\n        Formula2 = 0\n        #print(\"Value of term 1:\", Formula1)\n        #print('Samples[-1]: %i' % samples[-1])\n        # Walk the interior samples in steps of the sampling period, using the\n        # indices directly (samples.index(...) breaks when values repeat).\n        for Values in range(1, len(samples) - 1, sampling_period):\n            #print(Values)\n            summ = (TimeFactor*sampling_period/2) * (2 * samples[Values])\n            Formula2 += summ\n        FinalResult = Formula2 + Formula1\n        #print(FinalResult)\n        return FinalResult\n\n\n\n##j = Integration()\n##\n##x = [0,1,4,9,16,25,36,49]\n###x = [0,1.5,2.5,3.5,4.5,5.5]\n##j.definite_integral(x)\n","sub_path":"Lab8 - Gyroscope/Integral.py","file_name":"Integral.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"559763657","text":"from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom imutils import paths, resize\nimport numpy as np\nimport argparse\nimport os\nimport cv2\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True,help=\"path to input dataset\")\nap.add_argument(\"-s\", \"--save\", required=True,help=\"path to output image\")\nargs = vars(ap.parse_args())\n\n# construct the image generator for data augmentation\naug = ImageDataGenerator(rotation_range=20,width_shift_range=0.1,height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,horizontal_flip=True,vertical_flip=True, fill_mode=\"nearest\")\n\n# grab the list of images that we’ll be describing, then extract\n# the class label names from the image paths\nprint(\"[INFO] loading images...\")\nimagePaths = list(paths.list_images(args[\"dataset\"]))\nclassNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]\nclassNames = [str(x) for x in np.unique(classNames)]\n\nfor path in imagePaths:\n\ti = 0  # reset the counter per image so every image yields 100 augmented copies\n\timg = cv2.imread(path)\n\timg = cv2.resize(img, (224, 224))\n\timg = img_to_array(img)\n\timg = img.reshape((1,) + img.shape)\n\t# note: save_prefix uses the first class name for every image\n\tfor batch in aug.flow(img, batch_size=1,save_to_dir=args[\"save\"],save_prefix=classNames[0], save_format='png'):\n\t\ti += 1\n\t\tif i > 99:\n\t\t\tbreak # otherwise the generator would loop indefinitely \n\n","sub_path":"data_aug.py","file_name":"data_aug.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"430346721","text":"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\nimport time\nfrom argparse import Namespace\nfrom logging import Logger\nfrom typing import Iterable\n\nfrom . 
import CONFIGURATION_FILE, FAILURE, SUCCESS, log\nfrom .configuration import Configuration\nfrom .exceptions import EnvironmentException\nfrom .filesystem import find_root\n\n\nLOG: Logger = logging.getLogger(__name__)\n\n\ndef _parallel_check(command: Iterable[str], process_count: int) -> float:\n    LOG.info(\n        \"Running %d process%s of `%s`\",\n        process_count,\n        \"es\" if process_count > 1 else \"\",\n        \" \".join(command),\n    )\n    processes = []\n    start = time.time()\n    for _ in range(process_count):\n        processes.append(\n            subprocess.Popen(\n                # pyre-fixme[6]: Expected\n                #  `Union[typing.Sequence[typing.Union[_PathLike[typing.Any], bytes,\n                #  str]], bytes, str]` for 1st param but got `Iterable[str]`.\n                command,\n                stdout=subprocess.DEVNULL,\n                stderr=subprocess.DEVNULL,\n            )\n        )\n    for process in processes:\n        process.wait()\n    return time.time() - start\n\n\ndef _compare_parallel_check(\n    arguments: argparse.Namespace, configuration: Configuration, project_root: str\n) -> None:\n    if not os.path.isdir(arguments.source_directory):\n        raise EnvironmentException(\n            \"`{}` is not a valid source directory.\".format(arguments.source_directory)\n        )\n    flags = [\"-typeshed\", configuration.typeshed, \"-project-root\", project_root]\n    search_path = configuration.search_path\n    if search_path:\n        flags.extend([\"-search-path\", \",\".join(search_path)])\n    client_command = [configuration.binary, \"check\"]\n    client_command.extend(flags)\n    client_command.append(arguments.source_directory)\n\n    process_count = arguments.min\n    while process_count < arguments.max:\n        time_elapsed = _parallel_check(client_command, process_count)  # seconds\n        time_elapsed_per_process = time_elapsed / process_count\n        LOG.info(\n            \"Ran %d concurrent `pyre check` process%s in %dm%ds: \"\n            + \"%dm%ds per process.\",\n            process_count,\n            \"es\" if process_count > 1 else \"\",\n            time_elapsed / 60,\n            time_elapsed % 60,\n            time_elapsed_per_process / 60,\n            time_elapsed_per_process % 60,\n        )\n        process_count += 1\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--source-directory\", action=\"store\", help=\"Source directory to run check on.\"\n    )\n    parser.add_argument(\n        \"--min\",\n        action=\"store\",\n        type=int,\n        default=1,\n        help=\"Minimum number of concurrent processes to measure.\",\n    )\n    parser.add_argument(\n        \"--max\",\n        action=\"store\",\n        type=int,\n        default=10,\n        help=\"Maximum number of concurrent processes to measure.\",\n    )\n    arguments: Namespace = parser.parse_args()\n    log.initialize(noninteractive=False)\n\n    try:\n        exit_code = SUCCESS\n        root: str = find_root(os.getcwd(), CONFIGURATION_FILE) or os.getcwd()\n        os.chdir(root)\n        # No --local-root flag is defined on the parser, so fall back to the\n        # project root discovered above.\n        configuration = Configuration(local_root=root)\n        _compare_parallel_check(arguments, configuration, root)\n    except Exception as error:\n        LOG.error(str(error))\n        exit_code = FAILURE\n    finally:\n        log.cleanup()\n        sys.exit(exit_code)\n","sub_path":"client/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"530092259","text":"#!/usr/bin/python\n\nimport concurrent.futures\nimport os\nimport requests\nimport sys\nimport time\nimport numpy as np\n\n# Functions\n\ndef usage(status=0):\n    progname = os.path.basename(sys.argv[0])\n    print('''Usage: {} [-h HAMMERS -t THROWS] URL\n    -h  HAMMERS Number of hammers to utilize (1)\n    -t  THROWS  Number of throws per hammer (1)\n    -v          Display verbose output\n    '''.format(progname))\n    
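# Added comment (not in the original): usage() doubles as the error exit;\n    # callers pass status=1 on bad arguments (e.g. an unknown flag) so shell\n    # wrappers can detect failure from the exit code.\n    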
sys.exit(status)\n\ndef hammer(url, throws, verbose, hid):\n ''' Hammer specified url by making multiple throws (ie. HTTP requests).\n\n - url: URL to request\n - throws: How many times to make the request \n - verbose: Whether or not to display the text of the response\n - hid: Unique hammer identifier\n\n Return the average elapsed time of all the throws.\n '''\n elapsed_time = 0\n total_throws = 0\n for throw in range(throws):\n start_time = time.time()\n request = requests.get(url)\n if verbose:\n print(request.text)\n throw_time = time.time() - start_time\n\n print(\"Hammer: {}, Throw: {:>3}, Elapsed Time: {:.2f}\".format(hid, throw, throw_time))\n\n elapsed_time += throw_time\n total_throws += 1\n\n return elapsed_time / total_throws\n\ndef do_hammer(args):\n ''' Use args tuple to call `hammer` '''\n return hammer(*args)\n\ndef main():\n hammers = 1\n throws = 1\n verbose = False\n \n arguments = sys.argv[1:]\n\n if len(arguments) == 0:\n usage(1)\n\n # Parse command line arguments\n while len(arguments):\n argument = arguments.pop(0)\n\n if argument == '-h':\n hammers = int(arguments.pop(0))\n elif argument == '-t':\n throws = int(arguments.pop(0))\n elif argument == '-v':\n verbose = True\n elif argument.startswith('-') and len(argument) == 2:\n usage(1)\n else:\n url = argument \n\n # Create pool of workers and perform throws\n average_time = []\n args = ((url, throws, verbose, hid) for hid in range(hammers))\n \n with concurrent.futures.ProcessPoolExecutor(hammers) as executor:\n average_time = list(executor.map(do_hammer, args))\n \n for hid in range(hammers):\n print(\"Hammer: {}, AVERAGE , Elapsed Time: {:.2f}\".format(hid, average_time[hid]))\n\n print(\"TOTAL AVERAGE ELAPSED TIME: {:.2f}\".format(np.mean(average_time)))\n\n# Main execution\n\nif __name__ == '__main__':\n main()\n\n# vim: set sts=4 sw=4 ts=8 expandtab ft=python:\n","sub_path":"bin/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"440484553","text":"# -*- coding: utf-8 -*-\n\nimport psycopg2\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom datetime import datetime as dt\nimport language_tool_python\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom fuzzysearch import find_near_matches\nimport statistics \n\nclass postgres_conn:\n def getConn(self):\n try:\n connection = psycopg2.connect(user='postgres',\n password='postgres_007',\n host=\"1.pgsql.db.1digitalstack.com\",\n port='5432',\n database='postgres')\n\n cursor = connection.cursor()\n # Print PostgreSQL Connection properties\n print(connection.get_dsn_parameters(), \"\\n\")\n\n # Print PostgreSQL version\n return cursor, connection\n\n except (Exception, psycopg2.Error) as error:\n print(\"Error while connecting to PostgreSQL\", error)\n return error, error\n\n def close_connection(self, cursor, connection):\n # closing database connection.\n if(connection):\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n \n\nclass DescriptionCheck:\n\n def getdata(self,brand):\n pg = postgres_conn()\n conn = pg.getConn()\n query_readyprod = f\"\"\"\n Select distinct a.brand, a.channel_sku_id,b.* from ready.ready_product_details b left join entity.product_feature_mapping a on a.channel_sku_id =\n b.asin where a.brand = '{brand}' \"\"\"\n \n df_readyprod = pd.read_sql_query(query_readyprod, conn[1])\n df_features = df_readyprod[['brand','asin', 'feature_1', 'feature_2', 
'feature_3', 'feature_4',\n 'feature_5', 'feature_6', 'feature_7', 'feature_8', 'fba',\n 'extra_feature_1', 'extra_feature_2', 'extra_feature_3', 'aplus_text', 'aplus_images',\n 'aplus_present', 'description', 'cat_lev_one', 'cat_lev_two', 'cat_lev_three',\n 'cat_lev_four']]\n df_features.drop_duplicates(subset = ['asin'],inplace=True)\n return df_features\n\n def getnumfeatures(self,df):\n df[\"Number of Features\"] = df.iloc[:,2:10].count(axis = 1)\n \n def getnumheaders(self,df):\n df[\"Number of Headers\"] = df.iloc[:,2:10].apply(lambda x: x.str.contains(\":\")).sum(axis = 1)\n\n\n\n\n\n#############################################################################\n\n# for ind in df_readyprod.index:\n# count = 0\n# count1 = 0\n# for i in range(1,8):\n# col_name = \"feature_\"+str(i)\n# feature_val = df_readyprod[col_name][ind]\n \n# if feature_val == None : \n# continue\n# elif len(feature_val)!=0 :\n# count = count+1\n# if \":\" in feature_val:\n# count1 = count1+1\n# Number_Of_Features.append(count)\n# Headers.append(count1)\n \n# df_readyprod[\"Number Of features\"] = Number_Of_Features\n# df_readyprod[\"Number Of Headers\"] = Headers\n \n \n\n ","sub_path":"build/lib/productDescription/descriptionCheck.py","file_name":"descriptionCheck.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"205754293","text":"def even(n):\n\tif n % 2 == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef main():\n\tfib = [1, 1]\n\n\tfor i in range(0, 100000):\n\t\tnext = fib[i] + fib[i + 1]\n\n\t\tif next < 4000000:\n\t\t\tfib.append(next)\n\t\telse:\n\t\t\tbreak\n\tprint(fib)\n\ttotal = 0\n\n\tfor i in fib:\n\t\tif even(i):\n\t\t\ttotal += i\n\t\t\tprint(total)\n\n\tprint(total)\nmain()\n","sub_path":"0002/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"420058543","text":"import random\nimport sys\nimport traceback\nimport battlecode as bc\n\n# Reads the Earth and Mars maps at the beginning of the game (time intensive)\n# Store the two grids in a readable format\n# Provides read and write access for other controllers to easily utilize\n# May be responsible for keeping track of comets\nclass MapController:\n\n \"\"\"This is the Map Controller\"\"\"\n def __init__(self, gameController):\n self.gameController = gameController\n self.map = {}\n self.marsMap = {}\n #access properties of earthMap like this self.earth_map[3][5]['isPassable']\n #which will access the isPassable bool for map location x = 3 y = 5\n self.earth_map = []\n self.mars_map = []\n self.my_team_start = []\n self.enemy_team_start = []\n self.startLocation = [0,0]\n\n def InitializeEarthMap(self):\n try:\n print(\"Initializing Earth map\")\n self.map = self.gameController.starting_map(bc.Planet.Earth)\n #print(self.map.width)\n #print(self.map.height)\n for unit in self.map.initial_units:\n if unit.team == self.gameController.team():\n if len(self.my_team_start) == 0:\n self.my_team_start.append(unit.location.map_location())\n else:\n if len(self.enemy_team_start) == 0:\n self.enemy_team_start.append(unit.location.map_location())\n print(self.my_team_start)\n self.earth_Map = []\n for mapX in range(self.map.width):\n self.earth_map.append([])\n for mapY in range(self.map.height):\n mapLoc = bc.MapLocation(bc.Planet.Earth,mapX, mapY)\n self.earth_map[mapX].append({\"x\": mapX, \"y\": mapY, \"hash\": 
self.hashCoordinates(mapX, mapY), \"isPassable\": self.map.is_passable_terrain_at(mapLoc), \"karboniteCount\": self.map.initial_karbonite_at(mapLoc)})\n            #print(self.map.planet)\n        except Exception as e:\n            print('Error:', e)\n            # use this to show where the error was\n            traceback.print_exc()\n\n    def InitializeMarsMap(self):\n        try:\n            print(\"Initializing Mars Map\")\n            self.marsMap = self.gameController.starting_map(bc.Planet.Mars)\n            self.mars_map = []\n            print(self.marsMap.width)\n            for mapX in range(self.marsMap.width):\n                self.mars_map.append([])\n                for mapY in range(self.marsMap.height):\n                    mapLoc = bc.MapLocation(bc.Planet.Mars, mapX, mapY)\n                    self.mars_map[mapX].append({\"x\": mapX, \"y\": mapY, \"hash\": self.hashCoordinates(mapX, mapY), \"isPassable\": self.marsMap.is_passable_terrain_at(mapLoc), \"karboniteCount\": self.marsMap.initial_karbonite_at(mapLoc)})\n                    #print(self.marsMap.planet)\n        except Exception as e:\n            print('Error:', e)\n            # use this to show where the error was\n            traceback.print_exc()\n\n    def hashCoordinates(self, inX, inY):\n        hash = 23\n        hash = 29 * hash + inX\n        hash = 29 * hash + inY\n        return hash\n\n    def GetNode(self, planet, mapX, mapY):\n        if planet == bc.Planet.Earth:\n            node = self.GetNodeEarth(mapX, mapY)\n        else:\n            node = self.GetNodeMars(mapX, mapY)\n        return node\n\n    def GetRandomEarthNode(self):\n        Xcoord = random.randint(0,self.map.width - 1)\n        Ycoord = random.randint(0,self.map.height - 1)\n        location = bc.MapLocation(bc.Planet.Earth,Xcoord,Ycoord)\n        return location\n\n    def GetRandomMarsNode(self):\n        allNodes = []\n        for Xnodes in self.mars_map:\n            for node in Xnodes:\n                if node[\"isPassable\"]:\n                    allNodes.append(node)\n        returnNode = random.choice(allNodes)\n        bcNode = bc.MapLocation(bc.Planet.Mars, returnNode[\"x\"], returnNode[\"y\"])\n        return bcNode\n        #Xcoord = random.randint(0, self.marsMap.width - 1)\n        #Ycoord = random.randint(0, self.marsMap.height - 1)\n        #location = self.mars_map[mapX][mapY]# bc.MapLocation(bc.Planet.Mars,Xcoord,Ycoord)\n        #return location\n\n    def GetNodeEarth(self, mapX, mapY):\n        if (mapX <= self.map.width - 1 and mapY <= self.map.height - 1 and mapX > -1 and mapY > -1):\n            node = self.earth_map[mapX][mapY]\n            return node\n        return None\n\n    def GetNodeMars(self, mapX, mapY):\n        if (mapX <= self.marsMap.width - 1 and mapY <= self.marsMap.height - 1 and mapX > -1 and mapY > -1):\n            node = self.mars_map[mapX][mapY]\n            return node\n        return None","sub_path":"bc18-scaffold/OnshoreBattlebot2018/Controllers/MapController.py","file_name":"MapController.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"62086811","text":"import newt\nimport objax\nimport numpy as np\nimport time\nimport pickle\nimport sys\n\nprint('loading rainforest data ...')\ndata = np.loadtxt('../../data/TRI2TU-data.csv', delimiter=',')\n\nspatial_points = np.array([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])\n\nif len(sys.argv) > 1:\n    model_type = int(sys.argv[1])\n    nr_ind = int(sys.argv[2])\n    fold = int(sys.argv[3])\nelse:\n    model_type = 0\n    nr_ind = 1\n    nr = 100  # spatial grid point (y-axis)\n    fold = 0\n\nnr = spatial_points[nr_ind]\nnt = 200  # temporal grid points (x-axis)\nscale = 1000 / nt\n\nt, r, Y_ = newt.utils.discretegrid(data, [0, 1000, 0, 500], [nt, nr])\nt_flat, r_flat, Y_flat = t.flatten(), r.flatten(), Y_.flatten()\n\nN = nr * nt  # number of data points\n\n# sort out the train/test split\nnp.random.seed(99)\nind_shuffled = np.random.permutation(N)\nind_split = 
np.stack(np.split(ind_shuffled, 10))  # 10 random batches of data indices\ntest_ind = ind_split[fold]  # test_ind = np.random.permutation(N)[:N//10]\nt_test = t_flat[test_ind]\nr_test = r_flat[test_ind]\nY_test = Y_flat[test_ind]\nY_flat[test_ind] = np.nan\nY = Y_flat.reshape(nt, nr)\n\n# put test points on a grid to speed up prediction\nX_test = np.concatenate([t_test[:, None], r_test[:, None]], axis=1)\nt_test, r_test, Y_test = newt.utils.create_spatiotemporal_grid(X_test, Y_test)\n\nvar_f = 1.  # GP variance\nlen_f = 10.  # lengthscale\n\nkern = newt.kernels.SpatialMatern32(variance=var_f, lengthscale=len_f, z=r[0, ...], sparse=False)\nlik = newt.likelihoods.Poisson()\nif model_type == 0:\n    model = newt.models.MarkovGP(kernel=kern, likelihood=lik, X=t, R=r, Y=Y)\nelif model_type == 1:\n    model = newt.models.MarkovGPMeanField(kernel=kern, likelihood=lik, X=t, R=r, Y=Y)\nelif model_type == 2:\n    model = newt.models.InfiniteHorizonGP(kernel=kern, likelihood=lik, X=t, R=r, Y=Y)\n\nprint('num spatial pts:', nr)\nprint(model)\n\ninf = newt.inference.VariationalInference(cubature=newt.cubature.Unscented())\n\ntrainable_vars = model.vars() + inf.vars()\nenergy = objax.GradValues(inf.energy, trainable_vars)\n\nlr_adam = 0.2\nlr_newton = 0.2\niters = 100\nopt = objax.optimizer.Adam(trainable_vars)\n\n\ndef train_op():\n    inf(model, lr=lr_newton)  # perform inference and update variational params\n    dE, E = energy(model)  # compute energy and its gradients w.r.t. hypers\n    return dE, E\n\n\ntrain_op = objax.Jit(train_op, trainable_vars)\n\nt0 = time.time()\nfor i in range(1, iters + 1):\n    grad, loss = train_op()\n    opt(lr_adam, grad)\n    print('iter %2d: energy: %1.4f' % (i, loss[0]))\nt1 = time.time()\nprint('optimisation time: %2.2f secs' % (t1-t0))\n\n# calculate posterior predictive distribution via filtering and smoothing at train & test locations:\nprint('calculating the posterior predictive distribution ...')\nt0 = time.time()\nnlpd = model.negative_log_predictive_density(X=t_test, R=r_test, Y=Y_test)\nt1 = time.time()\nprint('prediction time: %2.2f secs' % (t1-t0))\nprint('nlpd: %2.3f' % nlpd)\n\nwith open(\"output/\" + str(model_type) + \"_\" + str(nr_ind) + \"_\" + str(fold) + \"_nlpd.txt\", \"wb\") as fp:\n    pickle.dump(nlpd, fp)\n","sub_path":"newt/experiments/rainforest/rainforest.py","file_name":"rainforest.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"87404964","text":"import random\nimport itertools\n\n\nclass TicTacToeGame:\n    def __init__(self):\n        self.turn = True  # True: O, False: X\n        self.table = [\n            [0, 0, 0],\n            [0, 0, 0],\n            [0, 0, 0],\n        ]\n\n    def get_now_turn(self):\n        return 'O' if self.turn else 'X'\n\n    def get_checkerboard(self):\n        \"\"\"\n        format checkerboard and return\n        \"\"\"\n        return '\\n'.join(['\\t'.join([{1: 'O', -1: 'X'}.get(j, ' ') for j in i]) for i in self.table])\n\n    def update(self, place):\n        i = (place - 1) // 3\n        j = (place - 1) % 3\n        if self.table[i][j] != 0:\n            return False\n\n        value = 1 if self.turn else -1  # 1 -> O, -1 -> X, 0 -> empty\n        self.table[i][j] = value\n        return True\n\n    def check(self):\n        \"\"\"\n        :return: True -> someone win, False -> Nobody win, None -> tie\n        \"\"\"\n        # check each row\n        for line in self.table:\n            if abs(sum(line)) == 3:\n                return True\n\n        # check each column\n        for i in zip(*self.table):\n            if abs(sum(i)) == 3:\n                return True\n\n        # check the main diagonal (cells 1, 5, 9)\n        if abs(sum([self.table[i][i] for i in range(3)])) == 3:\n            return True\n\n        # check the anti-diagonal (cells 3, 5, 7)\n        if abs(sum([self.table[i][2 - i] for i 
in range(2, -1, -1)])) == 3:\n return True\n\n if 0 not in itertools.chain(*self.table):\n return None\n\n return False\n\n def switch_user(self):\n self.turn = not self.turn\n\n def computer_choice(self):\n check_value_list = [-2, 2, -1]\n for v in check_value_list:\n for i, line in enumerate(self.table):\n if sum(line) == v:\n if 0 not in line:\n continue\n\n return i * 3 + line.index(0) + 1\n\n for i, line in enumerate(zip(*self.table)):\n if sum(line) == v:\n if 0 not in line:\n continue\n\n return line.index(0) * 3 + i + 1\n\n line = [self.table[i][i] for i in range(3)]\n if sum(line) == v and 0 in line:\n i = line.index(0)\n return i * 3 + i + 1\n\n line = [self.table[i][2 - i] for i in range(3)]\n if sum(line) == v and 0 in line:\n i = line.index(0)\n return i * 3 + (2 - i) + 1\n\n t = itertools.chain(*self.table)\n return random.choice([i + 1 for i, j in enumerate(t) if j == 0])\n\n\ndef check_input(value):\n return 0 < value < 10\n\n\ndef user_input():\n while True:\n value = input('Place you want to put: ')\n if not value.isdigit():\n print('You should input a number\\n')\n continue\n\n value = int(value)\n if not check_input(value):\n print('The number you input is not between 1 and 9\\n')\n continue\n\n return value\n\n\ndef main():\n game = TicTacToeGame()\n with_c = input('Do you want to play with computer? [Y/n] ') == 'n'\n # not with computer -- > True\n # with computer -- > False\n print()\n\n while True:\n print('It\\'s {} turn.'.format(game.get_now_turn()))\n value = user_input() if game.turn or with_c else game.computer_choice()\n\n if not game.turn:\n print('Computer choice to put at:', value)\n\n if not game.update(value):\n print('This place can not be put\\n')\n continue\n\n result = game.check()\n print(game.get_checkerboard() + '\\n')\n if result is None:\n print('Tie')\n break\n\n if result:\n print('Player {} win!\\n'.format(game.get_now_turn()))\n break\n\n game.switch_user()\n print('-' * 30)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"446114318","text":"import sys\nimport re\nimport argparse\nfrom itertools import chain\nfrom collections import defaultdict\n\ndef color_print(syntax, theme, source):\n\t\"\"\"\n\tOutput to the terminal highlighted text from the given file\n\n\tParameters: \n\tsyntax - file with regular expressions\n\ttheme - file with coloring parameters\n\tsource - file with text to be highlight\n\t\"\"\"\n\t#Read from syntax file\n\tdictReg = readSyntax(syntax)\n\n\t#Read from theme file\n\tdictColor = readTheme(theme)\n\t\n\t#Read from file to be highlighted\n\tformattedText = open(source).read()\n\t\n\t#Merge two dictionaries with token's name as key\n\tmap = mergeDictionaries(dictReg, dictColor)\n\t\n\t#Highlight text\n\tformattedText = highlight(map, formattedText)\t\t\n\t\n\t#Handle overlapping \n\tformattedText = solveOverlaps(formattedText)\n\t\n\tprint(formattedText)\n\ndef readInput(file, pattern):\n\t\"\"\"\n\tExtract matches based on given pattern \n\t\n\tParameters:\n\tfile - source file \n\tpattern - regular expression to be searched\n\t\"\"\"\n\tfileText = open(file).read()\n\treg = re.findall(pattern, fileText)\n\treturn reg\n\t\ndef mergeDictionaries(dict1, dict2):\n\t\"\"\"\n\tMerge two dictionaries with the same key and different values\n\t\"\"\"\n\tmap = defaultdict(list)\n\tfor k, v in chain(dict1.items(), 
dict2.items()):\n\t\tmap[k].append(v)\n\treturn map\n\ndef highlight(map, text):\n\t\"\"\"\n\tAdd coloring code to the matches from map-dictionary\n\t\"\"\"\n\tfor name, value in map.items():\n\t\t\tstart_code = \"\\033[{}m\".format(value[1])\n\t\t\tend_code = \"\\033[0m\"\n\t\t\tregex_in = value[0]\n\t\t\tregex_out = start_code + \"\\g<0>\" + end_code\n\t\t\ttext = re.sub(regex_in, regex_out, text)\n\treturn text\n\ndef solveOverlaps(text):\n\t\"\"\"\n\tSplit overlapping coloring code: two coloring start_codes followed \n\tby end_code\n\t\"\"\"\n\tregex_in = \"(?P\\\\033\\[(?:\\d+;)+\\d+m)(?P.*?)(?P\\\\033\\[(?:\\d+;)+\\d+m.*?\\\\033\\[0m)\"\n\tregex_out = r'\\g\\g\\033[0m\\g\\g'\n\ttext = re.sub(regex_in, regex_out, text)\n\treturn text\n\ndef readSyntax(syntax):\n\t\"\"\"\n\tExtract regex patterns and their names from given syntax file\n\t\"\"\"\n\tpattern = r'\\\"(.+)\\\":'\n\treg = readInput(syntax, pattern)\n\tpattern = r'\\\".+\\\": (.+)'\n\tnames = readInput(syntax, pattern)\n\tdictReg = dict(zip(names, reg))\n\treturn dictReg\n\t\ndef readTheme(theme):\n\t\"\"\"\n\tExtract coloring codes and their names from given theme file\n\t\"\"\"\n\tpattern = r'\\b(.+):'\n\tnamesTheme = readInput(theme, pattern)\n\tpattern = r': (.+)'\n\tcolorsTheme = readInput(theme, pattern)\n\tdictColor = dict(zip(namesTheme, colorsTheme))\n\treturn dictColor\n\t\ndef are_equal(a, b):\n\treturn set(a) == set(b) and len(a) == len(b)\n\ndef make_parser():\n\t\"\"\"Populate and return argparse.ArgumentParser\"\"\"\n\t\n\tparser = argparse.ArgumentParser(description='Python highlighter')\n\tparser.add_argument('syntax', type=str, help=\"Name of the syntax file ending with .syntax\")\n\tparser.add_argument('theme', type=str, help=\"Name of the theme file ending with .theme\")\n\tparser.add_argument('source', type=str, help=\"Name of the source file ending with .[type]\")\n\treturn parser\t\n\ndef parse_args(parser):\n\t\"\"\"Parse command line arguments and return argparse.Namespace object\"\"\"\n\tif len(sys.argv[1:]) == 0:\n\t\t#No parameters passed, so help is shown \n\t\tparser.print_usage()\n\t\tsys.exit()\n\treturn parser.parse_args()\n\t\ndef run():\n\t\"\"\"Run the program\"\"\"\n\targs = parse_args(make_parser())\n\tcolor_print(syntax = args.syntax, theme = args.theme, source = args.source)\n\nif __name__ == '__main__':\n\t\"\"\"Execute if this module is run directly\"\"\"\n\trun()","sub_path":"highlighter.py","file_name":"highlighter.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"153975806","text":"import torch\n\nimport numpy as np\n\nfrom evaluation.eval_func import *\n\ndef eval_AA(out, gt):\n num = len(out)\n err = []\n # eval process\n for i in range(num):\n err.append(min(np.arccos(np.abs(out[i] @ gt[i][0]).clip(max=1))) / np.pi * 180)\n\n # performance\n err = np.sort(np.array(err))\n y = (1 + np.arange(len(err))) / len(err)\n print(\" | \".join([f\"{AA(err, y, th):.3f}\" for th in [0.2, 0.5, 1.0, 2.0, 10.0]]))\n\n return [AA(err, y, th) for th in [0.2, 0.5, 1.0, 2.0, 10.0]]\n\ndef eval_dist_AUC(out, gt):\n num = len(out)\n err = []\n # eval process\n for i in range(num):\n err.append(compute_error(out[i], gt[i]))\n\n # performance\n auc = calculate_AUC(err, tau=10)\n return 
auc","sub_path":"Dominant_parallel_lines_detection/MNet/code/evaluation/eval_process.py","file_name":"eval_process.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"364056514","text":"import arcpy\nimport datetime\nimport csv\nimport CobbDot1_Common as cobb_common\n\n\ndef sum_feet_by_vendor(np_array, vendor_name):\n return sum([x[0] for x in np_array if x[1] == vendor_name])\n\n\ndef calc_feet(state, start_date, end_date, fc_list, vendor_list, csv_writer):\n # date '7/5/2016 12:00:00 AM'\n # date_stub = \"\"\"\"{}\" >= TO_DATE('{} 00:00:00','YYYY-MM-DD HH24:MI:SS') AND\"\"\"\n # date_stub += \"\"\" \"{}\" <= TO_DATE('{} 23:59:59','YYYY-MM-DD HH24:MI:SS')\"\"\"\n date_stub = \"\"\"\"{}\" >= date '{} 12:00:00 AM' AND\"\"\"\n date_stub += \"\"\" \"{}\" <= date '{} 11:59:59 PM'\"\"\"\n\n where_clause = date_stub.format('REPORT_DATE', start_date, 'REPORT_DATE', end_date)\n\n vendor_dict = {el: [] for el in vendor_list}\n\n for fc in fc_list:\n a = arcpy.da.TableToNumPyArray(fc, ['FOOTAGE3D', 'GPS_OPERATORCOMPANY'], where_clause)\n\n # print a['FOOTAGE3D'].sum()\n\n for vendor in vendor_dict.keys():\n vendor_dict[vendor].append(sum_feet_by_vendor(a, vendor))\n\n for f in vendor_dict:\n total_output = (state[0], f) + tuple(vendor_dict[f]) + (end_date,)\n csv_writer.writerow(tuple(total_output))\n\n\n# arcpy.env.workspace = r'E:/Richard/FileGBs/Cobbler_0811.gdb'\n\n# states = [(\"OH\", \"pg13\", \"xs03\"),\n# (\"PA\", \"pg15\", \"xs05\"),\n# (\"VA\", \"pg16\", \"xs06\")]\n\nstates = [(\"VA\", \"pg16\", \"xs06\")] # [(\"VA\", \"tg16\", \"vs06\")]\n\nline_fcs = ['GPS_GasMain_Line_Delivered',\n 'GPS_ServiceLine_G_Line_Delivered',\n 'GPS_GasMain_Line_Approved',\n 'GPS_ServiceLine_G_Line_Approved',\n ]\nc_file = open('{}_Cobb0_Footages.csv'.format(datetime.datetime.now().strftime(\"%m%d\")), 'wb')\nc_writer = csv.writer(c_file, quoting=csv.QUOTE_NONNUMERIC, dialect='excel')\n\nheading = ('State', 'Vendor', 'Main Del', 'Service Del',\n 'Main App', 'Service App', 'End Date')\nc_writer.writerow(heading)\n\nfor current_state in states:\n dest_env_folder = 'E:/Cobbler/GDB/'\n dest_env_db = \"{}_Cobbler_Dot1.gdb\".format(current_state[0])\n # dest_env_db = \"{}_Cobbler_0815.gdb\".format(current_state)\n arcpy.env.workspace = dest_env_folder + dest_env_db\n\n vendors = cobb_common.build_vendor_list(line_fcs)\n\n start, end, boy, bot = cobb_common.satandsun(datetime.date.today())\n\n calc_feet(current_state, bot, end, line_fcs, vendors, c_writer)\n\n # for day in cobb_common.datespan(datetime.date(2016, 1, 2), datetime.date.today(), delta=datetime.timedelta(days=7)):\n # # start = day - datetime.timedelta(days=6)\n # # print current_state, start.isoformat(), day.isoformat()\n # calc_feet(current_state, bot, day.isoformat(), line_fcs, vendors, c_writer)\n\nc_file.close()\n","sub_path":"cobbler_early/CobbDot1_3_Footages.py","file_name":"CobbDot1_3_Footages.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"60742026","text":"import tkinter as tk\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nimport os\nimport numpy as np\nimport cv2\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D\nfrom keras.optimizers import Adam\nfrom keras.layers import MaxPooling2D\nfrom keras.preprocessing.image import ImageDataGenerator\nimport threading\nimport 
matplotlib.pyplot as plt\n\nemotion_model = Sequential()\nemotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))\nemotion_model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))\nemotion_model.add(MaxPooling2D(pool_size=(2, 2)))\nemotion_model.add(Dropout(0.25))\nemotion_model.add(Conv2D(128, kernel_size=(3,3), activation='relu'))\nemotion_model.add(MaxPooling2D(pool_size=(2, 2)))\nemotion_model.add(Conv2D(128, kernel_size=(3,3), activation='relu'))\nemotion_model.add(MaxPooling2D(pool_size=(2, 2)))\nemotion_model.add(Dropout(0.25))\nemotion_model.add(Flatten())\nemotion_model.add(Dense(1024, activation='relu'))\nemotion_model.add(Dropout(0.5))\nemotion_model.add(Dense(7, activation='softmax'))\nemotion_model.load_weights('model1.h5')\ncv2.ocl.setUseOpenCL(False)\n\nemotion_dict = {0:\" Angry \", 1:\" Disgust \", 2:\" Fearful \", 3:\" Happy \", 4:\" Neutral \", 5:\" Sad \", 6:\" Surprised \"}\ncur_path = os.path.dirname(os.path.abspath(__file__))\nprint(\"current file directory:\", cur_path)\nemoji_dist={0:cur_path+\"/emoji/angry.png\",1:cur_path+\"/emoji/disgust.png\",2:cur_path+\"/emoji/fear.png\",3:cur_path+\"/emoji/happy.png\",4:cur_path+\"/emoji/neutral.png\",5:cur_path+\"/emoji/sad.png\",6:cur_path+\"/emoji/surprise.png\"}\nglobal last_frame1\nlast_frame1 = np.zeros((480, 640, 3), dtype=np.uint8)\nglobal cap1\nshow_text = [0]\nglobal frame_number\ndef show_subject():\n    # note: the capture is reopened on every call; frame_number keeps the position\n    cap1 = cv2.VideoCapture(r'vido.mp4')\n    if not cap1.isOpened():\n        print(\"could not open the video source\")\n    global frame_number\n    length = int(cap1.get(cv2.CAP_PROP_FRAME_COUNT))\n    frame_number += 1\n    if frame_number >= length:\n        exit()\n    cap1.set(1, frame_number)\n    flag1, frame1 = cap1.read()\n    frame1 = cv2.resize(frame1,(600,500))\n    bounding_box = cv2.CascadeClassifier('E:/anaconda/Lib/site-packages/cv2/data/haarcascade_frontalface_alt.xml')\n    gray_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n    num_faces = bounding_box.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)\n    for (x, y, w, h) in num_faces:\n        cv2.rectangle(frame1, (x,y-50),(x+w, y+h+10), (255,0,0),2)\n        roi_gray_frame = gray_frame[y:y +h, x:x +w]\n        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48,48)),-1),0)\n        prediction = emotion_model.predict(cropped_img)\n        maxindex = int(np.argmax(prediction))\n        cv2.putText(frame1, emotion_dict[maxindex],(x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX,1 ,(255,255,255) ,2, cv2.LINE_AA)\n        show_text[0]=maxindex\n    if flag1 is None:\n        print(\"Major error!\")\n\n    elif flag1:\n        global last_frame1\n        last_frame1 = frame1.copy()\n        pic = cv2.cvtColor(last_frame1, cv2.COLOR_BGR2RGB)\n        img = Image.fromarray(pic)\n        imgtk = ImageTk.PhotoImage(image=img)\n        lmain.imgtk = imgtk\n        lmain.configure(image=imgtk)\n        root.update()\n        lmain.after(10, show_subject)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        exit()\ndef show_avatar():\n    print(\"show_avatar\")\n    frame2=cv2.imread(emoji_dist[show_text[0]])\n    pic2=cv2.cvtColor(frame2,cv2.COLOR_BGR2RGB)\n    img2=Image.fromarray(pic2)\n    imgtk2= ImageTk.PhotoImage(image=img2)\n    lmain2.imgtk2=imgtk2\n    lmain3.configure(text=emotion_dict[show_text[0]],font=('arial',45,'bold'))\n\n    lmain2.configure(image=imgtk2)\n    root.update()\n    lmain2.after(10, show_avatar)\n\nif __name__ == '__main__':\n    frame_number = 0\n    root=tk.Tk()\n    lmain= tk.Label(master=root, padx=50, bd=10)\n    lmain2 = tk.Label(master=root, bd=10)\n    lmain3 = tk.Label(master=root, bd=10 ,fg=\"#CDCDCD\", bg='black')\n    lmain.pack(side=LEFT)\n    lmain.place(x=50,y=150)\n    
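# Added layout note (not in the original): lmain shows the processed video\n    # frame on the left; lmain3 (emotion text) and lmain2 (emoji image) sit on\n    # the right and are refreshed by the two threads started below.\n    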
lmain3.pack()\n lmain3.place(x=960,y=250)\n lmain2.pack(side=RIGHT)\n lmain2.place(x=900,y=350)\n\n root.title(\"Photo TO Emoji\")\n root.geometry(\"1400x900+100+10\")\n root['bg']='black'\n exitButton = Button(root, text='Quit', fg=\"red\",command=root.destroy, font=('arial',25,'bold')).pack(side= BOTTOM)\n\n threading.Thread(target=show_subject).start()\n threading.Thread(target=show_avatar).start()\n root.mainloop()\n\n\n\n","sub_path":"emojis/emoji.py","file_name":"emoji.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"571440297","text":"\"\"\"Get runtime of a snippet of python code.\"\"\"\n\nfrom time import perf_counter as pc\n\nt0 = pc();\n\nfor i in range(100):\n if i % 10 == 0:\n assert i < 100\n\nt1 = pc();\n\nruntime = t1 - t0;\n","sub_path":"zruntime.py","file_name":"zruntime.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"286938391","text":"# # Task 8\r\n# Write a program to calculate the minimal cost of connecting n ropes. You\r\n# are given n ropes of different length, connect them into single rope with\r\n# minimum cost. You can assume the cost of connecting two ropes is same\r\n# as the sum of their lengths. Use heap for this problem\r\n\r\nclass heap:\r\n def __init__(self):\r\n self.a = []\r\n\r\n def create(self, d):\r\n self.a.append(d)\r\n self.minheap(self.a)\r\n\r\n def parent(self, i):\r\n return int((i - 1) / 2)\r\n\r\n def mak(self, k):\r\n self.a = []\r\n for i in k:\r\n self.create(i)\r\n\r\n def minheap(self, c):\r\n i = len(c) - 1\r\n parent = self.parent(i)\r\n while parent is not None:\r\n if c[i] < c[parent]:\r\n t = c[parent]\r\n c[parent] = c[i]\r\n c[i] = t\r\n else:\r\n return\r\n i = parent \r\n parent = self.parent(i) \r\n\r\n def minsum(self):\r\n con = 0\r\n tot = 0\r\n while len(self.a) > 1:\r\n for i in range(2):\r\n z = self.a.pop(0)\r\n self.mak(self.a)\r\n con += z\r\n self.a.append(con)\r\n self.mak(self.a)\r\n tot += con\r\n con = 0\r\n print(\"min: \", tot)\r\n \r\nh = heap()\r\nv = [8, 4, 2, 5]\r\nfor j in v:\r\n h.create(j)\r\nprint(h.a)\r\nh.minsum()\r\n","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"205591249","text":"from WMCore.Configuration import Configuration\nconfig = Configuration()\n\nprodName = \"xxx\"\n\nconfig.section_(\"General\")\nconfig.General.requestName = prodName\nconfig.General.transferLogs = True\n\nconfig.section_(\"JobType\")\nconfig.JobType.psetName = 'miniAOD_cfg.py'\nconfig.JobType.disableAutomaticOutputCollection = False\nconfig.JobType.numCores = 1\nconfig.JobType.maxMemoryMB = 2500\nconfig.JobType.maxJobRuntimeMin = 2750\n\nconfig.section_(\"Data\")\nconfig.Data.inputDataset = '/store/user/ntonon/tllqdim6_v2/tllqdim6_v2/200217_153913/0000/LHE-GEN-SIM*'\nconfig.Data.inputDBS = 'phys03' #Else, looking in DAS under prod/global\n# config.Data.userInputFiles = open('/afs/cern.ch/work/n/ntonon/public/TopEFT_MCSimulation/CMSSW_9_4_12/src/crabDir/workdir_ttlldim6/inputs_paths.txt').readlines() #Read list of input files generated using 'GenerateInputPathFile.py' script\nconfig.Data.splitting = 'FileBased'\nconfig.Data.unitsPerJob = 1 #number of files per jobs\nconfig.Data.totalUnits = 500 #Total nof files\nconfig.Data.publication = True\nconfig.Data.outputDatasetTag = 
prodName\nconfig.Data.ignoreLocality = True\n\nconfig.section_(\"Site\")\nconfig.Site.storageSite = 'T2_DE_DESY'\nconfig.Site.whitelist = ['T2_DE_*']\n\nconfig.section_(\"User\")\n\n## only german users\nconfig.User.voGroup = \"dcms\"\n","sub_path":"ProductionScripts/ConfigFiles/FullSim/crabConfigMiniAOD.py","file_name":"crabConfigMiniAOD.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"327511373","text":"from i3Deep import utils\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nfrom tqdm import tqdm\r\nfrom scipy import optimize\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom numbers import Number\r\nimport copy\r\nimport pickle\r\nimport multiprocessing\r\nfrom multiprocessing import Pool\r\nfrom functools import partial\r\nfrom i3Deep import uncertainty_metrices as um\r\n\r\n\r\ndef evaluate(data_dir, prediction_dir, ground_truth_dir, uncertainty_dir, labels, end=None, step=None, parallel=False):\r\n if end is not None:\r\n thresholds = np.arange(0.0, end, step)\r\n else:\r\n thresholds = None\r\n print(\"Thresholds: \", thresholds)\r\n prediction_filenames = utils.load_filenames(prediction_dir)\r\n ground_truth_filenames = [os.path.join(ground_truth_dir, os.path.basename(prediction_filename)) for prediction_filename in prediction_filenames]\r\n uncertainty_filenames = []\r\n\r\n for prediction_filename in prediction_filenames:\r\n basename = os.path.basename(prediction_filename)\r\n uncertainty_label_filenames = []\r\n for label in labels:\r\n filename = os.path.join(uncertainty_dir, '{}_{}.nii.gz'.format(basename[:-7], label))\r\n uncertainty_label_filenames.append(filename)\r\n uncertainty_filenames.append(uncertainty_label_filenames)\r\n uncertainty_filenames = np.asarray(uncertainty_filenames)\r\n\r\n prediction_filenames, ground_truth_filenames, uncertainty_filenames = remove_missing_cases(prediction_filenames, ground_truth_filenames, uncertainty_filenames)\r\n results = []\r\n\r\n start_time = time.time()\r\n for i, label in enumerate(tqdm(labels)):\r\n predictions, ground_truths, uncertainties = load_data(prediction_filenames, ground_truth_filenames, uncertainty_filenames[:, i])\r\n predictions, ground_truths = binarize_data_by_label(predictions, ground_truths, label)\r\n if thresholds is None:\r\n thresholds = find_best_threshold(predictions, ground_truths, uncertainties)\r\n if isinstance(thresholds, Number):\r\n thresholds = [thresholds]\r\n if not parallel:\r\n for threshold in thresholds:\r\n result = evaluate_threshold(predictions, ground_truths, uncertainties, threshold)\r\n # result[\"label\"] = label\r\n # result[\"threshold\"] = threshold\r\n results.append(result)\r\n else:\r\n with Pool(processes=4) as pool: # multiprocessing.cpu_count() kills memory\r\n results = pool.map(partial(evaluate_threshold, predictions=predictions, ground_truths=ground_truths, uncertainties=uncertainties), thresholds)\r\n results = [{\"label\": label, \"threshold\": thresholds[i], \"dice_score\": results[i][0], \"uncertainty_sum\": results[i][1]} for i in range(len(results))] # TODO: Old\r\n\r\n for key in results[0].keys():\r\n plt.plot(thresholds, [result[key] for result in results], label=key)\r\n plt.legend(loc=\"upper left\")\r\n plt.xlim(0, end)\r\n plt.ylim(0, 2)\r\n plt.savefig(data_dir + os.path.basename(uncertainty_dir[:-1]) + \"_end\" + str(end) + \"_step\" + str(step) + '.png')\r\n\r\n for result in results:\r\n print(result)\r\n\r\n with open(data_dir + 
os.path.basename(uncertainty_dir[:-1]) + \"_end\" + str(end) + \"_step\" + str(step) + \".pkl\", 'wb') as handle:\r\n pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n print(\"Elapsed time (evaluate): \", time.time() - start_time)\r\n\r\n\r\ndef load_data(prediction_filenames, ground_truth_filenames, uncertainty_filenames):\r\n print(\"Loading data...\")\r\n predictions, ground_truths, uncertainties = [], [], []\r\n target_shape = (512, 512, 260)\r\n\r\n for i in tqdm(range(len(prediction_filenames))):\r\n prediction = utils.load_nifty(prediction_filenames[i])[0].astype(np.float16)\r\n ground_truth = utils.load_nifty(ground_truth_filenames[i])[0].astype(np.float16)\r\n uncertainty = utils.load_nifty(uncertainty_filenames[i])[0].astype(np.float16)\r\n uncertainty = np.nan_to_num(uncertainty)\r\n prediction = utils.interpolate(prediction, target_shape, mask=True)\r\n ground_truth = utils.interpolate(ground_truth, target_shape, mask=True)\r\n uncertainty = utils.interpolate(uncertainty, target_shape, mask=False)\r\n predictions.append(prediction)\r\n ground_truths.append(ground_truth)\r\n uncertainties.append(uncertainty)\r\n\r\n predictions = np.asarray(predictions)\r\n ground_truths = np.asarray(ground_truths)\r\n uncertainties = np.asarray(uncertainties)\r\n print(\"Finished loading data\")\r\n return predictions, ground_truths, uncertainties\r\n\r\n\r\ndef binarize_data_by_label(predictions, ground_truths, label):\r\n predictions = np.rint(predictions)\r\n ground_truths = np.rint(ground_truths)\r\n predictions = predictions.astype(int)\r\n ground_truths = ground_truths.astype(int)\r\n predictions[predictions != label] = 0\r\n ground_truths[ground_truths != label] = 0\r\n predictions[predictions == label] = 1\r\n ground_truths[ground_truths == label] = 1\r\n return predictions, ground_truths\r\n\r\n\r\ndef find_best_threshold(predictions, ground_truths, uncertainties):\r\n\r\n def _evaluate_threshold(threshold):\r\n return 1 - evaluate_threshold(predictions, ground_truths, uncertainties, threshold)[\"uncertainty_filtered_dice\"]\r\n\r\n start_time = time.time()\r\n result = optimize.minimize_scalar(_evaluate_threshold, bounds=(0, 1))\r\n print(\"Elapsed time (find_best_threshold): \", time.time() - start_time)\r\n print(\"Success: \", result.success)\r\n print(\"best_threshold: \", result.x)\r\n return result.x\r\n\r\n\r\ndef evaluate_threshold(predictions, ground_truths, uncertainties, threshold):\r\n print(\"Threshold: \", threshold)\r\n start_time = time.time()\r\n\r\n thresholded_uncertainties = threshold_uncertainty(uncertainties, threshold)\r\n # uncertainty_sum = np.sum(thresholded_uncertainties)\r\n uncertainty_filtered_dice = um.uncertainty_filtered_dice(predictions, ground_truths, thresholded_uncertainties)\r\n relaxed_uncertainty_dice = um.relaxed_uncertainty_dice(predictions, ground_truths, thresholded_uncertainties)\r\n certain_missclassification2uncertainty_ratio = um.certain_missclassification2uncertainty_ratio(predictions, ground_truths, thresholded_uncertainties)\r\n certain_missclassification2gt_ratio = um.certain_missclassification2gt_ratio(predictions, ground_truths, thresholded_uncertainties)\r\n certain_missclassification2prediction_ratio = um.certain_missclassification2prediction_ratio(predictions, ground_truths, thresholded_uncertainties)\r\n uncertainty2prediction_ratio = um.uncertainty2prediction_ratio(thresholded_uncertainties, predictions)\r\n uncertainty2gt_ratio = um.uncertainty2gt_ratio(thresholded_uncertainties, ground_truths)\r\n 
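# Added note (not in the original): each um.* helper above is assumed to\r\n    # return a scalar ratio; the dict built below is what evaluate() collects\r\n    # once per threshold and later plots key by key over the threshold grid.\r\n    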
certain2prediction_ratio = um.certain2prediction_ratio(thresholded_uncertainties, predictions)\r\n print(\"Elapsed time (evaluate_threshold): \", time.time() - start_time)\r\n\r\n return {\"uncertainty_filtered_dice\": uncertainty_filtered_dice, \"relaxed_uncertainty_dice\": relaxed_uncertainty_dice,\r\n \"certain_missclassification2uncertainty_ratio\": certain_missclassification2uncertainty_ratio, \"certain_missclassification2gt_ratio\": certain_missclassification2gt_ratio,\r\n \"certain_missclassification2prediction_ratio\": certain_missclassification2prediction_ratio,\r\n \"uncertainty2prediction_ratio\": uncertainty2prediction_ratio, \"uncertainty2gt_ratio\": uncertainty2gt_ratio, \"certain2prediction_ratio\": certain2prediction_ratio}\r\n\r\n\r\ndef threshold_uncertainty(uncertainty, threshold):\r\n thresholded_uncertainty = copy.deepcopy(uncertainty)\r\n thresholded_uncertainty[thresholded_uncertainty <= threshold] = 0\r\n thresholded_uncertainty[thresholded_uncertainty > threshold] = 1\r\n thresholded_uncertainty = thresholded_uncertainty.astype(int)\r\n return thresholded_uncertainty\r\n\r\n\r\ndef remove_missing_cases(prediction_filenames, ground_truth_filenames, uncertainty_filenames):\r\n existing_prediction_filenames = []\r\n existing_ground_truth_filenames = []\r\n existing_uncertainty_filenames = []\r\n for i in range(len(prediction_filenames)):\r\n exists = True\r\n if not os.path.isfile(prediction_filenames[i]):\r\n exists = False\r\n if not os.path.isfile(ground_truth_filenames[i]):\r\n exists = False\r\n for uncertainty_filename_label in uncertainty_filenames[i]:\r\n if not os.path.isfile(uncertainty_filename_label):\r\n exists = False\r\n if exists:\r\n existing_prediction_filenames.append(prediction_filenames[i])\r\n existing_ground_truth_filenames.append(ground_truth_filenames[i])\r\n existing_uncertainty_filenames.append(uncertainty_filenames[i])\r\n existing_uncertainty_filenames = np.asarray(existing_uncertainty_filenames)\r\n return existing_prediction_filenames, existing_ground_truth_filenames, existing_uncertainty_filenames\r\n\r\n\r\nif __name__ == '__main__':\r\n # prediction_dir = \"/gris/gris-f/homelv/kgotkows/datasets/prostate/Task05_Prostate/predictions_tta_Tr/\"\r\n # ground_truth_dir = \"/gris/gris-f/homelv/kgotkows/datasets/prostate/Task05_Prostate/labelsTr/\"\r\n # uncertainty_dir = \"/gris/gris-f/homelv/kgotkows/datasets/prostate/Task05_Prostate/uncertainties_tta_Tr/\"\r\n # evaluate(prediction_dir, ground_truth_dir, uncertainty_dir, labels=(1, 2))\r\n\r\n data_dir = \"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task086_frankfurt2/\"\r\n prediction_dir = data_dir + \"predictions_with_tta_merged/\"\r\n ground_truth_dir = data_dir + \"labelsTr/\"\r\n uncertainty_dir = data_dir + \"uncertainties_tta_variance/\"\r\n end = 0.24 # 0.003\r\n step = 0.02 # 0.0002\r\n thresholds = np.arange(0.0, end, step)\r\n plot = False\r\n if not plot:\r\n evaluate(data_dir, prediction_dir, ground_truth_dir, uncertainty_dir, labels=(1,), end=end, step=step)\r\n else:\r\n with open(data_dir + os.path.basename(uncertainty_dir[:-1]) + \"_end\" + str(end) + \"_step\" + str(step) + \".pkl\", 'rb') as handle:\r\n results = pickle.load(handle)\r\n\r\n for result in results:\r\n print(result)\r\n\r\n for key in results[0].keys():\r\n plt.plot(np.arange(0.0, end, step), [result[key] for result in results], label=key)\r\n plt.legend(loc=\"upper left\")\r\n plt.xlim(0, end)\r\n plt.ylim(0, 2)\r\n plt.savefig(data_dir + 
os.path.basename(uncertainty_dir[:-1]) + \"_end\" + str(end) + \"_step\" + str(step) + '.png')\r\n\r\n\r\n\r\n","sub_path":"i3Deep/find_uncertainty_threshold.py","file_name":"find_uncertainty_threshold.py","file_ext":"py","file_size_in_byte":10573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"614639225","text":"from unittest2 import TestCase\nfrom mock import Mock\n\n\n\nclass Error(object):\n def __init__(self, response_error):\n self.response_error = response_error\n\n\n\nclass ErrorMessageAndFieldsTest(TestCase):\n def assert_results(self, obj, err, ret):\n from ccui.core.errors import error_message_and_fields\n self.assertEqual(error_message_and_fields(obj, Error(err)), ret)\n\n\n def test_simple(self):\n self.assert_results(\n object(), \"duplicate.name\",\n (\"The name is already in use.\", [\"name\", \"cases\"]))\n\n\n def test_field_interpolation(self):\n m = Mock()\n m.name = \"Some object\"\n self.assert_results(\n m, \"duplicate.name\",\n (\"The name Some object is already in use.\", [\"name\", \"cases\"]))\n\n\n def test_name_interpolation(self):\n class TestSuite(object):\n def __unicode__(self):\n return \"thinger\"\n\n self.assert_results(\n TestSuite(), \"changing.used.entity\",\n (\"thinger is in use elsewhere and cannot be modified.\", []))\n\n\n def test_classname_lookup(self):\n class TestSuite(object):\n pass\n\n class TestRun(object):\n pass\n\n self.assert_results(\n TestSuite(), \"activating.incomplete.entity\",\n (\"Test suite is empty; add some test cases.\", []))\n\n self.assert_results(\n TestRun(), \"activating.incomplete.entity\",\n (\"Activate or unlock parent test cycle first.\", []))\n\n\n def test_bad_error_code(self):\n self.assert_results(\n object(), \"some.wierd.error\",\n ('Unknown conflict \"some.wierd.error\"; please correct and try again.',\n []))\n\n\nclass ErrorMessageTest(ErrorMessageAndFieldsTest):\n \"\"\"\n ``error_message`` should always return the first element of the return\n value of ``error_message_and_fields``.\n\n \"\"\"\n def assert_results(self, obj, err, ret):\n from ccui.core.errors import error_message\n self.assertEqual(error_message(obj, Error(err)), ret[0])\n","sub_path":"tests/core/test_errors.py","file_name":"test_errors.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"149370375","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# import plot_decision_regions\nfrom PlotClassification import plot_decision_regions\n\n# sci-kit learn\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.metrics import multilabel_confusion_matrix\n\n# Loading the Iris dataset from scikit-learn\niris = datasets.load_iris()\nX = iris.data[:, :4]\ny = iris.target\n\nprint('Class labels:', np.unique(y))\n\n# Splitting data into 75% training and 25% test data\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=1, stratify=y)\n\n# Standardizing the features\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\n\nfrom sklearn.linear_model import LogisticRegression\n\nlr = 
LogisticRegression(C=1.0, random_state=1)\nlr.fit(X_train_std, y_train)\n\ny_pred = lr.predict(X_test_std)\n\n\n#Compute performance\nmcm = multilabel_confusion_matrix(y_test, y_pred)\n# per-class counts from the multilabel confusion matrix (not used below)\ntn = mcm[:, 0, 0]\ntp = mcm[:, 1, 1]\nfn = mcm[:, 1, 0]\nfp = mcm[:, 0, 1]\ntarget_names = ['Setosa', 'Versicolor', 'Virginica']\nprint('Confusion matrix')\nprint(metrics.confusion_matrix(y_test, y_pred))\nprint('Misclassified samples: %d' % (y_test != y_pred).sum())\n\nprint('Accuracy: %.2f' % accuracy_score(y_test, y_pred))\nprint(classification_report(y_test, y_pred, target_names=target_names))\n\n\nconfmat = confusion_matrix(y_test, y_pred)\n\n\n# Plot confusion matrix\nfig, ax = plt.subplots(figsize=(2.5, 2.5))\nax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3)\nfor i in range(confmat.shape[0]):\n    for j in range(confmat.shape[1]):\n        ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center')\nplt.xlabel('predicted label')\nplt.ylabel('true label')\nplt.show()\n\n# Plotting\nX_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))\n\nplot_decision_regions(X_combined_std, y_combined,\n                      classifier=lr, test_idx=range(105, 150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()\nweights, params = [], []\nfor c in np.arange(-5, 5):\n    lr = LogisticRegression(C=10.**c, random_state=1)\n    lr.fit(X_train_std, y_train)\n    weights.append(lr.coef_[2])\n    params.append(10.**c)\nweights = np.array(weights)\nplt.plot(params, weights[:, 0],label='petal length')\nplt.plot(params, weights[:, 1],linestyle='--',label='petal width')\nplt.ylabel('weight coefficient')\nplt.xlabel('C')\nplt.legend(loc='upper left')\nplt.xscale('log')\nplt.show()\n\n","sub_path":"Codes/Logistic regression/LogisticRegression_scikit_learn.py","file_name":"LogisticRegression_scikit_learn.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"232876236","text":"# k-means application example\nfrom sklearn.cluster import KMeans\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n# generate random data\ndata = np.random.rand(100,3)\n\n# create the model\nestim = KMeans(n_clusters=3)\n\n# fit and predict cluster assignments\ny = estim.fit_predict(data)\n\n# get the cluster centers\ncenter = estim.cluster_centers_\n# get the label of each sample\nlabel_pre = estim.labels_\n\nprint(label_pre)\nprint(center)\n\nfig = plt.figure()\nax = Axes3D(fig)\nax.scatter(data[:,0],data[:,1],data[:,2],c =y,marker=\"*\" )\n# note: c=center[0] is read as a single RGB triple, so all centres share one colour\nax.scatter(center[:,0],center[:,1],center[:,2],c = center[0],marker=\">\",s=120)\nplt.show()\n","sub_path":"629_705/7042.py","file_name":"7042.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"314713712","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@desc: Merge the code files; the output can be compressed with http://tool.oschina.net/jscompress/\n    After running this file, move the _*.html under static/built into the release views directory;\n    the remaining js and css can be compressed and published to the built directory.\n@time: 2019/7/12\n\"\"\"\nimport os\nimport re\nimport shutil\nimport hashlib\nfrom os import path, mkdir\n\nNEED_BUILD = ['base_css', 'base_js', 'base_cut']\nPATH = path.dirname(path.dirname(__file__))\nDST_PATH = path.join(PATH, 'static', 'built')\n\n\ndef merge_files(dst_name, files):\n    ext = re.sub(r'^.+\\.', '', files[0])\n    content = '\\n'.join(open(path.join(PATH, 'static', mf)).read() for mf in files)\n\n    m = hashlib.md5()\n    m.update(content.encode('utf-8'))\n    ver = m.hexdigest()[:8]\n\n    
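# Added note (not in the original): the 8-char md5 prefix above serves as a\n    # content-hash version tag for cache busting; a merged bundle might be\n    # named e.g. base_js-1a2b3c4d.js (illustrative hash value).\n    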
dst_name = '%s-%s.%s' % (dst_name, ver, ext)\n print(dst_name + ''.join('\\n\\t' + mf for mf in files))\n\n with open(path.join(DST_PATH, dst_name), 'w') as mf:\n mf.write(content)\n return dst_name\n\n\ndef merge_css_js(name, html_lines):\n css, js, lines, js_i = [], [], [], -1\n for text in html_lines:\n if re.match(r\"' in text:\n js_i = len(lines)\n if 'var resizefunc = [];' in text:\n js_i = -1\n if js_i < 0 and '' in text:\n js_i = len(lines) + 1\n lines.append(text)\n\n filename = css and merge_files(name, css)\n if filename:\n css = '' % filename\n if js_i >= 0:\n lines.insert(js_i, css)\n else:\n lines.append(css)\n\n filename = js and merge_files(name, js)\n if filename:\n js = '' % filename\n if js_i >= 0:\n lines.insert(js_i, js)\n else:\n lines.append(js)\n\n with open(path.join(PATH, 'views', '_%s_.html' % name), 'w') as f:\n f.write('\\n'.join(lines))\n\n\ndef merge_from_html(names):\n for name in names:\n html_file = path.join(PATH, 'views', '_%s.html' % name)\n print(html_file)\n if path.exists(html_file):\n with open(html_file) as f:\n html_lines = f.read().split('\\n')\n merge_css_js(name, html_lines)\n\n\ndef merge_3rd_assets(which=None):\n \"\"\" 合并第三方资源\"\"\"\n which = which or ['base_assets_css', 'base_assets_js', 'cut_assets_js',\n 'task_admin_assets_js', 'task_admin_assets_css']\n\n if 'base_assets_css' in which:\n files = [\n 'assets/sweetalert2/sweetalert2.min.css',\n 'assets/css/waves-effect.css',\n 'assets/css/animate.css',\n ]\n dst_name = 'static/built/base_assets.css'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n if 'base_assets_js' in which:\n files = [\n 'assets/jquery/jquery.slimscroll.min.js',\n 'assets/sweetalert2/sweetalert2.min.js',\n 'assets/sweetalert2/promise.min.js',\n 'assets/modal-effect/js/modalEffects.js',\n 'assets/modal-effect/js/classie.js',\n ]\n dst_name = 'static/built/base_assets.js'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n if 'cut_assets_js' in which:\n files = [\n 'js/cut/raphael.js',\n 'js/cut/raphael.zoom.js',\n 'js/cut/jquery.mapkey.js',\n ]\n dst_name = 'static/built/cut_assets.js'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n if 'task_admin_assets_js' in which:\n files = [\n 'assets/select2/select2.min.js',\n 'assets/select2/zh-CN.js',\n 'assets/jquery-multi-select/jquery.multi-select.js',\n 'assets/jquery-multi-select/jquery.quicksearch.js',\n 'assets/flatpickr/flatpickr.min.js',\n 'assets/flatpickr/zh.js',\n ]\n dst_name = 'static/built/task_admin_assets.js'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n if 'task_admin_assets_css' in which:\n files = [\n 'assets/jquery-multi-select/multi-select.css',\n 'assets/flatpickr/flatpickr.min.css',\n 'assets/select2/select2.css',\n ]\n dst_name = 'static/built/task_admin_assets.css'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n\ndef merge_local_assets(which=None):\n \"\"\" 合并本地资源\"\"\"\n which = which or ['base_local_css', 'base_local_js', 'cut_local_js']\n if 'base_local_css' in which:\n files = [\n 'css/helper.css',\n 
'css/style.css',\n 'css/base.css',\n ]\n dst_name = 'static/built/base_local.css'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n if 'base_local_js' in which:\n files = [\n 'js/backend.js',\n 'js/util.js',\n 'js/l10n.js',\n ]\n dst_name = 'static/built/base_local.js'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n if 'cut_local_js' in which:\n files = [\n 'js/cut/cut.js',\n 'js/cut/cut_keys.js',\n 'js/cut/cut_adv.js',\n ]\n dst_name = 'static/built/cut_local.js'\n content = '\\n'.join(open(path.join(PATH, 'static', fn)).read() for fn in files)\n with open(path.join(PATH, dst_name), 'w') as fn:\n fn.write(content)\n\n\nif __name__ == '__main__':\n # if not path.exists(DST_PATH):\n # mkdir(DST_PATH)\n # merge_from_html(NEED_BUILD)\n merge_3rd_assets()\n merge_local_assets()\n","sub_path":"static/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"1492157","text":"#! /usr/bin/env python3\n\nfrom collections import defaultdict\nfrom functools import partial\nfrom get_filenames import get_filenames\nfrom get_data import get_data\nfrom process_xsec import process_xsec\nfrom delta_var import get_delta\nimport numpy as np\n\nfiles = get_filenames('/home/luke/Documents/Physics/Research/MCFM-tools/sig-ggH/ktg_1.0_0.0/')\ntestfile = '/home/luke/Documents/Physics/Research/MCFM-tools/sig-ggH/ktg_1.0_0.0/ggWW4l_lo_PDF4LHC_0.25_0.25_125_6.dat'\n\n\nfiles.groupfiles(stripendnum=True)\n\nprint(files.cases)\nprint(files.parts)\nprint(files.runstrings)\nprint(files.scales)\n\ngen = get_data._getBookPlot(testfile, hist=5)\n\n# for item in gen:\n # print(item)\n\ndata = get_data(files.filenames, files.cases, files.parts, files.runstrings, files.scales)\ndata.getxsec(5)\nprint(data.xsec['ggWW4l']['nll'][('0.25', '0.50', '0.50')][''])\n\nxsec = process_xsec(data.xsec, files.cases, files.parts, files.runstrings, files.scales)\n\nprint(xsec.scales)\nprint(xsec.fixo_scales)\nprint(xsec.resm_scales)\n\nxsec._nlo()\n\nxsec.domatching()\nxsec._centralscale('ggWW4l', 'lo', '', central='0.50')\nxsec._centralscale('ggWW4l', 'nll', '', central='0.50')\nxsec._varyscalefixo('ggWW4l', 'lo', '')\nxsec._varyscaleresm('ggWW4l', 'nll', '')\nprint(xsec.xsecvar)\n# # xsec._varyscalefo('ggWW4l', 'lo')\n\nsig_bsm = {('0.50', '0.50', '0.50') : np.array([2,2,3,4,3])}\nsig_sm = {('0.50', '0.50', '0.50') : np.array([1.5,1.7, 2.3,3.6,2.1])}\nbkg = {('0.50', '0.50', '0.50') : np.array([100,100,100,100,100])}\n\ndelta = get_delta(sig_sm, sig_bsm, bkg, {('0.50', '0.50', '0.50')} )\n\n\n\ndelta.varyscaledelta()\n\nprint(delta.delta)\nprint(delta.deltavar)\n","sub_path":"data-pipeline/runpipe.py","file_name":"runpipe.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"276561591","text":"# Copyright (c) Alibaba, Inc. 
and its affiliates.\nimport copy\nimport logging\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python.framework import importer\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework.dtypes import _TYPE_TO_STRING\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model.utils_impl import get_variables_path\nfrom tensorflow.python.tools import saved_model_utils\nfrom tensorflow.python.training import saver as tf_saver\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('model_dir', '', '')\ntf.app.flags.DEFINE_string('user_model_dir', '', '')\ntf.app.flags.DEFINE_string('item_model_dir', '', '')\ntf.app.flags.DEFINE_string('user_fg_json_path', '', '')\ntf.app.flags.DEFINE_string('item_fg_json_path', '', '')\n\nlogging.basicConfig(\n level=logging.INFO, format='[%(asctime)s][%(levelname)s] %(message)s')\n\n\ndef search_pb(directory):\n dir_list = []\n for root, dirs, files in tf.gfile.Walk(directory):\n for f in files:\n _, ext = os.path.splitext(f)\n if ext == '.pb':\n dir_list.append(root)\n if len(dir_list) == 0:\n raise ValueError('savedmodel is not found in directory %s' % directory)\n elif len(dir_list) > 1:\n raise ValueError('multiple saved model found in directory %s' % directory)\n\n return dir_list[0]\n\n\ndef _node_name(name):\n if name.startswith('^'):\n return name[1:]\n else:\n return name.split(':')[0]\n\n\ndef extract_sub_graph(graph_def, dest_nodes, variable_protos):\n \"\"\"Extract the subgraph that can reach any of the nodes in 'dest_nodes'.\n\n Args:\n graph_def: graph_pb2.GraphDef\n dest_nodes: a list includes output node names\n\n Returns:\n out: the GraphDef of the sub-graph.\n variables_to_keep: variables to be kept for saver.\n \"\"\"\n if not isinstance(graph_def, graph_pb2.GraphDef):\n raise TypeError('graph_def must be a graph_pb2.GraphDef proto.')\n\n edges = {}\n name_to_node_map = {}\n node_seq = {}\n seq = 0\n nodes_to_keep = set()\n variables_to_keep = set()\n\n for node in graph_def.node:\n n = _node_name(node.name)\n name_to_node_map[n] = node\n edges[n] = [_node_name(item) for item in node.input]\n node_seq[n] = seq\n seq += 1\n for d in dest_nodes:\n assert d in name_to_node_map, \"'%s' is not in graph\" % d\n\n next_to_visit = dest_nodes[:]\n while next_to_visit:\n n = next_to_visit[0]\n\n if n in variable_protos:\n proto = variable_protos[n]\n next_to_visit.append(_node_name(proto.initial_value_name))\n next_to_visit.append(_node_name(proto.initializer_name))\n next_to_visit.append(_node_name(proto.snapshot_name))\n variables_to_keep.add(proto.variable_name)\n\n del next_to_visit[0]\n if n in nodes_to_keep:\n continue\n # make sure n is in edges\n if n in edges:\n nodes_to_keep.add(n)\n next_to_visit += edges[n]\n nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: node_seq[n])\n\n out = graph_pb2.GraphDef()\n for n in nodes_to_keep_list:\n out.node.extend([copy.deepcopy(name_to_node_map[n])])\n out.library.CopyFrom(graph_def.library)\n out.versions.CopyFrom(graph_def.versions)\n\n return out, variables_to_keep\n\n\ndef load_meta_graph_def(model_dir):\n \"\"\"Load meta graph def in saved model.\n\n Args:\n model_dir: saved model directory.\n\n Returns:\n meta_graph_def: a MetaGraphDef.\n variable_protos: a dict of VariableDef.\n input_tensor_names: signature inputs in saved model.\n output_tensor_names: signature outputs in saved model.\n \"\"\"\n input_tensor_names = {}\n output_tensor_names = {}\n variable_protos 
= {}\n\n meta_graph_def = saved_model_utils.get_meta_graph_def(\n model_dir, tf.saved_model.tag_constants.SERVING)\n signatures = meta_graph_def.signature_def\n collections = meta_graph_def.collection_def\n\n # parse collection_def in SavedModel\n for key, col_def in collections.items():\n if key in ops.GraphKeys._VARIABLE_COLLECTIONS:\n tf.logging.info('[Collection] %s:' % key)\n for value in col_def.bytes_list.value:\n proto_type = ops.get_collection_proto_type(key)\n proto = proto_type()\n proto.ParseFromString(value)\n tf.logging.info('%s' % proto.variable_name)\n variable_node_name = _node_name(proto.variable_name)\n if variable_node_name not in variable_protos:\n variable_protos[variable_node_name] = proto\n\n # parse signature info for SavedModel\n for sig_name in signatures:\n if signatures[\n sig_name].method_name == tf.saved_model.signature_constants.PREDICT_METHOD_NAME:\n tf.logging.info('[Signature] inputs:')\n for input_name in signatures[sig_name].inputs:\n input_tensor_shape = []\n input_tensor = signatures[sig_name].inputs[input_name]\n for dim in input_tensor.tensor_shape.dim:\n input_tensor_shape.append(int(dim.size))\n tf.logging.info('\"%s\": %s; %s' %\n (input_name, _TYPE_TO_STRING[input_tensor.dtype],\n input_tensor_shape))\n input_tensor_names[input_name] = input_tensor.name\n tf.logging.info('[Signature] outputs:')\n for output_name in signatures[sig_name].outputs:\n output_tensor_shape = []\n output_tensor = signatures[sig_name].outputs[output_name]\n for dim in output_tensor.tensor_shape.dim:\n output_tensor_shape.append(int(dim.size))\n tf.logging.info('\"%s\": %s; %s' %\n (output_name, _TYPE_TO_STRING[output_tensor.dtype],\n output_tensor_shape))\n output_tensor_names[output_name] = output_tensor.name\n\n return meta_graph_def, variable_protos, input_tensor_names, output_tensor_names\n\n\ndef export(model_dir, meta_graph_def, variable_protos, input_tensor_names,\n output_tensor_names, part_name, part_dir):\n \"\"\"Export subpart saved model.\n\n Args:\n model_dir: saved model directory.\n meta_graph_def: a MetaGraphDef.\n variable_protos: a dict of VariableDef.\n input_tensor_names: signature inputs in saved model.\n output_tensor_names: signature outputs in saved model.\n part_name: subpart model name, user or item.\n part_dir: subpart model export directory.\n \"\"\"\n output_tensor_names = {\n x: output_tensor_names[x]\n for x in output_tensor_names.keys()\n if part_name in x\n }\n output_node_names = [\n _node_name(output_tensor_names[x]) for x in output_tensor_names.keys()\n ]\n\n inference_graph, variables_to_keep = extract_sub_graph(\n meta_graph_def.graph_def, output_node_names, variable_protos)\n\n tf.reset_default_graph()\n with tf.Session() as sess:\n with sess.graph.as_default():\n graph = ops.get_default_graph()\n importer.import_graph_def(inference_graph, name='')\n for name in variables_to_keep:\n variable = graph.get_tensor_by_name(name)\n graph.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, variable)\n saver = tf_saver.Saver()\n saver.restore(sess, get_variables_path(model_dir))\n\n builder = tf.saved_model.builder.SavedModelBuilder(part_dir)\n signature_inputs = {}\n for input_name in input_tensor_names:\n try:\n tensor_info = tf.saved_model.utils.build_tensor_info(\n graph.get_tensor_by_name(input_tensor_names[input_name]))\n signature_inputs[input_name] = tensor_info\n except Exception:\n print('ignore input: %s' % input_name)\n\n signature_outputs = {}\n for output_name in output_tensor_names:\n tensor_info = 
tf.saved_model.utils.build_tensor_info(\n          graph.get_tensor_by_name(output_tensor_names[output_name]))\n      signature_outputs[output_name] = tensor_info\n\n      prediction_signature = (\n          tf.saved_model.signature_def_utils.build_signature_def(\n              inputs=signature_inputs,\n              outputs=signature_outputs,\n              method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME\n          ))\n\n      builder.add_meta_graph_and_variables(\n          sess, [tf.saved_model.tag_constants.SERVING],\n          signature_def_map={\n              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n                  prediction_signature,\n          })\n      builder.save()\n      config_path = os.path.join(model_dir, 'assets/pipeline.config')\n      assert tf.gfile.Exists(config_path)\n      dst_path = os.path.join(part_dir, 'assets')\n      dst_config_path = os.path.join(dst_path, 'pipeline.config')\n      tf.gfile.MkDir(dst_path)\n      tf.gfile.Copy(config_path, dst_config_path)\n      if part_name == 'user' and FLAGS.user_fg_json_path:\n        dst_fg_path = os.path.join(dst_path, 'fg.json')\n        tf.gfile.Copy(FLAGS.user_fg_json_path, dst_fg_path)\n      if part_name == 'item' and FLAGS.item_fg_json_path:\n        dst_fg_path = os.path.join(dst_path, 'fg.json')\n        tf.gfile.Copy(FLAGS.item_fg_json_path, dst_fg_path)\n\n\ndef main(argv):\n  model_dir = search_pb(FLAGS.model_dir)\n  tf.logging.info('Loading meta graph...')\n  meta_graph_def, variable_protos, input_tensor_names, output_tensor_names = load_meta_graph_def(\n      model_dir)\n  tf.logging.info('Exporting user part model...')\n  export(\n      model_dir,\n      meta_graph_def,\n      variable_protos,\n      input_tensor_names,\n      output_tensor_names,\n      part_name='user',\n      part_dir=FLAGS.user_model_dir)\n  tf.logging.info('Exporting item part model...')\n  export(\n      model_dir,\n      meta_graph_def,\n      variable_protos,\n      input_tensor_names,\n      output_tensor_names,\n      part_name='item',\n      part_dir=FLAGS.item_model_dir)\n\n\nif __name__ == '__main__':\n  tf.app.run()\n","sub_path":"easy_rec/python/tools/split_model_pai.py","file_name":"split_model_pai.py","file_ext":"py","file_size_in_byte":9616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"599559797","text":"import re\n\nn = int(input())\n\n\ndef is_closed_tag(tag):\n    return tag.find('/') != -1\n\n\ndef remove_non_chars_from_str(s):\n    return ''.join([i for i in s if i.isalpha()])\n\n\ndef is_same_tag(tag1, tag2):\n    return remove_non_chars_from_str(tag1) == remove_non_chars_from_str(tag2)\n\n\ndef print_almost(tag):\n    return print('ALMOST {}'.format(tag.upper()))\n\n\ndef is_correct(tags):\n    stack = []\n\n    if len(tags) == 1:\n        return print_almost(tags[0])\n\n    unnecessary_tag = None\n\n    for tag in tags:\n        if not is_closed_tag(tag):\n            stack.append(tag)\n            continue\n\n        # handle a closing tag\n        if len(stack) < 1:\n            if unnecessary_tag is None:\n                unnecessary_tag = tag\n                continue\n            return print('INCORRECT')\n\n        # the stack is not empty\n        last_open_tag = stack.pop()\n        # the tags match - drop the last opening one\n        if is_same_tag(tag, last_open_tag):\n            continue\n\n        # no match, and one tag has already been removed\n        if unnecessary_tag is not None:\n            return print('INCORRECT')\n\n        # remove the opening tag if the one before it matches the current closing tag\n        if len(stack) > 0 and is_same_tag(stack[-1], tag):\n            stack.pop()\n            unnecessary_tag = last_open_tag\n            continue\n\n        stack.append(last_open_tag)\n        unnecessary_tag = tag\n\n    if len(stack) == 0:\n        if unnecessary_tag is None:\n            return print('CORRECT')\n        return print_almost(unnecessary_tag)\n\n    if len(stack) == 1 and unnecessary_tag is None:\n        return print_almost(stack.pop())\n\n    return 
print('INCORRECT')\n\n\ndef unique_values(g):\n    s = set()\n    for x in g:\n        if x in s: return False\n        s.add(x)\n    return True\n\n\ndef is_incorrect_tag(tag):\n    # NOTE: the closing-tag pattern was mangled in extraction; reconstructed below\n    if re.search(r'<[^/>][^>]*>', tag) or re.search(r'</[^>]+>', tag):\n        return 1\n    return 0\n\n\ndef is_incorrect_tags(tags):\n    incorrect_tags = list(filter(is_incorrect_tag, tags))\n\n    return len(incorrect_tags) == 0\n\n\nfor i in range(0, n):\n    n = int(input())\n    tags = []\n\n    for j in range(0, n):\n        tags.append(input())\n\n    tags = list(map(lambda s: s.lower(), tags))\n\n    if unique_values(tags):\n        is_correct(tags)\n","sub_path":"programming/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"403050927","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 15 13:29:43 2020\n\n@author: wkarlina\n\"\"\"\n\nimport streamlit as st\n\n# subheader for sidebar\nst.sidebar.subheader('Make Your Selections')\n\n# option for user to capitalise input text\nuppercase = st.sidebar.checkbox('Upper Case')\n\n# option to determine the number of repetitions of the input text\nrepeat = st.sidebar.slider('How many repetitions?', 1, 3, 1)\n\n# option to reverse the order of the input text\nreverse = st.sidebar.checkbox('Reverse Order')\n\n\n# define function for text processing of input text \ndef process_text(input_split):\n    # reverse the order of list if reverse option is chosen\n    if reverse:\n        input_split = list(reversed(input_split))\n\n    # generate output based on number of repeats chosen \n    for i in range(len(input_split)):\n        input_split[i] = input_split[i] * repeat\n\n    # join list of lines to become string type\n    join_text = '\\n'.join(input_split)\n\n    # capitalise input text if uppercase is chosen\n    if uppercase:\n        join_text = str.upper(join_text)\n\n    # return join_text as string output\n    return join_text\n\n\n# input and output title\nst.title('Input and Output Text')\n\n# user can input multiline text\ninput_text = st.text_area('Input Text', 'Hello world.\\nToday is a good day.')\n\n# split the multiline text and return as list of lines\ninput_text_split = input_text.splitlines()\n\n# option to generate output text based on selection \nif st.button('Generate Output'):\n    # process text based on defined function and selections made by user\n    output_text = process_text(input_text_split)\n\n    # print output_text as multiline text\n    st.text_area('Output Text', output_text)\n","sub_path":"hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"190535631","text":"#!/usr/bin/python\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.font_manager as font_manager\n\n\n\ndef int2str(mm):\n\tif(mm == '00'): ms = 'No Data'\n\tif(mm == '01'): ms = 'January'\n\tif(mm == '02'): ms = 'February'\n\tif(mm == '03'): ms = 'March'\n\tif(mm == '04'): ms = 'April'\n\tif(mm == '05'): ms = 'May'\n\tif(mm == '06'): ms = 'June'\n\tif(mm == '07'): ms = 'July'\n\tif(mm == '08'): ms = 'August'\n\tif(mm == '09'): ms = 'September'\n\tif(mm == '10'): ms = 'October'\n\tif(mm == '11'): ms = 'November'\n\tif(mm == '12'): ms = 'December'\n\treturn ms\n\t\n\nfdate = sys.argv[1] #(expects format like: 201301)\nyyyy = fdate[0:4]\nmm = fdate[4:]\nms = int2str(mm)\nlabeldate = ms+' '+yyyy\nif(mm == '00'): labeldate = 
ms\n\nimgsize = sys.argv[2] #(expects 620, 1000, DIY, HD, or HDSD)\n\npath = './Fonts/Trebuchet_MS.ttf'\npropr = font_manager.FontProperties(fname=path)\npath = './Fonts/Trebuchet_MS_Bold.ttf'\npropb = font_manager.FontProperties(fname=path)\n\nif(imgsize == '620'):\n\tfigxsize = 8.62\n\tfigysize = 0.695\n\tfigdpi = 72\n\tfsiz1 = 12\n\tfsiz2 = 11\n\tcbx = 0.2258; cbw = 0.5463; cby = 0.33; cbh = 0.259\n\tt1x = 0.310; t1y = 0.684\n\tt2x = 0.654; t2y = 0.686\n\tt3x = 0.006; t3y = 0.77\n\tt4x = 0.899; t4y = 0.77\n\tt5x = 0.904; t5y = 0.55\n\tt6x = 0.278; t6y = 0.14\n\tt7x = 0.495; t7y = 0.14\n\tt8x = 0.700; t8y = 0.14\n\tpngfile = \"temporary_cbar.png\"\n\t\nif(imgsize == '1000'):\n\tfigxsize = 13.89\n\tfigysize = 0.695\n\tfigdpi = 72\n\tfsiz1 = 12\n\tfsiz2 = 11\n\tcbx = 0.33; cbw = 0.339; cby = 0.33; cbh = 0.259\n\tt1x = 0.382; t1y = 0.685\n\tt2x = 0.596; t2y = 0.684\n\tt3x = 0.004; t3y = 0.77\n\tt4x = 0.938; t4y = 0.77\n\tt5x = 0.941; t5y = 0.55\n\tpngfile = \"temporary_cbar.png\"\n\nif(imgsize == 'DIY'):\n\tfigxsize = 8.89\n\tfigysize = 2.44\n\tfigdpi = 72\n\tfsiz1 = 12\n\tfsiz2 = 11\n\tcbx = 0.185; cbw = 0.63; cby = 0.38; cbh = 0.1\n\tt1x = 0.33; t1y = 0.565\n\tt2x = 0.67; t2y = 0.565\n\tt3x = 0.05; t3y = 0.82\n\tt4x = 0.85; t4y = 0.82\n\tt5x = 0.852; t5y = 0.73\n\tpngfile = \"temporary_cbar.eps\"\n\nif(imgsize == 'HD' or imgsize == 'HDSD'):\n\tfigxsize = 13.5\n\tfigysize = 0.69\n\tfigdpi = 72\n\tfsiz1 = 12\n\tfsiz2 = 11\n\tcbx = 0.0; cbw = 1.0; cby = 0.01; cbh = 0.99\n\tt1x = 0.33; t1y = 0.565\n\tt2x = 0.69; t2y = 0.565\n\tt3x = 0.05; t3y = 0.82\n\tt4x = 0.85; t4y = 0.82\n\tt5x = 0.86; t5y = 0.63\n\tpngfile = \"temporary_cbar.png\"\n\nfig = plt.figure(figsize=(figxsize,figysize))\n\n# create an axes instance, leaving room for colorbar at bottom.\nax1 = fig.add_axes([0.0,0.0,1.0,1.0], axisbg='#F5F5F5')\nax1.set_frame_on(False)\nax1.set_xticks([])\nax1.set_xticklabels([])\nax1.set_yticks([])\nax1.set_yticklabels([])\n\n\nif(imgsize == '620' or imgsize == '1000' or imgsize == 'DIY'):\n\tdval = \"Difference from average temperature\"\n\tplt.text(t1x, t1y, dval, fontproperties=propb, size=fsiz1, color='#333333')\n\tplt.text(t2x, t2y, \"($^\\circ$F)\", fontproperties=propr, size=fsiz1, color='#333333')\n\t\n\tif(mm != '00'):\n\t\tplt.text(t3x, t3y, labeldate, fontproperties=propr, size=fsiz2, color='#8D8D8D')\n\t\tplt.text(t3x, t3y-0.22, 'Compared to 1981-2010', fontproperties=propr, size=fsiz2, color='#8D8D8D')\n\tif(mm == '00'): \n\t\tplt.text(t3x, t3y, labeldate, fontproperties=propr, size=fsiz2, color='#8D8D8D')\n\t\tplt.text(t4x, t4y, 'Climate.gov', fontproperties=propr, size=fsiz2, color='#8D8D8D')\n\t\tplt.text(t5x, t5y, 'Data: NCEI', fontproperties=propr, size=fsiz2, color='#8D8D8D')\n\n\n\n\ncmap = plt.cm.bwr\n\n\nlevs = np.asarray([-11, 0, 11])\nnorm = colors.Normalize(levs[0], levs[-1])\n#norm = mpl.colors.BoundaryNorm(levs, cmap.N)\nax2 = fig.add_axes([cbx,cby,cbw,cbh], axisbg='#F5F5F5')\nax2.set_frame_on(False)\nax2.set_xticks([])\nax2.set_xticklabels([])\nax2.set_yticks([])\nax2.set_yticklabels([])\n\nif(imgsize == '620' or imgsize == '1000' or imgsize == 'DIY'):\n\tbarticks = levs\n\tbarlevs = levs\n\tbar = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='horizontal', ticks=barticks)\n\tif(imgsize == 'DIY'):\n\t\tbar = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='horizontal', ticks=barticks)\n\t\tbar = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='horizontal', 
ticks=barticks)\n\tbar.outline.set_visible(True)\n\tbar.outline.set_linewidth(0.6)\n\tbar.ax.tick_params(size=0.01)\n\tbar.ax.set_xticklabels(barlevs, fontproperties=propr, size=fsiz2, va='top')\n\nif(imgsize == 'HD' or imgsize == 'HDSD'):\n\tbarticks = levs\n\tbarlevs = ['', '', '']\n\tbar = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='horizontal', ticks=barticks)\n\tbar.outline.set_visible(True)\n\tbar.outline.set_linewidth(0.6)\n\tbar.ax.tick_params(size=0.01)\n\tbar.ax.set_xticklabels(barlevs, fontproperties=propr, size=fsiz2, va='top')\n\nif(imgsize != 'DIY'):\n\tplt.savefig(pngfile, dpi=figdpi, orientation='landscape', bbox_inches='tight', pad_inches=0.0)\n\nif(imgsize == 'DIY'):\n\tplt.savefig(pngfile, dpi=figdpi, orientation='portrait', bbox_inches='tight', pad_inches=0.0)","sub_path":"anomtavgColorbar.py","file_name":"anomtavgColorbar.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"291058445","text":"#!/usr/bin/env python\n\nimport json\nimport subprocess\nimport os\nimport sys\nimport datetime\nimport shutil, errno\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-path', help = 'the config file path, default as config.json')\nargs = parser.parse_args()\n\ndef copyanything(src, dst):\n    try:\n        shutil.copytree(src, dst)\n    except OSError as exc: # python >2.5\n        if exc.errno == errno.ENOTDIR:\n            shutil.copy(src, dst)\n        else: raise\n\ndef list_tags() :\n    sys.exit(\"XRT and platform do NOT match! \\\n        Available platform and XRT combinations:\\\n        \\\n        Platform       XRT Version                         OS Version\\\n        alveo-u200     2018.3 /2019.1 / 2019.2 / 2020.1    Ubuntu 16.04 / Ubuntu 18.04 / CentOS\\\n        alveo-u250     2018.3 /2019.1 / 2019.2 / 2020.1    Ubuntu 16.04 / Ubuntu 18.04 / CentOS\\\n        alveo-u280     2019.2 / 2020.1                     Ubuntu 16.04 / Ubuntu 18.04 / CentOS\\\n        alveo-u50      2019.2 / 2020.1                     Ubuntu 16.04 / Ubuntu 18.04 / CentOS\")\n\nif args.path:\n    with open(args.path) as d:\n        repos = json.load(d)\nelse:\n    with open('config.json') as d:\n        repos = json.load(d)\n\nvendor = repos['vendor']\nprovisioners = repos['provisioners']\napp_info = repos['app_info']\npost_processors = repos['post_processors']\n# default to None so the checks below do not raise NameError when the key is absent\nmetadata = repos.get('metadata')\n\nif vendor != \"on_premise\":\n    sys.exit(\"Vendor is NOT supported! 
\")\n\nif not app_info['os_version']:\n sys.exit(\"OS version can NOT be empty!\")\n\nif not app_info['xrt_version']:\n sys.exit(\"XRT version can NOT be empty!\")\n\nif not app_info['platform']:\n sys.exit(\"Platform can NOT be empty!\")\n\nif not post_processors['repository']:\n sys.exit(\"Repository can NOT be empty!\")\nif not post_processors['tag']:\n sys.exit(\"Tag can NOT be empty!\")\n\nwith open('spec.json') as d:\n spec = json.load(d)\n\n# Xilinx Base Runtim Image Url\nimage_url = \"\" \ntarget_platforms = []\nif app_info['os_version'] in spec['os_version']:\n if app_info['xrt_version'] in spec['os_version'][app_info['os_version']]['xrt_version']:\n image_url = \"xilinx/xilinx_runtime_base:\" + \"alveo\" + \"-\" + app_info['xrt_version'] + \"-\" + app_info['os_version']\n for platform in app_info['platform']:\n if platform in spec['os_version'][app_info['os_version']]['xrt_version'][app_info['xrt_version']]['platform']:\n target_platforms.append(spec['os_version'][app_info['os_version']]['xrt_version'][app_info['xrt_version']]['platform'][platform])\n if platform == \"alveo-u50\" and app_info['xrt_version'] == \"2019.2\":\n image_url += \"-u50\"\n commands.append(\"ENV INTERNAL_BUILD=1\")\n else:\n print(\" [Warning] Invalide platform: \" + platform)\n\nif not image_url:\n list_tags()\n\ntimestamp = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\npath = \"build_history/\" + timestamp\n\ntry:\n os.mkdir(path)\nexcept OSError:\n sys.exit(\"[Error]: Can NOT create folder \" + path)\n\ncommands = []\nlabels = {}\n\nfor pro in provisioners:\n ctype = pro['type']\n if ctype == 'shell':\n commands.append(\"RUN \" + \" && \".join(pro['inline']))\n elif ctype == 'file':\n if not os.path.exists(pro['source']):\n sys.exit(pro['source'] + \" does NOT exists!\")\n filename = os.path.basename(os.path.normpath(pro['destination']))\n copyanything(pro['source'], path + \"/\" + filename)\n commands.append(\"COPY \" + filename + \" \" + pro['destination'])\n elif ctype == 'label':\n labels[pro['key']] = pro['value']\n else:\n print(\"Warning: Unknown type: \" + ctype + \"! 
\")\n\nwith open(path + \"/Dockerfile\", \"w\") as d:\n d.write(\"From \" + image_url + \"\\n\")\n for command in commands:\n d.write(command + \"\\n\")\n if labels:\n label_str = 'LABEL '\n for key in labels:\n label_str += key + '=\"' + labels[key] + '\" '\n d.write(label_str + \"\\n\")\n if metadata and \"entrypoint\" in metadata:\n d.write(\"ENTRYPOINT \" + metadata['entrypoint'])\n\n#Build application\n\nprint(\"Build docker image: \" + post_processors['repository'] + \":\" + post_processors[\"tag\"])\nsubprocess.check_call(\n \"docker build -t \" + post_processors['repository'] + \":\" + post_processors[\"tag\"] + \" \" + path,\n stderr=subprocess.STDOUT, shell=True)\n\nif post_processors['push_after_build']:\n print(\"docker push \" + post_processors['repository'] + \":\" + post_processors[\"tag\"])\n subprocess.check_call(\"docker push \" + post_processors['repository'] + \":\" + post_processors[\"tag\"],\n stderr=subprocess.STDOUT, shell=True)\n\nprint(\"Build history: \" + path)\nprint(\"Build successfully!\")\nexit(0)","sub_path":"on_premise.py","file_name":"on_premise.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"99953797","text":"# Martin Deutsch\n# Project 8\n# CS 251\n# Spring 2017\n\nimport numpy as np\nimport math\n\n# A class to hold viewing parameters and build view matrix\nclass View:\n \n # initialize view paramters\n def __init__(self):\n self.vrp = np.matrix([])\n self.vpn = np.matrix([])\n self.vup = np.matrix([])\n self.u = np.matrix([])\n self.extent = []\n self.screen = []\n self.offset = []\n \n self.reset()\n \n # give view parameters default values\n def reset(self):\n self.vrp = np.matrix([0.5, 0.5, 1])\n self.vpn = np.matrix([0, 0, -1])\n self.vup = np.matrix([0, 1, 0])\n self.u = np.matrix([-1, 0, 0])\n self.extent = [1, 1, 1]\n self.screen = [400, 400]\n self.offset = [20, 20]\n \n # create view transformation matrix\n def build(self):\n # initialize view transformation matrix\n vtm = np.identity( 4, float )\n # translate VRP to origin\n t1 = np.matrix( [[ 1, 0, 0, -self.vrp[0, 0] ],\n [ 0, 1, 0, -self.vrp[0, 1] ],\n [ 0, 0, 1, -self.vrp[0, 2] ],\n [ 0, 0, 0, 1 ] ] )\n\n vtm = t1 * vtm\n # Calculate orthonormal axes\n tu = np.cross(self.vup, self.vpn)\n tvup = np.cross(self.vpn, tu)\n tvpn = self.vpn\n tu = self.normalize(tu)\n tvup = self.normalize(tvup)\n tvpn = self.normalize(tvpn)\n self.u = tu\n self.vup = tvup\n self.vpn = tvpn\n # align the axes\n r1 = np.matrix( [[ tu[0, 0], tu[0, 1], tu[0, 2], 0.0 ],\n [ tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0 ],\n [ tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n vtm = r1 * vtm\n # translate lower left of view space to origin\n t2 = np.matrix( [[ 1.0, 0.0, 0.0, 0.5*self.extent[0] ],\n [ 0.0, 1.0, 0.0, 0.5*self.extent[1] ],\n [ 0.0, 0.0, 1.0, 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n vtm = t2 * vtm\n # scale the screen\n s1 = np.matrix( [[ -self.screen[0]/self.extent[0], 0.0, 0.0, 0.0 ],\n [ 0.0, -self.screen[1]/self.extent[1], 0.0, 0.0 ],\n [ 0.0, 0.0, 1.0/self.extent[2], 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n vtm = s1 * vtm\n # translate lower left of view space to origin and add buffer\n t3 = np.matrix( [[ 1, 0, 0, self.screen[0]+self.offset[0] ],\n [ 0, 1, 0, self.screen[1]+self.offset[1] ],\n [ 0, 0, 1, 0 ],\n [ 0, 0, 0, 1 ] ] )\n vtm = t3 * vtm\n return vtm\n \n # normalize given vector\n def normalize(self, v):\n length = math.sqrt( v[0, 0]*v[0, 0] + v[0, 1]*v[0,1] + v[0,2]*v[0,2] )\n 
return v / length\n \n # create new View object with same fields as current View object\n def clone(self):\n newView = View()\n newView.vrp = self.vrp\n newView.vpn = self.vpn\n newView.vup = self.vup\n newView.u = self.u\n newView.extent = self.extent\n newView.screen = self.screen\n newView.offset = self.offset\n return newView\n \n # rotate about the center of the view volume\n def rotateVRC(self, VUProtation, Urotation):\n t1 = np.matrix( [[ 1, 0, 0, -(self.vrp[0,0] + self.vpn[0,0] * self.extent[2] * 0.5) ],\n [ 0, 1, 0, -(self.vrp[0,1]+ self.vpn[0,1] * self.extent[2] * 0.5) ],\n [ 0, 0, 1, -(self.vrp[0,2]+ self.vpn[0,2] * self.extent[2] * 0.5) ],\n [ 0, 0, 0, 1 ] ] )\n Rxyz = np.matrix( [[ self.u[0,0], self.u[0,1], self.u[0,2], 0.0 ],\n [ self.vup[0,0], self.vup[0,1], self.vup[0,2], 0.0 ],\n [ self.vpn[0,0], self.vpn[0,1], self.vpn[0,2], 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n r1 = np.matrix( [[ math.cos(VUProtation), 0, math.sin(VUProtation), 0 ],\n [ 0, 1, 0, 0 ],\n [ -math.sin(VUProtation), 0, math.cos(VUProtation), 0 ],\n [ 0, 0, 0, 1 ] ] )\n r2 = np.matrix( [[ 1, 0, 0, 0 ],\n [ 0, math.cos(Urotation), -math.sin(Urotation), 0 ],\n [ 0, math.sin(Urotation), math.cos(Urotation), 0 ],\n [ 0, 0, 0, 1 ] ] )\n t2 = np.matrix( [[ 1, 0, 0, self.vrp[0,0] + self.vpn[0,0] * self.extent[2] * 0.5 ],\n [ 0, 1, 0, self.vrp[0,1]+ self.vpn[0,1] * self.extent[2] * 0.5 ],\n [ 0, 0, 1, self.vrp[0,2]+ self.vpn[0,2] * self.extent[2] * 0.5 ],\n [ 0, 0, 0, 1 ] ] )\n tvrc = np.matrix( [[ self.vrp[0,0],self.vrp[0,1], self.vrp[0,2], 1 ],\n [ self.u[0,0], self.u[0,1], self.u[0,2], 0 ],\n [ self.vup[0,0], self.vup[0,1], self.vup[0,2], 0 ],\n [ self.vpn[0,0], self.vpn[0,1], self.vpn[0,2], 0 ] ] )\n tvrc = (t2 * Rxyz.T * r2 * r1 * Rxyz * t1 * tvrc.T).T\n self.vrp = tvrc[0, :3]\n self.u = self.normalize(tvrc[1, :3])\n self.vup = self.normalize(tvrc[2, :3])\n self.vpn = self.normalize(tvrc[3, :3])","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"449039333","text":"import os\nimport webbrowser\nfrom .catalog import GeodataCatalog\nfrom geoserver.catalog import Catalog\nfrom geoserver.catalog import ConflictingDataError\nfrom . import log\nfrom . 
import feedback\nimport json\n\nclass GSConfigCatalogUsingNetworkAccessManager(Catalog):\n\n #A class that patches the gsconfig Catalog class, to allow using a custom network access manager \n def __init__(self, service_url, network_access_manager):\n self.service_url = service_url.strip(\"/\")\n self._cache = dict()\n self._version = None\n self.nam = network_access_manager\n self.username = ''\n self.password = ''\n\n def http_request(self, url, data=None, method='get', headers = {}):\n log.logInfo(\"Making '%s' request to '%s'\" % (method, url))\n resp = self.nam.request(url, method, data, headers)\n return resp\n\n def setup_connection(self):\n pass\n\nclass GeoServerCatalog(GeodataCatalog):\n\n def __init__(self, service_url, network_access_manager, workspace):\n super(GeoServerCatalog, self).__init__(service_url, network_access_manager)\n self.workspace = workspace\n self.gscatalog = GSConfigCatalogUsingNetworkAccessManager(service_url, network_access_manager)\n\n def base_url(self):\n return \"/\".join(self.service_url.split(\"/\")[:-1])\n\n def publish_vector_layer_from_file(self, filename, layername, crsauthid, style, stylename):\n log.logInfo(\"Publishing layer from file: %s\" % filename)\n self._ensureWorkspaceExists()\n self._deleteLayerIfItExists(layername)\n self.publish_style(stylename, zipfile = style)\n feedback.setText(\"Publishing data for layer %s\" % layername)\n if filename.lower().endswith(\".shp\"):\n basename, extension = os.path.splitext(filename)\n path = {\n 'shp': basename + '.shp',\n 'shx': basename + '.shx',\n 'dbf': basename + '.dbf',\n 'prj': basename + '.prj'\n }\n self.gscatalog.create_featurestore(layername, path, self.workspace, True)\n self._set_layer_style(layername, stylename)\n elif filename.lower().endswith(\".gpkg\"):\n with open(filename, \"rb\") as f:\n url = \"%s/workspaces/%s/datastores/%s/file.gpkg?update=overwrite\" % (self.service_url, self.workspace, layername)\n self.http_request(url, f.read(), \"put\")\n storeName = os.path.splitext(os.path.basename(filename))[0]\n url = \"%s/workspaces/%s/layers/%s.json\" % (self.service_url, self.workspace, storeName)\n #TODO ensure layer name \n log.logInfo(\"Feature type correctly created from GPKG file '%s'\" % filename)\n self._set_layer_style(layername, stylename)\n\n def publish_vector_layer_from_postgis(self, host, port, database, schema, table, \n username, passwd, crsauthid, layername, style, stylename):\n self._ensureWorkspaceExists()\n self._deleteLayerIfItExists(layername)\n self.publish_style(stylename, zipfile = style)\n feedback.setText(\"Publishing data for layer %s\" % layername) \n store = self.gscatalog.create_datastore(layername, self.workspace)\n store.connection_parameters.update(host=host, port=str(port), database=database, user=username, \n schema=schema, passwd=passwd, dbtype=\"postgis\")\n self.gscatalog.save(store) \n ftype = self.gscatalog.publish_featuretype(table, store, crsauthid, native_name=layername) \n if ftype.name != layername:\n ftype.dirty[\"name\"] = layername\n self.gscatalog.save(ftype)\n self._set_layer_style(layername, stylename)\n\n def publish_raster_layer(self, filename, style, layername, stylename):\n feedback.setText(\"Publishing data for layer %s\" % layername)\n self._ensureWorkspaceExists()\n self.publish_style(stylename, zipfile = style)\n self.gscatalog.create_coveragestore(layername, self.workspace, filename)\n self._set_layer_style(layername, stylename)\n\n def create_group(self, groupname, layernames):\n try: \n group = 
self.gscatalog.create_layergroup(groupname, layernames, layernames, None, workspace=self.workspace)\n self.gscatalog.save(group)\n except ConflictingDataError:\n layergroup = self.gscatalog.get_layergroups(groupname)[0]\n layergroup.dirty.update(layers = layernames, styles = layernames)\n\n def publish_style(self, name, sld=None, zipfile=None):\n feedback.setText(\"Publishing style for layer %s\" % name)\n self._ensureWorkspaceExists()\n styleExists = bool(self.gscatalog.get_styles(names=name, workspaces=self.workspace))\n if sld:\n self.gscatalog.create_style(name, sld, True)\n log.logInfo(\"Style %s correctly created from SLD file '%s'\" % (name, sld))\n elif zipfile:\n headers = {'Content-type': 'application/zip'}\n if styleExists:\n method = \"put\"\n url = self.service_url + \"/workspaces/%s/styles/%s\" % (self.workspace, name)\n else:\n url = self.service_url + \"/workspaces/%s/styles?name=%s\" % (self.workspace, name)\n method = \"post\"\n with open(zipfile, \"rb\") as f:\n self.http_request(url, f.read(), method, headers)\n log.logInfo(\"Style %s correctly created from Zip file '%s'\" % (name, zipfile))\n else:\n raise ValueError(\"A style definition must be provided, whether using a zipfile path or a SLD string\")\n\n def style_exists(self, name):\n if not self._workspaceExists():\n return False\n return len(self.gscatalog.get_styles(name, self.workspace)) > 0\n\n def delete_style(self, name):\n styles = self.gscatalog.get_styles(name, self.workspace)\n if styles:\n self.gscatalog.delete(styles[0])\n\n def layer_exists(self, name):\n return self._get_layer(name) is not None\n\n def delete_layer(self, name):\n layer = self._get_layer(name)\n self.gscatalog.delete(layer, recurse = True, purge = True)\n \n def open_wms(self, names, bbox, srs):\n url = self.layer_wms(names, bbox, srs)\n webbrowser.open_new_tab(url)\n\n def layer_wms(self, names, bbox, srs):\n baseurl = self.base_url()\n names = \",\".join([\"%s:%s\" % (self.workspace, name) for name in names])\n url = (\"%s/%s/wms?service=WMS&version=1.1.0&request=GetMap&layers=%s&format=application/openlayers&bbox=%s&srs=%s&width=800&height=600\" \n % (baseurl, self.workspace, names, bbox, srs))\n return url\n \n def set_layer_metadata_link(self, name, url):\n layer = self._get_layer(name)\n resource = layer.resource\n resource.metadata_links= [('text/html', 'other', url),]\n self.gscatalog.save(resource)\n\n def delete_workspace(self):\n ws = self.gscatalog.get_workspaces(self.workspace)\n if ws:\n self.gscatalog.delete(ws[0], recurse = True)\n\n ##########\n\n def _get_layer(self, name):\n fullname = self.workspace + \":\" + name\n for layer in self.gscatalog.get_layers():\n if layer.name.lower() == fullname.lower():\n return layer\n\n def _set_layer_style(self, layername, stylename):\n self.gscatalog._cache.clear() #We are doing stuff on the geoserver rest api without using gsconfig, so cache might be outdated\n layer = self._get_layer(layername)\n default = self.gscatalog.get_styles(stylename, self.workspace)[0]\n layer.default_style = default\n self.gscatalog.save(layer)\n log.logInfo(\"Style %s correctly assigned to layer %s\" % (stylename, layername))\n\n\n def _workspaceExists(self):\n ws = self.gscatalog.get_workspaces(self.workspace)\n return bool(ws)\n\n def _ensureWorkspaceExists(self):\n ws = self.gscatalog.get_workspaces(self.workspace)\n if not ws:\n log.logInfo(\"Workspace %s does not exist. 
Creating it.\" % self.workspace)\n self.gscatalog.create_workspace(self.workspace, \"http://%s.geocat.net\" % self.workspace) #TODO change URL\n\n\n def _deleteLayerIfItExists(self, name):\n layer = self._get_layer(name)\n if layer:\n self.gscatalog.delete(layer)\n try:\n stores = self.gscatalog.get_stores(name, self.workspace)\n if stores:\n store = stores[0]\n for res in store.get_resources():\n self.gscatalog.delete(res)\n self.gscatalog.delete(store)\n except:\n pass \n '''We swallow possible errors while deleting the underlying datastore.\n That shouldn't be a problem, since later we are going to upload using overwrite mode'''\n\n","sub_path":"bridgecommon/geoservercatalog.py","file_name":"geoservercatalog.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"6128022","text":"\"\"\"\nauteur : Franck CHAMBON\nhttps://prologin.org/train/2003/semifinal/table_de_multiplications\n\"\"\"\n\ndef affiche_table(nombre: int) -> None:\n \"\"\"Affiche la table de multiplication de `nombre`\n\n >>> affiche_table(3)\n 3x1=3\n 3x2=6\n 3x3=9\n 3x4=12\n 3x5=15\n 3x6=18\n 3x7=21\n 3x8=24\n 3x9=27\n\n \"\"\"\n for k in range(1, 10):\n ## Version classique\n #print(nombre, \"x\", k, \"=\", k * nombre, sep=\"\")\n\n ## Version f-string ; recommandée\n print(f\"{nombre}x{k}={k * nombre}\")\n\n\nimport doctest\ndoctest.testmod()\n\nnombre = int(input())\n\naffiche_table(nombre)\n","sub_path":"Term_NSI/devoirs/4-dm2/Corrigé/tests/E7/E7.py","file_name":"E7.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"563801818","text":"\r\n\r\nimport time\r\nimport os\r\nimport sys\r\nfrom collections import defaultdict\r\nfrom scipy.io import wavfile\r\nimport numpy as np\r\n#import cPickle as pickle\r\nimport pickle\r\nimport traceback as tb\r\n\r\n#from feature import mix_feature\r\nimport MFCC\r\nimport LPC\r\nfrom skgmm import GMMSet\r\n\r\nimport pickle\r\n\r\n\r\nCHECK_ACTIVE_INTERVAL = 1 # seconds\r\n\r\nclass ModelInterface(object):\r\n #from feature.LPC import extract2\r\n #from feature.MFCC import extract\r\n UBM_MODEL_FILE = None\r\n\r\n def __init__(self):\r\n self.features = defaultdict(list)\r\n self.gmmset = GMMSet()\r\n #self.vad = VAD()\r\n '''\r\n def mix_feature(self, tup):\r\n mfcc = MFCC.extract(tup)\r\n lpc = LPC.extract(tup)\r\n if len(mfcc) == 0:\r\n print(sys.stderr, \"ERROR.. 
failed to extract mfcc feature:\", len(tup[1]))\r\n return np.concatenate((mfcc, lpc), axis=1)\r\n '''\r\n def enroll(self, name, fs, signal):\r\n \"\"\"\r\n add the signal to this person's training dataset\r\n name: person's name\r\n \"\"\"\r\n mfcc = MFCC.extract((fs, signal))\r\n lpc = LPC.extract2((fs, signal))\r\n feat = np.concatenate((mfcc, lpc), axis=1) # output : np.array of a wave file, \"\"[mfcc, lpc]\"\",\r\n self.features[name].extend(feat) # label : name of a person, feature : defaultdict\r\n\r\n def _get_gmm_set(self):\r\n return GMMSet()\r\n\r\n def train(self):\r\n self.gmmset = self._get_gmm_set() #gmmset.GMMSet()\r\n start = time.time()\r\n print(\"Start training...\")\r\n for name, feats in self.features.iteritems():\r\n print(name)\r\n self.gmmset.fit_new(feats, name)\r\n print(time.time() - start, \" seconds\")\r\n for i in range(len(self.gmmset.y)):\r\n with open(\"./pickled/{}\".format(self.gmmset.y[i]), 'wb') as ff:\r\n pickle.dump((self.gmmset.y[i], self.gmmset.x[i]),ff)\r\n sys.exit(1)\r\n def predict(self, fs, signal):\r\n \"\"\"\r\n return a label (name)\r\n \"\"\"\r\n #try:\r\n mfcc = MFCC.extract((fs, signal))\r\n lpc = LPC.extract2((fs, signal))\r\n feat = np.concatenate((mfcc, lpc), axis=1)\r\n #feat = mix_feature((fs, signal)) # feat : np.concatenate((mfcc, lpc), axis=1)\r\n #except:\r\n # pass\r\n return self.gmmset.predict_one(feat)\r\n\r\n def dump(self, fname):\r\n \"\"\" dump all models to file\"\"\"\r\n self.gmmset.before_pickle()\r\n with open(fname, 'w') as f:\r\n pickle.dump(self, f, -1)\r\n self.gmmset.after_pickle()\r\n\r\n @staticmethod\r\n def load(fname):\r\n \"\"\" load from a dumped model file\"\"\"\r\n with open(fname, 'r') as f:\r\n R = pickle.load(f)\r\n R.gmmset.after_pickle()\r\n return R\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\" some testing\"\"\"\r\n m = ModelInterface()\r\n fs, signal = wavfile.read(\"../corpus.silence-removed/Style_Reading/f_001_03.wav\")\r\n m.enroll('h', fs, signal[:80000])\r\n fs, signal = wavfile.read(\"../corpus.silence-removed/Style_Reading/f_003_03.wav\")\r\n m.enroll('a', fs, signal[:80000])\r\n m.train()\r\n","sub_path":"modelinterface.py","file_name":"modelinterface.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"147485823","text":"import os\nfrom os import walk\nimport random\nimport keras\nfrom keras import backend as K\nimport numpy as np\nimport json\nimport numpy as np\nimport PIL\nfrom PIL import ImageOps\n#from PIL import Image\nfrom PIL import ImageFilter\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport SQL_get_np_for_image as SQL_image\n\n# The \"help me translate\" program has a SQL backend.\n# This SQL dumps all the images in file located at\n# /media/john/dropbox/transcription_db/crop_sets/2019_07_24/crop_images\n# /dropbox/transcription_db/crop_sets/2019_07_24/crop_images\n# with one file per image and file name is id.png\n#\n# Inside Docker is is at /crops_set/crop_images\n#\n# Also created by the SQL is a ground truth translates for some of these images.\n# These are located at \n# /media/john/dropbox/transcription_db/crop_set/2019_07_24/transcribed_words\n# /dropbox/transcription_db/crop_set/2019_07_24/transcribed_words. 
Same as before.\n#\n# Inside Docker it is at /crops_set/transcribed_words\n#\n# this reads the json, then crops all images and places them in\n# a temporary directory\n#import make_tmp_german_png\n\nprint(\"code under development ...\")\nprint(\"click debug in visual studio code\")\n# Setting up remote debugging:\n# https://code.visualstudio.com/docs/python/debugging\n\n# Allow other computers to attach to ptvsd at this IP address and port.\n# ptvsd.enable_attach(address=('1.2.3.4', 3000), redirect_output=True)\n#import ptvsd\n#ptvsd.enable_attach()\n\n# Pause the program until a remote debugger is attached\n#print(\"WAITING FOR DEBUGGER\")\n#ptvsd.wait_for_attach()\n\n# This generator will select words from the following database:\n# -------------------------------------------------------------\n# hand crafted db by jjg\n\nalphabet = u'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzÖÜßáäõöüăČ芚ẞ,-. '\nalphabet = u'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzÖÜßáäõöüăČ芚ẞ '\n#mypath = '/home/john/Documents/GitHub/ancient-german/db-of-words-crops/known_words.json'\nmypath_images = r'/crops_set/crop_images/'\nmypath_words = r'/crops_set/transcribed_words'\nwords_id_text_list = [] \n\n\ndef read_word_from_txt_file(id, file_to_open, words_id_text_list, max_string_len):\n\n    word = open(file_to_open).read()\n\n    if len(word)>0:\n        #if len(words_id_text_list)<32: #for debug only, do not submit\n        words_id_text_list.append( {\"id\": id, \"word\": word})\n\n\n\n\n\ndef read_all_words_in_directory(mypath, words_id_text_list, max_string_len):\n\n    for (dirpath, dirnames, filenames) in walk(mypath):\n        for name in filenames:\n            if name.endswith(\".txt\"):\n                file_to_open = os.path.join(dirpath, name)\n\n                read_word_from_txt_file(name[:-4], file_to_open, words_id_text_list, max_string_len)\n\n\n\n# # alphabet is required for 1 hot encoding\ndef make_alphabet(words_id_text_list):\n    alphabet = \"\"\n\n    for w in words_id_text_list:\n        for c in w[\"text\"]:\n            if c not in alphabet:\n                alphabet = alphabet + c\n\n    # sort the alphabet used in these words so that it's easier to see if any are missing\n    alphabet = ''.join(sorted(alphabet))\n    #alphabet = alphabet + \" \"\n    return alphabet\n\n# Translation of characters to unique integer values\ndef text_to_labels(text, absolute_max_string_len, alphabet):\n    ret = []\n    for i in range(absolute_max_string_len):\n        # NOTE: the body below and the next function's signature were lost to\n        # markup stripping; restored as a plausible reconstruction (padding with\n        # the blank label, consistent with get_output_size() = len(alphabet) + 1)\n        if i < len(text):\n            ret.append(alphabet.find(text[i]))\n        else:\n            ret.append(len(alphabet))\n    return ret\n\n\ndef is_valid_word(in_str):  # reconstructed name; the original was lost\n    if len(in_str)>15:\n        return False\n    # todo, for now, restrict to small words ... 
let's see if this helps\n    #if len(in_str)>4:\n    #    return False\n\n    # check for invalid chars, only useful when not stripping invalids \n    for c in in_str:\n        i = alphabet.find(c)\n        if i == -1:\n            return False\n\n    return True\n\n\nclass German_Word_Generator(keras.callbacks.Callback):\n\n    def __init__(self, minibatch_size,\n                 img_w, img_h, downsample_factor, \n                 absolute_max_string_len=16):\n\n        self.minibatch_size = minibatch_size\n        self.img_w = img_w\n        self.img_h = img_h\n        self.downsample_factor = downsample_factor\n        #self.blank_label = self.get_output_size() - 1  # last entry of alphabet needs to be blank\n        self.absolute_max_string_len = absolute_max_string_len\n        self.words_id_text_list = [] \n        self.validation_words_id_text_list = []\n        self.index_into_word_id_text_list = 0\n        self.build_word_list(16)\n\n    def set_img_w(self, w):\n        self.img_w = w\n\n    def set_absolute_max_string_len(self, m):\n        self.absolute_max_string_len = m\n\n    def get_output_size(self):\n        return len(alphabet) + 1\n\n    def build_word_list(self, max_string_len): # tbd, why max_string_length here\n        read_all_words_in_directory(mypath_words, self.words_id_text_list, max_string_len)\n        #random.shuffle(self.words_id_text_list)\n\n        # todo, rename this variable\n        \n        #self.words_id_text_list = json.loads(open(mypath).read())\n        ##self.words_id_text_list = make_tmp_german_png.create_temp_png_files_and_return_records()\n        random.shuffle(self.words_id_text_list)\n\n        # 25% for validation\n        validation_0_to_n = int(len(self.words_id_text_list)/4)\n        self.validation_words_id_text_list = self.words_id_text_list[:validation_0_to_n]\n        self.words_id_text_list = self.words_id_text_list[validation_0_to_n:]\n\n    def get_batch(self, words_id_text_list, minibatch_size, train):\n        # width and height are backwards from typical Keras convention\n        # because width is the time dimension when it gets fed into the RNN\n        if K.image_data_format() == 'channels_first':\n            ## not implemented\n            assert False\n        else:\n            # TensorFlow\n            image_batch = []\n            source_str_batch = [] # abc\n            lables_batch = [] # [1, 2, 3, -1, -1 ] where 1=a, 2=b, 3=c\n            lables_length_batch = [] # how long each text\n            ctc_input_length = [] \n\n            while len(image_batch))\n# Sylvain Garancher \n#\n# This file is a part of container\n#\n# container is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# container is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\n\nfrom osv import osv\nfrom osv import fields\n\n\nclass stock_move(osv.osv):\n    _inherit = 'stock.move'\n\n    def _check_tracking(self, cr, uid, ids, context=None):\n        \"\"\" Checks whether a production lot is assigned to the stock move or not.\n        @return: True or False\n        \"\"\"\n        move_ids = []\n        for move in self.browse(cr, uid, ids, context=context):\n            if not move.container_id:\n                move_ids.append(move.id)\n        if move_ids:\n            return super(stock_move, self)._check_tracking(cr, uid, move_ids, context=context)\n        return True\n\n    _columns = {\n        'container_id': fields.many2one('stock.container', 'Container', help='Container of this move'),\n    }\n\n    _constraints = [\n        (_check_tracking,\n         'You must assign a production lot for this product',\n         ['prodlot_id'])]\n\n    def do_partial(self, cr, uid, ids, partial_datas, context=None):\n        if context is None:\n            context = {}\n        move_ids = super(stock_move, self).do_partial(cr, uid, ids, partial_datas, context=context)\n        container_id = context.get('container_id', False)\n        if container_id:\n            container_obj = self.pool.get('stock.container')\n            move_ids.extend([move.id for move in container_obj.browse(cr, uid, container_id, context=context).incoming_move_list_ids])\n            container_obj.write(cr, uid, [container_id], {'incoming_move_list_ids': [(6, 0, list(set(move_ids)))]}, context=context)\n\nstock_move()\n\n\nclass stock_picking(osv.osv):\n    _inherit = 'stock.picking'\n\n    def do_partial(self, cr, uid, ids, partial_datas, context=None):\n        if context is None:\n            context = {}\n        res = super(stock_picking, self).do_partial(cr, uid, ids, partial_datas, context=context)\n        container_ids = context.get('container_ids', [])\n        # Check if in a container\n        if container_ids:\n            move_obj = self.pool.get('stock.move')\n            container_obj = self.pool.get('stock.container')\n            for container in container_obj.browse(cr, uid, container_ids, context=context):\n                for picking in self.browse(cr, uid, ids, context=context):\n                    # Check for a backorder; if there is one, we must remove this picking from the container and change location_id in the stock move\n                    if picking.backorder_id:\n                        # FIXME: what if no partner is found?\n                        if picking.partner_id:\n                            loc_id = picking.partner_id.property_stock_supplier.id\n                            for move in picking.move_lines:\n                                move_obj.write(cr, uid, [move.id], {'location_id': loc_id}, context=context)\n                                container_obj.write(cr, uid, [container.id], {'incoming_move_list_ids': [(3, move.id)]}, context=context)\n                            for move_backorder in picking.backorder_id.move_lines:\n                                container_obj.write(cr, uid, [container.id], {'incoming_move_list_ids': [(4, move_backorder.id)]}, context=context)\n        return res\n\nstock_picking()\n\n\nclass stock_warehouse(osv.osv):\n    _inherit = 'stock.warehouse'\n\n    _columns = {\n        'lot_container_id': fields.many2one('stock.location', 'Container location', help='Container location for reservation'),\n    }\n\nstock_warehouse()\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"504760823","text":"# py.test -p no:django -v\n\ndef sort(list_of_ints, descending):\n    assert isinstance(list_of_ints, list)\n    assert all(isinstance(x, int) for x in list_of_ints)\n    if descending:\n        return list_of_ints\n    else:\n        return [4, 2, 3]\n\n\nfrom hypothesis import given\nimport hypothesis.strategies as st\nimport 
collections\n\n@given(st.lists(st.integers()))\ndef test_presence_with_freq(values):\n result = sort(values, True)\n tmp1 = collections.Counter(values)\n tmp2 = collections.Counter(result)\n assert tmp1 == tmp2\n \n \n","sub_path":"content/Property Based Testing/code-snippets-1/test_sorting_incorrect_passing_presence_with_frequency_1.py","file_name":"test_sorting_incorrect_passing_presence_with_frequency_1.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"161754161","text":"\"\"\"\n\"\"\"\n\n\nclass ParserError(Exception):\n \"\"\" Represents the error the parser can raise \"\"\"\n\n def __init__(self, message, row, column):\n super().__init__(f\"(line: {row}, col: {column}) {message}\")\n self.message = message # Added because it is missing after super init\n self.row = row\n self.column = column\n\n def __repr__(self):\n return (\n f'ParserError(message=\"{self.message}\", row={self.row}'\n f\", column={self.column})\"\n )\n","sub_path":"compiler/errors/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"437104119","text":"# Copyright (C) 2017 Catalyst IT Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom datetime import datetime\nfrom datetime import timedelta\nimport hashlib\nimport json\nimport os\n\nimport mock\n\nfrom distil.collector import base as collector_base\nfrom distil.common import constants\nfrom distil import config\nfrom distil.db.sqlalchemy import api as db_api\nfrom distil.service import collector\nfrom distil.tests.unit import base\n\n\nclass CollectorTest(base.DistilWithDbTestCase):\n def setUp(self):\n super(CollectorTest, self).setUp()\n\n meter_mapping_file = os.path.join(\n os.environ[\"DISTIL_TESTS_CONFIGS_DIR\"],\n 'meter_mappings.yaml'\n )\n self.conf.set_default(\n 'meter_mappings_file',\n meter_mapping_file,\n group='collector'\n )\n\n transformer_file = os.path.join(\n os.environ[\"DISTIL_TESTS_CONFIGS_DIR\"],\n 'transformer.yaml'\n )\n self.conf.set_default(\n 'transformer_file',\n transformer_file,\n group='collector'\n )\n\n @mock.patch('distil.collector.base.BaseCollector.get_meter')\n def test_collect_swift_resource_id(self, mock_get_meter):\n project_id = 'fake_project_id'\n project_name = 'fake_project'\n project = {'id': project_id, 'name': project_name}\n start_time = datetime.strptime(\n '2017-02-27 00:00:00',\n \"%Y-%m-%d %H:%M:%S\"\n )\n end_time = datetime.strptime(\n '2017-02-27 01:00:00',\n \"%Y-%m-%d %H:%M:%S\"\n )\n\n # Add project to db in order to satisfy the foreign key constraint of\n # UsageEntry\n db_api.project_add(\n {\n 'id': project_id,\n 'name': 'fake_project',\n 'description': 'project for test'\n }\n )\n\n container_name = 'my_container'\n resource_id = '%s/%s' % (project_id, container_name)\n resource_id_hash = hashlib.md5(resource_id.encode('utf-8')).hexdigest()\n\n mock_get_meter.return_value 
= [\n {\n 'resource_id': resource_id,\n 'source': 'openstack',\n 'volume': 1024\n }\n ]\n\n collector = collector_base.BaseCollector()\n collector.collect_usage(project, [(start_time, end_time)])\n\n resources = db_api.resource_get_by_ids(project_id, [resource_id_hash])\n res_info = json.loads(resources[0].info)\n\n self.assertEqual(1, len(resources))\n self.assertEqual(container_name, res_info['name'])\n\n entries = db_api.usage_get(project_id, start_time, end_time)\n\n self.assertEqual(1, len(entries))\n self.assertEqual(resource_id_hash, entries[0].resource_id)\n\n @mock.patch(\n 'distil.collector.ceilometer.CeilometerCollector.collect_usage')\n @mock.patch('distil.common.openstack.get_ceilometer_client')\n @mock.patch('distil.common.openstack.get_projects')\n def test_last_collect_new_project(self, mock_get_projects, mock_cclient,\n mock_collect_usage):\n # Assume project_2 is a new project that doesn't exist in distil db.\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n ]\n\n # Insert project_0 and project_1 in the database, project_0 is not in\n # keystone anymore.\n project_0_collect = datetime(2017, 5, 17, 19)\n db_api.project_add(\n {\n 'id': '000',\n 'name': 'project_0',\n 'description': 'deleted',\n },\n project_0_collect\n )\n project_1_collect = datetime(2017, 5, 17, 20)\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n project_1_collect\n )\n\n svc = collector.CollectorService()\n svc.collect_usage()\n\n self.assertEqual(2, mock_collect_usage.call_count)\n mock_collect_usage.assert_called_with(\n {'id': '222', 'name': 'project_2', 'description': ''},\n [(project_1_collect, project_1_collect + timedelta(hours=1))]\n )\n\n @mock.patch(\n 'distil.collector.ceilometer.CeilometerCollector.collect_usage')\n @mock.patch('distil.common.openstack.get_ceilometer_client')\n @mock.patch('distil.common.openstack.get_projects')\n def test_last_collect_ignore_project(self, mock_get_projects, mock_cclient,\n mock_collect_usage):\n self.override_config('collector', ignore_tenants=['project_2'])\n\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n ]\n\n project1_time = datetime(2017, 5, 17, 20)\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n project1_time\n )\n project2_time = datetime(2017, 5, 17, 19)\n db_api.project_add(\n {\n 'id': '222',\n 'name': 'project_2',\n 'description': '',\n },\n project2_time\n )\n\n svc = collector.CollectorService()\n svc.collect_usage()\n\n mock_collect_usage.assert_called_once_with(\n {'id': '111', 'name': 'project_1', 'description': ''},\n [(project1_time, project1_time + timedelta(hours=1))]\n )\n\n @mock.patch('distil.common.openstack.get_ceilometer_client')\n @mock.patch('distil.common.openstack.get_projects')\n @mock.patch('distil.db.api.get_project_locks')\n def test_project_order_ascending(self, mock_get_lock, mock_get_projects,\n mock_cclient):\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n {'id': '333', 'name': 'project_3', 'description': ''},\n {'id': '444', 'name': 'project_4', 'description': ''},\n ]\n\n # Insert a project in the database in order to get last_collect time.\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': 
'',\n },\n datetime.utcnow() - timedelta(hours=2)\n )\n\n svc = collector.CollectorService()\n svc.collector = mock.Mock()\n svc.collect_usage()\n\n expected_list = ['111', '222', '333', '444']\n actual_list = [call_args[0][0]\n for call_args in mock_get_lock.call_args_list]\n self.assertEqual(expected_list, actual_list)\n\n @mock.patch('distil.common.openstack.get_ceilometer_client')\n @mock.patch('distil.common.openstack.get_projects')\n @mock.patch('distil.db.api.get_project_locks')\n def test_project_order_descending(self, mock_get_lock, mock_get_projects,\n mock_cclient):\n self.override_config('collector', project_order='descending')\n\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n {'id': '333', 'name': 'project_3', 'description': ''},\n {'id': '444', 'name': 'project_4', 'description': ''},\n ]\n\n # Insert a project in the database in order to get last_collect time.\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n datetime.utcnow() - timedelta(hours=2)\n )\n\n svc = collector.CollectorService()\n svc.collector = mock.Mock()\n svc.collect_usage()\n\n expected_list = ['444', '333', '222', '111']\n actual_list = [call_args[0][0]\n for call_args in mock_get_lock.call_args_list]\n self.assertEqual(expected_list, actual_list)\n\n @mock.patch('distil.common.openstack.get_ceilometer_client')\n @mock.patch('distil.common.openstack.get_projects')\n @mock.patch('distil.db.api.get_project_locks')\n def test_project_order_random(self, mock_get_lock, mock_get_projects,\n mock_cclient):\n self.override_config('collector', project_order='random')\n\n mock_get_projects.return_value = [\n {'id': '111', 'name': 'project_1', 'description': ''},\n {'id': '222', 'name': 'project_2', 'description': ''},\n {'id': '333', 'name': 'project_3', 'description': ''},\n {'id': '444', 'name': 'project_4', 'description': ''},\n ]\n\n # Insert a project in the database in order to get last_collect time.\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n datetime.utcnow() - timedelta(hours=2)\n )\n\n svc = collector.CollectorService()\n svc.collector = mock.Mock()\n svc.collect_usage()\n\n unexpected_list = ['111', '222', '333', '444']\n actual_list = [call_args[0][0]\n for call_args in mock_get_lock.call_args_list]\n self.assertNotEqual(unexpected_list, actual_list)\n\n @mock.patch('os.kill')\n @mock.patch('distil.common.openstack.get_ceilometer_client')\n @mock.patch('distil.common.openstack.get_projects')\n def test_collect_with_end_time(self, mock_get_projects, mock_cclient,\n mock_kill):\n end_time = datetime.utcnow() + timedelta(hours=0.5)\n end_time_str = end_time.strftime(constants.iso_time)\n self.override_config(collect_end_time=end_time_str)\n\n mock_get_projects.return_value = [\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': 'description'\n }\n ]\n # Insert the project info in the database.\n db_api.project_add(\n {\n 'id': '111',\n 'name': 'project_1',\n 'description': '',\n },\n datetime.utcnow()\n )\n\n srv = collector.CollectorService()\n srv.thread_grp = mock.Mock()\n srv.collect_usage()\n\n self.assertEqual(1, srv.thread_grp.stop.call_count)\n self.assertEqual(1, 
mock_kill.call_count)\n","sub_path":"distil/tests/unit/service/test_collector.py","file_name":"test_collector.py","file_ext":"py","file_size_in_byte":11230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"497989995","text":"# -*- coding: utf-8 -*-\nimport requests\n\n\nclass FacebookApi:\n FB_API_URL = 'https://graph.facebook.com/v2.6'\n\n def __init__(self, verify_token, page_access_token):\n self._verify_token = verify_token\n self._page_access_token = page_access_token\n\n @staticmethod\n def from_config(config):\n return FacebookApi(verify_token=config.VERIFY_TOKEN, page_access_token=config.PAGE_ACCESS_TOKEN)\n\n def verify(self, query_args):\n if query_args.get('hub.verify_token') == self._verify_token:\n return query_args.get('hub.challenge')\n else:\n return query_args\n\n def post(self, api_suffix, json):\n return requests.post(\n self.FB_API_URL + api_suffix,\n params={\n 'access_token': self._page_access_token\n },\n json=json\n )\n\n def get(self, api_suffix, params):\n params.update({'access_token': self._page_access_token})\n return requests.get(\n self.FB_API_URL + api_suffix,\n params=params\n )\n\n def send_message(self, client_id, text):\n return self.post(\n '/me/messages',\n {\n 'messaging_type': 'RESPONSE',\n 'message': {\n 'text': text\n },\n 'recipient': {\n 'id': client_id\n },\n 'notification_type': 'regular'\n }\n ).json()\n\n def send_tag_message(self, client_id, text, tag='ACCOUNT_UPDATE'):\n return self.post(\n '/me/messages',\n {\n 'messaging_type': 'MESSAGE_TAG',\n 'tag': tag,\n 'message': {\n 'text': text\n },\n 'recipient': {\n 'id': client_id\n },\n 'notification_type': 'regular'\n }\n ).json()\n\n @staticmethod\n def is_user_message(message):\n return (\n message.get('message') and\n message['message'].get('text') and\n not message['message'].get('is_echo')\n )\n\n def get_client_info(self, id):\n fields = [\n 'name',\n 'first_name',\n 'last_name',\n 'profile_pic',\n 'locale',\n 'timezone',\n 'gender'\n ]\n response = self.get(\n api_suffix='/{id}'.format(id=id),\n params={'fields': ','.join(fields)}\n ).json()\n result = {field: response.get(field) for field in fields}\n result['id'] = id\n if result.get('profile_pic'):\n result['profile_pic'] = requests.get(result.get('profile_pic')).raw.read()\n return result\n","sub_path":"services/web/project/facebook_api.py","file_name":"facebook_api.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"488888462","text":"import sys\nimport dolfin as df \nimport numpy as np \nimport os \nimport time as time_module \nimport matplotlib.pyplot as plt\nimport csv \nimport yaml #version higher than 5.0\n\ndf.parameters['ghost_mode']= 'shared_facet' #for parallel program \n#My library \nfrom modules.SystemMatrix import SystemMatrix # <--System Matrix class\nfrom modules.Mesh import H5Mesh # <-- Mesh class\nfrom modules.FunctionSpace import FunctionSpace #<--Function Space Class\nfrom modules.FormulateVariationalProblem import FormulateVariationalProblem\nfrom modules.Solver import Solver\n\n#Inputs \nif len(sys.argv) >1 :\n user_given_input_file= sys.argv[1]\nelse:\n print(\"Provide an input file\")\n quit()\n\nwith open(user_given_input_file,'r') as input_file:\n my_input_cls = yaml.load(input_file, Loader=yaml.FullLoader)\n\n#projection = False\n#manual = False\n#automatic = True\n\nfor current_mesh in range(len(my_input_cls[\"mesh_list\"])):\n #my_current_mesh = 
my_input_cls['mesh_list'][0] \n my_current_mesh = my_input_cls['mesh_list'][current_mesh] \n #step-1 : Convert current_mesh_name into a char_list \n #step-2: Select the 4th-to-last element in the char_list \n #Step-3: Convert the 4th-to-last element to integer \n #Now I can run any single mesh I want in whichever order\n mesh_number=0 #For saving files according to mesh number\n mesh_number = int(list(my_current_mesh)[-4]) \n print(mesh_number)\n\n\n #MESH\n my_mesh_cls = H5Mesh(my_current_mesh) #<-- Class instance is created\n #print(my_current_mesh) \n print(\"Maximum value of the mesh: \", my_mesh_cls.mesh.hmax()) \n #df.plot(my_mesh_cls.mesh)\n\n\n #FUNCTION SPACE\n problem_type = my_input_cls['problem_type']\n number_of_moments = my_input_cls['number_of_moments']\n my_function_space_cls = FunctionSpace(my_input_cls, my_mesh_cls) #<-- class instance\n my_function_space_cls.set_function_space()\n\n #SYSTEM MATRIX\n my_system_matrix_cls = SystemMatrix(my_input_cls, my_mesh_cls) # <-- class instance\n my_system_matrix_cls.convert_to_ufl_form()\n\n #VARIATIONAL PROBLEM\n my_var_prob_cls = FormulateVariationalProblem(\n my_input_cls,\n my_mesh_cls,\n my_system_matrix_cls,\n my_function_space_cls\n ) #<-- class instance\n my_var_prob_cls.create_lhs()\n my_var_prob_cls.create_rhs()\n\n #SOLVER\n my_solver_cls = Solver(my_input_cls, my_function_space_cls, my_var_prob_cls) #<-- class instance\n if problem_type == 'nonlinear':\n my_solver_cls.inbuilt_newton_solver()\n u = my_solver_cls.u\n else:\n my_solver_cls.inbuilt_linear_solver()\n u = my_solver_cls.u_Function\n\n #===============\n #Post-processing\n #===============\n sol = u.split()\n\n def write_func(field_name, variable_name, mesh_num):\n xdmffile_u = df.XDMFFile(df.MPI.comm_world,\n 'results_mathematica/{0}_{1}.xdmf'\n .format(variable_name,mesh_num))\n xdmffile_u.write(field_name)\n xdmffile_u.close()\n for i in range(my_input_cls[\"number_of_moments\"]):\n write_func(sol[i], i, mesh_number)\n print(\"Program terminated successfully\")\n moment_order = my_input_cls[\"moment_order\"]\n\n def ErrorCalculation(exact_sol,numerical_sol):\n #interpolate on the mesh\n es = df.interpolate(exact_sol,my_function_space_cls.V.sub(0).collapse())\n ns = df.interpolate(numerical_sol,my_function_space_cls.V.sub(0).collapse())\n #compute values at the vertex\n err_linf = np.max(np.abs(es.compute_vertex_values()- ns.compute_vertex_values()))\n #err_l2 = np.linalg.norm(es.compute_vertex_values()-ns.compute_vertex_values())\n err_l2 = df.errornorm(es,ns,\"L2\")#fenics inbuilt norm calculator\n max_l2= np.linalg.norm(es.compute_vertex_values())\n max_linf = np.max(np.abs(es.compute_vertex_values())) or 1\n normalised_err_l2= err_l2/max_linf\n normalised_err_linf = err_linf/max_linf\n return normalised_err_l2, normalised_err_linf\n def PressureErrorCalculation(exact_sol,numerical_sol1,numerical_sol2):\n #interpolate on the mesh\n es = interpolate(exact_sol, my_function_space_cls.V.sub(0).collapse())\n ns1 = interpolate(numerical_sol1, my_function_space_cls.V.sub(0).collapse())\n ns2 = interpolate(numerical_sol2, my_function_space_cls.V.sub(0).collapse())\n #compute values at the vertex\n #err_l2 = np.linalg.norm(es.compute_vertex_values()-\n # (ns1.compute_vertex_values()+ns2.compute_vertex_values()))\n err_l2 = df.errornorm(es,ns1+ns2,\"L2\")#fenics inbuilt norm calculator\n err_linf = np.max(np.abs(es.compute_vertex_values()-\n (ns1.compute_vertex_values()+ns2.compute_vertex_values())))\n max_l2= np.linalg.norm(es.compute_vertex_values())\n max_linf = 
np.max(np.abs(es.compute_vertex_values())) or 1\n normalised_err_l2= err_l2/max_linf\n normalised_err_linf = err_linf/max_linf\n return normalised_err_l2, normalised_err_linf\n\n if my_input_cls['moment_order']== 3:\n with open(\"01_coeffs.cpp\", \"r\") as file:\n exact_solution_cpp_code = file.read()\n load_value = df.compile_cpp_code(exact_solution_cpp_code)\n #Temperature\n t_e = df.CompiledExpression(load_value.Temperature(),degree=2)\n t_l2,t_linf = ErrorCalculation(t_e,sol[0])\n #Heat flux\n sx_e = df.CompiledExpression(load_value.Heatfluxx(),degree=2)\n sx_l2,sx_linf = ErrorCalculation(sx_e,sol[1])\n sy_e = df.CompiledExpression(load_value.Heatfluxy(),degree=2)\n sy_l2,sy_linf = ErrorCalculation(sy_e,sol[2])\n errors =[\n t_l2, t_linf,\n sx_l2, sx_linf,\n sy_l2,sy_linf ,\n ]\n print(errors) \n if my_input_cls['moment_order']== 'nono-6':\n with open(\"01_coeffs.cpp\", \"r\") as file:\n exact_solution_cpp_code = file.read()\n load_value = compile_cpp_code(exact_solution_cpp_code)\n #Pressure\n #p_e = CompiledExpression(load_value.Pressure(),degree=2)\n #p_l2, p_linf = PressureErrorCalculation(p_e,sol[0],sol[3])\n #velocity\n ux_e = CompiledExpression(load_value.Velocityx(),degree=2)\n ux_l2,ux_linf = ErrorCalculation(ux_e,sol[1])\n uy_e = CompiledExpression(load_value.Velocityy(),degree=2)\n uy_l2,uy_linf = ErrorCalculation(uy_e,sol[2])\n #Pressure\n t_e = CompiledExpression(load_value.Pressure(),degree=2)\n t_l2,t_linf = ErrorCalculation(t_e,sol[0])\n #Stress\n sxx_e = CompiledExpression(load_value.Stressxx(),degree=2)\n sxx_l2,sxx_linf = ErrorCalculation(sxx_e,sol[4])\n sxy_e = CompiledExpression(load_value.Stressxy(),degree=2)\n sxy_l2,sxy_linf = ErrorCalculation(sxy_e,sol[5])\n syy_e = CompiledExpression(load_value.Stressyy(),degree=2)\n syy_l2,syy_linf = ErrorCalculation(syy_e,sol[6])\n #Heat flux\n #sx_e = CompiledExpression(load_value.Heatfluxx(),degree=2)\n #sx_l2,sx_linf = ErrorCalculation(sx_e,sol[7])\n #sy_e = CompiledExpression(load_value.Heatfluxy(),degree=2)\n #sy_l2,sy_linf = ErrorCalculation(sy_e,sol[8])\n errors =[\n t_l2, t_linf,\n ux_l2, ux_linf,\n uy_l2,uy_linf ,\n sxx_l2,sxx_linf,\n sxy_l2,sxy_linf,\n syy_l2,syy_linf\n ]\n print(errors) \n\n\n#%%%%%%%%%%%%%%%%% Heat system Debug norm calculatation \n###########################################################\n'''\n #interpolating the function values on function space\n def interpolate_func(exact_sol,numerical_sol):\n field_exact = df.interpolate(exact_sol, my_function_space_cls.V.sub(0).collapse())\n field_numeric = df.interpolate(numerical_sol,my_function_space_cls.V.sub(0).collapse())\n return field_exact, field_numeric\n ##\n #Calculating L2 and Linf error\n def error_calc(field_exact,field_numerical):\n max_field_exact = np.max(np.abs(field_exact.compute_vertex_values())) or 1\n err_L2 = df.errornorm(field_exact,field_numerical,\"L2\")\n err_linf = np.max(np.abs(\n field_exact.compute_vertex_values()\n - field_numerical.compute_vertex_values()))\n return max_field_exact,err_L2,err_linf\n\n ##OLD CODE BLOCK TO READ EXACT SOLUTION FROM CPP FILES\n with open(\"01_coeffs.cpp\", \"r\") as file:\n exact_solution_cpp_code = file.read()\n load_value = df.compile_cpp_code(exact_solution_cpp_code)\n pressure_exact = df.CompiledExpression(load_value.Temperature(),degree=2)\n #sx_exact = CompiledExpression(load_value.VelocityX(),degree=2)\n #sy_exact = CompiledExpression(load_value.VelocityY(),degree=2)\n #pressure\n field_e_p,field_p = interpolate_func(pressure_exact,sol[0])\n #write_func(field_e_p,'e_p')\n 
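 # Note (added comment): error_calc, defined just above, returns the tuple
 # (max abs value of the exact field, absolute L2 error, absolute Linf error),
 # so the print below divides by max_exact_p to report normalised errors.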
max_exact_p,p_L2,p_linf = error_calc(field_e_p,field_p) \n print(\"Theta L2,Linf :\",p_L2/max_exact_p,p_linf/max_exact_p)\n'''\n\n","sub_path":"f2me/f2me.py","file_name":"f2me.py","file_ext":"py","file_size_in_byte":9510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"242598268","text":"import csv\r\n\r\nfrom flask import Flask, render_template, request, redirect\r\nfrom wtforms import Form, BooleanField, StringField, PasswordField, validators, IntegerField, DecimalField\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n print(\"We received GET\")\r\n return render_template(\"homepage.html\")\r\n\r\n\r\n@app.route('/import/', methods=['GET'])\r\ndef imp():\r\n return redirect(\"/product/\")\r\n\r\n\r\n@app.route('/export/', methods=['GET'])\r\ndef exp():\r\n exp_fuc()\r\n return redirect(\"/product/\")\r\n\r\n\r\n@app.route('/product/', methods=['GET', 'POST'])\r\ndef list_products():\r\n print(\"We received GET\")\r\n form = ProductForm(request.form)\r\n if request.method == 'POST' and form.validate():\r\n print(form.data)\r\n product = Product(name=form.data['name'], unit=form.data['unit'], unit_price=form.data['unit_price'], quantity=form.data['quantity'])\r\n items2[product.name] = product\r\n return render_template(\"product_list.html\", items=items2, form=form)\r\n\r\n\r\n# the route needs a <product_name> placeholder so Flask can pass it to the view\r\n@app.route('/sell/<product_name>/', methods=[\"GET\", \"POST\"])\r\ndef sell_product(product_name):\r\n print(\"We received GET\")\r\n form = ProductSale(request.form)\r\n product = items2[product_name]\r\n if request.method == 'POST' and form.validate():\r\n print(form.data)\r\n ilosc = int(form.data['ilosc'])\r\n product.quantity -= ilosc\r\n return render_template(\"sell_product.html\", product=product, form=form)\r\n\r\n\r\nclass Product:\r\n def __init__(self, name, unit, unit_price, quantity):\r\n self.name = name\r\n self.unit = unit\r\n self.unit_price = unit_price\r\n self.quantity = quantity\r\n\r\n\r\nclass ProductForm(Form):\r\n name = StringField('Name', [validators.Required()])\r\n quantity = IntegerField('Quantity', [validators.Required()])\r\n unit = IntegerField('Unit', [validators.Required()])\r\n unit_price = DecimalField('Unit price', [validators.Required()])\r\n\r\n\r\nclass ProductSale(Form):\r\n ilosc = IntegerField('Ilość', [validators.Required()])\r\n\r\n\r\nproduct_1 = Product(name=\"Ryż\", unit=\"kg\", unit_price=10, quantity=93)\r\nproduct_2 = Product(name=\"Sól\", unit=\"kg\", unit_price=2.34, quantity=7)\r\nproduct_3 = Product(name=\"Ziemniaki\", unit=\"l\", unit_price=9, quantity=35)\r\nproduct_4 = Product(name=\"Buraki\", unit=\"m\", unit_price=8, quantity=1)\r\nproduct_5 = Product(name=\"Śledzie\", unit=\"kg\", unit_price=23, quantity=66)\r\nproduct_6 = Product(name=\"Wołowina\", unit=\"kg\", unit_price=33, quantity=34)\r\n\r\nitems = [product_1, product_2, product_3, product_4, product_5, product_6]\r\n\r\nitems2 = {product_1.name: product_1,\r\n product_2.name: product_2,\r\n product_3.name: product_3,\r\n product_4.name: product_4,\r\n product_5.name: product_5,\r\n product_6.name: product_6\r\n }\r\n\r\n\r\ndef exp_fuc():\r\n with open('items.csv', 'w', encoding='utf-8') as csvfile:\r\n csvwriter = csv.writer(csvfile)\r\n for key, value in items2.items():\r\n l = [value.name, value.quantity, value.unit, value.unit_price]\r\n csvwriter.writerow(l)\r\n\r\n\r\nif __name__ == '__main__':\r\n
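 # app.run() below starts Flask's built-in development server (default
 # http://127.0.0.1:5000); debug=True or port=... — both standard Flask
 # options — can be passed here if a different setup is needed.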
app.run()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"26952522","text":"from OpenGL.GL import *\nfrom ctypes import c_void_p, sizeof\nimport glm, math, time\nimport tkinter\nimport tkinter.ttk\nimport pyopengltk\n\nsh_vert = \"\"\"\n#version 460 core\n\nlayout (location = 0) in vec4 a_position;\nlayout (location = 1) in vec3 a_normal;\nlayout (location = 2) in vec3 a_uvw;\nout vec3 v_pos;\nout vec3 v_nv;\nout vec3 v_uvw;\nlayout (location = 0) uniform mat4 u_projection;\nlayout (location = 1) uniform mat4 u_view;\nlayout (location = 2) uniform mat4 u_model;\n\nvoid main() \n{\n mat3 normal_matrix = transpose(inverse(mat3(u_model)));\n vec4 world_pos = u_model * a_position;\n v_pos = world_pos.xyz;\n v_nv = normal_matrix * a_normal;\n v_uvw = a_uvw;\n gl_Position = u_projection * u_view * world_pos;\n}\n\"\"\"\n\nsh_frag = \"\"\"\n#version 460 core\n\nin vec3 v_pos;\nin vec3 v_nv;\nin vec3 v_uvw;\nout vec4 frag_color;\nlayout (location = 1) uniform mat4 u_view;\nlayout (location = 3) uniform vec3 u_light_direction = vec3(0.0, 1.0, 0.0); \nlayout (location = 4) uniform vec3 u_light_intensity = vec3(0.5);\n\nvec3 HUEtoRGB(in float H)\n{\n float R = abs(H * 6.0 - 3.0) - 1.0;\n float G = 2.0 - abs(H * 6.0 - 2.0);\n float B = 2.0 - abs(H * 6.0 - 4.0);\n return clamp( vec3(R,G,B), 0.0, 1.0 );\n}\n\nvoid main()\n{\n vec4 color = vec4(HUEtoRGB(v_uvw.z), 1.0);\n vec3 L = -normalize(u_light_direction);\n vec3 eye = inverse(u_view)[3].xyz;\n vec3 V = normalize(eye - v_pos);\n float face = sign(dot(v_nv, V));\n vec3 N = normalize(v_nv) * face;\n vec3 H = normalize(V + L);\n float ka = u_light_intensity[0];\n float kd = max(0.0, dot(N, L)) * u_light_intensity[1];\n float NdotH = max(0.0, dot(N, H));\n float sh = 100.0;\n float ks = pow(NdotH, sh) * u_light_intensity[2];\n frag_color = vec4(color.rgb * (ka + kd + ks), color.a);\n}\n\"\"\"\n\nv = [[-1,-1,1], [1,-1,1], [1,1,1], [-1,1,1], [-1,-1,-1], [1,-1,-1], [1,1,-1], [-1,1,-1]]\nt = [[0, 1], [1, 1], [1, 0], [0, 0]]\nn = [[0,0,1], [1,0,0], [0,0,-1], [-1,0,0], [0,1,0], [0,-1,0]]\ne = [[0,1,2,3], [1,5,6,2], [5,4,7,6], [4,0,3,7], [3,2,6,7], [1,0,4,5]]\nl = 1/math.sqrt(2)\nindices = [si*4+[0, 1, 2, 0, 2, 3][vi] for si in range(6) for vi in range(6)]\nattributes = []\nfor si in range(len(e)):\n for qi, vi in enumerate(e[si]):\n attributes += [v[vi][0]*l, v[vi][1]*l,v[vi][2]*l, *n[si], *t[qi], si/6]\nattributes = (GLfloat * len(attributes))(*attributes)\nindices = (GLuint * len(indices))(*indices)\nno_of_indices = len(indices)\n\nclass OpenGLApp(pyopengltk.OpenGLFrame):\n def __init__(self, *args, **kwds):\n super().__init__(*args, kwds) \n self.__opengl_initialized = False\n\n def initgl(self):\n if not self.__opengl_initialized:\n self.__opengl_initialized = True\n\n vendor, renderer = glGetString(GL_VENDOR).decode(\"utf-8\"), glGetString(GL_RENDERER).decode(\"utf-8\")\n version, glsl_version = glGetString(GL_VERSION).decode(\"utf-8\"), glGetString(GL_SHADING_LANGUAGE_VERSION).decode(\"utf-8\")\n major, minor = glGetInteger(GL_MAJOR_VERSION), glGetInteger(GL_MINOR_VERSION)\n extensions = [glGetStringi(GL_EXTENSIONS, i) for i in range(glGetInteger(GL_NUM_EXTENSIONS))]\n print(f\"\\n{vendor} / {renderer}\\n OpenGL: {version}\\n GLSL: {glsl_version}\\n Context {major}.{minor}\\n\")\n\n @GLDEBUGPROC\n def __CB_OpenGL_DebugMessage(source, type, id, severity, length, message, userParam):\n msg = message[0:length]\n 
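 # 'message' arrives as a raw ctypes byte pointer; slicing with the
 # driver-reported 'length' keeps only the valid bytes of this debug
 # record before it is decoded and printed below.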
print(msg.decode(\"utf-8\"))\n glDebugMessageCallback(__CB_OpenGL_DebugMessage, None)\n errors_only = False\n if errors_only:\n glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, None, GL_FALSE)\n glDebugMessageControl(GL_DEBUG_SOURCE_API, GL_DEBUG_TYPE_ERROR, GL_DONT_CARE, 0, None, GL_TRUE)\n else:\n glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, None, GL_TRUE)\n glEnable(GL_DEBUG_OUTPUT)\n glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS)\n glDebugMessageInsert(GL_DEBUG_SOURCE_APPLICATION, GL_DEBUG_TYPE_MARKER, 0, GL_DEBUG_SEVERITY_NOTIFICATION, -1, \"Starting debug messaging service\")\n\n vao = glGenVertexArrays(1)\n vbo = glGenBuffers(1)\n glBindVertexArray(vao)\n glBindBuffer(GL_ARRAY_BUFFER, vbo)\n glBufferData(GL_ARRAY_BUFFER, attributes, GL_STATIC_DRAW)\n glVertexAttribPointer(0, 3, GL_FLOAT, False, 9 * sizeof(GLfloat), None)\n glVertexAttribPointer(1, 3, GL_FLOAT, False, 9 * sizeof(GLfloat), c_void_p(3 * sizeof(GLfloat)))\n glVertexAttribPointer(2, 3, GL_FLOAT, False, 9 * sizeof(GLfloat), c_void_p(6 * sizeof(GLfloat)))\n glEnableVertexAttribArray(0)\n glEnableVertexAttribArray(1)\n glEnableVertexAttribArray(2)\n ebo = glGenBuffers(1)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices, GL_STATIC_DRAW)\n\n program_obj = glCreateProgram()\n for sh_type, sh_code in [(GL_VERTEX_SHADER, sh_vert), (GL_FRAGMENT_SHADER, sh_frag)]:\n shader_obj = glCreateShader(sh_type)\n glShaderSource(shader_obj, sh_code)\n glCompileShader(shader_obj)\n if not glGetShaderiv(shader_obj, GL_COMPILE_STATUS):\n raise Exception(glGetShaderInfoLog(shader_obj).replace(b'\\\\n', b'\\n'))\n glAttachShader(program_obj, shader_obj)\n glLinkProgram(program_obj)\n if not glGetProgramiv(program_obj, GL_LINK_STATUS):\n raise Exception(glGetProgramInfoLog(program_obj).replace(b'\\\\n', b'\\n'))\n glUseProgram(program_obj)\n glUniform3fv(3, 1, [-0.5, 1.0, -0.5])\n glUniform3f(4, 0.2, 0.8, 0.4)\n\n glEnable(GL_MULTISAMPLE) # default\n glEnable(GL_DEPTH_TEST)\n glClearColor(0.0, 0.0, 0.0, 0.0)\n self.start_time = time.time()\n\n glViewport(0, 0, self.width, self.height)\n aspect = self.width / self.height \n self.projection_matrix = glm.perspective(glm.radians(90), aspect, 0.1, 10) \n\n def redraw(self):\n elapsed_time = time.time() - self.start_time\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n \n view_matrix = glm.lookAt(glm.vec3(0, -3, 0), glm.vec3(0, 0, 0), glm.vec3(0, 0, 1))\n model_matrix = glm.rotate(glm.mat4(1), glm.radians(elapsed_time * 90), glm.vec3(0.5, 0, 1))\n\n glUniformMatrix4fv(0, 1, GL_FALSE, glm.value_ptr(self.projection_matrix))\n glUniformMatrix4fv(1, 1, GL_FALSE, glm.value_ptr(view_matrix))\n glUniformMatrix4fv(2, 1, GL_FALSE, glm.value_ptr(model_matrix))\n glDrawElements(GL_TRIANGLES, no_of_indices, GL_UNSIGNED_INT, None)\n\nif __name__ == '__main__':\n root = tkinter.Tk()\n app = OpenGLApp(root, width=640, height=480)\n app.pack(fill=tkinter.BOTH, expand=tkinter.YES)\n app.animate = 1\n app.after(100, app.printContext)\n app.mainloop()\n exit()\n","sub_path":"example/python/opengl_hello_cube/hello_cube_tkinter.py","file_name":"hello_cube_tkinter.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"287641669","text":"import paho.mqtt.client as mqtt\r\n\r\n# The callback for when the client receives a CONNACK response from the server.\r\ndef on_connect(client, userdata, flags, rc):\r\n\tprint(\"Connection returned result: 
\"+str(rc))\r\n\tfor i in range(10):\r\n\t\tclient.publish(topic = 'aland', payload = i, qos=1)\r\n\t\tprint('pub loop #' + str(i) + '\\n')\r\n\t#client.disconnect()\r\n# Subscribing in on_connect() means that if we lose the connection and\r\n# reconnect then subscriptions will be renewed.\r\n\r\ndef on_disconnect(client, userdata, rc):\r\n\tif rc != 0:\r\n\t\tprint('Unexpected Disconnect')\r\n\telse:\r\n\t\tprint('Expected Disconnect')\r\n# The default message callback.\r\n# (won’t be used if only publishing, but can still exist)\r\ndef on_publish(client, userdata, mid):\r\n\tprint('Published message with ID %i' % mid)\r\n\r\nclient = mqtt.Client()\r\n\r\nclient.on_connect = on_connect\r\nclient.on_disconnect = on_disconnect\r\nclient.on_publish = on_publish\r\n\r\nclient.connect('mqtt.eclipseprojects.io', 1883, 5)\r\nclient.loop_forever()","sub_path":"publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"241967948","text":"from bisect import insort\n\ndef mutate(nums, o, n):\n if o == 'r':\n try:\n idx = nums.index(n)\n except:\n return False\n nums.pop(idx)\n if len(nums) == 0:\n return False\n return True\n elif o == 'a':\n insort(nums, n)\n return True\n\n\ndef calc_median(nums):\n l = len(nums)\n m = l // 2\n if bool(l % 2):\n return nums[m]\n else:\n val = (nums[m - 1] + nums[m]) / 2\n if isinstance(val, float) and val.is_integer():\n return int(val)\n return val\n\n\ndef median(a,x,l):\n li = []\n \n for i in range(l):\n operation = a[i]\n num = x[i]\n mut = mutate(li, operation, num)\n if not mut:\n print('Wrong!')\n continue\n print(calc_median(li))\n \n\nN = int(input())\ns = []\nx = []\nfor i in range(0, N):\n tmp = input().strip().split(' ')\n a, b = [xx for xx in tmp]\n s.append(a)\n x.append(int(b))\nmedian(s,x,N)","sub_path":"Tree/MedianUpdates.py","file_name":"MedianUpdates.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"189736462","text":"# File for Character Class\n\nimport random\n\n#SEE THIS FOR UPDATES THAT MAY NEED MADE TO THIS CLASS TO KEEP FROM PASSING THROUGH WALLS\n#https://github.com/justinmeister/PyTMX-Examples/blob/master/Make%20Collideable%20Rects/main.py\n\nclass Character:\n \"\"\"Character Object Class\"\"\"\n def __init__(self, name, strength, dexterity, wisdom, intelligence, charisma, constitution, classChoice, raceChoice, sex):\n self.name = name\n self.strength = strength\n self.dexterity = dexterity\n self.wisdom = wisdom\n self.intelligence = intelligence\n self.charisma = charisma\n self.constitution = constitution\n self.classChoice = classChoice\n self.race = raceChoice\n self.sex = sex\n self.armorClass = 10\n self.equippedWeaponRight = None\n self.equippedWeaponLeft = None\n self.equippedArmorRight = None\n self.equippedArmorLeft = None\n self.equippedArmorTorso = None\n self.equippedArmorLegs = None\n self.equippedArmorArms = None\n self.equippedArmorWrists = None\n self.equippedArmorHead = None\n self.equippedArmorFeet = None\n self.inventory = []\n self.description = \"\"\n self.background = \"\"\n self.gp = 0\n self.sp = 0\n self.pp = 0\n self.cp = 0\n self.level = 1\n self.hp = 0\n self.mp = 0\n self.experiencePoints = 0\n self.alive = True\n\n def __repr__(self):\n print(\"Character, \" + self.name + \". 
Class, \" + self.classChoice)\n\n def __str__(self):\n print(self.name + \"\\nSex: \" + self.sex + \"\\nStrength: \" + str(self.strength) + \"\\nDexterity: \" + str(self.dexterity))\n print(\"Wisdom: \" + str(self.wisdom) + \"\\nIntelligence: \" + str(self.intelligence) + \"\\nCharisma: \" + str(self.charisma))\n print(\"Constitution: \" + str(self.constitution) +\"\\nRace: \" + self.race + \"\\nClass: \" + self.classChoice)\n\n def attack(self, targetEnemy, minDamage, maxDamage, damageType, unblockableDamage):\n \"\"\"Attack with equipped weapon\"\"\"\n if targetEnemy.alive == True:\n\n potDamage = random.randint(minDamage, maxDamage)\n\n if potDamage > 0:\n if potDamage > targetEnemy.armorClass:\n damageDealt = potDamage - targetEnemy.armorClass\n targetEnemy.deadOrAlive()\n else:\n damageDealt = unblockableDamage\n targetEnemy.deadOrAlive()\n\n else:\n damageDealt = unblockableDamage\n targetEnemy.deadOrAlive()\n\n print(self.name + \" attacked \" + targetEnemy.name + \" for \" + str(damageDealt) + \" \" + damageType + \" damage!\")\n targetEnemy.hp -= damageDealt\n if targetEnemy.hp < 0:\n targetEnemy.hp = 0\n targetEnemy.deadOrAlive()\n\n else:\n print(targetEnemy.name + \" is already dead.\")\n\n def equipWeaponRight(self, Weapon):\n \"\"\"Equip Weapon Right Hand\"\"\"\n self.equippedWeaponRight = Weapon\n print(Weapon.name + \" equipped to right hand\")\n\n def equipWeaponLeft(self, Weapon):\n \"\"\"Equip Weapon Left Hand\"\"\"\n self.equippedWeaponLeft = Weapon\n print(Weapon.name + \" equipped to left hand\")\n\n def unequipWeaponLeft(self, Weapon):\n \"\"\"Unequip Weapon from Left Hand\"\"\"\n print(Weapon.name + \" unequipped\")\n self.equipWeaponLeft(None)\n\n def unequipWeaponRight(self, Weapon):\n \"\"\"Unequip Weapon from Right Hand\"\"\"\n print(Weapon.name + \" unequipped\")\n self.equipWeaponRight(None)\n\n def equipArmorRight(self, Armor):\n \"\"\"Equip Shield to Right Hand\"\"\"\n self.equippedArmorRight = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def equipArmorLeft(self, Armor):\n \"\"\"Equip Shield to Left Hand\"\"\"\n self.equippedArmorLeft = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def equipArmorTorso(self, Armor):\n \"\"\"Equip Armor to Torso\"\"\"\n self.equippedArmorTorso = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def equipArmorLegs(self, Armor):\n \"\"\"Equip Armor to Legs\"\"\"\n self.equippedArmorLegs = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def equipArmorArms(self, Armor):\n \"\"\"Equip Armor to Arms\"\"\"\n self.equippedArmorArms = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def equipArmorWrists(self, Armor):\n \"\"\"Equip Armor to Wrists\"\"\"\n self.equippedArmorWrists = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def equipArmorHead(self, Armor):\n \"\"\"Equip Armor to Head\"\"\"\n self.equippedArmorHead = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += 
Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def equipArmorFeet(self, Armor):\n \"\"\"Equip Armor to Feet\"\"\"\n self.equippedArmorFeet = Armor\n if Armor != None:\n print(Armor.name + \" equipped\")\n self.armorClass += Armor.armorClassModifier\n print(\"AC is \" + str(self.armorClass))\n\n def unequipArmorTorso(self, Armor):\n \"\"\"Unequip Armor from Torso\"\"\"\n self.armorClass -= Armor.armorClassModifier\n print(Armor.name + \" unequipped\")\n self.equipArmorTorso(None)\n print(\"AC is \" + str(self.armorClass))\n\n def unequipArmorLegs(self, Armor):\n \"\"\"Unequip Armor from Legs\"\"\"\n self.armorClass -= Armor.armorClassModifier\n print(Armor.name + \" unequipped\")\n self.equipArmorLegs(None)\n print(\"AC is \" + str(self.armorClass))\n\n def unequipArmorArms(self, Armor):\n \"\"\"Unequip Armor from Arms\"\"\"\n self.armorClass -= Armor.armorClassModifier\n print(Armor.name + \" unequipped\")\n self.equipArmorArms(None)\n print(\"AC is \" + str(self.armorClass))\n\n def unequipArmorWrists(self, Armor):\n \"\"\"Unequip Armor from Wrists\"\"\"\n self.armorClass -= Armor.armorClassModifier\n print(Armor.name + \" unequipped\")\n self.equipArmorWrists(None)\n print(\"AC is \" + str(self.armorClass))\n\n def unequipArmorHead(self, Armor):\n \"\"\"Unequip Armor from Head\"\"\"\n self.armorClass -= Armor.armorClassModifier\n print(Armor.name + \" unequipped\")\n self.equipArmorHead(None)\n print(\"AC is \" + str(self.armorClass))\n\n def unequipArmorFeet(self, Armor):\n \"\"\"Unequip Armor from Feet\"\"\"\n self.armorClass -= Armor.armorClassModifier\n print(Armor.name + \" unequipped\")\n self.equipArmorFeet(None)\n print(\"AC is \" + str(self.armorClass))\n\n def addGP(self, gp):\n \"\"\"adds to current gp\"\"\"\n self.gp += gp\n\n def addSP(self, sp):\n \"\"\"adds to current sp\"\"\"\n self.sp += sp\n\n def addPP(self, pp):\n \"\"\"adds to current pp\"\"\"\n self.pp += pp\n\n def addCP(self, cp):\n \"\"\"adds to current cp\"\"\"\n self.cp += cp\n\n def addXP(self, xp):\n \"\"\"adds to current xp\"\"\"\n self.experiencePoints += xp\n\n def addHP(self, hp):\n \"\"\"adds hp to current value\"\"\"\n self.hp += hp\n\n def getHp(self):\n hitpoints = self.name + \" Current HP=\" + str(self.hp)\n return hitpoints\n\n def getMoney(self):\n money = \"PP=\" + str(self.pp) + \", GP=\" + str(self.gp) + \", SP=\" + str(self.sp) + \", CP=\" + str(self.cp)\n return money\n\n def checkLevels(self, characterClass):\n if characterClass.upper() == \"FIGHTER\":\n lvlXP = {1000:2, 3000:3, 6000:4, 10000:5, 15000:6, 21000:7}\n elif characterClass.upper() == \"CLERIC\":\n lvlXP = {1000:2, 4000:3, 8000:4, 13000:5, 19000:6, 26000:7}\n elif characterClass.upper() == \"ROGUE\":\n lvlXP = {1000:2, 3000:3, 5000:4, 9000:5, 14000:6, 20000:7}\n elif characterClass.upper() == \"WIZARD\":\n lvlXP = {1000:2, 3000:3, 7000:4, 12500:5, 19500:6, 27000:7}\n elif characterClass.upper() == \"BARD\":\n lvlXP = {1000:2, 3000:3, 6000:4, 10000:5, 15000:6, 21000:7}\n\n return lvlXP\n\n def levelUp(self, lvlXP):\n for i in lvlXP:\n if self.experiencePoints > i:\n self.level = lvlXP[i]\n print(self.name + \" is now level \" + str(self.level))\n\n def deadOrAlive(self):\n if self.hp <= 0:\n self.hp = 0\n self.alive = False\n print(self.name + \" has died...\")\n\n def addItemToInventory(self, item):\n \"\"\"Add and Item to Inventory\"\"\"\n self.inventory.append(item)\n print(item.name + \" was added to inventory\")\n\n def useItem(self, item):\n \"\"\"Use an Item from Inventory\"\"\"\n print(\"Used \" + 
item.name)\n self.inventory.remove(item)\n\n def pickUpItem(self, item):\n pickup = input(\"Would you like to pick up \" + item.name + \" ? (Y/N)\")\n if pickup.upper() == 'Y':\n self.addItemToInventory(item)\n else:\n pass\n\n def getWeaponTypeRight(self):#set this to return 'hands' if no weapon equipped\n return self.equippedWeaponRight.weaponType\n\n\n# MAKE MONSTERS A CHILD CLASS OF CHARACTER CLASS, SINCE CHARACTERS CAN BE ENEMIES ALSO\nclass Monster(Character):\n\n def __init__(self, name, strength, dexterity, wisdom, intelligence, charisma, constitution, classChoice, raceChoice, sex):\n Character.__init__(self, name, strength, dexterity, wisdom, intelligence, charisma, constitution, classChoice, raceChoice, sex)\n self.item = None\n","sub_path":"Character.py","file_name":"Character.py","file_ext":"py","file_size_in_byte":10342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"471175622","text":"from controllers.RedisController import RedisController\nfrom extensions import api, app, config\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\napi.add_resource(RedisController, \"/lab5\")\n\n\nif __name__ == '__main__':\n\n try:\n writer_mode = config['writer']\n except KeyError:\n logging.error(\"Writer mode not found in config\")\n raise\n\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"204003498","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\nfrom tqdm import tqdm_notebook as tqdm\n\n# 1. Constants of the problem and helper functions\nmax_cars = 10\nmax_credit = 200\nmax_move = 5\ngamma = 0.9\nlambda_req_1 = 3\nlambda_req_2 = 4\nlambda_ret_1 = 3\nlambda_ret_2 = 2\ncredit = 10\n\ndef plot(pol):\n\n policy = np.reshape(pol, (max_cars+1, max_cars+1))\n\n v = np.zeros((max_cars+1, max_cars+1))\n\n for i in range(0, max_cars+1):\n for j in range(0, max_cars+1):\n v[i, j] = int(policy[i, j])\n\n data = v\n # create discrete colormap\n cmap = cmap = matplotlib.cm.YlOrBr # colors.ListedColormap(['black','violet', 'red', 'yellow', 'pink', 'orange', 'green', 'lime', 'blue', 'cyan'])\n norm = matplotlib.colors.Normalize(vmin=300, vmax=600)\n #norm = colors.BoundaryNorm(bounds, cmap.N)\n fig, ax = plt.subplots()\n ax.imshow(data, cmap=cmap, norm=norm)\n\n\n\n# 2. 
Environment and its Dynamics\n\n# State Space\nS_jack = []\nfor i in range(0,max_cars + 1):\n for j in range(0, max_cars + 1):\n S_jack.append([i, j])\n\nlen_state_space = len(S_jack)\n\n# Action Space\nA_jack = np.arange(-max_move, max_move + 1)\n\n# Poisson Distribution\ndef poisson(lambd, n):\n return (lambd**n * np.exp(-lambd))/np.math.factorial(n)\n\n\ndef compute_prob(s_diff, r, s_after_moving):\n\n\n s_diff1 = s_diff[0]\n s_diff2 = s_diff[1]\n\n min_req_1 = int(abs(min(0, s_diff1)))\n max_req_1 = int(s_after_moving[0])\n min_ret_1 = int(min_req_1 + s_diff1)\n max_ret_1 = int(max_req_1 + s_diff1)\n\n min_req_2 = int(abs(min(0, s_diff2)))\n max_req_2 = int(s_after_moving[1])\n min_ret_2 = int(min_req_2 + s_diff2)\n max_ret_2 = int(max_req_2 + s_diff2)\n\n\n p = 0\n for req1 in range(min_req_1, max_req_1 + 1):\n for req2 in range(min_req_2, max_req_2 + 1):\n ret1 = req1 + s_diff1\n ret2 = req2 + s_diff2\n # only request counts whose rental income matches the observed reward contribute\n earned = (req1 + req2) * credit\n if earned == r:\n p += poisson(lambda_req_1, req1) * poisson(lambda_ret_1, ret1) * poisson(lambda_req_2, req2) * poisson(lambda_ret_2, ret2)\n return p\n\ndef expected_return_jack(s, a, V):\n \"\"\"\n s_prime -> int np.array of length 2\n s -> int np.array of length 2\n a -> int\n \"\"\"\n a_array = np.array([-a, a]) # convenient to have this\n s_after_moving = np.minimum(s + a_array, [max_cars, max_cars])\n\n check = s_after_moving >= np.zeros(2)\n\n p = 0\n returns = -2 * abs(a)\n\n if check[0] and check[1]:\n\n for req1 in range(0, int(s_after_moving[0]) + 1):\n for ret1 in range(0, max_cars+1):\n for req2 in range(0, int(s_after_moving[1]) + 1):\n for ret2 in range(0, max_cars+1):\n\n reward = (req1 + req2) * credit\n\n # next state: rentals leave, returns arrive, capped at the lot capacity\n s_prime = np.minimum(s_after_moving + np.array([ret1 - req1, ret2 - req2]), [max_cars, max_cars])\n\n p = poisson(lambda_req_1, req1) * poisson(lambda_ret_1, ret1) *\\\n poisson(lambda_req_2, req2) * poisson(lambda_ret_2, ret2)\n\n returns += p * (reward + gamma * V[int(s_prime[0]), int(s_prime[1])])\n\n\n return returns\n\n else:\n return 0\n\n\n# This function wasn't written by me. 
Credit goes to: ShangtongZhang (GitHub)\ndef expected_return(state, action, state_value, constant_returned_cars):\n # initialize total return\n returns = 0.0\n\n # cost for moving cars\n returns -= 2 * abs(action)\n\n # go through all possible rental requests\n for rental_request_first_loc in range(0, 11):\n for rental_request_second_loc in range(0, 11):\n # moving cars\n num_of_cars_first_loc = int(min(state[0] - action, max_cars))\n num_of_cars_second_loc = int(min(state[1] + action, max_cars))\n\n # valid rental requests should be less than actual # of cars\n real_rental_first_loc = min(num_of_cars_first_loc, rental_request_first_loc)\n real_rental_second_loc = min(num_of_cars_second_loc, rental_request_second_loc)\n\n # get credits for renting\n reward = (real_rental_first_loc + real_rental_second_loc) * credit\n num_of_cars_first_loc -= real_rental_first_loc\n num_of_cars_second_loc -= real_rental_second_loc\n\n # probability for current combination of rental requests\n # (poisson above is poisson(lambd, n), so the rate argument comes first)\n prob = poisson(lambda_req_1, rental_request_first_loc) * \\\n poisson(lambda_req_2, rental_request_second_loc)\n\n if constant_returned_cars:\n # get returned cars, those cars can be used for renting tomorrow\n returned_cars_first_loc = lambda_ret_1\n returned_cars_second_loc = lambda_ret_2\n num_of_cars_first_loc = min(num_of_cars_first_loc + returned_cars_first_loc, max_cars)\n num_of_cars_second_loc = min(num_of_cars_second_loc + returned_cars_second_loc, max_cars)\n returns += prob * (reward + gamma * state_value[num_of_cars_first_loc, num_of_cars_second_loc])\n else:\n for returned_cars_first_loc in range(0, 11):\n for returned_cars_second_loc in range(0, 11):\n num_of_cars_first_loc_ = min(num_of_cars_first_loc + returned_cars_first_loc, max_cars)\n num_of_cars_second_loc_ = min(num_of_cars_second_loc + returned_cars_second_loc, max_cars)\n prob_ = poisson(lambda_ret_1, returned_cars_first_loc) * \\\n poisson(lambda_ret_2, returned_cars_second_loc) * prob\n returns += prob_ * (reward + gamma * state_value[num_of_cars_first_loc_, num_of_cars_second_loc_])\n return returns\n\n# 3. 
Policy Iteration\ndef policy_iteration():\n # Initialization\n V = np.zeros((max_cars+1, max_cars+1))\n policy = np.zeros((max_cars+1, max_cars+1))\n\n for i in range(0,5):\n\n # Policy Evaluation\n theta = 0.0001\n while True:\n delta = 0\n v = np.copy(V)\n for s in tqdm(S_jack):\n V[int(s[0]), int(s[1])] = expected_return(s, policy[int(s[0]), int(s[1])], V, False )\n norm = np.linalg.norm(v - V)\n delta = max(delta, norm)\n print(delta)\n if delta < theta:\n break\n\n\n\n # Policy Improvement\n for s in S_jack:\n V_a = []\n for a in A_jack:\n if (a >= 0 and s[0] >= a) or (a < 0 and s[1] >= abs(a)):\n V_a.append(expected_return(s, a, V, False))\n else:\n V_a.append(-float('inf'))\n\n policy[s[0], s[1]] = A_jack[V_a.index(max(V_a))]\n print(policy)\n\n return V, policy\n","sub_path":"Ex_4_2.py","file_name":"Ex_4_2.py","file_ext":"py","file_size_in_byte":7039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"17537848","text":"import pandas as pd\nimport xlsxwriter\nimport os\nimport datetime\n\n# find the files needed to process\nhotel_file = \"\"\n# cancel_file = \"\"\nbooking_file = \"\"\nfiles = [f for f in os.listdir('.') if os.path.isfile(f)]\n\nfor f in files:\n f = f.lower()\n if \"report\" in f and f[:2] != \"~$\":\n hotel_file = f\n if \"expedia\" in f and f[:2] != \"~$\":\n booking_file = f\n\nmsg = \"Files detected:\\n\" + booking_file + \"\\n\" + hotel_file + \"\\n\" + \"Press Enter key to begin the process.\"\na = raw_input(msg)\n\n\n# written for python 2.7 (uses raw_input above); pyinstaller is used for packaging\n#############################################################\n# check conditions and print out\ndef same_name(booking_name, hotel_name):\n if hotel_name[:5] == 'name\\n':\n hotel_name = hotel_name[5:]\n\n hotel_name = hotel_name.split(\",\")\n hotel_name = hotel_name[1][:] + \" \" + hotel_name[0]\n\n # print(\"H:\", hotel_name, \" W:\", booking_name)\n if hotel_name == booking_name:\n return True\n else:\n return False\n\n\ndef calc_hotel_price(hotel_price, arrival, departure):\n # print(\"input: \", booking_price, hotel_price, arrival, departure)\n\n if departure[:2] == \"De\":\n departure = departure[9:]\n arrival = arrival[7:]\n hotel_price = hotel_price[4:]\n\n date_format = \"%m/%d/%Y\"\n a = datetime.datetime.strptime(departure, date_format)\n b = datetime.datetime.strptime(arrival, date_format)\n delta = a - b\n # print(\"hotel price: \", float(delta.days) * float(hotel_price), \"booking price: \", float(booking_price))\n # print(float(delta.days) * float(hotel_price) == float(booking_price))\n return (float(delta.days) * float(hotel_price))\n\n\n######################################################################################\nmatch = 0\ndiff_price = 0\ncanceled = 0\nnot_found = 0\n\n# load and prepare the booking df\nxl = pd.ExcelFile(booking_file)\nbooking_df = xl.parse(header=0, keep_default_na=False)\n\nbooking_col_names = list(booking_df)\nprint(booking_col_names)\nfor name in booking_col_names:\n a = name.split(\" \")\n if a[-1] == \"\":\n b = \" \".join(a[:-1])\n else:\n b = \" \".join(a)\n booking_df = booking_df.rename(columns={name: b})\n\nbooking_col_names = list(booking_df)\n# print(booking_col_names)\n\nwebsite_df = booking_df[booking_df['Status'] == 'Booked']\n#\n# print(\"expedia_file\")\n# for index, row in website_df.iterrows():\n# print(\"index:::\", index, row['Guest'], row['Check-In'], 
row['Check-Out'])\n\n##########################################\n# loading the ok DF\nxl = pd.ExcelFile(hotel_file)\no_df = xl.parse(header=0, keep_default_na=False, )\n\n#\n# # print o_df\n# print(\"hotel_file\")\n# for index, row in o_df.iterrows():\n# print(\"index:::\", index, row['GuestName'], row['ArrivalDt'], row['DaysStay'], row['CancelDt'])\n\n# #########################################\nprint(\"begin comparison\")\n# create worksheet\nworkbook = xlsxwriter.Workbook('Expenses.xlsx')\nworksheet = workbook.add_worksheet()\nrowm = 0\ncoln = 0\nworksheet.write(rowm, coln, \"Confirmation Number\")\nworksheet.write(rowm, coln + 1, \"CRS Number\")\nworksheet.write(rowm, coln + 2, \"Name\")\nworksheet.write(rowm, coln + 3, \"Price\")\nworksheet.write(rowm, coln + 4, \"Description\")\n\nrowm += 1\n\nworkbook_nf = xlsxwriter.Workbook('Customers Not found.xlsx')\nworksheet_nf = workbook_nf.add_worksheet()\nrow_nf = 0\n\nprint(\"before check loop\")\ngood = 0\n##########################################################################\nfor index, row in website_df.iterrows():\n\n # initialzie what to compare\n name = row['Guest']\n check_in_date = row['Check-In']\n check_out_date = row['Check-Out']\n # print(\"check_in_date: \", check_in_date)\n check_in_date = str(check_in_date).split(\"-\")\n # print(check_in_date)\n # print(\"check_in_date after split: \", check_in_date)\n check_out_date = str(check_out_date).split(\"-\")\n # print(check_out_date)\n # price = row['Price'][:-3]\n found = False\n\n # check if in ok, but different price\n for index_ok, row_ok in o_df.iterrows():\n # print(\"test\")\n # print(int(row_ok['DaysStay']) * row_ok['1st Night Rate'])\n # print(\"end test\")\n if (same_name(name.lower(), row_ok['GuestName'].lower())):\n found = True\n\n # if name matched\n # check if in canceled\n print(\"checking if in cancel file first\")\n if row_ok['CancelDt'] != \"\":\n worksheet.write(rowm, coln, row['Confirmation #'])\n worksheet.write(rowm, coln + 1, row_ok['CRSBookNum'])\n worksheet.write(rowm, coln + 2, name)\n worksheet.write(rowm, coln + 3, int(row_ok['DaysStay']) * row_ok['1st Night Rate'])\n worksheet.write(rowm, coln + 4, \"Cancelled\")\n print(name, \" found in CANCEL file\")\n rowm += 1\n canceled += 1\n break\n\n # if not found in cancel file, we check the date\n # hotel date\n date_format = \"%m/%d/%Y\"\n arrival = row_ok['ArrivalDt']\n h_arrival = datetime.datetime.strptime(arrival, date_format)\n h_departure = h_arrival + datetime.timedelta(days=row_ok['DaysStay'])\n # + int(row_ok['DaysStay'])\n\n # booking.com date\n\n b_checkin = datetime.datetime(int(check_in_date[0]), int(check_in_date[1]), int(check_in_date[2][:2]))\n b_checkout = datetime.datetime(int(check_out_date[0]), int(check_out_date[1]), int(check_out_date[2][:2]))\n found = True\n match += 1\n\n # print('h_arrival: ', h_arrival)\n # print('h_departure: ', h_departure)\n # print(\"b_checkin: \", b_checkin)\n # print(\"b_checkout\", b_checkout)\n if not ((h_arrival == b_checkin) & (h_departure == b_checkout)):\n worksheet.write(rowm, coln, row['Confirmation #'])\n worksheet.write(rowm, coln + 1, row_ok['CRSBookNum'])\n worksheet.write(rowm, coln + 2, name)\n worksheet.write(rowm, coln + 3, int(row_ok['DaysStay']) * row_ok['1st Night Rate'])\n worksheet.write(rowm, coln + 4, \"Checked in, but different date\")\n\n print(name, \" Found in OK file, but different date\")\n rowm += 1\n break\n else:\n good += 1\n print(name,\" Everything match\")\n break\n\n if found == False:\n # print(name, \" Can't find 
customer Name\")\n not_found += 1\n worksheet_nf.write(row_nf, 0, name)\n row_nf += 1\n print(name, \" not found\")\nworkbook.close()\nworkbook_nf.close()\nprint(\"good:\", good)\nprint(\"match: \", match)\nprint(\"canceled: \", canceled)\nprint(\"not found: \", not_found)\n","sub_path":"working_windows_extension/version_4/best_western/expedia/best_western_expedia_py27_V4.py","file_name":"best_western_expedia_py27_V4.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"49684012","text":"\n# Conversion List - Tuple\naList = [55, 89, 144]\naTuple = tuple(aList)\naList = list(aTuple)\n\n# Conversion List - Set\naSet = set(aList)\naList = list(aSet)\n\n# Conversion Tuple - Set\naTuple = tuple(aSet)\naSet = set(aTuple)\n\n# Turn a list into a string (separate items with the defined separator)\nlistt = [\"Bunch\", \"of\", \"random\", \"words\"]\nstringg = \" \".join(listt)\n\n# Convert a string (a sentence) into a list of words\nlistt = stringg.split()\n\n# Convert a string (a sentence) into a list of lines\nlisttt = stringg.splitlines()\n","sub_path":"4 python cheet sheet/zkea_sequences_conversions.py","file_name":"zkea_sequences_conversions.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"510521785","text":"#! /usr/bin/env python\nimport os, sys\nimport ChickenOracle\n\ndef main():\n sys.path.append(\"../../../common/include/python\")\n import topcoder as tc\n with open(sys.argv[1], \"r\") as fi:\n input = fi.read()\n reader = tc.Reader(input)\n n = reader.next(\"int\")\n reader.next()\n eggCount = reader.next(\"int\")\n reader.next()\n lieCount = reader.next(\"int\")\n reader.next()\n liarCount = reader.next(\"int\")\n\n result = ChickenOracle.theTruth(n, eggCount, lieCount, liarCount)\n with open(sys.argv[2], \"w\") as fo:\n fo.write(tc.write(result, \"String\"))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"d1d2_under_80/ChickenOracle/solve/python/ChickenOracleRunner.py","file_name":"ChickenOracleRunner.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"652557458","text":"def termina_en_cero(lista):\n \"\"\"\n\n :param lista: list of numbers; should not be empty\n :return: new list with the numbers that end in zero\n \"\"\"\n\n if len(lista) == 0:\n return lista\n\n nueva_lista = []\n\n for i in range(len(lista)):\n # a number ends in zero exactly when it is divisible by 10\n if lista[i] % 10 == 0:\n nueva_lista.append(lista[i])\n\n return nueva_lista\n\n\n# execution\nlista1 = [4, 23, 40, -7, 0, 14, 1000, -760]\nprint(lista1)\nlista2 = termina_en_cero(lista1)\nprint(lista2)\n","sub_path":"Parcialitos/Primero/termina en cero.py","file_name":"termina en cero.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"298311859","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 6 11:34:21 2018\r\n\r\n@author: Nodar.Okroshiashvili\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\nIf you have this kind of data frame this script extracts each index/level as a new column.\r\nThis is useful to calculate covariance and correlation coefficient\r\n\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\n\r\ndf = {'C': {0: 16470, 1: 12000, 2: 3975, 3: 14170, 4: 71355, 5: 108455},\r\n 'A': {0: '022', 1: '022', 2: '022', 3: '023', 4: '023', 5: '023'},\r\n 'B': {0: '26-Jan-18', 1: 
'27-Jan-18', 2: '28-Jan-18', 3: '12-Jul-16', 4: '13-Jul-16', 5: '14-Jul-16'}}\r\n\r\ndf = pd.DataFrame(df)\r\n\r\ndf = pd.concat({k:v.reset_index(drop=True) for k, v in df.groupby('A')}, axis=1)\r\n\r\ndf.columns = df.columns.map('_'.join)\r\n\r\ndf.to_excel('result.xlsx')\r\n\r\n\r\n#%%\r\n\r\n# Read result file\r\nresult_df = pd.read_excel('result.xlsx')\r\n\r\n\r\n# Drop first two columns\r\nresult_df.drop(['022_A', '022_B'], axis=1, inplace=True)\r\n\r\n\r\n# Select every third columns. They are our interest\r\nx = result_df[result_df.columns[::3]]\r\n\r\n# Write result into excel file\r\nx.to_excel('our_interest.xlsx')\r\n\r\n#%%\r\n\r\n#Read the result file\r\nour_interest = pd.read_excel('our_interest.xlsx')\r\n\r\n\r\n# Compute Correlation coefficient\r\ncorrelation = our_interest.corr()\r\n\r\n# Write result into excel file\r\ncorrelation.to_excel('correlation_matrix.xlsx')\r\n\r\n\r\n# Compute Covariance\r\ncovariance = our_interest.cov()\r\n\r\n# Write result into excel file\r\ncovariance.to_excel('covariance_matrix.xlsx')\r\n\r\n#%%\r\n","sub_path":"Split Multi Level Data.py","file_name":"Split Multi Level Data.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"453932311","text":"# class Solution:\n# def fourSum(self,nums,target):\n# \"\"\"\n\n# :type nums: List[int]\n# :type target: int\n# :rtype: List[List[int]]\n# \"\"\"\n\n# ret,dict = set(),{}\n# numsLen = len(nums)\n# nums.sort()\n# for i in range(numsLen):\n# for j in range(i+1,numsLen):\n# key = nums[i] + nums[j]\n# if key not in dict.keys():\n# dict[key]=[(i,j)]\n# else:\n# dict[key].append((i,j))\n\n# for i in range(numsLen):\n# for j in range(i+1,numsLen):\n# temp = target - nums[i] - nums[j]\n# if temp in dict.keys():\n# for tempIndex in dict[temp]:\n# if tempIndex[0] > j:\n# ret.add((nums[i],nums[j], nums[tmpIndex[0]], nums[tmpIndex[1]])))\n# return [list(i) for i in ret]\nclass Solution:\n # 输入:nums = [1,0,-1,0,-2,2], target = 0\n # [-2,-1,0,0,1,2]\n # 输出:[[-2,-1,1,2],[-2,0,0,2],[-1,0,0,1]]\n def fourSum(self, nums, target): \n results = []\n nums.sort()\n for index_a,a in enumerate(nums):\n #选择a时候的去重\n if index_a >=1 and nums[index_a-1] == a:\n continue\n for index_b in range(index_a+1,len(nums)):\n b = nums[index_b]\n if index_b >index_a+1 and nums[index_b-1] == b:\n continue\n left = index_b + 1 \n right = len(nums)-1\n while left < right:\n c = nums[left]\n d = nums[right]\n total = a + b + c + d\n if target - total > 0:\n left = left + 1\n elif target - total < 0:\n right = right-1\n else:\n results.append([a,b,c,d])\n print(a,b,left,right)\n # a确定之后,后面的left和right指向的元素去重\n while left largestPalindrome and isPalindrome(product):\n largestPalindrome = product \n\n return largestPalindrome\n\ndef isPalindrome(n):\n return str(n) == str(n)[::-1]\n\nassert isPalindrome(0)\nassert isPalindrome(2)\nassert isPalindrome(11)\nassert isPalindrome(101)\nassert isPalindrome(10011001)\nassert not isPalindrome(10)\nassert not isPalindrome(10010)\nprint(largestPalindromicProduct())\n","sub_path":"p4/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"629342411","text":"######################################################################\n#\n# stage0.py - First stage of the pipeline. 
Handle data, create bad pixels mask...\n# More details in individual functions.\n\n#\n# dependencies:\n# numpy 1.8+\n# astropy 1.0+\n######################################################################\n\nimport numpy as np\nimport os\nfrom astropy.io import fits\nimport astropy.units as u\nimport sys\nimport json\n\nfrom pyDANDIA import config_utils\nfrom astropy.nddata import Cutout2D\n\nfrom pyDANDIA import metadata\nfrom pyDANDIA import pixelmasks\n#import metadata\n#import pixelmasks\nfrom pyDANDIA import logs\nfrom pyDANDIA import quality_control\nfrom pyDANDIA import bad_pixel_mask\nfrom pyDANDIA import image_handling\n\ndef run_stage0(setup):\n \"\"\"Main driver function to run stage 0: data preparation.\n The tasks of this stage are to ensure that all images are prepared for\n reduction, and to make sure the reduction metadata is up to date.\n Input: setup - an instance of the ReductionSetup class. See\n reduction_control.py\n Output: prepares the metadata file\n \"\"\"\n\n stage0_version = 'stage0 v0.1.1'\n\n log = logs.start_stage_log(setup.red_dir, 'stage0', version=stage0_version)\n log.info('Setup:\\n' + setup.summary() + '\\n')\n\n # find and update the pipeline config\n pipeline_config = read_the_config_file(setup.pipeline_config_dir, log=log)\n\n\n reduction_metadata = create_or_load_the_reduction_metadata(setup,\n setup.red_dir,\n metadata_name='pyDANDIA_metadata.fits',\n log=log)\n\n update_reduction_metadata_with_config_file(reduction_metadata,\n pipeline_config, log=log)\n\n\n # find all images\n\n all_images = reduction_metadata.find_all_images(setup, reduction_metadata,\n os.path.join(setup.red_dir, 'data'), log=log)\n\n # find and update the inst pipeline config\n\n image_name = all_images[0]\n image_structure = image_handling.determine_image_struture(os.path.join(setup.red_dir, 'data', image_name), log)\n\n inst_config_file_name = find_the_inst_config_file_name(setup, reduction_metadata, image_name,\n setup.pipeline_config_dir,\n image_index=image_structure['sci'],\n log=None)\n\n if inst_config_file_name == None:\n\n status = 'ERROR'\n report = 'Cannot find a pipeline configuration file for this dataset'\n\n return status, report, None\n\n inst_config = read_the_inst_config_file(setup.pipeline_config_dir, inst_config_file_name, log=log)\n update_reduction_metadata_with_inst_config_file(reduction_metadata,\n inst_config, log=log)\n\n\n\n # find images that need to be run, based on the metadata file, if any. 
If rerun_all = True, force a rereduction\n\n new_images = reduction_metadata.find_images_need_to_be_process(setup, all_images,\n stage_number=0, rerun_all=None, log=log)\n # create new rows on reduction status for new images\n reduction_metadata.update_reduction_metadata_reduction_status(new_images, stage_number=0, status=0, log=log)\n\n # construct the stamps if needed\n #central_pixel = bool(reduction_metadata.reduction_parameters[1]['CENTRAL_PIXEL'][0])\n central_pixel =json.loads(reduction_metadata.reduction_parameters[1]['CENTRAL_PIXEL'][0].lower())\n\n\n if reduction_metadata.stamps[1]:\n pass\n else:\n\n image_structure = image_handling.determine_image_struture(os.path.join(setup.red_dir, 'data',new_images[0]), log)\n\n open_image = open_an_image(setup, reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],\n new_images[0], log, image_index=image_structure['sci'])\n\n update_reduction_metadata_stamps(setup, reduction_metadata, open_image,\n stamp_size=(1000,1000),\n arcseconds_stamp_size=(110, 110),\n pixel_scale=None,\n number_of_overlaping_pixels=10, central_stamp=central_pixel,log=log)\n\n if len(new_images) > 0:\n\n update_reduction_metadata_headers_summary_with_new_images(setup,\n reduction_metadata, new_images, log=log)\n\n set_bad_pixel_mask_directory(setup, reduction_metadata,\n bpm_directory_path=os.path.join(setup.red_dir, 'data'),\n log=log)\n\n instrument_bpm = bad_pixel_mask.BadPixelMask()\n\n instrument_bpm.load_latest_instrument_mask(reduction_metadata.reduction_parameters[1]['INSTRID'][0],setup,log=log)\n\n logs.ifverbose(log, setup, 'Updating metadata with info on new images...')\n\n for new_image in new_images:\n image_structure = image_handling.determine_image_struture(os.path.join(setup.red_dir, 'data',new_image), log)\n\n open_image = open_an_image(setup, reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],\n new_image, log, image_index=image_structure['sci'])\n\n image_bpm = open_an_image(setup, reduction_metadata.data_architecture[1]['BPM_PATH'][0],\n new_image, log, image_index=image_structure['bpm'])\n\n bpm = bad_pixel_mask.construct_the_pixel_mask(setup, reduction_metadata,\n open_image, image_bpm, [1,3], log,\n low_level=0,\n instrument_bpm=instrument_bpm)\n\n save_the_pixel_mask_in_image(reduction_metadata, new_image, bpm)\n logs.ifverbose(log, setup, ' -> ' + new_image)\n\n reduction_metadata.update_reduction_metadata_reduction_status(new_images, stage_number=0, status=1, log=log)\n\n reduction_metadata.save_updated_metadata(\n reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'][0],\n reduction_metadata.data_architecture[1]['METADATA_NAME'][0],\n log=log)\n\n (status,report) = quality_control.verify_stage0_output(setup,log)\n\n logs.close_log(log)\n\n return status, report, reduction_metadata\n\ndef open_the_variables_catalog(variables_catalog_directory, variables_catalog_name):\n '''\n NOT IMPLEMENTED YET\n '''\n\n variable_catalog = None\n pass\n\n\ndef read_the_config_file(config_directory, config_file_name='config.json',\n log=None):\n '''\n This read the required informations from the config file.\n\n :param string config_directory: the directory to the config file\n :param string config_file_name: the name of the config file\n\n :return: the config file\n :rtype: dictionnary\n '''\n\n if os.path.isdir(config_directory) == False:\n raise IOError('Cannot find pipeline configuration directory '+config_directory)\n\n config_file_path = os.path.join(config_directory, config_file_name)\n\n if os.path.isfile(config_file_path) == 
False:\n raise IOError('Cannot find the configuration file '+config_file_path)\n\n pipeline_configuration = config_utils.read_config(config_file_path)\n\n if log != None:\n log.info('Read pipeline configuration from ' + config_file_path)\n\n return pipeline_configuration\n\n\ndef find_the_inst_config_file_name(setup, reduction_metadata, image_name, inst_config_directory, image_index=0,\n log=None):\n '''\n This found the name of the inst_config_file needs for the reduction.\n\n :param object reduction_metadata: the metadata object\n :param string image_name: the image name of the astropy fits object\n :param string inst_config_directory: the directory of the inst config file\n\n :param int image_index: the image index of the astropy fits object\n :param object log: the log object to add info in\n\n :return: the name of inst_config_file\n :rtype: string\n '''\n\n inst_config_files = [i for i in os.listdir(inst_config_directory) if ('inst_config' in i)]\n\n potential_cameras_names = [i.split('_')[-1][:-5] for i in inst_config_files]\n\n open_image = open_an_image(setup, reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],\n image_name, log, image_index=image_index)\n\n potential_inst_names = open_image.header.values()\n\n inst_config_file_name = None\n\n for name in potential_inst_names:\n\n if name in potential_cameras_names:\n good_camera_name = name\n inst_config_file_name = 'inst_config_' + good_camera_name + '.json'\n return inst_config_file_name\n\n if inst_config_file_name == None:\n raise ValueError('No instrument configuration found for the instrument IDs in the image header data')\n\n return None\n\n\ndef read_the_inst_config_file(inst_config_directory, inst_config_file_name='inst_config.json', log=None):\n '''\n This read the required informations from the config file, i.e the pipeline configuration.\n\n :param string inst_config_directory: the directory to the instrument config file\n :param string inst_config_file_name: the name of the instrument config file\n\n :return: the config file\n :rtype: dictionnary\n '''\n\n config_file_path = os.path.join(inst_config_directory, inst_config_file_name)\n\n instrument_configuration = config_utils.read_config(config_file_path)\n\n if log != None:\n log.info('Read instrument configuration from ' + config_file_path)\n\n return instrument_configuration\n\n\ndef update_reduction_metadata_with_inst_config_file(reduction_metadata,\n inst_config_dictionnary, log=None):\n '''\n Update the metadata with the config files\n\n :param object reduction_metadata: the metadata object\n :param dictionnary inst_config_dictionnary: a python dictionnary containing the instrument parameters\n\n '''\n\n keys = inst_config_dictionnary.keys()\n existing_keys_in_metadata = reduction_metadata.reduction_parameters[1].keys()\n for key in keys:\n\n try:\n\n value = inst_config_dictionnary[key]['value']\n format = inst_config_dictionnary[key]['format']\n unit = inst_config_dictionnary[key]['unit']\n\n if key.upper() in existing_keys_in_metadata:\n reduction_metadata.update_a_cell_to_layer('reduction_parameters', 0, key.upper(), value)\n\n else:\n\n reduction_metadata.add_column_to_layer('reduction_parameters', key, [value], format, unit)\n\n except:\n\n if log != None:\n log.info('Error in inst config file on key' + key)\n sys.exit(1)\n\n if log != None:\n log.info('Updated metadata with instrument configuration parameters')\n\n\ndef create_or_load_the_reduction_metadata(setup, output_metadata_directory,\n metadata_name='pyDANDIA_metadata.fits',\n log=None):\n 
'''\n This creates (new reduction) or load (ongoing reduction) the metadata file linked to this reduction.\n\n :param string output_metadata_directory: the directory where to place the metadata\n :param string metadata_name: the name of the metadata file\n :param boolean verbose: switch to True to have more informations\n\n :return: the metadata object\n :rtype: metadata object\n '''\n try:\n\n meta_data_exist = [i for i in os.listdir(output_metadata_directory) if (i == metadata_name)]\n\n if meta_data_exist == []:\n\n reduction_metadata = metadata.MetaData()\n\n reduction_metadata.create_metadata_file(output_metadata_directory, metadata_name)\n\n logs.ifverbose(log, setup,\n 'Successfully created the reduction metadata file')\n\n else:\n\n reduction_metadata = metadata.MetaData()\n reduction_metadata.load_all_metadata(output_metadata_directory, metadata_name)\n logs.ifverbose(log, setup, 'Successfully found the reduction metadata')\n except:\n\n logs.ifverbose(log, setup, 'No metadata created or loaded : check this!')\n\n sys.exit(1)\n\n return reduction_metadata\n\n\ndef set_bad_pixel_mask_directory(setup, reduction_metadata,\n bpm_directory_path=None,\n verbose=False, log=None):\n '''\n This found all the images.\n\n :param object reduction_metadata: the metadata object\n :param string images_directory_path: the directory of the images\n :param boolean verbose: switch to True to have more informations\n\n :return: the list of images (strings)\n :rtype: list\n '''\n\n if 'BPM_PATH' in reduction_metadata.data_architecture[1].keys():\n reduction_metadata.update_a_cell_to_layer('data_architecture', 0, 'BPM_PATH', bpm_directory_path)\n\n else:\n\n reduction_metadata.add_column_to_layer('data_architecture',\n 'BPM_PATH', [bpm_directory_path],\n new_column_format=None,\n new_column_unit=None)\n\n logs.ifverbose(log, setup, 'Set bad pixel mask directory to '+\\\n bpm_directory_path)\n\n\ndef open_an_image(setup, image_directory, image_name, log,\n image_index=0):\n '''\n Simply open an image using astropy.io.fits\n\n :param object reduction_metadata: the metadata object\n :param string image_directory: the image name\n :param string image_name: the image name\n :param string image_index: the image index of the astropy fits object\n\n :param boolean verbose: switch to True to have more informations\n\n :return: the opened image\n :rtype: astropy.image object\n '''\n image_directory_path = image_directory\n\n logs.ifverbose(log, setup,\n 'Attempting to open image ' + \\\n os.path.join(image_directory_path, image_name))\n\n try:\n\n image_data = fits.open(os.path.join(image_directory_path, image_name),\n mmap=True)\n image_data = image_data[image_index]\n\n logs.ifverbose(log, setup, image_name + ' open : OK')\n\n return image_data\n\n except IndexError:\n\n logs.ifverbose(log, setup, image_name + \\\n ' open : not OK! 
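open_an_image guards the HDU lookup so that a missing FITS extension yields None instead of an exception. A small runnable illustration of the same pattern with astropy (the file name is made up):

import numpy as np
from astropy.io import fits

# Build a throwaway single-extension FITS file, then index it defensively.
fits.HDUList([fits.PrimaryHDU(np.zeros((4, 4)))]).writeto('demo.fits', overwrite=True)

with fits.open('demo.fits') as f:
    try:
        hdu = f[3]        # extension 3 does not exist here
    except IndexError:
        hdu = None        # mirror the record: fall back to None
    assert hdu is None and f[0].data.shape == (4, 4)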
Cannot open FITS extension '+str(image_index))\n\n return None\n\n\ndef open_an_bad_pixel_mask(reduction_metadata, bpm_name, bpm_index=1, verbose=False):\n '''\n Simply open an image using astropy.io.fits\n\n :param object reduction_metadata: the metadata object\n :param string bpm_name: the bad pixel mask name\n :param string bpm_index: the bad pixel mask index of the astropy fits object\n\n :param boolean verbose: switch to True to have more informations\n\n :return: the opened bad pixel mask\n :rtype: astropy.image object\n\n WARNING: BAD PIXEL MASK FUNCTIONS HAVE BEEN MOVED TO bad_pixel_mask.py\n '''\n bpm_directory_path = reduction_metadata.data_architecture[1]['BPM_PATH'][0]\n\n try:\n\n image_data = fits.open(bpm_directory_path + bpm_name, mmap=True)\n image_data = image_data[bpm_index]\n\n if verbose == True:\n print(bpm_name + ' open : OK')\n\n return image_data\n except:\n if verbose == True:\n print(bpm_name + ' open : not OK!')\n\n return None\n\n\ndef save_the_pixel_mask_in_image(reduction_metadata, image_name, bpm):\n '''\n Construct the global pixel mask using a bitmask approach.\n\n :param object reduction_metadata: the metadata object\n :param string image_name: the name of the image\n :param array_like master_mask: the master mask which needs to be kept\n\n '''\n master_pixels_mask = fits.ImageHDU(bpm.master_mask)\n master_pixels_mask.name = 'pyDANDIA_PIXEL_MASK'\n\n open_image = fits.open(os.path.join(reduction_metadata.data_architecture[1]['IMAGES_PATH'][0], image_name))\n\n try:\n open_image['pyDANDIA_PIXEL_MASK'] = master_pixels_mask\n except:\n\n open_image.append(master_pixels_mask)\n\n open_image.writeto(os.path.join(reduction_metadata.data_architecture[1]['IMAGES_PATH'][0], image_name),\n overwrite=True)\n\n\ndef update_reduction_metadata_with_config_file(reduction_metadata,\n config_dictionnary, log=None):\n '''\n Update the metadata with the config files\n\n :param object reduction_metadata: the metadata object\n :param dictionnary config_dictionnary: a python dictionnary containing the pyDANDIA parameters\n\n '''\n\n keys = config_dictionnary.keys()\n\n data = []\n for key in keys:\n\n if key != 'psf_factors':\n\n try:\n data.append([key, config_dictionnary[key]['value'], config_dictionnary[key]['format'],\n config_dictionnary[key]['unit']])\n\n except:\n if log != None:\n log.info('Error in config file on key' + key)\n sys.exit(1)\n\n data = np.array(data)\n names = [i.upper() for i in data[:, 0]]\n formats = data[:, 2]\n units = data[:, 3]\n\n if reduction_metadata.reduction_parameters[1]:\n\n for index, key in enumerate(names):\n reduction_metadata.update_a_cell_to_layer('reduction_parameters', 0, key, data[index, 1])\n\n\n else:\n reduction_metadata.create_reduction_parameters_layer(names, formats, units, data[:, 1])\n\n\n data = []\n\n for i in range(0,len(config_dictionnary['psf_factors']['value']),1):\n\n data.append([str(i+1),\n config_dictionnary['psf_factors']['value'][i],\n 0.0])\n\n reduction_metadata.create_psf_dimensions_layer(np.array(data))\n\n if log != None:\n log.info('Updated metadata with pipeline configuration parameters')\n\n\ndef parse_the_image_header(reduction_metadata, open_image):\n '''\n Update the metadata with the header keywords\n\n :param object reduction_metadata: the metadata object\n :param astropy.image open_image: the opened image\n\n :return an array containing the needed header info\n :rtype array_like\n '''\n\n header_infos = []\n image_header = open_image.header\n reduction_parameters_table = 
reduction_metadata.reduction_parameters[1]\n\n # If the reduction_metadata has a headers_summary table already, use it\n # to get the list of header keywords to extract. From a previously-existing\n # reduction, this will include HJD, which is calculated later on. This\n # parameter is not in the reduction_parameters as it is computed.\n try:\n headers_summary_table = reduction_metadata.headers_summary[1]\n\n for key, col in headers_summary_table.items():\n if 'IMAGES' not in key:\n if key in reduction_parameters_table.keys():\n image_header_key = reduction_parameters_table[key][0]\n\n info = [key, image_header[image_header_key], col.dtype]\n else:\n info = [key, 0.0, col.dtype]\n\n header_infos.append(info)\n\n # New reductions will not yet have a headers_summary table, so instead we\n # extract the parameters from the reduction_parameters table and include\n # all of the parameters for which entries exist in the image header. HJD\n # has to be added to this list.\n except AttributeError:\n reduction_parameter_keys = reduction_parameters_table.keys()\n\n for key in reduction_parameter_keys:\n image_header_key = reduction_parameters_table[key][0]\n\n if image_header_key in image_header.keys():\n info = [key, image_header[image_header_key],\n reduction_parameters_table[key].dtype]\n header_infos.append(info)\n\n for key in ['HJD']:\n info = [key, 0.0, np.float]\n header_infos.append(info)\n\n return np.array(header_infos)\n\n\ndef update_reduction_metadata_headers_summary_with_new_images(setup,\n reduction_metadata,\n new_images, log=None):\n '''\n Update the metadata with the header keywords\n\n :param object reduction_metadata: the metadata object\n :param list new_images: list of strings\n\n :return an array containing the needed header info\n :rtype array_like\n '''\n\n for image_name in new_images:\n layer = reduction_metadata.headers_summary[1]\n\n image_structure = image_handling.determine_image_struture(os.path.join(setup.red_dir, 'data', image_name), log=log)\n\n open_image = open_an_image(setup, reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],\n image_name, log, image_index=image_structure['sci'])\n\n header_infos = parse_the_image_header(reduction_metadata, open_image)\n\n log.info('HEADER INFO: '+repr(header_infos))\n log.info(str(len(header_infos)))\n\n names = np.append('IMAGES', header_infos[:, 0])\n values = np.append(image_name, header_infos[:, 1])\n formats = np.append('S200', header_infos[:, 2])\n\n log.info(str(values))\n log.info(str(len(values)))\n\n if layer:\n\n reduction_metadata.add_row_to_layer('headers_summary', values.astype(str))\n\n\n else:\n\n reduction_metadata.create_headers_summary_layer(names, formats,\n units=None,\n data=values)\n\n if log != None:\n log.info('Added data on new images to the metadata')\n\n\ndef construct_the_stamps(open_image, stamp_size=None, arcseconds_stamp_size=(110, 110),\n pixel_scale=None,\n fraction_of_overlaping_pixels=0.01,number_of_overlaping_pixels=None, log=None):\n '''\n Define the stamps for an image variable kernel definition\n\n :param object reduction_metadata: the metadata object\n :param list stamp_sizes: list of integer give the X,Y stamp size , i.e [150,52] give 150 pix in X, 52 in Y\n :param tuple arcseconds_stamp_size: list of integer give the X,Y stamp size in arcseconds units\n :param float pixel_scale: pixel scale of the CCD, in arcsec/pix\n :param float fraction_of_overlaping_pixels : half of number of pixels as 1D substamp fraction\n :param boolean verbose: switch to True to have more 
informations\n\n\n\n :return an array containing the pixel index, Y_min, Y_max, X_min, X_max (i.e matrix index definition)\n :rtype array_like\n '''\n\n\n image = open_image.data\n full_image_y_size, full_image_x_size = image.shape\n\n if stamp_size:\n\n y_stamp_size = stamp_size[0]\n x_stamp_size = stamp_size[1]\n\n else:\n try:\n y_stamp_size = int(arcseconds_stamp_size[0] / pixel_scale)\n x_stamp_size = int(arcseconds_stamp_size[1] / pixel_scale)\n #we want to distribute the stamp size as evenly as possible\n #that requires to use the corresponding fraction of the envisaged\n #stamp size fits into the frame (ceiling with overlap...)\n subimage_shape = [int(np.ceil(float(full_image_x_size)/x_stamp_size)), int(np.ceil(float(full_image_y_size)/y_stamp_size))]\n x_subsize = int(full_image_x_size/subimage_shape[0])\n y_subsize = int(full_image_y_size/subimage_shape[1])\n\n\n subimage_slices = []\n for idx in range(subimage_shape[0]):\n for jdx in range(subimage_shape[1]):\n subimage_element = subimage_shape+[idx,jdx]\n x_subsize, y_subsize = full_image_x_size/subimage_element[0], full_image_y_size/subimage_element[1]\n\n xslice = [subimage_element[2] * x_subsize , (subimage_element[2] + 1) * x_subsize]\n yslice = [subimage_element[3] * y_subsize , (subimage_element[3] + 1) * y_subsize]\n #this is the slice without overlapping region, but for\n #obtaining a higher accurracy and to defeat edge effects\n #we check if the slice starts or ends at the edge and add\n #the corresponding overlap\n except Exception as e:\n status = 'ERROR'\n report = 'No pixel scale found!'+str(e)\n log.info(status + ': ' + report)\n return status, report, np.zeros(1)\n if (y_stamp_size>full_image_y_size/2) | (x_stamp_size>full_image_x_size/2):\n\n stamps = [0,0,full_image_y_size,0,full_image_x_size]\n status = 'OK'\n report = 'Completed successfully'\n return status, report, np.array(stamps)\n\n # overlapping fraction in pixels\n if number_of_overlaping_pixels:\n overlap_x = number_of_overlaping_pixels\n overlap_y = number_of_overlaping_pixels\n else:\n overlap_x = int(fraction_of_overlaping_pixels * x_stamp_size)\n overlap_y = int(fraction_of_overlaping_pixels * y_stamp_size)\n x_stamps_center = np.arange(int(x_stamp_size / 2), full_image_x_size, x_stamp_size)\n y_stamps_center = np.arange(int(y_stamp_size / 2), full_image_y_size, y_stamp_size)\n if x_stamps_center.size == 0:\n x_stamps_center = np.array([int(x_stamp_size / 2)])\n if y_stamps_center.size == 0:\n y_stamps_center = np.array([int(y_stamp_size / 2)])\n stamps_center_x, stamps_center_y = np.meshgrid(x_stamps_center, y_stamps_center)\n\n stamps_y_min = stamps_center_y - int(y_stamp_size / 2) - overlap_y\n mask = stamps_y_min < 0\n stamps_y_min[mask] = 0\n\n stamps_y_max = stamps_center_y + int(y_stamp_size / 2) + overlap_y\n stamps_y_max[-1,:] = [image.shape[0]]*len(stamps_y_max[-1,:])\n mask = stamps_y_max > full_image_y_size\n stamps_y_max[mask] = full_image_y_size\n\n stamps_x_min = stamps_center_x - int(x_stamp_size / 2) - overlap_x\n mask = stamps_x_min < 0\n stamps_x_min[mask] = 0\n stamps_x_max = stamps_center_x + int(x_stamp_size / 2) + overlap_x\n\n stamps_x_max[:,-1] = [image.shape[1]]*len(stamps_x_max[:,-1])\n\n mask = stamps_x_max > full_image_x_size\n stamps_x_max[mask] = full_image_x_size\n stamps = [[stamps_x_min.shape[1] * i + j, stamps_y_min[i, j], stamps_y_max[i, j], stamps_x_min[i, j],\n stamps_x_max[i, j]]\n for i in range(stamps_x_min.shape[0]) for j in range(stamps_x_min.shape[1])]\n\n status = 'OK'\n report = 'Completed 
successfully'\n return status, report, np.array(stamps)\n\n\ndef update_reduction_metadata_stamps(setup, reduction_metadata, open_image,\n stamp_size=None, arcseconds_stamp_size=(110, 110),\n pixel_scale=None, number_of_overlaping_pixels=25,central_stamp=False,\n log=None):\n '''\n Create the stamps definition in the reduction_metadata\n\n :param object reduction_metadata: the metadata object\n :param astropy.image open_image: the opened image\n :param list stamp_sizes: list of integer give the X,Y stamp size , i.e [150,52] give 150 pix in X, 52 in Y\n :param tuple arcseconds_stamp_size: list of integer give the X,Y stamp size in arcseconds units\n :param float pixel_scale: pixel scale of the CCD, in arcsec/pix\n :param int number_of_overlaping_pixels : half of number of pixels in both direction you want overlaping\n\n '''\n\n if pixel_scale:\n pass\n else:\n pixel_scale = float(reduction_metadata.reduction_parameters[1]['PIX_SCALE'][0])\n\n if central_stamp:\n (status, report, stamps) = construct_central_stamp(open_image, stamp_size, arcseconds_stamp_size,\n pixel_scale, log=log)\n\n else:\n\n (status, report, stamps) = construct_the_stamps(open_image, stamp_size, arcseconds_stamp_size,\n pixel_scale, number_of_overlaping_pixels=number_of_overlaping_pixels, log=log)\n\n names = ['PIXEL_INDEX', 'Y_MIN', 'Y_MAX', 'X_MIN', 'X_MAX']\n formats = ['int', 'S200', 'S200', 'S200', 'S200']\n units = ['', 'pixel', 'pixel', 'pixel', 'pixel']\n\n reduction_metadata.create_stamps_layer(names, formats, units, stamps)\n\n logs.ifverbose(log, setup, 'Updated reduction metadata stamps')\n\n\ndef construct_central_stamp(open_image, stamp_size=None, arcseconds_stamp_size=(110, 110),\n pixel_scale=None, log=None):\n '''\n Define the central stamp\n\n :param object reduction_metadata: the metadata object\n :param list stamp_sizes: list of integer give the X,Y stamp size , i.e [150,52] give 150 pix in X, 52 in Y\n :param tuple arcseconds_stamp_size: list of integer give the X,Y stamp size in arcseconds units\n :param float pixel_scale: pixel scale of the CCD, in arcsec/pix\n\n\n\n :return an array containing the pixel index, Y_min, Y_max, X_min, X_max (i.e matrix index definition)\n :rtype array_like\n '''\n\n\n image = open_image.data\n full_image_y_size, full_image_x_size = image.shape\n\n if stamp_size:\n\n y_stamp_size = stamp_size[0]\n x_stamp_size = stamp_size[1]\n else:\n\n y_stamp_size = 1000\n x_stamp_size = 1000\n\n stamps_center_x, stamps_center_y = ( int(full_image_y_size/2), int(full_image_x_size/2))\n\n stamps = [[0, stamps_center_y-int(y_stamp_size/2),stamps_center_y+int(y_stamp_size/2),stamps_center_x-int(x_stamp_size/2),stamps_center_x+int(x_stamp_size/2)]]\n\n status = 'OK'\n report = 'Completed successfully'\n return status, report, np.array(stamps)\n","sub_path":"pyDANDIA/stage0.py","file_name":"stage0.py","file_ext":"py","file_size_in_byte":30073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"9028400","text":"import click\nfrom typing import Callable, Union, List\n\nfrom command_line import CommandLine\n\n\nclass Main:\n def __init__(self, url: str, bad_chars: Union[List[str], str]) -> None:\n command: Callable = CommandLine(url, bad_chars).command\n try:\n command()\n except Exception as e:\n print(e)\n click.secho('Some error happened. 
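construct_the_stamps places stamp centres every stamp_size pixels, pads each stamp by the overlap, and clips to the frame. A simplified standalone version of that arithmetic (it omits the record's pinning of the last row/column to the frame edge; names are mine):

def stamp_grid(image_shape, stamp_size, overlap):
    # Stamp centres every stamp_size pixels, each stamp padded by
    # `overlap` pixels and clipped to the image boundary.
    ny, nx = image_shape
    sy, sx = stamp_size
    stamps = []
    for cy in range(sy // 2, ny, sy):
        for cx in range(sx // 2, nx, sx):
            y0, y1 = max(cy - sy // 2 - overlap, 0), min(cy + sy // 2 + overlap, ny)
            x0, x1 = max(cx - sx // 2 - overlap, 0), min(cx + sx // 2 + overlap, nx)
            stamps.append((y0, y1, x0, x1))
    return stamps

# A 4096x4096 frame cut into 1000-pixel stamps with a 10-pixel overlap
grid = stamp_grid((4096, 4096), (1000, 1000), 10)
print(len(grid), grid[0])  # 16 stamps; the first spans (0, 1010, 0, 1010)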
please try again', bg='black', fg='red')\n\n\nif __name__ == '__main__':\n    bad_chars = ['!~@#$%^&*']\n    url = 'https://jsonplaceholder.typicode.com/posts'\n    Main(url, bad_chars)\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"518488420","text":"import os\nimport pandas as pd\nimport shutil\n\ndef dzielenie_listy(l, n):\n    \"\"\"\n    Split list l into consecutive sublists of length n.\n\n    :param l: input list\n    :type l: list\n    :param n: length of each sublist (the last one may be shorter)\n    :type n: int\n    \"\"\"\n    for i in range(0, len(l), n):\n        yield l[i:i + n]\n\ndef agregacja_zdjec(input):\n    \"\"\"\n    :param input: path to the folder with images\n    :type input: path as string\n    \"\"\"\n    ramka = pd.read_csv(\"images.csv\")\n    ramka_sort = ramka.sort_values(by=\"Mediana jasnosci po przekonwertowaniu na odcienie szarosci\", ascending=True)\n    nazwy_plikow = list(ramka_sort[\"Nazwa pliku\"])\n    nazwy_plikow = list(dzielenie_listy(nazwy_plikow, 4))\n\n    for i in range(len(nazwy_plikow)):\n        os.makedirs('agg-images/' + f'{i + 1}-images')\n        for plik in nazwy_plikow[i]:\n            shutil.copy(input + plik, 'agg-images/' + f'{i+1}-images')\n\n    shutil.make_archive('agg-images', 'zip', 'agg-images')\n\n\nkat = \"C:/Users/admin/Desktop/Kolo_DS/images/\"\nagregacja_zdjec(kat)","sub_path":"Rozwiązania/Morokov/agregacja_zdjec.py","file_name":"agregacja_zdjec.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"102337939","text":"# https://github.com/NREL/gdx-pandas\nimport pandas as pd\n# import XlsxWriter\nimport gdxpds\nfrom ctypes import c_bool\nimport logging\n\n\"\"\"\nPython dicts of {symbol_name: pandas.DataFrame}, where \neach pandas.DataFrame contains data for a single set, parameter, equation, or \nvariable.\n\"\"\"\n# gdx_file = \"C:\\Users\\Adel\\Documents\\Test_wash\\WASH_5yrs_OutputData.gdx\"\n# gdx_file = \"C:\\Users\\Adel\\Documents\\Test_wash\\Systems-model-in-Wetlands-to-Allocate-water-and-Manage-Plant-Spread\\Version1.2-WetlandUnitsAsTanks\\GUI_v1.2\\BRMBR_Input.gdx\"\n# read from this one\ngdx_file = \"C:\\RC\\Final_Wizard_April_2018\\controller\\WASH\\WASH-Data.gdx\"\ndataframes = gdxpds.to_dataframes(gdx_file)\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\n\n#1.\nwriter = pd.ExcelWriter('WAHS_input_GDX3_as_is.xlsx', engine='xlsxwriter')\n#2.\n# writer = pd.ExcelWriter('SWAMPS_Input_GDX.xlsx', engine='xlsxwriter')\n#3.\n# writer = pd.ExcelWriter('WASH_5yrs_OutputData_5yrs.xlsx', engine='xlsxwriter')\n\ndef ReadGdx():\n\n    # except Exception as e:\n    #     print e\n    #     continue\n    df_all = {}\n    for symbol_name, df in dataframes.items():\n\n        # if symbol_name=='Z' or symbol_name=='Q' or symbol_name=='WSI' or symbol_name=='W': continue\n        # if symbol_name=='FlowMarginal' or symbol_name=='lng' or symbol_name=='dReq': continue\n        # try:\n        # if symbol_name not in 'links':\n        #     print(\"Doing work with {}.\".format(symbol_name))\n        # print df\n\n        if 'Value' in df.keys():\n            for i, sub_key in enumerate(df['Value']):\n                # if df['Value'][i] == c_bool(True):\n                if len(df['Value']) > i and isinstance(df['Value'][i], c_bool):\n                    df['Value'][i] = True\n\n        # store all the dfs from here so we can pass them to another function\n        # keep the symbol_name with each df\n        df_all[symbol_name] = df\n\n\n        # Convert the dataframe to an XlsxWriter Excel object.\n        df.to_excel(writer, 
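# Aside on the dzielenie_listy generator in agregacja_zdjec.py above: it is the
# standard slice-chunking idiom, easy to verify in isolation (names are mine):
def chunks(seq, n):
    # yield successive slices of length n; the last slice may be shorter
    for i in range(0, len(seq), n):
        yield seq[i:i + n]

assert list(chunks(list(range(10)), 4)) == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]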
sheet_name=symbol_name)\n        #Save each data frame into a sheet in one excel file\n\n\n\n    # Close the Pandas Excel writer and output the Excel file.\n    writer.save()\n\n    return df_all\n\n# to view the frames in Excel (for fun)\ndef WriteToWaMDaM(df_all):\n\n    # read the j parameter and pass it to the write_nodes function that exists in this file Write_WaMDaM_Workbook\n    # use the parameter name \"j\" for the ObjectType value\n    # use the parameter values for the NodeInstanceName\n    filename=\"WASH_Ready_WaMDaM_May10.xlsx\"\n\n    from Write_WaMDaM_Workbook import SaveExcel\n    SaveExcel(df_all, filename)\n\n\n# df.to_excel(writer, sheet_name=symbol_name)\n\ndf_all = ReadGdx()\nWriteToWaMDaM(df_all)","sub_path":"src/controller/WASH/ReadGDX.py","file_name":"ReadGDX.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"483649782","text":"import numpy as np\nimport random\nfrom cmaes import CovarianceMatrixAdaptationEvolutionStrategy\n\n\ndef initialSearchPointGenerator(dimensions, startValue=0, endValue=1):\n    randomArray = randomArrayGenerator(dimensions, startValue, endValue)\n    return np.array(randomArray)\n\n\ndef randomArrayGenerator(dimensions, startValue=0, endValue=1):\n    valuesArray = []\n    while(dimensions > 0):\n        valuesArray.append(random.uniform(startValue, endValue))\n        dimensions -= 1\n    return valuesArray\n\n\ndef objectiveFunction(vector):\n    param1, param2 = vector\n    return param1**2 + param2**2\n\n\ndimensionsNumber = 2\ninitialMeanVector = initialSearchPointGenerator(dimensionsNumber)\n\ncma_es = CovarianceMatrixAdaptationEvolutionStrategy(function=objectiveFunction,\n                                                     intialPoint=initialMeanVector,\n                                                     stepSize=None,\n                                                     populationSizeGeneration=None,\n                                                     parentsNumber=None,\n                                                     recombinationWeights=None,\n                                                     learningRateCumulationStepSize=None,\n                                                     updateMitigationStepSize=None,\n                                                     learningRateCumulationRankOneUpdate=None,\n                                                     learningRateRankOneUpdate=None,\n                                                     learningRateRankMuUpdate=None,\n                                                     learningRateMeanVectorUpdate=None,\n                                                     absoluteChangeEarlyStopping=None,\n                                                     relativeChangeEarlyStopping=None,\n                                                     generationsNumber=None)\n\nresult = cma_es.minimizar()\n\nprint('\\n', result)\n","sub_path":"Framework/cmaes/library/v2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"258921156","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\n\n\n# Get the torrent file ID\ndef sec_page(sec_url):\n    html_sec = requests.get(sec_url).text\n    soup_sec = BeautifulSoup(html_sec, features=\"lxml\")\n    all_sec_url = soup_sec.select(\".ajaxdialog\")\n    for z in all_sec_url:\n        c = z[\"href\"]\n        # print(c)\n        # This could be changed to c = z[\"href\"][3] to pull the link out directly, which would be faster\n        if \"torrent\" in c:\n            d = re.match(\".*torrent.*?-(\\d{6})-.*\", c)\n            return d.group(1)\n        else:\n            continue\n\n\ndef poster(sec_url):\n    html_poster = requests.get(sec_url).text\n    soup_poster = BeautifulSoup(html_poster, features=\"lxml\")\n    # We get a list: take the first element, str() it, then strip spaces, newlines, etc.\n    url_almost = str(soup_poster.select(\".message\")[0]).replace('\\n', '').replace('\\t', '').replace(' ', '')\n    # print(url_almost)\n    # url_jpg = re.match(\".*?(https://img3.doubanio.com/view/photo/s_ratio_poster/public/p\\d+.webp)\", url_almost)\n    url_jpg = re.match(\".*?src=\\\"(.*?jpg|webp)\", url_almost)\n    # print(url_jpg.group(1))\n    if url_jpg:\n        return url_jpg.group(1)\n\n\n\n# Connect to the database and create the table\n# db = pymysql.connect(host=\"localhost\", port=3306, user=\"root\",\n# 
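# Aside on the CMA-ES setup above: the hand-rolled initialSearchPointGenerator /
# randomArrayGenerator pair can collapse to one numpy call, since
# np.random.uniform already accepts a size argument (sketch, names are mine):
import numpy as np

def initial_search_point(dimensions, start=0.0, end=1.0):
    return np.random.uniform(start, end, size=dimensions)

x0 = initial_search_point(2)
assert x0.shape == (2,) and np.all((x0 >= 0.0) & (x0 < 1.0))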
passwd=\"123456\", db=\"bilibt_db\", charset='utf8')\n# cursor = db.cursor()\n# cursor.execute('DROP TABLE IF EXISTS all_movie')\n# sql = \"\"\"\n# CREATE TABLE all_movie(\n# ID INT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n# Movie_Name CHAR(100) NOT NULL,\n# Movie_url CHAR(100) UNIQUE NOT NULL\n# )\n# \"\"\"\n# cursor.execute(sql)\n# db.commit()\n# cursor.close()\n# db.close()\n\n\ndef insert_data(movie_name, movie_url, movie_poster):\n db_1 = pymysql.connect(host=\"localhost\", port=3306, user=\"root\",\n passwd=\"123456\", db=\"show_bilibt\", charset='utf8')\n cursor_1 = db_1.cursor()\n sql_1 = \"\"\"\n INSERT INTO all_movie(\n Movie_Name, Movie_url, Movie_poster\n ) VALUE (\"%s\", \"%s\", \"%s\" )\n \"\"\" % (movie_name, movie_url, movie_poster)\n\n # cursor_1.execute(sql_1)\n # db_1.commit()\n # cursor_1.close()\n # db_1.close()\n\n # 捕获value相同的异常,当value相同,则证明现在爬的东西之前已经\n # 爬过了,直接退出爬虫\n try:\n cursor_1.execute(sql_1)\n db_1.commit()\n print(\"done\")\n cursor_1.close()\n db_1.close()\n except pymysql.err.IntegrityError:\n print(\"已完成\")\n cursor_1.close()\n db_1.close()\n exit()\n\n\n# 获取电影名及种子链接,并插入到数据库\nfor y in range(1, 10):\n x = str(y)\n html_index = requests.get(\"http://www.bilibt.com/?index-index-page-\"+x+\".htm\")\n soup_index = BeautifulSoup(html_index.text, features=\"lxml\")\n all_movie = soup_index.select(\".subject_link\")\n # print(all_movie)\n for i in all_movie:\n # print(i)\n if len(i.text) > 62:\n a = i.text[:55]\n else:\n a = i.text\n b = i[\"href\"]\n f = poster(b)\n # 这个判断是因为这个网站有些电影可能没有下载链接\n if sec_page(b) is None:\n continue\n e = \"http://www.bilibt.com/?attach-download-fid-1-aid-\" + sec_page(b) + \".htm\"\n # print(f)\n try:\n insert_data(a, e, f)\n except pymysql.err.ProgrammingError:\n continue\n # print(a, e)o90p\n # f = open(\"url.txt\", \"a\")\n # f.write(a+\"\\n\"+b+\"\\n\")\n # f.close()\n\n\n","sub_path":"bilibt.py","file_name":"bilibt.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"99893883","text":"#!/usr/bin python3\r\n# -*- coding: utf-8 -*-\r\n# @Time : 18-12-25 上午10:41\r\n# @Author : 林利芳\r\n# @File : hyperparams.py\r\n\r\n\r\nclass HyperParams(object):\r\n\tdef __init__(self):\r\n\t\t# training\r\n\t\tself.seg = 'LSTM' # [GRU,LSTM,IndRNN,F-LSTM]\r\n\t\tself.batch_size = 128 # alias = N\r\n\t\tself.lr = 0.0001 # learning rate. In paper, learning rate is adjusted to the global step.\r\n\t\tself.num_layer = 2\r\n\t\t# model\r\n\t\tself.max_len = 50 # Maximum number of words in a sentence. alias = T.\r\n\t\tself.model_dir = 'model_ckpt'\r\n\t\tself.log_dir = 'log'\r\n\t\t# Feel free to increase this if you are ambitious.\r\n\t\tself.min_cnt = 20 # words whose occurred less than min_cnt are encoded as .\r\n\t\tself.num_units = 512 # alias = C\r\n\t\tself.num_blocks = 6 # number of encoder/decoder blocks\r\n\t\tself.num_epochs = 100\r\n\t\tself.num_heads = 8\r\n\t\tself.filters = [2, 3, 4, 5]\r\n\t\tself.clip = 5\r\n\t\tself.dropout_rate = 0.1\r\n\t\tself.eps = 1e-9\r\n\t\tself.sinusoid = False # If True, use sinusoid. 
If false, positional embedding.\r\n","sub_path":"hyperparams.py","file_name":"hyperparams.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"109657701","text":"import turtle\nfresco = turtle.Turtle()\n\nfor i in range (300):\n fresco.forward(i)\n fresco.right(81)\n# angle 150 for a window into a starry night\n# 50 for spiral thing\n# 144 for a star\n# 190 for a black sun / sea urchin\n# 81 for a square-ish spiral\n\nturtle.mainloop()\n","sub_path":"Cs120/hw/spiral.py","file_name":"spiral.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"300863924","text":"from base import BusyObservable, change\n\nimport requests\n\n\nclass Downloadable(BusyObservable):\n \"\"\"An interface to be implemented by resources that are\n downloadable.\n\n\n Attributes\n ----------\n \n _download_url: str\n The URL from where the resource can be downloaded.\n\n _download_target: str\n The target location on the local system where the resource\n is to be stored.\n \n _download_bytes: int\n (Only during download): the number of bytes already downloaded.\n\n _download_size: int\n (Only during download): the size of the currently downloaded\n file in bytes.\n \"\"\"\n\n _download_url: str = None\n _download_target: str = None\n \n _downloaded_bytes: int = None\n _download_size: int = None\n\n @property\n def download_url(self) -> str:\n \"\"\"The URL from where we the resource can be downloaded.\n \"\"\"\n return self._download_url\n\n @property\n def download_target(self):\n \"\"\"The target (on the local system) where the resource should be\n stored when downloaded.\n \"\"\"\n return self._download_target\n\n @property\n def downloaded(self) -> bool:\n \"\"\"Check if the resource was downloaded. \n \"\"\"\n if not self._download_target:\n raise RuntimeError(\"No target location for download was specified.\")\n\n @property\n def downloading(self) -> bool:\n \"\"\"Check if the resource is currently downloaded. 
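Two details are worth flagging in this Downloadable class: requests returns Content-Length as a string, so the percentage arithmetic in download() would raise a TypeError without an int() cast, and the downloaded property ends before ever returning a boolean. A hedged sketch of the streaming loop with the cast applied (names are mine):

import requests

def download(url, target, chunk_size=8192):
    # Content-Length arrives as a string and must be cast before arithmetic.
    response = requests.get(url, stream=True)
    total = int(response.headers.get('Content-Length', 0))
    done = 0
    with open(target, 'wb') as out:
        for chunk in response.iter_content(chunk_size=chunk_size):
            out.write(chunk)
            done += len(chunk)
            if total:
                print('%10d [%3.2f%%]' % (done, done * 100.0 / total))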
\n \"\"\"\n return self._downloaded_bytes is not None\n\n @property\n def download_progress(self) -> (int, int):\n \"\"\"Check the current progress in downloading.\n \"\"\"\n return (self._downloaded_bytes, self._download_size)\n\n # FIXME[todo]: may be run in the background ...\n # FIXME[todo]: may notify observers ...\n # FIXME[todo]: resume download: resume_header = {'Range': 'bytes=%d-' % resume_byte_pos}\n # self._downloading = True\n # check the file size with from pathlib import Path;\n # path = Path(..); # -> path.stat().st_size\n def download(self, force: bool=False, chunk_size: int=8192) -> None:\n \"\"\"Download the resource and store it at the target location.\n\n Arguments\n ---------\n force: bool\n Force download even if the resource is already available.\n\n Raises\n ------\n \"\"\"\n if self.downloaded and not force:\n return\n\n if not self._download_url:\n raise RuntimeError(\"No download URL was specified.\")\n\n if not self._download_url:\n raise RuntimeError(\"No download URL was specified.\")\n\n self._downloaded_bytes = 0\n\n #file_name = url.split('/')[-1]\n file_name = self._download_target\n request = requests.get(self._download_url, stream=True)\n\n self._download_size = request.headers.get('Content-Length')\n print(\"Downloading: %s Bytes: %s\" % (file_name, self._download_size))\n\n with open(file_name, 'wb') as output_file:\n for chunk in request.iter_content(chunk_size=chunk_size):\n self._downloaded_bytes += len(chunk)\n output_file.write(chunk)\n status = r\"%10d [%3.2f%%]\" % (self._downloaded_bytes,\n self._downloaded_bytes * 100. / self._download_size)\n status = status + chr(8)*(len(status)+1)\n print(status)\n\n delattr(self, '_downloaded_bytes')\n delattr(self, '_download_size')\n delattr(self, '_downloading')\n","sub_path":"util/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"334040363","text":"from django.test import TestCase\n\nimport requests_mock\nfrom zgw_consumers.constants import APITypes, AuthTypes\nfrom zgw_consumers.models import Service\n\nfrom bptl.activiti.models import ServiceTask\nfrom bptl.credentials.tests.factories import AppServiceCredentialsFactory\nfrom bptl.tasks.tests.factories import DefaultServiceFactory\n\nfrom ..tasks import IsAboveAge\n\nBRP_API_ROOT = \"http://brp.example.com/\"\nPERSON_URL = f\"{BRP_API_ROOT}ingeschrevenpersonen/999999011?fields=leeftijd\"\n\n\n@requests_mock.Mocker()\nclass IsAboveAgeTaskTests(TestCase):\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n cls.fetched_task = ServiceTask.objects.create(\n topic_name=\"some-topic\",\n variables={\n \"bptlAppId\": \"some-app-id\",\n \"burgerservicenummer\": \"999999011\",\n \"age\": 18,\n },\n )\n brp = Service.objects.create(\n api_root=BRP_API_ROOT,\n api_type=APITypes.orc,\n auth_type=AuthTypes.api_key,\n header_value=\"12345\",\n header_key=\"X-Api-Key\",\n )\n DefaultServiceFactory.create(\n task_mapping__topic_name=\"some-topic\",\n service=brp,\n alias=\"brp\",\n )\n AppServiceCredentialsFactory.create(\n app__app_id=\"some-app-id\",\n service=brp,\n header_key=\"Other-Header\",\n header_value=\"foobarbaz\",\n )\n\n def test_above_age_service_credentials(self, m):\n del self.fetched_task.variables[\"bptlAppId\"]\n self.fetched_task.save()\n m.get(\n PERSON_URL, json={\"leeftijd\": 36, \"_links\": {\"self\": {\"href\": PERSON_URL}}}\n )\n task = IsAboveAge(self.fetched_task)\n\n result = 
task.perform()\n\n        self.assertEqual(result, {\"isAboveAge\": True})\n\n        # check auth\n        self.assertEqual(m.last_request.headers[\"X-Api-Key\"], \"12345\")\n\n    def test_above_age(self, m):\n        m.get(\n            PERSON_URL, json={\"leeftijd\": 36, \"_links\": {\"self\": {\"href\": PERSON_URL}}}\n        )\n        task = IsAboveAge(self.fetched_task)\n\n        result = task.perform()\n\n        self.assertEqual(result, {\"isAboveAge\": True})\n\n        # check auth\n        self.assertEqual(m.last_request.headers[\"Other-Header\"], \"foobarbaz\")\n\n    def test_equal_age(self, m):\n        m.get(\n            PERSON_URL, json={\"leeftijd\": 18, \"_links\": {\"self\": {\"href\": PERSON_URL}}}\n        )\n        task = IsAboveAge(self.fetched_task)\n\n        result = task.perform()\n\n        self.assertEqual(result, {\"isAboveAge\": True})\n\n    def test_below_age(self, m):\n        m.get(\n            PERSON_URL, json={\"leeftijd\": 17, \"_links\": {\"self\": {\"href\": PERSON_URL}}}\n        )\n        task = IsAboveAge(self.fetched_task)\n\n        result = task.perform()\n\n        self.assertEqual(result, {\"isAboveAge\": False})\n\n    def test_none_age(self, m):\n        m.get(PERSON_URL, json={\"_links\": {\"self\": {\"href\": PERSON_URL}}})\n        task = IsAboveAge(self.fetched_task)\n\n        result = task.perform()\n\n        self.assertEqual(result, {\"isAboveAge\": None})\n","sub_path":"src/bptl/work_units/brp/tests/test_task_is_age_above.py","file_name":"test_task_is_age_above.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"190943650","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 10 11:19:47 2017\n\n@author: LaVie\n\"\"\"\n\nimport pandas as pd\n\ndf = pd.read_csv('XBI.12272016hldg.010417data.csv', index_col=0)\n\nimport plotly.graph_objs as go\n# These two imports were missing but are used below (plotly.plotly is the\n# pre-v4 online-plotting module that provides py.iplot):\nimport plotly.plotly as py\nimport numpy as np\n\ncolor = []\nfor i in range(len(df)-1):\n    color = color + ['rgb({},{},{})'.format(np.random.randint(0, 255),\n                                            np.random.randint(0, 255), np.random.randint(0, 255))]\n\ntrace0 = go.Scatter(\n    x=df.vsFloat,\n    y=df.netValue,\n    text=df.Ticker,\n    mode='markers',\n    marker=dict(\n        color=color,\n        # size=df.netTrans,\n    )\n)\n\ndata = [trace0]\npy.iplot(data, filename='bubblechart-text')\n","sub_path":"InsiderTrading/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"189035293","text":"import semver\nfrom setuptools_scm.git import DEFAULT_DESCRIBE, GitWorkdir, _git_parse_describe, warn_on_shallow\nfrom setuptools_scm.utils import has_command\nfrom setuptools_scm.version import meta\n\n\ndef parse(root, *, config):\n    \"\"\"\n    Based on https://github.com/pypa/setuptools_scm/blob/master/src/setuptools_scm/git.py#parse\n\n    This is almost a verbatim copy, except that we tell setuptools_scm that the tag is preformatted\n    to prevent them from applying Python's version normalisation.\n    \"\"\"\n    if not has_command(\"git\"):\n        return\n\n    wd = GitWorkdir.from_potential_worktree(config.absolute_root)\n    if wd is None:\n        return\n    warn_on_shallow(wd)\n\n    describe_command = config.git_describe_command or DEFAULT_DESCRIBE\n\n    out, unused_err, ret = wd.do_ex(describe_command)\n    if ret:\n        # If 'git git_describe_command' failed, try to get the information otherwise.\n        tag = '0.1.0'\n        distance = wd.count_all_nodes()\n        dirty = wd.is_dirty()\n        node = None\n        branch = None\n\n        rev_node = wd.node()\n        if rev_node is not None:\n            node = f'g{rev_node}'\n            branch = wd.get_branch()\n    else:\n        tag, distance, node, dirty = _git_parse_describe(out)\n        branch = wd.get_branch()\n\n    version = 
meta(\n semver.parse_version_info(tag),\n distance=distance,\n dirty=dirty,\n node=node,\n preformatted=True,\n config=config,\n branch=branch\n )\n version.preformatted = False\n\n return version\n","sub_path":"setuptools_scm_git_semver/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"291789118","text":"import sys\n\nsys.path.insert(1, \"../../src/\")\nfrom db import PKPDB\n\nDB = PKPDB()\n\nwith open(\"pdb_entry_type.txt\") as f:\n for line in f:\n cols = line.strip().split(\"\\t\")\n pdbcode, p_type, exp = cols\n if DB.check_pdbcode_exists(pdbcode):\n sql_query = f\"\"\"\nUPDATE Protein\nSET PROTEIN_TYPE = '{p_type}'\nWHERE IDCODE = '{pdbcode}'\"\"\"\n DB.exec_statement(sql_query)\n else:\n sql_query = f\"\"\"\nINSERT INTO Protein(IDCODE, PROTEIN_TYPE)\nVALUES ('{pdbcode}', '{p_type}') \"\"\"\n DB.exec_statement(sql_query)\n\nDB.commit()\nprint(\"Successfully updated PROTEIN: PROTEIN_TYPE\")\n","sub_path":"initial/PDB_data/read_entry_type.py","file_name":"read_entry_type.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"535111894","text":"import clr\nclr.AddReference(\"TicTacToe\")\nimport random\n\nfrom TicTacToe.Imports import *\nfrom System import Array\n\nclass GameController:\n def __init__(self):\n self.tiles = Array.CreateInstance(str, 3, 3)\n\n def update_game_state(self, game_state):\n self.tiles = game_state.Board\n \n def find_first_unoccupied(self):\n for x in range(3):\n for y in range(3):\n if not self.tiles[x,y]:\n return x,y\n return 0,0\n \n def place_symbol(self):\n if not self.tiles[1,1]:\n return SymbolPlacement(1,1)\n x,y = self.find_first_unoccupied()\n return SymbolPlacement(x,y)","sub_path":"Samples/TicTacToe/middle_or_first.py","file_name":"middle_or_first.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"477233830","text":"\ndef main():\n MILLION = 1000000\n\n sum = 0\n for i in range(1, MILLION+1):\n if palindrome_10(i) and palindrome_2(i):\n sum += i\n print('The sum of all bi-palindromic numbers under {} is {}'.format(MILLION, sum))\n\n\ndef palindrome_10(n):\n num = str(n)\n l = len(num)\n for i in range(l):\n if num[i] != num[l - i - 1]:\n return False\n return True\n\n\ndef palindrome_2(n):\n store = n\n reverse = 0\n while n > 0:\n # shift reverse to the left\n reverse = reverse << 1\n # if right most digit of n is 1, append 1 to reverse\n if n & 1:\n reverse = reverse | 1\n # chop off right most digit of n\n n = n >> 1\n # if n is a binary palindrome, n XOR reverse will be 0\n return not store ^ reverse\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"solved/p036.py","file_name":"p036.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"41804143","text":"#!/usr/bin/env python\n\nimport rospy\nimport time\nfrom std_msgs.msg import Float32\nfrom MAX518 import MAX518_Controller\n\nclass Output_Controller(MAX518_Controller):\n\n def __init__(self):\n rospy.init_node('output_control')\n\n self.haptic_name = rospy.get_param('~name')\n i2c_address = rospy.get_param('~i2c_address')\n scale = rospy.get_param('~scale')\n\n MAX518_Controller.__init__(self,i2c_address)\n time.sleep(1)\n\n self._A0max = 4.1*scale\n self._A1max = 1.05*scale\n\n\n 
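# Aside on p036.py above: the bit-reversal loop builds the mirrored bit
# pattern, and XOR-ing it with the input is zero exactly for binary
# palindromes. A quick standalone check:
def reverse_bits(n):
    rev = 0
    while n > 0:
        rev = (rev << 1) | (n & 1)  # shift in the lowest bit of n
        n >>= 1
    return rev

assert reverse_bits(0b1001) == 0b1001  # 9 is a binary palindrome
assert reverse_bits(0b1010) == 0b0101  # 10 is not: 10 != 5
assert 9 ^ reverse_bits(9) == 0        # the XOR trick used in the record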
self.int_sub = rospy.Subscriber('/'+self.haptic_name+'/intensity/', Float32, self.int_callback, queue_size = 1)\n\n        rospy.on_shutdown(self.close)\n        rospy.spin()\n\n    def int_callback(self, intensity):\n        intensity = intensity.data\n        if self._i2cbus:\n            self.DAC_output(self._A0max*intensity, self._A1max*intensity)\n\n    def close(self):\n        self.MAX518_close()\n\nif __name__ == '__main__':\n    controller = Output_Controller()\n    \n","sub_path":"hybrid_act/output_controller/src/output_control.py","file_name":"output_control.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"559520938","text":"import RobotAPI as rapi\nimport numpy as np\nimport cv2\nimport zbar\n\nscanner = zbar.Scanner()\n\nrobot = rapi.RobotAPI()\n\ntimer_vcc = robot.millis()\nlast_vcc = robot.vcc()\n\n\ndef vcc_test():\n    global timer_vcc, last_vcc\n    status = 0\n    if timer_vcc + 1000 < robot.millis():\n        v = robot.vcc()\n        delta = v - last_vcc\n        print(\"v\", v)\n        if v > 11.8:\n            print(\"HIGH VCC\", v)\n            robot.green()\n            status = 1\n\n        if abs(delta) > 0.6:\n\n            if delta > 0:\n                robot.green()\n                status = 1\n                print(\"HIGH DELTA\", delta)\n\n            if delta < 0:\n                robot.color_off()\n                status = 0\n\n        last_vcc = v\n        timer_vcc = robot.millis()\n    return status\n\n\ncount_dist = 0\n\n\ndef parking():\n    global count_dist\n    robot.step(150, 150)\n    if vcc_test() == 1:\n        return 1\n    print(robot.dist())\n    if robot.dist() < 15:\n        count_dist += 1\n    if count_dist > 15:\n        print(\"small dist\")\n        robot.step(-200, -200, time_step=1500)\n        robot.wait(500)\n        count_dist = 0\n        return -1\n    return 0\n\n\ndef find_qr():\n    all_qr = []\n    results = scanner.scan(cv2.cvtColor(robot.get_frame(), cv2.COLOR_BGR2GRAY))\n    for result in results:\n        contour_rect = np.array(result.position)\n        name = result.data.decode(\"utf-8\")\n        area = cv2.contourArea(contour_rect)\n        x, y, w, h = cv2.boundingRect(contour_rect)\n        all_qr.append([name, area, x, y, w, h, result.position])\n    return all_qr\n\n\ndef distance_between_points(xa, ya, xb, yb, za=0, zb=0):\n    return np.sqrt(np.sum((np.array((xa, ya, za)) - np.array((xb, yb, zb))) ** 2))\n\n\n# faza = \"parking\"\nfaza = \"chargeS\"\n# faza = \"chargeB\"\n# faza = \"charge\"\ncount_fail_chargeS = 0\nrobot.serv(0)\n\nwhile True:\n    if robot.button() == 1:\n        break\n    if robot.manual(1) == 1:\n        continue\n\n    frame = robot.get_frame()\n\n    qrs = []\n    if faza != \"parking\" or faza == \"charge\":\n        qrs = find_qr()\n\n    if faza == \"chargeS\":\n        # drive the robot while orienting by the small sign\n        for qr in qrs:\n            print(qr)\n            name, area, x, y, w, h, pos = qr\n            continue\n\n    if faza == \"parking\":\n        print(\"parking\")\n        res = parking()\n        # if the parking function returned success, finish parking\n        if res == 1:\n            robot.sound1()\n            robot.wait(1000)\n            faza = \"charging\"\n        if res == -1:\n            # the parking function reported a parking failure\n            print(\"Fail parking\")\n            robot.step(-250, -250, 1000)\n            faza = \"chargeS\"\n            # we may need to switch back to the QR-code orientation stage\n            pass\n\n    if faza == \"charging\":\n\n        # next step: verify that charging is actually happening;\n        # if the voltage is insufficient, re-park\n        vcc = robot.vcc()\n        print(vcc, \"CHARGING\")\n        robot.wait(5000)\n        if vcc < 11.0:\n            print(vcc, \"LOW VCC, reparking...\")\n            faza = \"parking\"\n\n    
robot.set_frame(frame)\n","sub_path":"ComputerVision2/Examples/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"130005223","text":"#code from crash course, mostly\n#made did_hit myself after learning how to use colliderect\nimport sys\nimport pygame\nfrom mc import Character as mc\nfrom settings import Settings\nfrom wall import Wall\nfrom wall import Box\n\n\ndef check_events(mc, boxes, walls): \n # while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n #Movement of the character, from crash course\n if event.key == pygame.K_d:\n mc.move_right = True\n if event.key == pygame.K_w:\n mc.move_up = True\n if event.key == pygame.K_s:\n mc.move_down = True\n if event.key == pygame.K_a:\n mc.move_left = True\n if event.key == pygame.K_SPACE:\n mc.move_box(boxes, walls)\n mc.box_hit_wall(boxes, walls)\n mc.box_hit_box(boxes)\n if event.key == pygame.K_y:\n print(mc.rect.x)\n print(mc.rect.y)\n #this is for debugging so i know where the player is exactly\n if event.key == pygame.K_b:\n if mc.game_over == False and mc.waiting == True:\n pygame.mixer.music.play(-1)\n mc.waiting = False\n if event.key == pygame.K_r:\n if mc.game_over == True:\n mc.score = 76\n mc.waiting = True\n mc.game_over = False\n elif event.type == pygame.KEYUP:\n #toggles T/F and in mc class, will move if key down, from crash course\n if event.key == pygame.K_d:\n mc.move_right = False\n if event.key == pygame.K_w:\n mc.move_up = False\n if event.key == pygame.K_s:\n mc.move_down = False\n if event.key == pygame.K_a:\n mc.move_left = False\n elif event.type == pygame.MOUSEBUTTONUP:\n mouse_pos = pygame.mouse.get_pos()\n print(mouse_pos)\n #this event is for my own debugging, i used this to tell where I am in the gameboard\n #and how to access specific points. 
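The movement handling described in game_functions.py follows the usual pygame idiom: KEYDOWN sets a flag, KEYUP clears it, and the per-frame update moves while the flag is held. The core of the idea stripped of pygame itself (names are mine):

flags = {'right': False}

def handle(event_type, key):
    # KEYDOWN turns the flag on, KEYUP turns it off; the update loop
    # then moves the character every frame while the flag stays True.
    if key == 'd':
        flags['right'] = (event_type == 'KEYDOWN')

handle('KEYDOWN', 'd'); assert flags['right']
handle('KEYUP', 'd');   assert not flags['right']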
I got mouse.get_pos() and mousebuttonup from pygame.org\n\n#this function's idea is from python crash course, i then made my own functions\ndef update_screen(settings, screen, hero, walls, boxes):\n hero.blitme()\n #crash course\n hero.did_hit(walls)\n #original, for need of surface detection\n hero.did_hit_box(boxes)\n #ditto ^\n pygame.display.flip()\n #i still don't know what this does lol ^\n hero.change_image()\n #original code, to change direction of link","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"482073214","text":"# 1003 / calculate called number of fibo(0) and fibo(1)\nnums = {0: [1, 0], 1: [0, 1]}\n\n\ndef fibo(n):\n\n if n not in nums:\n n1, n2 = fibo(n - 1), fibo(n - 2)\n nums[n] = [n1[0] + n2[0], n1[1] + n2[1]]\n\n return nums[n]\n\n\nfor _ in range(int(input())):\n res = fibo(int(input()))\n print(res[0], res[1])\n","sub_path":"dp/1003.py","file_name":"1003.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"334520799","text":"import sys\n\nLEFT_ASSOC = 0\nRIGHT_ASSOC = 1\n\nOPERATORS = {\n '+': (0, LEFT_ASSOC),\n '-': (0, LEFT_ASSOC),\n '#': (10, RIGHT_ASSOC),\n '*': (5, LEFT_ASSOC),\n '/': (5, LEFT_ASSOC),\n '%': (5, LEFT_ASSOC),\n '^': (10, RIGHT_ASSOC),\n}\n\ndef is_operator(token):\n return token in OPERATORS\n\ndef is_associative(token, assoc):\n if not is_operator(token):\n raise ValueError('Invalid token %s' % token)\n return OPERATORS[token][1] == assoc\n\ndef cmp_precedence(token1, token2):\n if not is_operator(token1) or not is_operator(token2):\n raise ValueError('Invalid tokens: {0} {1}'.format(token1, token2))\n return OPERATORS[token1][0] - OPERATORS[token2][0]\n\ndef infix_to_rpn(tokens):\n rpn = []\n stack = []\n for token in tokens:\n if is_operator(token):\n while len(stack) and is_operator(stack[-1]):\n if ((is_associative(token, LEFT_ASSOC) and\n cmp_precedence(token, stack[-1]) <= 0) or\n (is_associative(token, RIGHT_ASSOC) and\n cmp_precedence(token, stack[-1]) < 0)):\n rpn.append(stack.pop())\n continue\n break\n stack.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n while len(stack) and stack[-1] != '(':\n rpn.append(stack.pop())\n stack.pop()\n else:\n rpn.append(token)\n while len(stack):\n rpn.append(stack.pop())\n return rpn\n\ndef evaluate_rpn(tokens):\n stack = []\n for token in tokens:\n if token.replace('.', '').replace('-', '').isdigit():\n stack.append(token)\n elif token == '#':\n try:\n v = stack.pop()\n stack.append('({0})'.format(eval('-{0}'.format(v))))\n except IndexError:\n return\n else:\n try:\n v2 = stack.pop()\n v1 = stack.pop()\n if token == '^': token = '**'\n stack.append(eval('{0}{1}{2}'.format(v1, token, v2)))\n except IndexError:\n return\n if len(stack) == 1:\n return stack[0]","sub_path":"calc/shunt.py","file_name":"shunt.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"221705156","text":"import os, sys, subprocess, argparse, functools, platform\r\n\r\n# My ffmpeg directory, just because\r\n#\"F:\\Downloads\\ffmpeg-20181007-0a41a8b-win64-static\\ffmpeg-20181007-0a41a8b-win64-static\\bin\\ffmpeg.exe\"\r\n# Required command for join\r\n#C:/Users/Jamie/AppData/Local/Programs/Python/Python35/python.exe icfp.py -f 
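# Aside on shunt.py above: a worked trace of its conventions. For the token
# list ['3', '+', '4', '*', '2', '^', '2'] the shunting-yard pass yields the
# RPN ['3', '4', '2', '2', '^', '*', '+'], because ^ binds tighter than *,
# which binds tighter than +, and ^ is right-associative. Evaluating that RPN
# with a plain stack confirms the value (2^2=4, 4*4=16, 3+16=19):
stack = []
for tok in ['3', '4', '2', '2', '^', '*', '+']:
    if tok.isdigit():
        stack.append(int(tok))
    else:
        b, a = stack.pop(), stack.pop()  # b was pushed last
        stack.append({'+': a + b, '*': a * b, '^': a ** b}[tok])
assert stack == [19]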
\"F:\\Downloads\\ffmpeg-20181007-0a41a8b-win64-static\\ffmpeg-20181007-0a41a8b-win64-static\\bin\" -ha join --heads ICFP18Norm\\\\heads --tails ICFP18Norm\\\\tails -o ICFP18Norm\\\\final\r\n\r\nrm = \"del\" if platform.system() == \"Windows\" else \"rm\"\r\nsep = \"\\\\\" if platform.system() == \"Windows\" else \"/\"\r\ndrop = 5 if platform.system() == \"Windows\" else 3\r\nexe = \".exe\" if platform.system() == \"Windows\" else \"\"\r\n\r\n## Script Functionality ##\r\n\r\n#TODO - Complete\r\ndef normalise(args):\r\n ffmpeg = args.ffmpeg\r\n hwaccel = args.hardware_acceleration\r\n vids = args.videos\r\n output_dir = args.output_dir\r\n if output_dir is None: output_dir = path(vids, \"normalized\")\r\n \r\ndef join(args):\r\n ffmpeg = args.ffmpeg\r\n hwaccel = args.hardware_acceleration\r\n head_dir = args.heads\r\n tail_dir = args.tails\r\n output_dir = args.output_dir\r\n vids = [v for v in os.listdir(head_dir) if v.endswith(\"mp4\")]\r\n for vid in vids:\r\n concat(path(ffmpeg, \"ffmpeg\" + exe),\r\n to=path(output_dir, vid),\r\n head=path(head_dir, vid),\r\n tail=path(tail_dir, vid))\r\n\r\ndef split(args):\r\n ffmpeg = args.ffmpeg\r\n hwaccel = args.hardware_acceleration\r\n heads = args.heads\r\n tails = args.tails\r\n head_length = args.head_length\r\n vid_dir = args.videos\r\n if heads is None: heads = path(vids, \"heads\")\r\n if tails is None: tails = path(vids, \"tails\")\r\n head_end = \"00:00:{}\".format(pad(str(head_length), 2, '0'))\r\n vids = [v for v in os.listdir(vid_dir) if v.endswith(\"mp4\")]\r\n for vid in vids:\r\n length = find_length(path(ffmpeg, \"ffprobe\" + exe), path(vid_dir, vid)).split(\":\")\r\n secs = 0\r\n for t in length: secs = secs * 60 + int(t)\r\n secs -= head_length\r\n hours = str(secs // 3600)\r\n mins = str((secs % 3600) // 60)\r\n secs = str(secs % 60)\r\n tail_length = \":\".join([pad(hours, 2, '0'), pad(mins, 2, '0'), pad(secs, 2, '0')])\r\n slice_vid(path(ffmpeg, \"ffmpeg\" + exe), path(vid_dir, vid), \"00:00:00\", head_end, path(heads, vid))\r\n slice_vid(path(ffmpeg, \"ffmpeg\" + exe), path(vid_dir, vid), head_end, tail_length, path(tails, vid))\r\n\r\ndef add_logo(args):\r\n ffmpeg = args.ffmpeg\r\n hwaccel = args.hardware_acceleration\r\n vid_dir = args.videos\r\n output_dir = args.output_dir\r\n logo = args.logo\r\n cmd = \"{ffmpeg} -hwaccel nvdec -i {vid} -framerate 30000/1001 -loop 1 -i {logo} -filter_complex \\\"[1:v] fade=out:st=3:d=2:alpha=1 [ov]; [0:v][ov] overlay=10:10 [v]\\\" -map \\\"[v]\\\" -map 0:a -c:v h264_nvenc -c:a copy -shortest {out}\"\r\n vids = [v for v in os.listdir(vid_dir) if v.endswith(\"mp4\")]\r\n for vid in vids:\r\n execute(cmd.format(ffmpeg=path(ffmpeg, \"ffmpeg\" + exe), vid=path(vid_dir, vid), out=path(output_dir, vid), logo=logo))\r\n \r\n## Helper Functions ##\r\n\r\ndef path(*parts): return sep.join(parts)\r\ndef pad(s, n, d): return (n - len(s)) * d + s\r\ndef execute(cmd):\r\n return str(subprocess.run(cmd, shell=True, stdout=subprocess.PIPE).stdout)[2:-drop]\r\n\r\ndef concat(ffmpeg, head, tail, to, fast=False):\r\n cmd1 = \"{ffmpeg} -hwaccel nvdec -i {head} -c copy -bsf:v h264_mp4toannexb -f mpegts tmp-1.ts\"\r\n cmd2 = \"{ffmpeg} -hwaccel nvdec -i {tail} -c copy -bsf:v h264_mp4toannexb -f mpegts tmp-2.ts\"\r\n cmd3 = \"{ffmpeg} -hwaccel nvdec -f mpegts -i \\\"concat:tmp-1.ts|tmp-2.ts\\\" -c copy -bsf:a aac_adtstoasc {to}\"\r\n cmd4 = \"{rm} tmp-1.ts tmp-2.ts\".format(rm=rm)\r\n #cmd = \"{ffmpeg} -hwaccel nvdec -i {head} -i {tail} -filter_complex 
\\\"[0:v:0][0:a:0][1:v:0][1:a:0]concat=n=2:v=1:a=1[outv][outa]\\\" -map \\\"[outv]\\\" -map \\\"[outa]\\\" -c:v h264_nvenc {to}\"\r\n execute(cmd1.format(ffmpeg=ffmpeg, head=head))\r\n execute(cmd2.format(ffmpeg=ffmpeg, tail=tail))\r\n execute(cmd3.format(ffmpeg=ffmpeg, to=to))\r\n execute(cmd4)\r\n #execute(cmd.format(ffmpeg=ffmpeg, head=head, tail=tail, to=to))\r\n\r\ndef slice_vid(ffmpeg, vid, start, end, out):\r\n #cmd = ffmpeg + \" -i {vid} -vcodec copy -acodec copy -ss {start} -t {duration} {out}\"\r\n cmd = ffmpeg + \" -hwaccel nvdec -ss {start} -i {vid} -t {duration} -c:v h264_nvenc -c:a aac -strict experimental -b:a 128k {out}\"\r\n run = cmd.format(vid=vid, out=out, start=start, duration=end)\r\n execute(run)\r\n\r\ndef find_length(ffprobe, vid):\r\n cmd = ffprobe + \" -v error -show_entries format=duration -sexagesimal -of default=noprint_wrappers=1:nokey=1 {vid}\"\r\n run = cmd.format(vid=vid)\r\n return execute(run).split(\".\")[0]\r\n\r\n\r\n## Main ##\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description=\"ICFP Video Processing Script\")\r\n parser.add_argument(\"-f\",\r\n \"--ffmpeg\",\r\n help=\"install directory of ffmpeg\",\r\n required=True)\r\n subparsers = parser.add_subparsers(help=\"help for subcommand\")\r\n parser_normalise = subparsers.add_parser(\"normalize\", help=\"normalizes the audio of every video in a directory\")\r\n parser_normalise.add_argument(\"-v\",\r\n \"--videos\",\r\n help=\"directory containing videos, defaults to current directory\",\r\n default=\".\")\r\n parser_normalise.add_argument(\"-o\",\r\n \"--output-dir\",\r\n help=\"directory to place results, defaults to video directory with new subfolder \\\"normalized\\\"\",\r\n default=None)\r\n parser_normalise.set_defaults(func=normalise)\r\n parser_split = subparsers.add_parser(\"split\", help=\"splits every video in a directory into a head and a tail, with the head of a specified length\")\r\n parser_split.add_argument(\"-v\",\r\n \"--videos\",\r\n help=\"directory containing videos, defaults to current directory\",\r\n default=\".\")\r\n parser_split.add_argument(\"-hd\",\r\n \"--heads\",\r\n help=\"directory to place heads, defaults to video directory with new subfolder \\\"heads\\\"\",\r\n default=None)\r\n parser_split.add_argument(\"-td\",\r\n \"--tails\",\r\n help=\"directory to place tails, defaults to video directory with new subfolder \\\"heads\\\"\",\r\n default=None)\r\n parser_split.add_argument(\"-l\",\r\n \"--head-length\",\r\n help=\"how long the prefix split for the videos should be, in seconds (default 10)\",\r\n type=int,\r\n default=10)\r\n parser_split.set_defaults(func=split)\r\n parser_join = subparsers.add_parser(\"join\", help=\"remerges back videos split into heads and tails\")\r\n parser_join.add_argument(\"-o\",\r\n \"--output-dir\",\r\n help=\"directory to place results, defaults to a new subfolder \\\"joined\\\"\",\r\n default=\"joined\")\r\n parser_join.add_argument(\"-hd\",\r\n \"--heads\",\r\n help=\"directory containing heads, defaults to directory \\\"heads\\\" in current directory\",\r\n default=\"heads\")\r\n parser_join.add_argument(\"-td\",\r\n \"--tails\",\r\n help=\"directory containing tails, defaults to directory \\\"tails\\\" in current directory\",\r\n default=\"tails\")\r\n parser_join.set_defaults(func=join)\r\n parser_icon = subparsers.add_parser(\"add-logos\", help=\"adds the ICFP logo to each video in a directory, image is scaled automatically\")\r\n parser_icon.add_argument(\"-v\",\r\n \"--videos\",\r\n 
help=\"directory containing videos, defaults to current directory\",\r\n default=\".\")\r\n parser_icon.add_argument(\"-l\",\r\n \"--logo\",\r\n help=\"this year's ICFP logo in png format\",\r\n required=True)\r\n parser_icon.add_argument(\"-o\",\r\n \"--output-dir\",\r\n help=\"directory to place results, defaults to a new subfolder \\\"titled\\\"\",\r\n default=\"titled\")\r\n parser.add_argument(\"-ha\",\r\n \"--hardware-acceleration\",\r\n help=\"attempt to perform hardware acceleration where possible\",\r\n action=\"store_true\")\r\n parser_icon.set_defaults(func=add_logo)\r\n args = parser.parse_args()\r\n args.func(args)\r\n","sub_path":"icfp.py","file_name":"icfp.py","file_ext":"py","file_size_in_byte":8978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"174865301","text":"\nimport os\nimport random\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\nseed = 1\ntorch.manual_seed(seed)\nnp.random.seed(seed)\nrandom.seed(seed)\n\nclass Addition(Dataset):\n\n MODE_TRAIN = 0\n MODE_VAL = 1\n MODE_TEST = 2\n\n def __init__(self, mode=MODE_TRAIN):\n self.mode = mode\n self.full_data = torch.load(os.path.join(os.path.dirname(__file__), 'traindata.pt'))\n\n n_elements = self.full_data.shape[0]\n self.n_train = int(0.8 * n_elements)\n self.n_val = int(0.1 * n_elements)\n self.n_test = n_elements - self.n_train - self.n_val\n\n # n seq x tsteps x dim\n self.train_inputs = torch.from_numpy(self.full_data[:self.n_train, :, :2]).float()\n self.train_targets = torch.from_numpy(self.full_data[:self.n_train, :, 2:]).float()\n self.val_inputs = torch.from_numpy(self.full_data[self.n_train:self.n_train+self.n_val, :, :2]).float()\n self.val_targets = torch.from_numpy(self.full_data[self.n_train:self.n_train+self.n_val, :, 2:]).float()\n self.test_inputs = torch.from_numpy(self.full_data[self.n_train+self.n_val:, :, :2]).float()\n self.test_targets = torch.from_numpy(self.full_data[self.n_train+self.n_val:, :, 2:]).float()\n\n self.input_dimension = self.train_inputs.size(2)\n self.output_dimension = self.train_targets.size(2)\n\n def train(self):\n self.mode = Addition.MODE_TRAIN\n\n def val(self):\n self.mode = Addition.MODE_VAL\n\n def test(self):\n self.mode = Addition.MODE_TEST\n\n def __len__(self):\n if self.mode == Addition.MODE_TRAIN:\n return self.n_train\n elif self.mode == Addition.MODE_VAL:\n return self.n_val\n else:\n return self.n_test\n\n def __getitem__(self, i):\n if self.mode == Addition.MODE_TRAIN:\n return self.train_inputs[i], self.train_targets[i]\n elif self.mode == Addition.MODE_VAL:\n return self.val_inputs[i], self.val_targets[i]\n else:\n return self.test_inputs[i], self.test_targets[i]\n\nif __name__ == '__main__':\n # generate data\n L = 50 # sequence length\n N = 100000 # n samples\n # samples x length, (2 x in, 1 x out)\n d = np.random.uniform(size=(N, L, 3)).astype(np.float32)\n print(d.shape)\n for i in range(N):\n d[i, :, 1:] = 0 # set all signals to 0\n # generate the two random ind\n ind1 = np.random.randint(1, L//10)\n d[i, ind1, 1] = 1\n ind2 = np.random.randint(L//10, L//2)\n d[i, ind2, 1] = 1\n # set final ind as sum\n d[i, -1, :] = 0\n d[i, -1, 2] = (d[i, ind1, 0] + d[i, ind2, 0])\n torch.save(d, open('traindata.pt', 'wb'))\n","sub_path":"tasks/addition/addition.py","file_name":"addition.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"378386259","text":"T=int(input())\n\nfor k in 
range(0,T):\n    num=int(input())\n    temp=list(input().split(\" \"))\n    lists=[]\n\n    for x in temp:\n        lists.append(int(x))\n\n    res=-1\n    for i in range(0,len(lists)):\n        for j in range(i,len(lists)):\n            if lists[j]>lists[i]:\n                res=max(res,lists[j]-lists[i])\n\n    print(res)","sub_path":"Code/CodeRecords/2429/60710/321018.py","file_name":"321018.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"124559378","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 22 13:24:53 2019\n\n@author: shosh\n\nThis script performs some filtrations for the output table yielded by REDItools DNARNA script\nIt creates the filtered table, and if a transcriptome is available (for example, trinity)\nit can also create the editing sites table wrt transcriptome, given the alignment of transcripts to reference genome used in REDI process\n\"\"\"\nimport argparse\nimport re\nimport pandas as pd\nimport numpy as np\nimport statsmodels.stats.multitest as p_adjust\nfrom scipy.stats import binom_test\nfrom copy import deepcopy\n\n\ncov_dict = {0:'A',\n            1:'C',\n            2:'G',\n            3:'T'} #this is the order in which nucs coverage appear in lists in REDI output\n\nreversed_cov_dict = {'A':0,\n                     'C':1,\n                     'G':2,\n                     'T':3}\n\n\n\ndef read_redi_output(path, min_dna_cov=1, min_rna_cov=1, rna_allowed_mm=3, clean_dna = True):\n    \n    data = []\n#    output_file = '/'+'/'.join(path.split('/')[:-1]) + '/filtered_mm.txt'\n#    with open(output_file,\"w\") as output:\n    \n#    nucs = ['A','C','G','T']\n    \n    with open(path, \"r\") as f:\n        \n        columns = ['Region', 'Position', 'Reference', 'Strand', 'Coverage', 'MeanQ',\n                   'BaseCount', 'AllSubs', 'Frequency', 'gCoverage', 'gMeanQ',\n                   'gBaseCount', 'gAllSubs', 'gFrequency'] #Columns names in REDI output with slight changes\n        columns += ['mismatch']\n        \n        content = f.readlines()\n        print(str(len(content)) + ' lines in REDI file')\n        for j, line in enumerate(content[1:]):\n            \n            fields = line.split(\"\\t\")\n            fields[-1] = fields[-1].replace('\\n','')\n\n            if fields[7]=='-' or fields[11]=='-': #pass on rows with no mm and no dna support\n                pass\n            else:\n                fields[1] = int(fields[1])\n                fields[4] = int(fields[4])\n                fields[5] = float(fields[5])\n                fields[6] = eval(fields[6])\n                fields[8] = float(fields[8])\n                fields[9] = int(fields[9])\n                fields[10] = float(fields[10])\n                fields[11] = eval(fields[11])\n                fields[13] = float(fields[13])\n                \n                keep_row = True #flag for rows that pass filter to sites list (before)\n                dna_nuc = cov_dict[fields[11].index(max(fields[11]))] #the prevalent nuc in DNA reads\n                rna_cov = fields[6]\n                mm_types = fields[7].split(\" \")\n                target_editing = [cov_dict[i] for i, t in enumerate(rna_cov) if t>0 and cov_dict[i]!=dna_nuc]\n                \n                if dna_nuc!=fields[2]: #discard detection in which dna does not agree with reference genome/transcriptome\n                    keep_row = False \n                if fields[9] != '-': # keep rows with above minimal DNA coverage \n                    if int(fields[9]) < min_dna_cov:\n                        keep_row = False\n                if clean_dna: # keep only records with clean DNA reads (no mismatch in DNA)\n                    if len([1 for i in range(4) if fields[11][i]>0])!=1:\n                        keep_row = False\n                if len(target_editing)>rna_allowed_mm:\n                    keep_row = False\n                \n                if keep_row: \n                    for t in target_editing:\n                        row = deepcopy(fields)\n                        row += [dna_nuc+t]\n                        data.append(row)\n        \n    f.close()\n    print(str(len(data)) + str(' candidates before statistical filtration'))\n    return pd.DataFrame(data = data, columns = columns)\n    \n    \ndef calc_editing_level(row):\n    target = row['mismatch'][1]\n    return 
row['BaseCount'][reversed_cov_dict[target]]/row['Coverage']\n    \n\ndef calc_binomial_snp(row, snp_p=0.5):\n    target = row['mismatch'][1]\n    original = row['mismatch'][0] \n    prob_for_snp = snp_p**row['gCoverage']\n    edited = row['BaseCount'][reversed_cov_dict[target]]\n    unedited = row['BaseCount'][reversed_cov_dict[original]]\n    p_val_snp = binom_test(x=np.array([edited,unedited]), p=prob_for_snp)\n    return p_val_snp\n    \n    \ndef calc_binomial_sequencing_err(row, err_p=0.001):\n    target = row['mismatch'][1]\n    original = row['mismatch'][0]\n    edited = row['BaseCount'][reversed_cov_dict[target]]\n    unedited = row['BaseCount'][reversed_cov_dict[original]]\n    p_val_sequencing_err = binom_test(x=np.array([edited,unedited]), p=err_p)\n    return p_val_sequencing_err\n    \n\n\ndef pval_correction(df, Method = \"fdr_bh\", FDR=0.05, is_sorted = False, p_column_name = 'p_val'): \n    \"\"\"\n    correction with a given FDR\n    parameters - \n    a dataframe containing a 'p_val' column\n    method = correction method - optional. default is benjamini hochberg\n    FDR - optional. default is 5%\n    is sorted - is dataframe sorted by p_vals - optional. default = False\n    return dataframe with additional \"corrected_p_val\" column\n    see statsmodels.stats.multitest documentation for info regarding different correction methods\n    \"\"\"\n    if not is_sorted:\n        df.sort_values(p_column_name, axis = 0, inplace = True)\n    p = p_adjust.multipletests(df[p_column_name].values ,alpha = FDR, method=Method ,is_sorted = True)\n    rejected = pd.Series(p[0], name=p_column_name+'_rejected_by_'+str(FDR)+'_FDR')\n    corrected_p_val = pd.Series(p[1], name='corrected_' + p_column_name)\n    df = df.reset_index()\n    corrected_df = pd.concat([df, corrected_p_val, rejected], axis=1)\n    return corrected_df\n\n\nif __name__ == '__main__':\n    \n# =============================================================================\n#     redi_file = 'E:/RNA_editing_Large_files/bobtail/bob_redi_100000_rows.txt'\n#     min_editing_level = 0.01\n#     min_dna_cov = 1\n#     rna_allowed_mm = 3\n#     snp_fdr_correction = 0.1\n#     seq_err_fdr_correction = 0.1\n#     clean_dna = True\n#     create_graphs = True\n# =============================================================================\n    \n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Filtration parameters for REDI DNARNA output')\n    run_parser = parser.add_argument_group('Filter')\n    run_parser.add_argument('-redi_file', dest='redi_file', action='store', required = True, help='Path to REDI DNARNA output table')\n    run_parser.add_argument('-min_dna_cov', dest='min_dna_cov', action='store', default='1', help='Minimal DNA coverage to keep row')\n    run_parser.add_argument('-rna_allowed_mm', dest='rna_allowed_mm', action='store', default='3', help='max allowed mismatches in RNA')\n    run_parser.add_argument('-min_editing_level', dest='min_editing_level', action='store', default = '0.01', help='Keep only rows with editing level above minimal')\n    run_parser.add_argument('-clean_dna', dest='clean_dna', action='store', default = 'True', help='Keep only rows with clean DNA')\n    run_parser.add_argument('-snp_fdr_correction', dest='snp_fdr_correction', action='store', default='0.1', help='FDR correction for p_val for snp binomial test')\n    run_parser.add_argument('-seq_err_fdr_correction', dest='seq_err_fdr_correction', action='store', default='0.1', help='FDR correction for p_val for sequencing error binomial test')\n    run_parser.add_argument('-create_graphs', dest='create_graphs', action='store', default='False', help='Print 
editing signal graphs')\n    \n    arguments = parser.parse_args()\n    redi_file = arguments.redi_file\n    min_dna_cov = int(arguments.min_dna_cov)\n    rna_allowed_mm = int(arguments.rna_allowed_mm)\n    min_editing_level = float(arguments.min_editing_level)\n    clean_dna = eval(arguments.clean_dna)\n    snp_fdr_correction = float(arguments.snp_fdr_correction)\n    seq_err_fdr_correction = float(arguments.seq_err_fdr_correction)\n    create_graphs = eval(arguments.create_graphs)\n    \n    file_name = redi_file.split('/')[-1]\n    file_path = '/'.join(redi_file.split('/')[:-1])+'/'\n    \n    print('Reading REDI output')\n    editing_candidates = read_redi_output(redi_file, min_dna_cov=min_dna_cov, rna_allowed_mm=rna_allowed_mm, clean_dna = clean_dna)\n    print('Calculating editing levels')\n    editing_candidates['editing_level'] = editing_candidates.apply(lambda row: calc_editing_level(row), axis=1)\n    print('Filtering for minimum editing levels')\n    editing_candidates = editing_candidates[editing_candidates['editing_level']>min_editing_level]\n    print('Calculating P-val for snp and sequencing errors')\n    editing_candidates['p_val_snp'] = editing_candidates.apply(lambda row: calc_binomial_snp(row), axis=1)\n    editing_candidates['p_val_sequencing_err'] = editing_candidates.apply(lambda row: calc_binomial_sequencing_err(row), axis=1)\n    \n    print('Correcting P-vals using BH procedure')\n    editing_candidates = pval_correction(editing_candidates, FDR=snp_fdr_correction, p_column_name = 'p_val_snp')\n    editing_candidates = pval_correction(editing_candidates, FDR=seq_err_fdr_correction, p_column_name = 'p_val_sequencing_err')\n    editing_candidates.drop(columns = ['index','level_0'], inplace = True)\n    editing_candidates.to_csv(file_path+'filtered_editing_sites_no_statistical_filtration_from_'+file_name,sep='\\t',index=False)\n    print('Filtering for required corrected FDRs')\n    editing_candidates = editing_candidates[np.logical_and(editing_candidates['p_val_snp'+'_rejected_by_'+str(snp_fdr_correction)+'_FDR'],editing_candidates['p_val_sequencing_err'+'_rejected_by_'+str(seq_err_fdr_correction)+'_FDR'])]\n    print('Writing filtered results')\n#    editing_candidates.drop(columns = ['index','level_0'], inplace = True)\n    editing_candidates.to_csv(file_path+'filtered_editing_sites_from_'+file_name,sep='\\t',index=False)\n    \n    if create_graphs:\n#        path = 'E:/RNA_editing_Large_files/bobtail/filtered_editing_sites_from_outTable_736454655'\n#        editing_candidates = pd.read_csv(path, sep='\\t')\n        import matplotlib.pyplot as plt\n        mm_types = ['AC','AG','AT','CA','CG','CT','GA','GC','GT','TA','TC','TG']\n        mm_cnts = [len(editing_candidates[editing_candidates['mismatch']==m]) for m in mm_types]\n        y_pos = np.arange(len(mm_cnts))\n        plt.bar(y_pos, mm_cnts, align='center', alpha=0.5)\n        plt.xticks(y_pos, mm_types)\n        plt.xlabel('Mismatch type')\n        plt.title('Filtered Editing Events')\n        plt.savefig(file_path+'editing_signals_after_filtrations.png')\n        plt.close()\n","sub_path":"scripts/REDI/filter_REDI_DNARNA_results.py","file_name":"filter_REDI_DNARNA_results.py","file_ext":"py","file_size_in_byte":10527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"584580682","text":"from __future__ import division\nimport numpy as np\nimport numpy.linalg as la\nimport matplotlib.pyplot as plt\nimport matplotlib.axes as axes\n\ngamma = 3\nlength = 2\ndelta_x = .2\ndelta_t = delta_x**2/10\nmu = (gamma*delta_t)/delta_x**2\nn = int(length/delta_x)\n\nalpha = 0\nbeta = 0\n\ntime = 10\n\ndef main():\n\tprint(\"DELTA T 
VALUE:\")\n\tprint(delta_t)\n\tprint(.2**2/.004)\n\tA_hat = np.zeros((n-1, n-1))\n\n\tfor i in range(n-1):\n\t\tA_hat[i,i] = 1+2*mu\n\n\tfor i in range(n-2):\n\t\tA_hat[i,i+1] = -mu\n\t\tA_hat[i+1,i] = -mu\n\n\tprint(A_hat)\n\n\txvals = np.zeros(n-1)\n\n\tfor i in range(n-1):\n\t\txvals[i] = (i+1)*delta_x\n\n\tprint(xvals)\n\n\tu0 = np.zeros(n-1)\n\n\tfor i in range(n-1):\n\t\tu0[i] = initialConditions((i+1)*delta_x)\n\n\tprint(u0)\n\n\tb = np.zeros(n-1)\n\n\tb[0] = mu*alpha\n\tb[n-2] = mu*beta\n\n\tprint(b)\n\n\tut = u0\n\n\tt = time\n\twhile(t > 0):\n\t\tut = np.matmul(la.inv(A_hat), np.subtract(ut, b))\n\t\tt = t - 1\n\n\n\t#u1 = np.add(np.matmul(A, np.transpose(u0)), np.transpose(b))\n\n\t#print(u1)\n\n\t#u2 = np.add(np.matmul(A, np.transpose(u1)), np.transpose(b))\n\n\t#print(u2)\n\n\t#plt.plot(xvals, u2)\n\t#plt.show()\n\n\tplotGraph(ut, xvals)\n\ndef plotGraph(u, xvals):\n\ty = np.zeros(n+1)\n\ty[0] = alpha\n\ty[n] = beta\n\n\tx = np.zeros(n+1)\n\tx[0] = 0\n\tx[n] = length\n\n\tfor i in range(n-1):\n\t\tx[i+1] = xvals[i]\n\t\ty[i+1] = u[i]\n\n\tprint(x)\n\tprint(y)\n\n\n\tplt.plot(x, y)\n\tplt.plot(x, np.zeros(n+1))\n\n\tplt.show()\n\n\ndef initialConditions2(x):\n\tif x < 0:\n\t\treturn null\n\telif x < 1/5:\n\t\treturn -x\n\telif x < 7/10:\n\t\treturn x - 2/5\n\telif x <= 1:\n\t\treturn 1-x\n\telse:\n\t\treturn null\n\ndef initialConditions(x):\n\treturn -x**2+2*x\n\n#def printSolution(xvals, ut):\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"HeatEquation2.py","file_name":"HeatEquation2.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"2419130","text":"\"\"\"Back in middle school, I had a peculiar way of dealing with super boring classes. I would take my handy pocket calculator and play a \"Game of Threes\". Here's how you play it:\r\n\r\nFirst, you mash in a random large number to start with. Then, repeatedly do the following:\r\n\r\nIf the number is divisible by 3, divide it by 3.\r\nIf it's not, either add 1 or subtract 1 (to make it divisible by 3), then divide it by 3.\r\nThe game stops when you reach \"1\".\r\n\r\nWhile the game was originally a race against myself in order to hone quick math reflexes, it also poses an opportunity for some interesting programming challenges. 
Today, the challenge is to create a program that \"plays\" the Game of Threes.\"\"\"\r\n\r\ndef main():\r\n inp = input(\"What is the input: \")\r\n threes(int(inp))\r\n\r\ndef threes(x):\r\n while x != 1:\r\n print(x)\r\n if x%3 == 0:\r\n x = x/3\r\n elif x%3 == 1:\r\n x = x - 1;\r\n elif x%3 == 2:\r\n x = x + 1\r\n else:\r\n print(\"I am error\")\r\n print(x)\r\n\r\nmain()","sub_path":"GameOfThrees.py","file_name":"GameOfThrees.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"97911596","text":"import numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom dateutil import parser\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.dates as mdates\nimport statistics\nfrom time import sleep as sl\nimport itertools\nfrom collections import defaultdict\nimport json\nimport random\nimport os\nimport locale\nlocale.setlocale(locale.LC_ALL,'')\n\n\n\nDATA_PATH = '../trading_history/'\nfile_of_the_day = 0\nall_files = os.listdir(DATA_PATH)\nrandom.shuffle(all_files)\npnl = defaultdict(list)\nindex = 0\ntrades_total = 0\ntrade_win_pct = 0\n\ndata = pd.read_csv(DATA_PATH + all_files[file_of_the_day],sep='\\t')\nprint (DATA_PATH + all_files[file_of_the_day])\n\nprint( data.shape )\nprint( data.columns )\nprint( data.dtypes )\n\nbuyorsell = data['Action']\nprice_list = data['Price']\nqty_list = data['Qty']\n\n\ndata['Cost']=data.Commission.map(lambda x: locale.atof(x.strip('$'))) + \\\n data.Fees.map(lambda x: locale.atof(x.strip('$')))\n\nfees = sum(data['Cost'])\n\noptions = 0\nprice_paid = 0\nprice_sold = 0\nnet = 0\nwon = 0\nlost = 0\ncomplete_trades = 0\navg_win = 0\navg_loss = 0\nwin_list = []\nlost_list = []\nwin_pct = 0\nlose_pct = 0\navg_bet = 0\n\nfor i in range(len(buyorsell) - 1, -1, -1):\n #print(options)\n if buyorsell[i] == \"Buy To Open\":\n options = options + qty_list[i]\n price_paid = price_paid + (qty_list[i] * price_list[i])\n print (\"Buy: \", qty_list[i], \"at:\",price_list[i],\"holding:\",options)\n else:\n options = options - qty_list[i]\n print (\"sold:\", qty_list[i], \"at:\", price_list[i], \"holding:\",options)\n price_sold = price_sold + (qty_list[i] * price_list[i])\n if options == 0:\n if (price_sold - price_paid) > 0:\n win_pct += (price_sold - price_paid)/price_paid\n else:\n lose_pct += (price_sold - price_paid)/price_paid\n\n print( \"bought\", round(price_paid*50,2), \"sold:\",round(price_sold*50,2), \"Net $: \", round((price_sold - price_paid)*50,2), \"\\t\",round((price_sold - price_paid)/price_paid, 2)*100, \"%\" )\n print()\n net += (price_sold - price_paid)*50\n complete_trades += 1\n avg_bet += price_paid\n if (price_sold - price_paid) > 0:\n won += 1\n avg_win += (price_sold - price_paid)*50\n win_list.append((price_sold - price_paid)*50)\n else:\n lost += 1\n avg_loss += (price_sold - price_paid)*50\n lost_list.append((price_sold - price_paid)*50)\n options = 0\n price_paid = 0\n price_sold = 0\n\n\navg_win = avg_win/won\navg_loss = avg_loss/lost\nprint ('Net Profit:', round(net - fees, 2), \"Fees paid:\", round(fees,2),\"Winrate%:\", (won/complete_trades*100), \\\n \"Avg Win:\", avg_win, \"Avg Loss:\", avg_loss, \"Trades:\", won+lost)\nprint (\"WinPct:\", round(win_pct/won,2)*100, \"LosePct:\", round(lose_pct/lost, 2)*100, \"Avg Bet Size:\", avg_bet)\nA = lose_pct/lost\nB = win_pct/won\nbankroll = avg_bet\nA = avg_win/bankroll\nB = avg_loss/bankroll\nW = won/complete_trades\nKelly = W/A - (1 - W)/B\nprint 
(\"Kelly Bet Percentage:\", Kelly )\n#If the downside-case loss is less than 100%, as in the scenario above,\n# a different Kelly formula is required:\n# Kelly % = W/A – (1 – W)/B,\n# where:\n# W is the win probability,\n# B is the profit in the event of a win (20%),\n# A is the potential loss (also 20%).\n\nif 1:\n plt.style.use('ggplot')\n #print ( day )\n # create figure and axis objects with subplots()\n fig,ax_all = plt.subplots(2,1) #2,1, gridspec_kw={'height_ratios': [5, 1]})\n ax = ax_all\n\n plt.autoscale(True)\n\n ax[0].plot(win_list,'g')\n ax[1].plot(lost_list, 'r')\n plt.show()\n #plt.plot(lost_list,color='r')\n #line2, = ax.plot(lost_list,color='red')\n\n","sub_path":"scripts/report_stats.py","file_name":"report_stats.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"53309660","text":"from bs4 import BeautifulSoup\nimport argparse\n\n\ndef strip_garbage(filename):\n with open(filename, 'r') as f:\n sal = [line.strip() for line in f.readlines()]\n sal = '\\n'.join([line for line in sal])\n soup = BeautifulSoup(sal, 'lxml')\n return soup\n\n\ndef write_garbage(soup, filename, append=False):\n mode = 'a' if append else 'w'\n with open(filename, mode) as f:\n f.writelines(str(soup.findAll(\"div\", \"container\")[0]))\n\n\ndef main(*args):\n filename, outname, append = args\n write_garbage(strip_garbage(filename), outname, append)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Converts notebooks to site-friendly format.')\n # Add arguments\n parser.add_argument('-f', '--filename', type=str, help='in file name', required=True)\n parser.add_argument(\n '-o', '--outfilename', type=str, help='outfilename', required=True, nargs='+')\n parser.add_argument(\n '-a', '--append', type=str, help='Keyword search', required=False, default=False)\n # Array for all arguments passed to script\n args = parser.parse_args()\n # Assign args to variables\n filename = args.filename\n outname = args.outfilename[0]\n append = args.append\n # Return all variable values\n main(*[filename, outname, append])\n","sub_path":"_posts/nb_stripper.py","file_name":"nb_stripper.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"524412813","text":"\"\"\"\nTento program zjistuje jestli lze sestrojit trojuhelnik.\n\nPokud ano, tak se vypocita delka jeho stran, obsah, obvod\na jestli je pravouhly.\nProgram prijima souradnice bodu A,B,C z konzole a vypisuje vysledky na konzoli.\n\nTvurce: Tomas Blaho\n\"\"\"\n\nfrom math import sqrt\n\n\n# PyTest sekce ------------------------------\ndef test_obvod():\n \"\"\"Test obvod().\"\"\"\n assert obvod(3, 4, 5) == 12\n assert obvod(5, 4, 3) == 12\n assert obvod(3, 5, 4) == 12\n\n\ndef test_obsah():\n \"\"\"Test obsah().\"\"\"\n assert obsah(3, 4, 5) == 6\n assert obsah(5, 4, 3) == 6\n assert obsah(3, 5, 4) == 6\n\n\ndef test_pravouhelnost():\n \"\"\"Test pravouhelnost().\"\"\"\n assert pravouhelnost(3, 4, 5) is True\n assert pravouhelnost(5, 4, 3) is True\n assert pravouhelnost(3, 5, 4) is True\n\n\ndef test_sestrojitelnost():\n \"\"\"Test sestrojitelnost().\"\"\"\n assert sestrojitelnost(3, 4, 5) is True\n assert sestrojitelnost(5, 4, 3) is True\n assert sestrojitelnost(4, 5, 3) is True\n\n\ndef test_sAB():\n \"\"\"Test sAB().\"\"\"\n assert sAB(9, 12, 12, 16) == 5\n\n\ndef test_sBC():\n \"\"\"Test sBC().\"\"\"\n assert sBC(9, 12, 12, 16) == 
5\n\n\ndef test_sCA():\n    \"\"\"Test sCA().\"\"\"\n    assert sCA(9, 12, 12, 16) == 5\n# PyTest end -----------------------------\n\n\ndef sestrojitelnost(ab, bc, ca):\n    \"\"\"\n    Determines whether the triangle can be constructed.\n\n    Returns:\n    true = the triangle can be constructed\n    false = the triangle cannot be constructed\n    \"\"\"\n    lze = ab + bc > ca and bc + ca > ab and ab + ca > bc\n\n    return lze\n\n\ndef obsah(ab, bc, ca):\n    \"\"\"\n    Computes the area of the triangle.\n\n    Returns:\n    float with the computed value\n    \"\"\"\n    s = (ab + bc + ca) / 2\n\n    return sqrt(s * (s - ab) * (s - bc) * (s - ca))\n\n\ndef obvod(ab, bc, ca):\n    \"\"\"\n    Computes the perimeter of the triangle.\n\n    Returns:\n    float with the computed value\n    \"\"\"\n    return ab + bc + ca\n\n\ndef pravouhelnost(ab, bc, ca):\n    \"\"\"\n    Determines whether the triangle is right-angled.\n\n    Returns:\n    true = the triangle is right-angled\n    false = the triangle is not right-angled\n    \"\"\"\n    nejdelsi = max(ab, bc, ca)\n\n    if nejdelsi == ca:\n        je = float(\"{:.3f}\".format(ca ** 2)) == \\\n            float(\"{:.3f}\".format(ab ** 2 + bc ** 2))\n    elif nejdelsi == bc:\n        je = float(\"{:.3f}\".format(bc ** 2)) == \\\n            float(\"{:.3f}\".format(ab ** 2 + ca ** 2))\n    elif nejdelsi == ab:\n        je = float(\"{:.3f}\".format(ab ** 2)) == \\\n            float(\"{:.3f}\".format(bc ** 2 + ca ** 2))\n\n    return je\n\n\ndef sAB(ax, ay, bx, by):\n    \"\"\"\n    Computes the length of side AB.\n\n    Returns:\n    float with the computed value\n    \"\"\"\n    return sqrt(((int(bx) - int(ax)) ** 2) + ((int(by) - int(ay)) ** 2))\n\n\ndef sBC(bx, by, cx, cy):\n    \"\"\"\n    Computes the length of side BC.\n\n    Returns:\n    float with the computed value\n    \"\"\"\n    return sqrt(((int(cx) - int(bx)) ** 2) + ((int(cy) - int(by)) ** 2))\n\n\ndef sCA(ax, ay, cx, cy):\n    \"\"\"\n    Computes the length of side CA.\n\n    Returns:\n    float with the computed value\n    \"\"\"\n    return sqrt(((int(ax) - int(cx)) ** 2) + ((int(ay) - int(cy)) ** 2))\n\n\nif __name__ == '__main__':\n    # Check that the numbers are in the correct format.\n    try:\n        # The program asks for user input.\n        Ax = int(input('Enter the x coordinate of point A: '))\n        Ay = int(input('Enter the y coordinate of point A: '))\n        Bx = int(input('Enter the x coordinate of point B: '))\n        By = int(input('Enter the y coordinate of point B: '))\n        Cx = int(input('Enter the x coordinate of point C: '))\n        Cy = int(input('Enter the y coordinate of point C: '))\n    except Exception:\n        # Not in the correct format. The program skips to the end.\n        print('The numbers were entered in a wrong format! Enter whole numbers only.')\n    else:\n        # They are in the correct format.\n        # The lengths of sides AB, BC, CA are computed here.\n        AB = sAB(Ax, Ay, Bx, By)\n        BC = sBC(Bx, By, Cx, Cy)\n        CA = sCA(Ax, Ay, Cx, Cy)\n\n        # Output required by the assignment.\n        if sestrojitelnost(AB, BC, CA):\n            print('The triangle can be constructed.')\n\n            print('The length of side AB is: ', AB)\n            print('The length of side BC is: ', BC)\n            print('The length of side CA is: ', CA)\n\n            print('The area is: ', obsah(AB, BC, CA),\n                  ' and the perimeter is: ', obvod(AB, BC, CA))\n\n            if pravouhelnost(AB, BC, CA):\n                print('The triangle is right-angled.')\n            else:\n                print('The triangle is not right-angled.')\n        else:\n            print('The triangle cannot be constructed.')\n\n        # End. 
Here the program waits for user input before closing.\n        input('Press Enter to close the program.')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"639930062","text":"\"\"\"empty message\n\nRevision ID: c667cf8142d5\nRevises: 1dafd9a05a96\nCreate Date: 2019-04-10 00:31:41.016847\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c667cf8142d5'\ndown_revision = '1dafd9a05a96'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('recommendation', sa.Column('recommender', sa.Integer(), nullable=True))\n    op.create_foreign_key(None, 'recommendation', 'user', ['recommender'], ['id'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'recommendation', type_='foreignkey')\n    op.drop_column('recommendation', 'recommender')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/c667cf8142d5_.py","file_name":"c667cf8142d5_.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"13883126","text":"import sys\nfrom math import sqrt\nfrom timeit import default_timer\n\ndef distance_squared(c1, c2):\n\treturn (c1['x'] - c2['x'])**2 + (c1['y'] - c2['y'])**2\n\ndef nearest_neighbor(cities, city):\n\tneighbors = {}\n\n\tfor neighbor in cities:\n\t\tneighbors[distance_squared(city, neighbor)] = neighbor\n\n\tnn = neighbors[min(neighbors)]\n\td = int(round(sqrt(min(neighbors))))\n\n\treturn nn, d\n\ndef TSP_NN(cities):\n\tstart = default_timer()\n\ttour = []\n\tbest_distance = sys.maxsize\n\n\tfor first in cities:\n\t\ttotal = 0\n\t\tvisited = [first]\n\t\tunvisited = []\n\n\t\tfor city in cities:\n\t\t\tif city is not first:\n\t\t\t\tunvisited.append(city)\n\n\t\twhile len(unvisited) > 0:\n\t\t\tnn, d = nearest_neighbor(unvisited, visited[-1])\n\t\t\tvisited.append(nn)\n\t\t\tunvisited.remove(nn)\n\t\t\ttotal += d\n\n\t\ttotal += int(round(sqrt(distance_squared(visited[0], visited[-1]))))\n\n\t\tif total < best_distance:\n\t\t\ttour = visited\n\t\t\tbest_distance = total\n\n\t\tif ((default_timer() - start) >= 10):\n\t\t\treturn tour, best_distance\n\n\treturn tour, best_distance\n\ndef main():\n\n\ttry:\n\t\tinFile = open(sys.argv[1], 'r')\n\t\toutFile = open(sys.argv[1] + '.tour', 'w')\n\texcept IndexError:\n\t\tprint(\"[ERROR] Missing argument for testfile. \\n\")\n\t\tquit()\n\texcept (IOError, OSError):\n\t\tprint(\"[ERROR] Testfile not found. 
\\n\")\n\t\tquit()\n\n\tcities = []\n\tfor line in inFile:\n\t\tspLine = line.split()\n\t\tcity = {'id':int(spLine[0]), 'x':int(spLine[1]), 'y':int(spLine[2])}\n\t\tcities.append(city)\n\n\tstart = default_timer()\n\ttour, d = TSP_NN(cities)\n\tend = default_timer()\n\tprint(\"Runtime: \" + str(end-start) + \" seconds.\")\n\t\n\toutFile.write(str(d) + '\\n')\n\n\tfor city in tour:\n\t\toutFile.write(str(city['id']) + ' ' + str(city['x']) + ' ' + str(city['y']) + '\\n')\n\n\tinFile.close()\n\toutFile.close()\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Greedy/Python /greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"610780248","text":"\"\"\" Example 010: Send binary docs with multipart mime: Remote signer, cc; the envelope has three documents\"\"\"\n\nfrom os import path\n\nfrom flask import render_template, Blueprint\n\nfrom .controller import Eg010Controller\nfrom ....docusign import authenticate\nfrom ....ds_config import DS_CONFIG\n\neg = \"eg010\" # reference (and url) for this example\neg010 = Blueprint(\"eg010\", __name__)\n\n\n@eg010.route(\"/eg010\", methods=[\"POST\"])\n@authenticate(eg=eg)\ndef send_bynary_docs():\n \"\"\"\n 1. Get required arguments\n 2. Call the worker method\n 3. Render success response\n \"\"\"\n\n # 1. Get required arguments\n args = Eg010Controller.get_args()\n # 2. Call the worker method\n results = Eg010Controller.worker(args)\n\n if results[\"status_code\"] < 299:\n # 3. Render Success response\n return render_template(\n \"example_done.html\",\n title=\"Envelope sent\",\n h1=\"Envelope sent\",\n message=f\"\"\"The envelope has been created and sent!
Envelope ID {results[\"results\"][\"envelopeId\"]}.\"\"\"\n        )\n    else:\n        # Problem!\n        error_body = results[\"results\"]\n        # we can pull the DocuSign error code and message from the response body\n        error_code = error_body and \"errorCode\" in error_body and error_body[\"errorCode\"]\n        error_message = error_body and \"message\" in error_body and error_body[\"message\"]\n        # In production, may want to provide customized error messages and\n        # remediation advice to the user.\n        return render_template(\n            \"error.html\",\n            err=None,\n            error_code=error_code,\n            error_message=error_message\n        )\n\n\n@eg010.route(\"/eg010\", methods=[\"GET\"])\n@authenticate(eg=eg)\ndef get_view():\n    \"\"\"Responds with the form for the example\"\"\"\n\n    return render_template(\n        \"eg010_send_binary_docs.html\",\n        title=\"Send binary documents\",\n        source_file=path.basename(path.dirname(__file__)) + \"/controller.py\",\n        source_url=DS_CONFIG[\"github_example_url\"] + path.basename(path.dirname(__file__)) + \"/controller.py\",\n        documentation=DS_CONFIG[\"documentation\"] + eg,\n        show_doc=DS_CONFIG[\"documentation\"],\n        signer_name=DS_CONFIG[\"signer_name\"],\n        signer_email=DS_CONFIG[\"signer_email\"]\n    )\n","sub_path":"app/eSignature/examples/eg010_send_binary_docs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"209693047","text":"'''Python program to count duplicate items in a list'''\r\ndef countDuplicates(theList):\r\n    count = 0\r\n    for i in range(len(theList)):\r\n        for j in range(i+1, len(theList)):\r\n            if theList[i] == theList[j]:\r\n                count += 1\r\n                break\r\n    print(\"Number of duplicate elements:\", count)\r\n\r\nmyList = [27, 27, 33, 44, 27, 27, 44]\r\ncountDuplicates(myList)\r\n\r\n","sub_path":"Practice problems involving lists/countDuplicates.py","file_name":"countDuplicates.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"499931765","text":"# UnionFind class\nclass UnionFind:\n    def __init__(self, size):\n        self.root_arr = [i for i in range (size)]\n\n    def find(self, child):\n        root = self.root_arr[child]\n        while(child != root):\n            new_child = root\n            new_root = self.root_arr[new_child]\n            \n            child = new_child\n            root = new_root\n        return root\n\t\t\n    def union(self, x, y):\n        root_x = self.find(x)\n        root_y = self.find(y)\n        if root_x != root_y:\n            self.root_arr[root_y] = root_x \n        \n\n    def connected(self, x, y):\n        return self.find(x) == self.find(y)\n\n\n# Test Case\nuf = UnionFind(10)\n# 1-2-5-6-7 3-8-9 4\nuf.union(1, 2)\nuf.union(2, 5)\nuf.union(5, 6)\nuf.union(6, 7)\nuf.union(3, 8)\nuf.union(8, 9)\nprint(uf.connected(1, 5)) # true\nprint(uf.connected(5, 7)) # true\nprint(uf.connected(4, 9)) # false\n# 1-2-5-6-7 3-8-9-4\nuf.union(9, 4)\nprint(uf.connected(4, 9)) # true\n# 1-2-5-6-7 3-8-9-4\nuf.union(2,4)\nprint(uf.connected(2,9)) # true\n","sub_path":"Leetcode/Graph/quick_union.py","file_name":"quick_union.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"175153764","text":"from models import arrival_history, util, trynapi, nextbus\nimport json\nimport math\nimport argparse\nfrom datetime import datetime, timedelta\nimport pytz\nimport boto3\nimport gzip\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Compute and cache arrival history')\n    
parser.add_argument('--route', nargs='*')\n parser.add_argument('--date', help='Date (yyyy-mm-dd)')\n parser.add_argument('--start-date', help='Start date (yyyy-mm-dd)')\n parser.add_argument('--end-date', help='End date (yyyy-mm-dd), inclusive')\n parser.add_argument('--s3', dest='s3', action='store_true', help='store in s3')\n parser.set_defaults(s3=False)\n\n args = parser.parse_args()\n route_ids = args.route\n agency = 'sf-muni'\n\n if route_ids is None:\n route_ids = [route.id for route in nextbus.get_route_list(agency)]\n\n date_str = args.date\n\n if args.date:\n dates = util.get_dates_in_range(args.date, args.date)\n elif args.start_date is not None and args.end_date is not None:\n dates = util.get_dates_in_range(args.start_date, args.end_date)\n else:\n raise Exception('missing date, start-date, or end-date')\n\n tz = pytz.timezone('America/Los_Angeles')\n\n\n incr = timedelta(days=1)\n\n for d in dates:\n start_dt = tz.localize(datetime(d.year,d.month, d.day, hour=3)) # start each \"day\" at 3 AM local time so midnight-3am buses are associated with previous day\n end_dt = start_dt + incr\n\n start_time = int(start_dt.timestamp())\n end_time = int(end_dt.timestamp())\n\n # Request data from trynapi in smaller chunks to avoid internal server errors.\n # The more routes we have, the smaller our chunk size needs to be in order to\n # avoid getting internal server errors from trynapi.\n chunk_minutes = math.ceil(720 / len(route_ids))\n\n print(f\"route = {route_ids}\")\n print(f\"time = [{start_dt}, {end_dt}) (chunk_minutes={chunk_minutes})\")\n\n route_state_map = {}\n chunk_start_time = start_time\n while chunk_start_time < end_time:\n\n chunk_end_time = min(chunk_start_time + 60 * chunk_minutes, end_time)\n\n # trynapi returns all route states in the UTC minute containing the end timestamp, *inclusive*.\n # This would normally cause trynapi to return duplicate route states at the end of one chunk and\n # the beginning of the next chunk. 
Since chunk_end_time is always the first second in a UTC minute,\n            # subtracting 1 from the corresponding millisecond will be the last millisecond in the previous minute,\n            # so it should avoid fetching duplicate vehicle states at chunk boundaries\n            chunk_state = trynapi.get_state(agency, chunk_start_time*1000, chunk_end_time*1000 - 1, route_ids)\n\n            if 'message' in chunk_state: # trynapi returns an internal server error if you ask for too much data at once\n                raise Exception(f\"trynapi error for time range {chunk_start_time}-{chunk_end_time}: {chunk_state['message']}\")\n\n            if not ('data' in chunk_state):\n                print(chunk_state)\n                raise Exception(f'trynapi returned no data')\n\n            for chunk_route_state in chunk_state['data']['trynState']['routes']:\n                route_id = chunk_route_state['rid']\n                if route_id not in route_state_map:\n                    route_state_map[route_id] = chunk_route_state\n                else:\n                    route_state_map[route_id]['routeStates'].extend(chunk_route_state['routeStates'])\n\n            chunk_start_time = chunk_end_time\n\n        for route_id, route_state in route_state_map.items():\n            history = arrival_history.compute_from_state(agency, route_id, start_time, end_time, route_state)\n            arrival_history.save_for_date(history, d, args.s3)","sub_path":"compute_arrivals.py","file_name":"compute_arrivals.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"598671586","text":"#!/usr/bin/python3\n# echo 1 > /proc/sys/net/ipv4/ip_forward\nimport scapy.all as scapy\nimport time\nimport sys\n\ntarget_ip = input(\"Enter Your Target Ip >>\")\ngateway_ip = input(\"Enter Your Router Gateway Ip >> \")\ndef get_mac(ip):\n    arp_request = scapy.ARP(pdst=ip)\n    broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n    arp_request_broadcast = broadcast / arp_request\n    answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]\n\n    return answered_list[0][1].hwsrc\n\n\ndef spoof(target_ip, spoof_ip):\n    target_mac = get_mac(target_ip)\n    packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip)\n    scapy.send(packet, verbose=False)\n\n\ndef restore(destination_ip, source_ip):\n    destination_mac = get_mac(destination_ip)\n    source_mac = get_mac(source_ip)\n    packet = scapy.ARP(op=2, pdst=destination_ip, hwdst=destination_mac, psrc=source_ip, hwsrc=source_mac)\n    scapy.send(packet, count=4, verbose=False)\n\n\nsent_packets_count = 0\n\ntry:\n    while True:\n        spoof(target_ip, gateway_ip) # upside down\n        spoof(gateway_ip, target_ip) # upside down\n        sent_packets_count = sent_packets_count + 2\n        print(\"\\r[+] Packets Sent > \" + str(sent_packets_count), end=\"\")\n        sys.stdout.flush()\n        time.sleep(2)\nexcept KeyboardInterrupt:\n    print(\"[+] You Pressed CTRL + C ....... Resetting ARP tables............ Please Wait. 
\\n\")\n restore(target_ip, gateway_ip)\n restore(gateway_ip, target_ip)\n\n\n","sub_path":"arp_spoofer.py","file_name":"arp_spoofer.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"601385619","text":"# coding: utf-8\nimport os\nimport numpy as np\nimport copy\nfrom chainer import reporter\nimport chainer.functions as F\nfrom chainer.functions import sigmoid_cross_entropy, binary_accuracy\nfrom chainer.training import extensions\nfrom sklearn.svm import LinearSVC\nfrom collections import namedtuple\nimport random\nimport chainer\nfrom .util import compute_map_mrr\n\nclass WikiQAEvaluator(extensions.Evaluator):\n\n def __init__(self, iterator, target, device, converter):\n super(WikiQAEvaluator, self).__init__(\n iterator=iterator, target=target, device=device, converter=converter)\n\n def collect_prediction_for_train_data(self):\n \"\"\"\n collect prediction scores from the model.\n this is needed for training SVM/LR\n \"\"\"\n iterator = self._iterators['train']\n target = self._targets['main']\n it = copy.copy(iterator)\n\n train_X = []\n train_y = []\n for batch in it:\n padded_batch = self.converter(batch, device=self.device)\n x1s = padded_batch['x1s']\n x2s = padded_batch['x2s']\n wordcnt = padded_batch['wordcnt']\n wgt_wordcnt = padded_batch['wgt_wordcnt']\n x1s_len = padded_batch['x1s_len']\n x2s_len = padded_batch['x2s_len']\n y = padded_batch['y']\n\n y_score, sim_scores = target(x1s, x2s, wordcnt, wgt_wordcnt, x1s_len, x2s_len)\n x = np.concatenate([x.data for x in sim_scores] + [wordcnt, wgt_wordcnt, x1s_len, x2s_len], axis=1)\n train_X.append(x)\n train_y.append(y)\n\n train_X = np.concatenate(train_X, axis=0)\n train_y = np.concatenate(train_y, axis=0)\n return train_X, train_y\n\n\n def evaluate(self):\n train_X, train_y = self.collect_prediction_for_train_data()\n model = LinearSVC()\n model.fit(X=train_X, y=train_y)\n\n iterator = self._iterators['dev']\n target = self._targets['main']\n # this is necessary for more-than-once-evaluation\n it = copy.copy(iterator)\n\n label_scores = []\n svm_label_scores = []\n summary = reporter.DictSummary()\n for n, batch in enumerate(it):\n observation = {}\n with reporter.report_scope(observation):\n padded_batch = self.converter(batch, device=self.device)\n x1s = padded_batch['x1s']\n x2s = padded_batch['x2s']\n wordcnt = padded_batch['wordcnt']\n wgt_wordcnt = padded_batch['wgt_wordcnt']\n x1s_len = padded_batch['x1s_len']\n x2s_len = padded_batch['x2s_len']\n y = padded_batch['y']\n\n y_score, sim_scores = target(x1s, x2s, wordcnt, wgt_wordcnt, x1s_len, x2s_len)\n\n # compute loss\n loss = F.sigmoid_cross_entropy(x=y_score, t=y).data\n reporter.report({'loss': loss}, target)\n\n # We evaluate WikiQA by MAP and MRR\n # for direct evaluation\n label_score = np.c_[y, y_score.data]\n label_scores.append(label_score)\n # for SVM/LR\n x = np.concatenate([x.data for x in sim_scores] + [wordcnt, wgt_wordcnt, x1s_len, x2s_len], axis=1)\n y_score = model.decision_function(x)\n svm_label_score = np.c_[y, y_score]\n svm_label_scores.append(svm_label_score)\n summary.add(observation)\n\n stats = compute_map_mrr(label_scores)\n svm_stats = compute_map_mrr(svm_label_scores)\n summary_dict = summary.compute_mean()\n summary_dict[\"validation/main/svm_map\"] = svm_stats.map\n summary_dict[\"validation/main/svm_mrr\"] = svm_stats.mrr\n summary_dict[\"validation/main/map\"] = stats.map\n summary_dict[\"validation/main/mrr\"] = stats.mrr\n return 
summary_dict\n","sub_path":"bin/BCNN/wikiqa_evaluator.py","file_name":"wikiqa_evaluator.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"115775833","text":"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Decode from trained T2T models.\n\nThis binary performs inference using the Estimator API.\n\nExample usage to decode from dataset:\n\n t2t-decoder \\\n --data_dir ~/data \\\n --problem=algorithmic_identity_binary40 \\\n --model=transformer\n --hparams_set=transformer_base\n\nSet FLAGS.decode_interactive or FLAGS.decode_from_file for alternative decode\nsources.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\n# Fathom start\nimport fathomt2t\nfrom fathomt2t.common_flags import setup_decoder_flags, dataset_to_t2t_mode\nfrom fathomtf.services.model_management import fathom_t2t_model_setup\nfrom fathomairflow.dags.dag_management.xcom_manipulation import echo_yaml_for_xcom_ingest\n# Fathom end\n\n# Dependency imports\n\nfrom tensor2tensor.bin import t2t_trainer\nfrom tensor2tensor.data_generators import problem # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import decoding\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\n\nimport tensorflow as tf\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# Additional flags in bin/t2t_trainer.py and utils/flags.py\nflags.DEFINE_string(\"checkpoint_path\", None,\n \"Path to the model checkpoint. Overrides output_dir.\")\nflags.DEFINE_bool(\"keep_timestamp\", False,\n \"Set the mtime of the decoded file to the \"\n \"checkpoint_path+'.index' mtime.\")\nflags.DEFINE_bool(\"decode_interactive\", False,\n \"Interactive local inference mode.\")\nflags.DEFINE_integer(\"decode_shards\", 1, \"Number of decoding replicas.\")\nflags.DEFINE_string(\"problems\", \"\", \"Problem to use in decode\")\nflags.DEFINE_string(\"score_file\", \"\", \"File to score. 
Each line in the file \"\n \"must be in the format input \\t target.\")\n# Fathom start\nsetup_decoder_flags()\nflags.DEFINE_bool(\"fathom_output_predictions\", False, \"Output predictions based on problem?\")\nflags.DEFINE_bool(\"use_original_input\", False,\n \"Use the input that was used for validation during training?\")\n# Fathom end\nflags.DEFINE_bool(\"decode_in_memory\", False, \"Decode in memory.\")\n\n\ndef create_hparams():\n return trainer_lib.create_hparams(\n FLAGS.hparams_set,\n FLAGS.hparams,\n data_dir=os.path.expanduser(FLAGS.data_dir),\n problem_name=FLAGS.problem)\n\n\ndef create_decode_hparams():\n decode_hp = decoding.decode_hparams(FLAGS.decode_hparams)\n decode_hp.shards = FLAGS.decode_shards\n decode_hp.shard_id = FLAGS.worker_id\n decode_hp.decode_in_memory = FLAGS.decode_in_memory\n decode_hp.decode_to_file = FLAGS.decode_to_file\n decode_hp.decode_reference = FLAGS.decode_reference\n return decode_hp\n\n\ndef decode(estimator, hparams, decode_hp):\n \"\"\"Decode from estimator. Interactive, from file, or from dataset.\"\"\"\n if FLAGS.decode_interactive:\n if estimator.config.use_tpu:\n raise ValueError(\"TPU can only decode from dataset.\")\n decoding.decode_interactively(estimator, hparams, decode_hp,\n checkpoint_path=FLAGS.checkpoint_path)\n elif FLAGS.decode_from_file:\n if estimator.config.use_tpu:\n raise ValueError(\"TPU can only decode from dataset.\")\n decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams,\n decode_hp, FLAGS.decode_to_file,\n checkpoint_path=FLAGS.checkpoint_path)\n if FLAGS.checkpoint_path and FLAGS.keep_timestamp:\n ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + \".index\")\n os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time))\n else:\n\n # Fathom\n predictions = decoding.decode_from_dataset(\n estimator,\n FLAGS.problem,\n hparams,\n decode_hp,\n decode_to_file=FLAGS.decode_to_file,\n dataset_split=dataset_to_t2t_mode(FLAGS.dataset_split),\n return_generator=FLAGS.fathom_output_predictions,\n # save logs/summaries to a directory with the same name as decode_output_file\n # in situations where we are calling decode without write permissions\n # to the model directory\n output_dir=os.path.splitext(FLAGS.decode_output_file)[0])\n\n # Fathom\n if FLAGS.fathom_output_predictions:\n print('Assuming only one problem...')\n assert '-' not in FLAGS.problems\n # if we already have built problem instance in hparams, no need to create\n # it second time (as it's downloading files from gcs)\n if hasattr(hparams, 'problem'):\n problem = hparams.problem\n else:\n problem = registry.problem(FLAGS.problems)\n problem.output_predictions(\n predictions=predictions,\n num_examples=FLAGS.num_examples)\n\n\ndef score_file(filename):\n \"\"\"Score each line in a file and return the scores.\"\"\"\n # Prepare model.\n hparams = create_hparams()\n encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)\n has_inputs = \"inputs\" in encoders\n\n # Prepare features for feeding into the model.\n if has_inputs:\n inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.\n batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D.\n targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.\n batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D.\n features = {\n \"inputs\": batch_inputs,\n \"targets\": batch_targets,\n } if has_inputs else {\"targets\": batch_targets}\n\n # Prepare the model and the graph when model runs on features.\n model = registry.model(FLAGS.model)(hparams, 
tf.estimator.ModeKeys.EVAL)\n  _, losses = model(features)\n  saver = tf.train.Saver()\n\n  with tf.Session() as sess:\n    # Load weights from checkpoint.\n    ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)\n    ckpt = ckpts.model_checkpoint_path\n    saver.restore(sess, ckpt)\n    # Run on each line.\n    with tf.gfile.Open(filename) as f:\n      lines = f.readlines()\n    results = []\n    for line in lines:\n      tab_split = line.split(\"\\t\")\n      if len(tab_split) > 2:\n        raise ValueError(\"Each line must have at most one tab separator.\")\n      if len(tab_split) == 1:\n        targets = tab_split[0].strip()\n      else:\n        targets = tab_split[1].strip()\n        inputs = tab_split[0].strip()\n      # Run encoders and append EOS symbol.\n      targets_numpy = encoders[\"targets\"].encode(\n          targets) + [text_encoder.EOS_ID]\n      if has_inputs:\n        inputs_numpy = encoders[\"inputs\"].encode(inputs) + [text_encoder.EOS_ID]\n      # Prepare the feed.\n      feed = {\n          inputs_ph: inputs_numpy,\n          targets_ph: targets_numpy\n      } if has_inputs else {targets_ph: targets_numpy}\n      # Get the score.\n      np_loss = sess.run(losses[\"training\"], feed)\n      results.append(np_loss)\n\n  return results\n  \n\ndef main(_):\n  tf.logging.set_verbosity(tf.logging.INFO)\n  trainer_lib.set_random_seed(FLAGS.random_seed)\n  # Fathom start\n  checkpoint_path = fathom_t2t_model_setup()\n  # Fathom end\n  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n\n\n  if FLAGS.score_file:\n    filename = os.path.expanduser(FLAGS.score_file)\n    if not tf.gfile.Exists(filename):\n      raise ValueError(\"The file to score doesn't exist: %s\" % filename)\n    results = score_file(filename)\n    if not FLAGS.decode_to_file:\n      raise ValueError(\"To score a file, specify --decode_to_file for results.\")\n    write_file = tf.gfile.Open(os.path.expanduser(FLAGS.decode_to_file), \"w\")\n    for score in results:\n      write_file.write(\"%.6f\\n\" % score)\n    write_file.close()\n    return\n\n  hp = create_hparams()\n  decode_hp = create_decode_hparams()\n\n  estimator = trainer_lib.create_estimator(\n      FLAGS.model,\n      hp,\n      t2t_trainer.create_run_config(hp),\n      decode_hparams=decode_hp,\n      use_tpu=FLAGS.use_tpu)\n\n  decode(estimator, hp, decode_hp)\n\n  # Fathom\n  # This xcom is here so that tasks after decode know the local path to the\n  # downloaded model. 
Train does this same xcom echo.\n # Decode, predict, and evaluate code should\n # converge to use the same fathom_t2t_model_setup.\n # TODO: since the truncation-boundary xcom value should be available in\n # the hparams_set, we should probably have consumers access this via a\n # SavedModel.hparams property rather than XCOM\n echo_yaml_for_xcom_ingest({'output-dir': os.path.dirname(checkpoint_path),\n 'output-file': FLAGS.decode_output_file,\n 'truncation-boundary': hp.max_input_seq_length})\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n","sub_path":"tensor2tensor/bin/t2t_decoder.py","file_name":"t2t_decoder.py","file_ext":"py","file_size_in_byte":9497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"253273156","text":"import requests\nimport json\n\nuserKey = \"\"\nversion = \"v2.1\"\nbaseUrl = \"https://developers.zomato.com/api/\"\n\n\ndef getCurrentLocation():\n return {\n \"latitude\" : 12.918847,\n \"longitude\" : 77.629428\n }\n\ndef getCityId(latitude, longitude):\n citiesExtension = \"/cities\"\n url = baseUrl + version + citiesExtension\n headers = {\n \"Accept\" : \"application/json\",\n \"user-key\" : userKey,\n }\n params = {\n \"lat\" : latitude,\n \"lon\" : longitude\n }\n response = requests.get(url=url, headers = headers, params=params)\n data = response.json()\n cityId = data[\"location_suggestions\"][0][\"id\"]\n cityName = data[\"location_suggestions\"][0][\"name\"]\n print(\"You are in \" + cityName)\n return cityId\n\ndef getCuisines(cityId):\n cuisinesExtension = \"/cuisines\"\n url = baseUrl + version + cuisinesExtension\n headers = {\n \"Accept\" : \"application/json\",\n \"user-key\" : userKey,\n }\n params = {\n \"city_id\" : cityId\n }\n cuisines = requests.get(url = url, headers = headers, params = params)\n return cuisines.json()\n\ndef getEntityTypeAndId(locationName):\n locationsExtension = \"/locations\"\n url = baseUrl + version + locationsExtension\n headers = {\n \"Accept\" : \"application/json\",\n \"user-key\" : userKey,\n }\n params = {\n \"query\" : locationName\n }\n response = requests.get(url = url, headers = headers, params = params)\n data = response.json()\n return [data[\"location_suggestions\"][0][\"entity_type\"], data[\"location_suggestions\"][0][\"entity_id\"]]\n\ndef getBestRatedRestaurantsNearby(entityType, entityId, averageCostForTwo, count):\n locationDetailsExtension = \"/location_details\"\n url = baseUrl + version + locationDetailsExtension\n headers = {\n \"Accept\" : \"application/json\",\n \"user-key\" : userKey,\n }\n params = {\n \"entity_type\" : entityType,\n \"entity_id\" : entityId\n }\n response = requests.get(url = url, headers = headers, params = params)\n data = response.json()\n \n bestRatedRestaurants = sorted(data[\"best_rated_restaurant\"], \n key= lambda s: s[\"restaurant\"][\"user_rating\"][\"aggregate_rating\"])\n bRRUnderBudget = [x for x in bestRatedRestaurants if x[\"restaurant\"][\"average_cost_for_two\"] <= averageCostForTwo] \n length = len(bRRUnderBudget)\n return bRRUnderBudget[:length if length < count else count]\n\n\n\n\nif __name__ == \"__main__\":\n print(\"enter user key:\")\n userKey = input()\n location = getCurrentLocation()\n getCityId(location[\"latitude\"], location[\"longitude\"])\n locationName = \"HSR\"\n entity_type, entity_id = getEntityTypeAndId(locationName)\n brrUnderBudget = getBestRatedRestaurantsNearby(entityId= entity_id, \n entityType = entity_type,\n averageCostForTwo = 
1500,\n count = 10)\n print([x[\"restaurant\"][\"name\"] for x in brrUnderBudget])\n\n\n\n\n","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"354514219","text":"from elasticsearch import Elasticsearch, helpers\n\nfrom parse_files import get_contents\nfrom snippeteer.parse.names import split_name\n\nINDEX_NAME = 'index'\n\n\ndef connect_elasticsearch():\n\t_es = None\n\t_es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\tif not _es.ping():\n\t\traise Exception('Elasticsearch is not connected')\n\treturn _es\n\n\ndef create_data(values):\n\tfor v, id, url, content, star_count in values:\n\t\tfor f in v.functions:\n\n\t\t\tdocstring = \"\" if f.docstring is None else f.docstring\n\t\t\ttry:\n\t\t\t\tif isinstance(docstring, bytes):\n\t\t\t\t\tdocstring = docstring.decode()\n\t\t\t\telif not isinstance(docstring, str):\n\t\t\t\t\tdocstring = str(docstring)\n\t\t\texcept:\n\t\t\t\tdocstring = \"\"\n\n\t\t\tcode = '\\n'.join(content[f.first_line: f.last_line])\n\n\t\t\tyield {\n\t\t\t\t\"_index\": INDEX_NAME,\n\t\t\t\t\"name\": f.name,\n\t\t\t\t\"search_name\": ' '.join(split_name(f.name)),\n\t\t\t\t\"docstring\": docstring,\n\t\t\t\t\"first_line\": f.first_line,\n\t\t\t\t\"last_line\": f.last_line,\n\t\t\t\t\"db_id\": id,\n\t\t\t\t\"code\": code,\n\t\t\t\t\"url\": url,\n\t\t\t\t\"star_count\": star_count,\n\t\t\t\t\"keywords\": list(f.keywords),\n\t\t\t\t\"arguments\": list(f.arguments),\n\t\t\t\t\"dependencies\": list(f.dependencies),\n\t\t\t\t\"num_operations\": f.num_operations,\n\t\t\t\t\"returns\": list(f.returns),\n\t\t\t}\n\n\ndef populate_index(es):\n\tes.indices.delete(index=INDEX_NAME, ignore=[400, 404])\n\tes.indices.create(index=INDEX_NAME, ignore=400)\n\tvalues = get_contents()\n\thelpers.bulk(es, create_data(values))\n\n\nif __name__ == '__main__':\n\tes = connect_elasticsearch()\n\tpopulate_index(es)\n","sub_path":"snippeteer/index/create_index.py","file_name":"create_index.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"304503535","text":"import pandas as pd\n\n\n\nhc_df = pd.read_csv('FSL_HC_Heatmap.csv').set_index('region').astype(float)\nad_df = pd.read_csv('FSL_AD_Heatmap.csv').set_index('region').astype(float)\n\nnew_df = ad_df - hc_df\n\nnew_df.to_csv('FSL_diff_Heatmap.csv')","sub_path":"figures/get_df_diff.py","file_name":"get_df_diff.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"340327865","text":"# =============================================================================\n# Copyright (c) 2015, Cisco Systems, Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF 
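# The create_index.py record above streams documents into helpers.bulk
# through a generator, so the whole corpus never sits in memory at once.
# A minimal sketch of that pattern; it assumes a reachable Elasticsearch
# node on localhost:9200, and the index name is illustrative.
from elasticsearch import Elasticsearch, helpers

def doc_stream(rows, index_name="index"):
    for name, code in rows:
        yield {"_index": index_name, "name": name, "code": code}

if __name__ == "__main__":
    es = Elasticsearch([{"host": "localhost", "port": 9200}])
    helpers.bulk(es, doc_stream([("connect", "def connect(): pass")]))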
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# =============================================================================\nfrom models import Package\nfrom models import ModulePackageState\nfrom constants import PackageState\nfrom parsers.base import BasePackageParser\nimport re\n\nclass CLIPackageParser(BasePackageParser):\n def get_packages_from_cli(self, host, install_inactive_cli=None, install_active_cli=None, install_committed_cli=None):\n inactive_packages = {}\n active_packages = {}\n committed_packages = {}\n host_packages = []\n\n if install_inactive_cli is not None:\n inactive_packages = self.parse_inactive(install_inactive_cli, PackageState.INACTIVE)\n\n if install_active_cli is not None:\n active_packages = self.parse_active_and_committed(install_active_cli, PackageState.ACTIVE)\n\n if install_committed_cli is not None:\n committed_packages = self.parse_active_and_committed(install_committed_cli, PackageState.ACTIVE_COMMITTED)\n\n if committed_packages:\n for package_name in active_packages:\n # Extracts the Package object\n active_package = active_packages[package_name]\n committed_package = committed_packages[package_name]\n if committed_package is not None:\n # Peeks into the ModulePackageStates to see if the same line card\n # with the same package appears in both active and committed areas.\n for active_module_package_state in active_package.modules_package_state:\n for committed_module_package_state in committed_package.modules_package_state:\n if active_module_package_state.module_name == committed_module_package_state.module_name:\n active_module_package_state.package_state = PackageState.ACTIVE_COMMITTED\n\n for package in active_packages.values():\n host_packages.append(package)\n\n for package in inactive_packages.values():\n host_packages.append(package)\n\n if len(host_packages) > 0:\n host.packages = host_packages\n return True\n\n return False\n\n \"\"\"\n Used to parse 'show install inactive' CLI output.\n\n 19 inactive package(s) found:\n ncs6k-mcast-5.0.1\n ncs6k-mgbl-5.0.1\n ncs6k-mpls-5.0.1\n ncs6k-k9sec-5.0.1\n ncs6k-xr-5.0.1\n ncs6k-doc-5.0.1\n \"\"\"\n def parse_inactive(self, lines, package_state):\n packages_dict = {}\n\n if lines is None:\n return packages_dict\n\n found = False\n lines = lines.splitlines()\n\n for line in lines:\n if found:\n location = None\n name = line.strip()\n\n # Skip anything after the blank line\n if len(name) == 0:\n break\n\n package = Package(location=location, name=name, state=package_state)\n packages_dict[name] = package\n\n elif 'package' in line:\n found = True\n\n return packages_dict\n\n \"\"\"\n Used to parse 'show install inactive' CLI output.\n Package\n ModulePackageState\n ModulePackageState\n \"\"\"\n def parse_active_and_committed(self, lines, package_state):\n packages_dict = {}\n\n if lines is None:\n return packages_dict\n\n lines = lines.splitlines()\n\n trunks = self.get_trunks(lines)\n if len(trunks) > 0:\n # Collect all the packages\n package_list 
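# This parser groups `show install` CLI output into {module: packages}
# sections keyed by "Node ..." header lines (see get_trunks further
# down). The header-driven grouping idea in isolation; group_sections
# is a stand-in name, not part of the CSM codebase:
import re

def group_sections(lines):
    sections, current = {}, None
    for line in (l.strip() for l in lines):
        m = re.match(r"Node\s+(\S+)", line)
        if m:
            current = m.group(1)
            sections[current] = []
        elif line and current is not None:
            # Keep only the package token, dropping "version=... [Boot image]".
            sections[current].append(line.split()[0])
    return sections

out = group_sections(["Node 0/RP0/CPU0 [RP]", "ncs6k-xr-5.2.1 version=5.2.1"])
assert out == {"0/RP0/CPU0": ["ncs6k-xr-5.2.1"]}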
= []\n for module in trunks:\n for package in trunks[module]:\n if not package in package_list and re.match(\"(ncs.*|asr9k.*)\", package):\n package_list.append(package)\n\n for package_name in package_list:\n package = Package(\n name=package_name,\n location=None,\n state=package_state)\n\n # Check which module has this package\n for module in trunks:\n for line in trunks[module]:\n if line == package_name:\n package.modules_package_state.append(ModulePackageState(\n module_name=module,\n package_state=package_state))\n\n packages_dict[package_name] = package\n\n return packages_dict\n\n \"\"\"\n Return the CLI outputs in trunks. Each Trunk is a section of module and its packages.\n Below is an example of two trunks.\n\n Node 0/RP0/CPU0 [RP]\n Boot Partition: xr_lv36\n Active Packages: 7\n ncs6k-xr-5.2.1 version=5.2.1 [Boot image]\n ncs6k-doc-5.2.1\n ncs6k-k9sec-5.2.1\n ncs6k-mcast-5.2.1\n ncs6k-mgbl-5.2.1\n ncs6k-mpls-5.2.1\n ncs6k-5.2.1.CSCur01489-1.0.0\n\n Node 0/RP1/CPU0 [RP]\n Boot Partition: xr_lv36\n Active Packages: 7\n ncs6k-xr-5.2.1 version=5.2.1 [Boot image]\n ncs6k-doc-5.2.1\n ncs6k-k9sec-5.2.1\n ncs6k-mcast-5.2.1\n ncs6k-mgbl-5.2.1\n ncs6k-mpls-5.2.1\n ncs6k-5.2.1.CSCur01489-1.0.0\n \"\"\"\n def get_trunks(self, cli_output):\n trunks = {}\n trunk = []\n module = None\n\n for line in cli_output:\n line = line.strip()\n if len(line) == 0: continue\n\n m = re.match(\"(Node.*)\", line)\n if m:\n if module is not None:\n trunks[module] = trunk\n\n trunk = []\n # Node 0/RP1/CPU0 [RP]\n module = line.split()[1]\n else:\n if module is not None:\n if re.match(\"(ncs.*|asr9k.*)\", line):\n # For situation: ncs6k-xr-5.2.1 version=5.2.1 [Boot image]\n trunk.append(line.split()[0])\n else:\n trunk.append(line)\n\n if module is not None:\n trunks[module] = trunk\n\n return trunks\n","sub_path":"csmserver/parsers/platforms/eXR.py","file_name":"eXR.py","file_ext":"py","file_size_in_byte":7550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"594838126","text":"from django_filters import rest_framework as filters\nfrom rest_framework import generics\nfrom rest_framework.generics import get_object_or_404\n\nfrom .models import Product, Organization\nfrom .serializers import ProductDetailSerializer, OrganizationDetailSerializer, OrganizationListSerializer\n\n\nclass ProductDetailView(generics.RetrieveAPIView):\n \"\"\"\n Return instance of class Product by id(pk) or 404\n \"\"\"\n serializer_class = ProductDetailSerializer\n queryset = Product.objects.all()\n\n def get_object(self):\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=self.kwargs['pk'])\n return obj\n\n\nclass OrganizationDetailView(generics.RetrieveAPIView):\n \"\"\"\n Return instance of class Organization by id(pk) or 404\n \"\"\"\n serializer_class = OrganizationDetailSerializer\n queryset = Organization.objects.all()\n\n def get_object(self):\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=self.kwargs['pk'])\n return obj\n\n\nclass OrganizationDistrictView(generics.ListAPIView):\n \"\"\"\n Return list of Organization with selected district(district_id)\n \"\"\"\n serializer_class = OrganizationListSerializer\n\n def get_queryset(self):\n queryset = Organization.objects.filter(districts__district=self.kwargs['district_id']).all()\n return queryset\n\n\nclass OrganizationFilter(filters.FilterSet):\n \"\"\"\n Filter by category name or/and product name or/and min and max price\n \"\"\"\n min_price = 
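# The OrganizationFilter being defined here maps query parameters
# straight onto ORM lookups: lookup_expr 'gte'/'lte' mean >= / <=
# against products__price. A plain-Python sketch of those range
# semantics (apply_price_range is an illustrative name, not
# django-filter API):
def apply_price_range(items, min_price=None, max_price=None):
    if min_price is not None:
        items = [i for i in items if i["price"] >= min_price]  # gte
    if max_price is not None:
        items = [i for i in items if i["price"] <= max_price]  # lte
    return items

# e.g. GET /organizations/?min_price=10&max_price=100
assert apply_price_range([{"price": 5}, {"price": 50}], 10, 100) == [{"price": 50}]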
filters.NumberFilter(field_name='products__price', lookup_expr='gte')\n max_price = filters.NumberFilter(field_name='products__price', lookup_expr='lte')\n category = filters.CharFilter(field_name='products__product__category__name')\n product = filters.CharFilter(field_name='products__product__name')\n\n class Meta:\n model = Organization\n fields = ['product', 'category', 'min_price', 'max_price', ]\n\n\nclass OrganizationListView(generics.ListAPIView):\n \"\"\"\n Return list of Organizations after filtering\n \"\"\"\n serializer_class = OrganizationListSerializer\n queryset = Organization.objects.all()\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = OrganizationFilter\n","sub_path":"task_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"37857304","text":"import tensorflow as tf\nimport numpy as np\n\nx = np.array([\n [0, 0],\n [1, 0],\n [1, 1],\n [0, 0],\n [0, 0],\n [0, 1],\n])\ny = np.array([\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1],\n])\n\nX = tf.placeholder(tf.float32, name='X')\nY = tf.placeholder(tf.float32, name='Y')\n\nW1 = tf.Variable(tf.random_uniform([2, 10], -1, 1))\nW2 = tf.Variable(tf.random_uniform([10, 3], -1, 1))\n\nb1 = tf.Variable(tf.random_uniform([10], -1, 1))\nb2 = tf.Variable(tf.random_uniform([3], -1, 1))\n\nL1 = tf.nn.relu(tf.matmul(X, W1) + b1)\nY_predict = tf.nn.relu(tf.matmul(L1, W2) + b2)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=Y_predict))\n\noptimizer = tf.train.AdamOptimizer(learning_rate=0.01)\ntrain_option = optimizer.minimize(cost)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(100):\n sess.run(train_option, feed_dict = {X: x, Y: y})\n\n if step + 1 % 10 == 0:\n print(step + 1, sess.run(cost, feed_dict = {X: x, Y: y}))\n\n print('Prediction: ', sess.run(Y_predict, feed_dict={X: x}))\n print('Truth: ', sess.run(Y, feed_dict={Y: y}))\n\n prediction = tf.argmax(Y_predict,1)\n target = tf.argmax(Y,1)\n print('Prediction: ', sess.run(prediction,feed_dict = {X: x}))\n print('Truth: ', sess.run(target, feed_dict={Y: y}))\n\n is_correct = tf.equal(prediction, target)\n\n print('Is_Corr: ', sess.run(is_correct, feed_dict={X: x, Y: y}))\n print('Cast: ', sess.run(tf.cast(is_correct, tf.float32), feed_dict={X: x, Y: y}))\n\n accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n print('Accuracy: %2.2f' % sess.run(accuracy * 100, feed_dict = {X: x, Y: y}))","sub_path":"Golbin/ex071_multilayer_perceptron.py","file_name":"ex071_multilayer_perceptron.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"269811210","text":"__author__ = 'Administrator'\n\nimport os\n\ndef FileInputStream(filename):\n try:\n file = open(filename)\n for line in file:\n yield bytearray\n except StopIteration:\n file.close()\n return\n\ndef FileOutputStream(inputStream, filename):\n try:\n file = open(filename,\"w\")\n while True:\n byte = inputStream.next()\n file.write(byte)\n except StopIteration:\n file.close()\n return\n\nif __name__ == \"__main__\":\n 
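# The stream helpers above read like Python 2 era code with two apparent
# bugs: `yield bytearray` yields the built-in type instead of the line
# that was read, and `inputStream.next()` is the pre-Python-3 iterator
# protocol. A hedged, corrected sketch of the same copy-via-generator idea:
def file_input_stream(filename):
    with open(filename) as f:
        for line in f:
            yield line  # yield the data, not the bytearray type

def file_output_stream(input_stream, filename):
    with open(filename, "w") as f:
        for chunk in input_stream:  # plain iteration replaces .next()/StopIteration
            f.write(chunk)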
FileOutputStream(FileInputStream(\"d:\\\\aaa.txt\"),\"d:\\\\hello.txt\")","sub_path":"src/fundamental/file/handle_file_fun/fileStream.py","file_name":"fileStream.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"243372094","text":"import json\nimport os\n\n# from httpretty import HTTPretty\nfrom .oauth import OAuth1Test\n\n\nclass JiraOAuthTest(OAuth1Test):\n backend_path = 'social_core.backends.jira.JiraOAuth'\n expected_username = 'foobar'\n request_token_body = json.dumps({\n 'oauth_token_secret': 'foobar-secret',\n 'oauth_token': 'foobar',\n })\n access_token_body = json.dumps({\n 'access_token': 'foobar',\n 'token_type': 'bearer'\n })\n user_data_url = 'https://www.example.com/rest/api/2/myself'\n user_data_body = json.dumps({\n u'self': u'https://www.example.com/jira/rest/api/2/user?username=foobar',\n u'key': u'foobar',\n u'accountId': u'99:27935d01-92a7-4687-8272-a9b8d3b2ae2e',\n u'name': u'foobar',\n u'emailAddress': u'foobar@example.com',\n u'avatarUrls': {\n u'48x48': u'https://www.example.com/jira/secure/useravatar?size=large&ownerId=foobar',\n u'24x24': u'https://www.example.com/jira/secure/useravatar?size=small&ownerId=foobar',\n u'16x16': u'https://www.example.com/jira/secure/useravatar?size=xsmall&ownerId=foobar',\n u'32x32': u'https://www.example.com/jira/secure/useravatar?size=medium&ownerId=foobar'\n },\n u'displayName': u'Foobar F. User',\n u'active': True,\n u'timeZone': u'Australia/Sydney',\n u'groups': {u'size': 3, u'items': []},\n u'applicationRoles': {u'size': 1, u'items': []}\n })\n\n def setUp(self):\n super(JiraOAuthTest, self).setUp()\n test_root = os.path.dirname(os.path.dirname(__file__))\n self.private_key = open(os.path.join(test_root, 'testkey.pem'), 'r').read().strip()\n self.strategy.set_settings({\n 'SOCIAL_AUTH_JIRA_SCHEME': 'https',\n 'SOCIAL_AUTH_JIRA_HOST': 'www.example.com',\n 'SOCIAL_AUTH_JIRA_RSA_PRIVATE_KEY': self.private_key,\n })\n\n def test_login(self):\n self.do_login()\n","sub_path":"social_core/tests/backends/test_jira.py","file_name":"test_jira.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"372654630","text":"def heading():\n print('Michael Taylor-Innes mtay537')\n\ndef load_table(filename):\n file = open(filename, 'r')\n file_lines = file.readlines()\n file.close()\n for i in range(0, len(file_lines)):\n if i != len(file_lines) - 1:\n file_lines[i] = file_lines[i].split(', ')\n file_lines[i][4] = file_lines[i][4][:-1]\n else:\n file_lines[i] = file_lines[i].split(', ')\n return file_lines\n\n\ndef all_conference_lists(sorted_list):\n conference_one_list = []\n conference_two_list = []\n conference_three_list = []\n conference_four_list = []\n for team in sorted_list:\n if team[1] == '1':\n conference_one_list.append(team)\n elif team[1] == '2':\n conference_two_list.append(team)\n elif team[1] == '3':\n conference_three_list.append(team)\n elif team[1] == '4':\n conference_four_list.append(team)\n return conference_one_list, conference_two_list, conference_three_list, conference_four_list\n\n\ndef sort(list1):\n for passnum in range(len(list1) - 1, 0, -1):\n for i in range(passnum):\n if int(list1[i][2]) < int(list1[i + 1][2]):\n list1[i], list1[i + 1] = list1[i + 1], list1[i]\n if int(list1[i][2]) == int(list1[i + 1][2]):\n goals_for_team1 = list1[i][3]\n goals_against_team1 = list1[i][4]\n diff_team1 = int(goals_for_team1) - 
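# The bubble sort here orders teams by points, breaking ties on goal
# difference and then goals scored. Python's sorted() with a tuple key
# expresses the same three-level ordering in one pass (rows are
# [name, conference, points, goals_for, goals_against], as in this file):
def sort_table(teams):
    return sorted(
        teams,
        key=lambda t: (int(t[2]), int(t[3]) - int(t[4]), int(t[3])),
        reverse=True,
    )

table = [["A", "1", "3", "2", "1"], ["B", "1", "3", "3", "1"]]
assert sort_table(table)[0][0] == "B"  # same points, better goal difference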
int(goals_against_team1)\n goals_for_team2 = list1[i + 1][3]\n goals_against_team2 = list1[i + 1][4]\n diff_team2 = int(goals_for_team2) - int(goals_against_team2)\n if diff_team1 < diff_team2:\n list1[i], list1[i + 1] = list1[i + 1], list1[i]\n if diff_team1 == diff_team2:\n if int(list1[i][3]) < int(list1[i + 1][3]):\n list1[i], list1[i + 1] = list1[i + 1], list1[i]\n return list1\n\n\ndef display(a_list):\n i = 1\n print(\" \", \"Team\", \" \" * 10, \"Conference\", \"Points \", \"Diff\", \" Goals\")\n for x in a_list:\n print(str(i).rjust(2), sep=\"\", end=\"\")\n print(\". \", sep=\"\", end=\"\")\n print(x[0].ljust(22), sep=\"\", end=\"\")\n print(x[1].ljust(8), sep=\"\", end=\"\")\n print(x[2].rjust(2), sep=\"\", end=\"\")\n print(' ', str(int(x[3]) - int(x[4])).rjust(4), sep=\"\", end=\"\")\n print(' ', x[3], ':', x[4], sep=\"\")\n i += 1\n\n\ndef main():\n heading()\n initial_list = load_table(\"table1.txt\")\n sorted_list = sort(initial_list)\n conference_one_list, conference_two_list, conference_three_list, conference_four_list = all_conference_lists(\n sorted_list)\n display(sorted_list)\n print()\n print(\" Conference 1\")\n display(conference_one_list)\n print()\n print(\" Conference 2\")\n display(conference_two_list)\n print()\n print(\" Conference 3\")\n display(conference_three_list)\n print()\n print(\" Conference 4\")\n display(conference_four_list)\n\n\nmain()\n","sub_path":"Assignment 2/Q4/ResultTable.py","file_name":"ResultTable.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"175156502","text":"import numpy as np \nimport tensorflow as tf \nimport matplotlib.pyplot as plt \nfrom training_db import *\nfrom random import randint, shuffle\nimport re\nimport time\nimport threading\n\nclass train_model:\n\n\tdef __init__(self):\n\t\tprint('Starting training class')\n\t\tself.special_chars = re.compile('[^A-Za-z0-9 ]+')\n\t\tself.batch_size = 128\n\t\tself.lstm_units = 64\n\t\tself.num_classes = 2\n\t\tself.epochs = 150\n\t\tself.dimensions = 300\n\t\tself.max_seq_length = 0\n\t\tself.learning_rate = 0.0001\n\n\tdef load_words(self):\n\t\tprint('Loading wordsList and wordVectors')\n\t\twords_list = np.load('utils/wordsList.npy')\n\t\twords_list = words_list.tolist()\n\t\twords_list = [word.decode('UTF-8') for word in words_list]\n\t\tword_vectors = np.load('utils/wordVectors.npy')\n\t\tprint('WordsList and wordVectors loaded')\n\t\treturn words_list, word_vectors\n\n\tdef load_training_data(self):\n\t\tdb = training_db()\n\t\tdb.open()\n\t\tpositive = db.get_positive()\n\t\tnegative = db.get_negative()\n\t\tdb.close()\n\t\treturn positive, negative, positive + negative\n\n\tdef load_new_data(self):\n\t\tpositive = []\n\t\tnegative = []\n\t\twith open('positive.txt', 'r') as f:\n\t\t\tpositive = f.readlines()\n\n\t\twith open('negative.txt', 'r') as f:\n\t\t\tnegative = f.readlines()\n\n\t\tpositive = positive[:int((0.5) * len(positive))]\n\t\tnegative = negative[:int((0.5) * len(negative))]\n\t\treturn positive, negative, positive + negative\n\n\tdef split_training_data(self, positive, negative):\n\t\tshuffle(positive)\n\t\tshuffle(negative)\n\t\ttrain_pos = positive[:int((0.8) * len(positive))]\n\t\ttrain_neg = negative[:int((0.8) * len(negative))]\n\t\ttest_pos = positive[int((0.8) * len(positive)):]\n\t\ttest_neg = negative[int((0.8) * len(negative)):]\n\t\treturn train_pos, train_neg, test_pos, test_neg\n\n\tdef get_training_data_params(self, positive, 
negative):\n\t\ttotal_articles = len(positive) + len(negative)\n\t\tnum_positive = [ len(line.split()) for line in positive ]\n\t\tnum_negative = [ len(line.split()) for line in negative ]\n\t\tnum_words = num_positive + num_negative\n\t\tself.max_seq_length = int(np.ceil(np.percentile(num_words, 75)))\n\t\tprint('Number of words ', sum(num_words))\n\t\tprint('Number of articles ', total_articles)\n\t\tprint('Average number of words per article ', sum(num_words) / total_articles)\n\t\tprint('75th percentile ', self.max_seq_length)\n\t\treturn num_words, num_positive, num_negative\n\n\tdef plot_data(self, num_words):\n\t\tplt.hist(num_words, 50)\n\t\tplt.axvline(self.max_seq_length, color = 'red')\n\t\tplt.xlabel('Word Sequence Length')\n\t\tplt.ylabel('Frequency')\n\t\tplt.show()\n\n\tdef clean_data(self, article):\n\t\treturn re.sub(self.special_chars, \"\", article.lower())\n\n\tdef populate_dataset(self, dataset, data):\n\t\tarticle_index = 0\n\t\tcount = 0 \n\t\tfor article in data:\n\t\t\tprint('Progress: ' + str(count) + '//' + str(len(data)))\n\t\t\tword_index = 0\n\t\t\tclean_article = self.clean_data(article)\n\t\t\tfor word in clean_article:\n\t\t\t\ttry:\n\t\t\t\t\tdataset[article_index][word_index] = words_list.index(word)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tdataset[article_index][word_index] = 399999\n\n\t\t\t\tword_index += 1\n\t\t\t\tif word_index >= self.max_seq_length:\n\t\t\t\t\tbreak\n\n\t\t\tarticle_index += 1\n\t\t\tcount += 1\n\n\tdef build_datasets(self, words_list, train_pos, train_neg, test_pos, test_neg):\n\t\tX_train_pos = np.zeros((len(train_pos), self.max_seq_length), dtype = 'int32')\n\t\tX_train_neg = np.zeros((len(train_neg), self.max_seq_length), dtype = 'int32')\n\t\tY_train = np.array([ [1,0] for el in train_pos ] + [ [0,1] for el in train_neg ])\n\t\tX_test_pos = np.zeros((len(test_pos), self.max_seq_length), dtype = 'int32')\n\t\tX_test_neg = np.zeros((len(test_neg), self.max_seq_length), dtype = 'int32')\n\t\tY_test = np.array([ [1,0] for el in test_pos ] + [ [0,1] for el in test_neg ])\n\t\tt1 = threading.Thread(target=self.populate_dataset, args=(X_train_pos, train_pos, ))\n\t\tt2 = threading.Thread(target=self.populate_dataset, args=(X_train_neg, train_neg, ))\n\t\tt3 = threading.Thread(target=self.populate_dataset, args=(X_test_pos, test_pos, ))\n\t\tt4 = threading.Thread(target=self.populate_dataset, args=(X_test_neg, test_neg, ))\n\t\tt1.start()\n\t\tt2.start()\n\t\tt3.start()\n\t\tt4.start()\n\t\tt1.join()\n\t\tt2.join()\n\t\tt3.join()\n\t\tt4.join()\n\t\tX_train = np.concatenate((X_train_pos, X_train_neg), axis=0)\n\t\tX_test = np.concatenate((X_test_pos, X_test_neg), axis=0)\n\t\tnp.save('X_train', X_train)\n\t\tnp.save('Y_train', Y_train)\n\t\tnp.save('X_test', X_test)\n\t\tnp.save('Y_test', Y_test)\n\t\treturn X_train, Y_train, X_test, Y_test\n\n\tdef load_data_matrices(self):\n\t\treturn np.load('X_train.npy'), np.load('Y_train.npy'), np.load('X_test.npy'), np.load('Y_test.npy')\n\t\t\n\tdef get_training_batch(self, X_train, Y_train):\n\t\tn = X_train.shape[0]\n\t\tpermutation = list(np.random.permutation(n))\n\t\tshuffled_X_train = X_train[permutation, :]\n\t\tshuffled_Y_train = Y_train[permutation, :]\n\t\tmini_batch_size = int(np.floor(n / self.batch_size))\n\t\tmini_batches = []\n\n\t\tfor k in range(mini_batch_size):\n\t\t \tmini_batch_X = shuffled_X_train[k * mini_batch_size:(k + 1) * mini_batch_size, :]\n\t\t \tmini_batch_Y = shuffled_Y_train[k * mini_batch_size:(k + 1) * mini_batch_size, :]\n\t\t \tmini_batch = (mini_batch_X, 
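# In get_training_batch here, mini_batch_size is computed as the number
# of batches (n / batch_size) but then reused as the slice stride, so
# each slice holds n/batch_size samples rather than batch_size — this
# looks like a bug. A hedged, corrected sketch of permutation minibatching:
import numpy as np

def minibatches(X, Y, batch_size):
    n = X.shape[0]
    perm = np.random.permutation(n)
    X, Y = X[perm], Y[perm]
    for k in range(0, n, batch_size):  # stride by batch_size itself
        yield X[k:k + batch_size], Y[k:k + batch_size]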
mini_batch_Y)\n\t\t \tmini_batches.append(mini_batch)\n\n\t\treturn mini_batches\n\n\tdef load_model(self):\n\t\twith tf.InteractiveSession() as sess:\n\t\t\tsaver = tf.train.Saver()\n\t\t\tsaver.restore(sess, tf.train.latest_checkpoint('models'))\n\n\tdef create_placeholders(self):\n\t\tX = tf.placeholder(tf.int32, [None, self.max_seq_length])\n\t\tY = tf.placeholder(tf.float32, [None, self.num_classes])\n\t\treturn X, Y\n\n\tdef initialize_parameters(self, X, word_vectors):\n\t\tdata = tf.Variable(tf.zeros([self.batch_size, self.max_seq_length, self.dimensions], dtype='float32'))\n\t\tdata = tf.nn.embedding_lookup(word_vectors, X)\n\n\t\tlstm_cell = tf.contrib.rnn.BasicLSTMCell(self.lstm_units)\n\t\tlstm_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell, output_keep_prob = 0.75)\n\n\t\treturn data, lstm_cell\n\n\tdef forward_propagate(self, data, lstm_cell):\n\t\tZ, _ = tf.nn.dynamic_rnn(lstm_cell, data, dtype=tf.float32)\n\n\t\tW = tf.Variable(tf.truncated_normal([self.lstm_units, self.num_classes]))\n\t\tb = tf.Variable(tf.constant(0.1, shape=[self.num_classes]))\n\t\tZ = tf.transpose(Z, [1, 0, 2])\n\t\tA = tf.gather(Z, int(Z.get_shape()[0]) - 1)\n\t\tprediction = (tf.matmul(A, W) + b)\n\t\treturn prediction\n\n\tdef compute_cost(self, prediction, Y):\n\t\treturn tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=Y))\n\n\tdef train_model(self, word_vectors, X_train, Y_train, X_test, Y_test):\n\t\ttf.reset_default_graph()\n\t\tX, Y = self.create_placeholders()\n\t\tdata, lstm_cell = self.initialize_parameters(X, word_vectors)\n\t\tprediction = self.forward_propagate(data, lstm_cell)\n\t\tcost = self.compute_cost(prediction, Y)\n\t\toptimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate).minimize(cost)\n\t\twith tf.Session() as sess:\n\t\t\tsaver = tf.train.Saver()\n\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\tcosts = []\n\n\t\t\tfor epoch in range(self.epochs):\n\t\t\t\tprint('Epoch: ', epoch)\n\t\t\t\tminibatch_cost = 0\n\t\t\t\tnum_batches = int(X_train.shape[0] / self.batch_size)\n\t\t\t\tminibatches = self.get_training_batch(X_train,Y_train)\n\n\t\t\t\tfor batch in minibatches:\n\t\t\t\t\t(batch_X, batch_Y) = batch\n\t\t\t\t\t_, loss = sess.run((optimizer, cost), feed_dict = {X: batch_X, Y: batch_Y})\n\n\t\t\t\t\tminibatch_cost += loss / num_batches\n\n\t\t\t\tif epoch % 10 == 0:\n\t\t\t\t\tprint('Cost after epoch %i: %f' % (epoch, minibatch_cost))\n\t\t\t\t\tsaver.save(sess, './models/my_model.ckpt', global_step=epoch)\n\t\t\t\tif epoch % 5 == 0:\n\t\t\t\t\tcosts.append(minibatch_cost)\n\n\t\t\t\tplt.plot(costs)\n\t\t\t\tplt.ylabel('cost')\n\t\t\t\tplt.xlabel('iterations')\n\t\t\t\tplt.show(block = False)\n\t\t\t\tplt.pause(0.0001)\n\n\t\t\tcorrect_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(Y,1))\n\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\t\t\tprint(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train}))\n\t\t\tprint(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test}))\n\t\t\tplt.show()\n\n\nif __name__ == '__main__':\n\tmd = train_model()\n\twords_list, word_vectors = md.load_words()\n\tpositive_test, negative_test, articles = md.load_training_data()\n\tpositive_train, negative_train, articles = md.load_new_data()\n\t# positive, negative, articles = md.load_training_data()\n\t# positive, negative, articles = md.load_new_data()\n\t# num_words, num_positive, num_negative = md.get_training_data_params(positive, negative)\n\tnum_words, num_positive, num_negative = 
md.get_training_data_params(positive_train, negative_train)\n\t# md.plot_data(num_words)\n\t# X_train, Y_train, X_test, Y_test = md.build_datasets(words_list, positive_train, negative_train, positive_test, negative_test)\n\t# X_train, Y_train, X_test, Y_test = md.build_datasets(words_list, train_pos, train_neg, test_pos, test_neg)\n\tX_train, Y_train, X_test, Y_test = md.load_data_matrices()\n\tmd.train_model(word_vectors, X_train, Y_train, X_test, Y_test)\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":8772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"51723915","text":"from random import choice, sample, uniform\nfrom itertools import zip_longest\n\nimport numpy as np\n\nSNP_OCCURENCE = 1/100\nSNP_COUNT = [50, 100, 150]\n# 1 centimorgan is on average 1 million bases in humans and 1\n# centimorgan is defined as the distance for a recombination\n# probability to be 1%\nRECOMBINATION_RATE = 1/1000000 * 1/100\n# TODO: Use something other than constant recomination rate throughout autosome\n\n\n# From https://en.wikipedia.org/wiki/Human_genome\n# TODO: fill in\nHUMAN_AUTOSOME_LENGTHS = [249250621, 243199373, 198022430]\n\n#TODO: NEED TO GENERATE LOCATIONS FOR SNPs.\n\nclass GenomeGenerator():\n def __init__(self, autosome_count = 1, snp_count = SNP_COUNT):\n # For each autosome, contains a lists of allele pairs for each SNP\n self._alleles = []\n # For each autosome, contains a list of the loci for SNPs\n self._gene_locations = []\n # For each autosome, contains a list of frequencies of each\n # allele in self._alleles in the general population. The given\n # number represents the frequency of the first allele pair in\n # self._alleles.\n self._allele_frequency = []\n for i in range(autosome_count):\n autosome_size = choice(snp_count) * 1000\n frequency = np.random.random(autosome_size)\n # Ensure that the minor allele frequency is at least SNP_OCCURENCE%\n # by adjusting the range of the random generator.\n frequency = (1 - 2 * SNP_OCCURENCE) * frequency + SNP_OCCURENCE\n self._allele_frequency.append(frequency)\n nucleotides = [0, 1, 2, 3]\n self._alleles.append([sample(nucleotides, 2) for i in range(autosome_size)])\n locations = self._generate_snp_locations(HUMAN_AUTOSOME_LENGTHS[i],\n autosome_size)\n self._gene_locations.append(locations)\n \n def _generate_snp_locations(self, chromosome_size, snp_count):\n \"\"\"\n Generate SNP locations along the genome.\n \"\"\"\n return np.sort(np.random.choice(chromosome_size, snp_count,\n replace = False))\n\n \n\n def generate_genome(self):\n autosomes = []\n for frequencies, alleles in zip(self._allele_frequency,\n self._alleles):\n homolog_1 = [_pick_allele(allele_pair, frequency)\n for frequency, allele_pair in zip(frequencies, alleles)]\n homolog_2 = [_pick_allele(allele_pair, frequency)\n for frequency, allele_pair in zip(frequencies, alleles)]\n autosomes.append((np.array(homolog_1, dtype = np.uint8),\n np.array(homolog_2, dtype = np.uint8)))\n return Genome(autosomes, self._gene_locations)\n\ndef _pick_allele(alleles, prob):\n \"\"\"\n Return the first element in alleles with probability prob,\n thus the second element with 1 - prob.\n \"\"\"\n if uniform(0, 1) < prob:\n return alleles[0]\n return alleles[1]\n\ndef recombination(autosome_pair, autosome_num, gene_locations):\n # Model the number of recombination locations as n independent coin flips.\n num_recombination = np.random.binomial(HUMAN_AUTOSOME_LENGTHS[autosome_num],\n 
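# generate_genome above draws each homolog allele-by-allele with
# random.uniform. The same Bernoulli draw vectorises with numpy; a
# sketch (sample_homolog is an illustrative name):
import numpy as np

def sample_homolog(alleles, freq, rng=None):
    # alleles: (n_snps, 2) candidate bases; freq: (n_snps,) P(first allele)
    rng = rng or np.random.default_rng()
    pick_second = rng.random(freq.shape[0]) >= freq
    return alleles[np.arange(len(alleles)), pick_second.astype(int)]

hom = sample_homolog(np.array([[0, 1], [2, 3]]), np.array([1.0, 0.0]))
assert list(hom) == [0, 3]  # freq 1.0 forces the first allele, 0.0 the second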
RECOMBINATION_RATE)\n locations = np.random.choice(HUMAN_AUTOSOME_LENGTHS[autosome_num],\n num_recombination, replace = False)\n recombination_indices = np.searchsorted(gene_locations, locations,\n side = \"right\")\n for start, stop in zip_longest(recombination_indices[::2],\n recombination_indices[1::2]):\n if stop is None:\n # This recombination event goes to the end of the strand\n stop = len(gene_locations) - 1\n temp = np.copy(autosome_pair[0][start:stop])\n autosome_pair[0][start:stop] = autosome_pair[1][start:stop]\n autosome_pair[1][start:stop] = temp\n\n \n \n return autosome_pair\n\nclass Genome():\n def __init__(self, autosomes, gene_locations):\n self._autosomes = autosomes\n self._locations = gene_locations\n\n def mate(self, other):\n offspring_autosomes = []\n for i, autosome_pair in enumerate(self._autosomes):\n this_parent_autosome = choice(recombination(autosome_pair, i,\n self._locations[i]))\n other_parent_autosome = choice(recombination(other._autosomes[i], i,\n other._locations[i]))\n offspring_autosomes.append((this_parent_autosome,\n other_parent_autosome))\n return Genome(offspring_autosomes, self._locations)\n \n","sub_path":"predict/genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"141906763","text":"from lib import majority\n\nclass KNN:\n def __init__(this,k,distance_func):\n this.k=k\n this.distance=distance_func\n\n def train(this,dataset):\n this.dataset=dataset\n\n def predict(this,feature):\n distances=map(lambda data:(data[0],this.distance(feature,data[1:])),this.dataset)\n distances.sort(key=lambda x:x[1])\n top=distances[0:this.k]\n return majority(top)\n\nif __name__ == '__main__':\n from test.main_decision_tree import main,distance\n main(KNN(3,distance))\n","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"173018276","text":"#!/usr/bin/env python\n# coding: utf8\n\n### !!! DON'T EDIT ME !!! ###\n### This file is managed by Ansible via the smc-cluster-mgmt repo ###\n\n\"\"\"\nThis script saves the number of concurrent rsync operations for monitoring purposes.\n\nArguments: [key=value...] \n\nRequirements:\n\n0. python2 only!\n\n1. It requires a working \"creds.dat\" file for submitting the data to the monitoring API for stackdriver.\n There is a working creds.dat in the smc project where it has been developed (SMC ops)\n\n2. 
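# recombination above converts crossover base positions into SNP indices
# with np.searchsorted and swaps the homolog segments between paired
# indices. That swap step on its own, on toy arrays. Note the original's
# unpaired-end case stops at len(gene_locations) - 1, which appears to
# drop the final SNP; the sketch runs to the end:
import numpy as np

def swap_segments(a, b, gene_locations, crossover_positions):
    idx = np.searchsorted(gene_locations, np.sort(crossover_positions), side="right")
    starts, stops = idx[::2], idx[1::2]
    for i, start in enumerate(starts):
        stop = stops[i] if i < len(stops) else len(gene_locations)
        a[start:stop], b[start:stop] = b[start:stop].copy(), a[start:stop].copy()
    return a, b

a, b = swap_segments(np.zeros(4, int), np.ones(4, int),
                     np.array([10, 20, 30, 40]), np.array([15]))
assert list(a) == [0, 1, 1, 1] and list(b) == [1, 0, 0, 0]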
$ pip install --user --update google-api-python-client\n\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport psutil\n# make process low priority for cpu and idle I/O class\nos.nice(19)\npsutil.Process(os.getpid()).ionice(ioclass=psutil.IOPRIO_CLASS_IDLE)\n#\nimport time\nimport sys\nfrom oauth2client import file\nfrom apiclient.discovery import build\nimport httplib2\nfrom pytz import utc\nfrom dateutil.parser import parse as dtparse\nfrom collections import defaultdict\nfrom os.path import expanduser, join, exists\nfrom os import makedirs\nfrom pytz import utc\nimport socket\nimport yaml\n#####\n\n# global variables (don't change them)\nPROJECT_ID = \"137606465756\" # sage-math-inc\nCUSTOM_METRIC_DOMAIN = \"custom.cloudmonitoring.googleapis.com\"\n\n# CUSTOM_METRIC_NAME = \"test\"\n\n\ndef get_service():\n # To obtain the `service`, one needs to have working creds.dat credentials\n # if it doesn't work, go back to the smc project and re-run the \"auth.py\"\n\n this_dir = os.path.dirname(os.path.realpath(__file__))\n storage = file.Storage(os.path.join(this_dir, 'creds.dat'))\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n raise Exception(\"Someone has to run $ python auth.py --noauth_local_webserver\")\n\n # Create an httplib2.Http object to handle our HTTP requests and authorize it with our good Credentials.\n http = credentials.authorize(httplib2.Http())\n service = build(serviceName=\"cloudmonitoring\", version=\"v2beta2\", http=http)\n\n return service\n\n\ndef make_data(name, value, **kwargs):\n\n # The current timestamp, used for start&end in the timeseries\n now_rfc3339 = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n\n timeseries_descriptor = {\n \"project\": PROJECT_ID,\n \"metric\": \"%s/%s\" % (CUSTOM_METRIC_DOMAIN, name)\n }\n\n if len(kwargs) >= 0:\n timeseries_descriptor[\"labels\"] = {}\n for k, v in kwargs.iteritems():\n k = \"%s/%s\" % (CUSTOM_METRIC_DOMAIN, k)\n timeseries_descriptor[\"labels\"][k] = v\n\n # Specify a new data point for the time series.\n timeseries_data = {\n \"timeseriesDesc\": timeseries_descriptor,\n \"point\": {\n \"start\": now_rfc3339,\n \"end\": now_rfc3339,\n }\n }\n if isinstance(value, int):\n timeseries_data[\"point\"][\"int64Value\"] = value\n else:\n timeseries_data[\"point\"][\"doubleValue\"] = value\n\n return timeseries_data\n\n\ndef extract_data_yaml(dp):\n # yes, it's a bit stupid to extract this from here,\n # but that way it works in general\n name = dp[\"timeseriesDesc\"][\"metric\"].split(\"/\")[-1]\n labels = dict((k.split(\"/\")[-1], v) for k, v in dp[\"timeseriesDesc\"][\"labels\"].iteritems())\n ts = dtparse(dp[\"point\"][\"start\"]).replace(tzinfo = utc)\n value = dp[\"point\"].get(\"int64Value\", None) or dp[\"point\"].get(\"doubleValue\")\n\n entry = {\"name\": name, \"timestamp\": ts, \"value\": value}\n if labels:\n entry[\"labels\"] = labels\n return entry\n\ndef extract_data_csv(data):\n datasets = defaultdict(dict)\n for dp in data:\n ts = dtparse(dp[\"point\"][\"start\"]).replace(tzinfo = utc, microsecond = 0)\n name = dp[\"timeseriesDesc\"][\"metric\"].split(\"/\")[-1] # benchmark or single\n labels = dict((k.split(\"/\")[-1], v) for k, v in dp[\"timeseriesDesc\"][\"labels\"].iteritems())\n kind = labels.get('kind', '?')\n # print(\"name: %s, kind: %s\" % (name, kind))\n if kind == \"single\": # special case for nb_sagemath, ...\n kind = name[3:]\n name = \"instances\"\n value = dp[\"point\"].get(\"int64Value\")\n # value must be a string!\n if value is None:\n dv 
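# extract_data_csv here pivots raw datapoints into one row per metric
# name with one column per label "kind". The defaultdict pivot step on
# its own (pivot is an illustrative name):
from collections import defaultdict

def pivot(points):
    rows = defaultdict(dict)
    for name, kind, value in points:
        rows[name][kind] = value
    return {n: sorted(d.items()) for n, d in rows.items()}

out = pivot([("instances", "sagemath", "3"), ("instances", "ipython", "1")])
assert out == {"instances": [("ipython", "1"), ("sagemath", "3")]}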
= dp[\"point\"].get(\"doubleValue\")\n if name == \"instances\":\n value = '%d' % int(dv)\n else:\n value = '%f' % dv\n else:\n value = '%d' % value\n datasets[name][\"timestamp\"] = ts.isoformat()\n datasets[name][kind] = value\n\n csv = {}\n for name, data in datasets.iteritems():\n # sort by kind, and timestamp in the front\n header, line = zip(*sorted(data.items(), key = lambda x : (x[0] != 'timestamp', x[0])))\n csv[name] = (header, line)\n return csv\n\ndef log_data(data, logfile_prefix = None, logfile_path = \"~/logs/\", format = \"csv\", DELIM = \",\"):\n assert format in [\"yaml\", \"csv\"]\n if logfile_prefix is None:\n raise ValueError(\"you have to specify a logfile_prefix in the arguments\")\n\n path = expanduser(logfile_path)\n if not exists(path):\n makedirs(path)\n\n def get_logfile_name(name = None):\n hostname = socket.gethostname()\n logfn = '%s.log' % '-'.join(x for x in [logfile_prefix, name, hostname] if x is not None)\n logfile_path = join(path, logfn)\n return logfile_path\n\n if format == \"csv\":\n datasets = extract_data_csv(data)\n for name, (header, line) in datasets.iteritems():\n lfn = get_logfile_name(name)\n first_line = not exists(lfn)\n with open(lfn, \"a+\") as logfile:\n if first_line:\n logfile.write(DELIM.join('\"%s\"' % h for h in header))\n logfile.write(os.linesep)\n logfile.write(DELIM.join(line))\n logfile.write(os.linesep)\n\n elif format == \"yaml\":\n with open(get_logfile_name(), \"a+\") as logfile:\n ds = [extract_data_yaml(_) for _ in data]\n y = yaml.dump(ds, default_flow_style=False, canonical=False)\n #print(y)\n logfile.write(y)\n\n\ndef submit_data(*data):\n # Submit the write request.\n service = get_service()\n request = service.timeseries().write(\n project=PROJECT_ID,\n body={\"timeseries\": data}\n )\n request.execute()\n\n\ndef main(*args):\n args = list(args)\n # print(args)\n name = args.pop(0)\n kwargs = {}\n for arg in args:\n if \"=\" in arg:\n k, v = arg.split(\"=\", 1)\n kwargs[k] = v\n try:\n value = int(args[-1])\n except:\n value = float(args[-1])\n ts = make_data(name, value, **kwargs)\n submit_data(ts)\n\n\nif __name__==\"__main__\":\n # The name and labels need to be defined first (can't be arbitrary)\n # CUSTOM_METRIC_NAME = \"concurrent_rsyncs\"\n # CUSTOM_METRIC_NAME = \"test\"\n\n from sys import argv, exit\n if len(argv) <= 2:\n print(\"\"\"\\\nYou have to specify the metric name (e.g. 'concurrent_rsyncs') as the first argument\nThe valid labels next, e.g. 
'host=storage0'\nand finally as the last argument the value which will be parsed as an integer\n[fix me properly using argparse if that's not enough for you]\n\"\"\")\n exit(1)\n main(*argv[1:])\n\n","sub_path":"files/record_metric.py","file_name":"record_metric.py","file_ext":"py","file_size_in_byte":7230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"55794243","text":"from ....Enums import bd_enums\nfrom ....Operands.Assembly.BDFunction import BDFunction\nfrom ....Abstracts.Attribute import Attribute\nfrom typing import Dict, Optional\n\n\nclass FunctionRecursion(Attribute):\n \"\"\"\n Check if the function is recursive (calls itself) or not.\n \"\"\"\n\n def __init__(self):\n super().__init__(name='FunctionRecursion', value_type=bd_enums.AttrScope.InVariant,\n ir_type=bd_enums.IRType.Assembly, target_type=bd_enums.TargetType.Function)\n\n def extract_attribute(self, base_object: BDFunction) -> Optional[Dict]:\n # Check if value already exists\n FunctionRecursion_value = base_object.get_attribute_value('FunctionRecursion')\n\n recursive = False\n if FunctionRecursion_value:\n pass\n else:\n for callee in base_object.underlying_obj.callees:\n if callee.start == base_object.underlying_obj.start:\n recursive = True\n\n FunctionRecursion_value = {\n 'recursive': recursive\n }\n\n base_object.add_attribute_value('FunctionRecursion', FunctionRecursion_value)\n\n return FunctionRecursion_value if FunctionRecursion_value else None\n","sub_path":"Operation/Attributes/Assembly/FunctionRecursion.py","file_name":"FunctionRecursion.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"35208722","text":"'''\nNaive implementation of logistic regression without using any external libraries.\n'''\nimport sys\nimport math\nimport re\n\nclass LogisticRegression:\n\tdef __init__(self):\n\t\t#number of passes over our training data\n\t\tself.epochs = 10000\n\t\t#size of the step in computing gradient ascent\n\t\tself.eta = 0.0000001\n\t\t#number of features in the feature vector\n\t\tself.m = 0\n\t\t#number of vectors \n\t\tself.n = 0\n\t\t#list of thetas, with the length being equivalent to the number of features\n\t\tself.thetas = []\n\n\tdef train(self, args):\n\t\tf = open(args, 'r')\n\t\tself.m = int(f.readline())\n\t\tself.n = int(f.readline())\n\t\t#create a list of the vectors in the dataset\n\t\tvectors = []\n\t\tfor line in f:\n\t\t\tline = re.sub(r'[\\n:]', '', line)\n\t\t\tline = line.split(' ')\n\t\t\tline = [int(x) for x in line]\n\t\t\tvectors.append(line)\n\t\t#initialize thetas to zero\n\t\tself.thetas = [0] * (self.m + 1)\n\t\tfor epoch in range(self.epochs):\n\t\t\t#initialize the list of gradients to zero\n\t\t\tgradients = [0] * (self.m + 1)\t\t\t\n\t\t\tfor vector in vectors:\n\t\t\t\ty = vector[self.m]\n\t\t\t\t#add contribution to gradient for each data point\n\t\t\t\t#calculate z\n\t\t\t\tz = self.thetas[0]\n\t\t\t\tthetaList = self.thetas[1:]\n\t\t\t\t#pythonic dot product\n\t\t\t\tz = z + sum([s * t for (s, t) in zip(thetaList, vector)])\n\t\t\t\trightExpression = y - (1.0 / (1.0 + math.exp(-z)))\n\t\t\t\tgradients[0] = gradients[0] + rightExpression\n\t\t\t\tfor j in range(1, self.m + 1):\n\t\t\t\t\tgradients[j] = gradients[j] + (vector[j-1] * rightExpression)\n\t\t\t#update all thetas\n\t\t\tfor i in range(self.m+1):\n\t\t\t\tself.thetas[i] = self.thetas[i] + (self.eta * gradients[i])\n\n\tdef test(self, 
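# The nested loops in train() above accumulate the batch gradient of the
# log-likelihood one feature at a time. The same ascent step vectorises;
# a hedged numpy sketch (theta[0] is the bias term, mirroring the class):
import numpy as np

def gradient_ascent_step(theta, X, y, eta):
    # X: (n, m) features; y: (n,) labels in {0, 1}
    z = theta[0] + X @ theta[1:]
    p = 1.0 / (1.0 + np.exp(-z))       # sigmoid
    err = y - p
    theta[0] += eta * err.sum()        # bias gradient
    theta[1:] += eta * (X.T @ err)     # per-feature gradients
    return theta

theta = gradient_ascent_step(np.zeros(3), np.array([[1.0, 0.0]]), np.array([1.0]), 0.1)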
args):\n\t\tcorrectLabels = 0\n\t\ttotalLabels = 0\n\t\tclass1 = 0\n\t\tclass0 = 0\n\t\tcorrectClass1 = 0\n\t\tcorrectClass0 = 0\n\t\tf = open(args, 'r')\n\t\tnum_vec_elems = int(f.readline())\n\t\tnum_lines = int(f.readline())\n\t\tfor line in f:\n\t\t\tline = re.sub(r'[\\n:]', '', line)\n\t\t\tline = line.split(' ')\n\t\t\tline = [int(x) for x in line]\n\t\t\ty = line[num_vec_elems]\n\t\t\tif y == 1:\n\t\t\t\tclass1 = class1 + 1\n\t\t\telse:\n\t\t\t\tclass0 = class0 + 1\n\t\t\tz = self.thetas[0]\n\t\t\tthetaList = self.thetas[1:]\n\t\t\tz = z + sum([s * t for (s, t) in zip(thetaList, line)])\n\t\t\tp = 1.0 / (1.0 + math.exp(-z))\n\t\t\ttotalLabels = totalLabels + 1\n\n\t\t\tlabel = 0\n\t\t\tif p > 0.5:\n\t\t\t\tlabel = 1\n\t\t\telse:\n\t\t\t\tlabel = 0\n\t\t\tif y == label:\n\t\t\t\tcorrectLabels = correctLabels + 1\n\t\t\t\tif y == 1:\n\t\t\t\t\tcorrectClass1 = correctClass1 + 1\n\t\t\t\telse:\n\t\t\t\t\tcorrectClass0 = correctClass0 + 1\n\t\tprint (\"Class 0: tested %d, correctly classified %d\" % (class0, correctClass0))\n\t\tprint (\"Class 1: tested %d, correctly classified %d\" % (class1, correctClass1))\n\t\tprint (\"Overall: tested %d, correctly classified %d\" % (totalLabels, correctLabels))\n\t\tprint (\"Accuracy: \", correctLabels/totalLabels)\n\ndef main():\n\tif len(sys.argv) == 3:\n\t\tclassifier = LogisticRegression()\n\t\tclassifier.train(sys.argv[1])\n\t\tclassifier.test(sys.argv[2])\n\telse:\n\t\tprint(\"Please enter the name of txt file to train on followed by the txt file to test on.\")\nif __name__ == \"__main__\":\n\tmain()","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"154624892","text":"# coding: latin1\nimport decimal\nimport os\nimport re\nfrom decimal import Decimal\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.gis.geos import Point\nfrom triplander.geoinfo import gmaplocal_coordinates, city_timezone, _wikipedia_article, wikipedia_ranking, panoramio_ranking, yahootravel_ranking\nfrom triplander.models import City, Country\nfrom triplander.functions import get_wikiname_by_url\nfrom triplander.cache import autocomplete, blue_delete_all\n\ndef add_new_city(name, country):\n # we need a name and a country at first!\n std_error_obj = {'error': 2, 'city': None}\n if(country is None): return std_error_obj\n \n # by name\n coords = gmaplocal_coordinates(name,country.name)\n if(coords is None): return std_error_obj\n # let's see if this city already exists\n new_city = None\n try:\n new_city = City.objects.get(name__iexact=coords['city'])\n if(new_city is None): new_city = City.objects.get(local_name__iexact=coords['city'])\n except ObjectDoesNotExist:\n new_city = None\n \n if new_city is not None: return {'id': new_city.id, 'slug': new_city.slug, 'error': 1}\n \n new_city = City()\n new_city.name = coords['city']\n new_city.coordinates = Point((float(coords['latitude']),float(coords['longitude'])))\n new_city.latitude = Decimal(coords['latitude'])\n new_city.longitude = Decimal(coords['longitude'])\n new_city.country = country\n \n new_city.save()\n \n # now it has an ID\n new_city_id = new_city.id\n # take timezone info\n timezone = city_timezone(new_city_id)\n new_city.timezone = timezone['offset']\n # wikipedia tentative article name & rankings\n wname = _wikipedia_article(new_city_id)\n if (wname is not None): \n new_city.wikiname = wname['wikiname']\n \n 
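# add_new_city here is a geocode-then-get-or-create flow: resolve the
# name to coordinates, reuse an existing City when one matches
# case-insensitively, otherwise insert and rank. The control flow in a
# dict-backed sketch (store and geocode are stand-ins, not real APIs):
def get_or_create_city(store, geocode, name, country):
    coords = geocode(name, country)
    if coords is None:
        return {"error": 2, "city": None}   # geocoding failed
    key = coords["city"].lower()
    if key in store:
        return {"id": store[key]["id"], "error": 1}  # already registered
    store[key] = dict(coords, id=len(store) + 1)
    return {"id": store[key]["id"], "error": 0}

store = {}
assert get_or_create_city(store, lambda n, c: {"city": n}, "Paris", "FR")["error"] == 0
assert get_or_create_city(store, lambda n, c: {"city": n}, "paris", "FR")["error"] == 1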
new_city.save()\n # rank, and it's done\n rank_city(new_city)\n \n return {'id': new_city_id, 'slug': new_city.slug, 'error': 0}\n\ndef rank_city(city,save=True):\n # here we are, the rankings\n if(city.wikiname is not None):\n r1 = (wikipedia_ranking(city.wikiname))\n r1 = r1['content']\n else:\n r1 = 0.0\n \n r2 = (panoramio_ranking(city.id))\n r3 = (yahootravel_ranking(city.id))\n\n city.setRating(r1,r2,r3)\n if(save): city.save()\n\ndef update_city(id,name,country,latitude,longitude,wikiname,localname=None,population=None,rerank=False):\n city = City.objects.get(pk=id)\n if(city is None): return\n old_name = city.name\n \n city.name = name\n city.country = country\n city.coordinates = Point(float(latitude),float(longitude))\n city.latitude = Decimal(latitude)\n city.longitude = Decimal(longitude)\n city.wikiname = get_wikiname_by_url(wikiname)\n city.local_name = localname\n city.population = population\n \n city.save()\n\n # empty cache\n blue_delete_all(id)\n \n autocomplete.CityAutocompleter.reset_cache(name[0].lower())\n if(old_name[0].lower != name[0].lower()):\n autocomplete.CityAutocompleter.reset_cache(old_name[0].lower()) \n \n # reranking\n if(rerank): rank_city(city,save=True)","sub_path":"branches/production-src/triplander/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"98883175","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nfig = plt.figure()\nax = fig.add_subplot()\nax.set_xlim([10, 35])\nax.set_ylim([0, 200])\nax.set_title(\"Lineární regrese\")\n\nx = np.array([])\ny = np.array([])\n\naxes = fig.gca()\nx_vals = np.array([])\ny_vals = np.array([])\nregre_plot, = ax.plot(x_vals, y_vals, '--')\n\ndef onclick(event):\n ix, iy = event.xdata, event.ydata\n if ix is not None and iy is not None:\n global x\n global y\n x = np.append(x, ix)\n y = np.append(y, iy)\n\n ax.scatter(ix, iy)\n\n # Linear regression\n n = np.size(x)\n\n if n > 1:\n # mean of x and y vector\n m_x = np.mean(x)\n m_y = np.mean(y)\n\n # calculating cross-deviation and deviation about x\n SS_xy = np.sum(y*x) - n*m_y*m_x\n SS_xx = np.sum(x*x) - n*m_x*m_x\n\n # calculating regression coefficients\n b_1 = SS_xy / SS_xx\n b_0 = m_y - b_1*m_x\n\n\n axes = fig.gca()\n\n x_vals = np.array(axes.get_xlim())\n y_vals = b_0 + b_1 * x_vals\n regre_plot.set_ydata(y_vals)\n regre_plot.set_xdata(x_vals)\n\n fig.canvas.draw()\n fig.canvas.flush_events()\n\n\ncid = fig.canvas.mpl_connect('button_press_event', onclick)\n\nplt.show()\n","sub_path":"linear-regression-demo.py","file_name":"linear-regression-demo.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"593997015","text":"#!/usr/bin/env python\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpclient\n\nfrom tornado.httpserver import HTTPServer\nfrom os import environ\nfrom fontcut.handler import MainHandler\n\n\ndef make_app():\n return tornado.web.Application([\n (r\"/\", MainHandler),\n ])\n\nif __name__ == \"__main__\":\n app = make_app()\n tornado.httpclient.AsyncHTTPClient.configure(\n 'tornado.curl_httpclient.CurlAsyncHTTPClient',\n )\n app.client = tornado.httpclient.AsyncHTTPClient()\n server = HTTPServer(app)\n server.bind(int(environ.get('PORT', 8888)))\n server.start()\n 
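# The onclick handler in the regression demo above recomputes ordinary
# least squares from the cross-deviation SS_xy and deviation SS_xx on
# every click. The closed form in isolation:
import numpy as np

def least_squares(x, y):
    n = x.size
    m_x, m_y = x.mean(), y.mean()
    ss_xy = np.sum(x * y) - n * m_x * m_y
    ss_xx = np.sum(x * x) - n * m_x * m_x
    b1 = ss_xy / ss_xx      # slope
    b0 = m_y - b1 * m_x     # intercept
    return b0, b1

b0, b1 = least_squares(np.array([0.0, 1.0, 2.0]), np.array([1.0, 3.0, 5.0]))
assert abs(b0 - 1.0) < 1e-9 and abs(b1 - 2.0) < 1e-9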
tornado.ioloop.IOLoop.current().start()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"449280749","text":"from math import pi, ceil\r\nfrom fiberCollection import fiberCollection\r\nfrom FibCollocationDiscretization import ChebyshevDiscretization\r\nfrom RPYVelocityEvaluator import EwaldSplitter\r\nfrom Domain import PeriodicShearedDomain\r\nfrom TemporalIntegrator import CrankNicolson\r\nfrom CrossLinkedNetwork import KMCCrossLinkedNetwork\r\nfrom FileIO import prepareOutFile, writeArray\r\nimport numpy as np\r\n\r\n\"\"\"\r\nStrainingCrossLinkedNetwork.py \r\nThis file runs the small amplitude oscillatory shear test on \r\nthe steady state fiber network. \r\n\"\"\"\r\n\r\ndef saveCurvaturesAndStrains(omega,nFib,nCL,allFibers,CLNet,HydroStr,wora='a'):\r\n Xf = allFibers.getX();\r\n LinkStrains = CLNet.calcLinkStrains(allFibers.getUniformPoints(Xf), Dom);\r\n FibCurvatures = allFibers.calcCurvatures(Xf);\r\n if (nCL > 0):\r\n writeArray('DynamicLinkStrains'+HydroStr+'Om'+str(omega)+'F'+str(nFib)+'C'+str(nCL)+'.txt',LinkStrains,wora=wora)\r\n writeArray('DynamicFibCurves'+HydroStr+'Om'+str(omega)+'F'+str(nFib)+'C'+str(nCL)+'.txt',FibCurvatures,wora=wora)\r\n\r\nimport sys\r\nsys.path.append(\"/NetworkSteadyStates/\") \r\n\r\n# Inputs for the slender body simulation\r\nnFib = 700; # number of fibers\r\nnCL = 12*nFib; # maximum # of CLs\r\nN=16 # number of points per fiber\r\nLf=2 # length of each fiber\r\nLd=4 # length of the periodic domain\r\nxi = 0.5*(N*nFib)**(1/3)/Ld; # Ewald param\r\nmu=1 # fluid viscosity\r\neps=1e-3 # slenderness ratio\r\nEb=0.01 # fiber bending stiffness\r\ngrav=0 # value of gravity if it exists\r\nKspring=1 # cross linker stiffness\r\nrl=0.5 # cross linker rest length\r\nkonCL = 1000; # cross linker binding rate (not applicable here, but an input to the class)\r\nkoffCL = 1e-16; # cross linker unbinding rate (not applicable here, but an input to the class)\r\n\r\n# Array of frequencies in Hz \r\nomHzs = [0.01, 0.02, 0.05, 0.10, 0.20, 0.50, 1, 2, 5, 10];\r\niO = 6; # index of the Omega we are doing \r\nomHz = omHzs[iO];\r\nomega = 2*pi*omHz;\r\nif (iO > 0):\r\n loadOmega = omHzs[iO-1]; # previous omega to load\r\nelse:\r\n loadOmega = 0; # will load from steady state\r\n \r\n# Set up simulation variables \r\ngam0 = 0.1*omega # strain amplitude\r\nT = 1.0/omHz; # one period\r\nnCyc = ceil(omHz) + 3; # 3 cycles + 1 second to relax the network \r\ntf = nCyc*T;\r\ndt = min(T/20,5e-3); # maximum stable timestep is 1e-3\r\nsaveEvery = 10;#int(T/(2*dt)+1e-10); # measure curvature at the start and middle of each cycle\r\nprint('Omega %f: stopping time %f' %(omHz,tf))\r\n\r\nnonLocal=1; # 0 for local drag, 1 for nonlocal hydro\r\nnIts = 1; # number of iterations\r\nHydroStr='';\r\nif (nonLocal==1):\r\n HydroStr='HYDRO';\r\n nIts = 2;\r\n\r\nnp.random.seed(1);\r\n\r\n# Initialize the domain and spatial database\r\nDom = PeriodicShearedDomain(Ld,Ld,Ld);\r\n\r\n# Initialize Ewald for non-local velocities\r\nEwald = EwaldSplitter(np.sqrt(1.5)*eps*Lf,mu,xi,Dom,N*nFib);\r\n\r\n# Initialize fiber discretization\r\nfibDisc = ChebyshevDiscretization(Lf,eps,Eb,mu,N);\r\n\r\n# Initialize the master list of fibers\r\nallFibers = fiberCollection(nFib,fibDisc,nonLocal,mu,omega,gam0,Dom,nThreads=4);\r\n\r\n# Initialize the fiber list\r\nfibList = [None]*nFib;\r\nif (loadOmega > 0):\r\n print('Loading previous dynamics using Omega in Hz = %f' 
%loadOmega)\r\n XFile = 'NetworkSteadyStates/DynamicSSLocationsOm'+str(loadOmega)+'F'+str(nFib)+'C'+str(nCL)+'.txt';\r\n XsFile = 'NetworkSteadyStates/DynamicTangentVecsOm'+str(loadOmega)+'F'+str(nFib)+'C'+str(nCL)+'.txt';\r\nelse:\r\n print('Loading from steady state')\r\n XFile = 'NetworkSteadyStates/SSLocationsF'+str(nFib)+'C'+str(nCL)+'E0.01.txt';\r\n XsFile = 'NetworkSteadyStates/TangentVecsF'+str(nFib)+'C'+str(nCL)+'E0.01.txt';\r\nallFibers.initFibList(fibList,Dom,pointsfileName=XFile,tanvecfileName=XsFile);\r\nallFibers.fillPointArrays();\r\n\r\n# Initialize the network of cross linkers\r\n# New seed for CLs\r\nCLseed = 2;\r\nnp.random.seed(CLseed);\r\nCLNet = KMCCrossLinkedNetwork(nFib,N,fibDisc.getNumUniform(),Lf,nCL,Kspring,rl,konCL,koffCL,CLseed,Dom,fibDisc,nThreads=4);\r\nCLNet.setLinksFromFile('NetworkSteadyStates/F'+str(nFib)+'C'+str(nCL)+'.txt',Dom);\r\n \r\n# Initialize the temporal integrator\r\nTIntegrator = CrankNicolson(allFibers, CLNet);\r\nTIntegrator.setMaxIters(nIts);\r\n\r\n# Prepare the output file and write initial locations\r\nof = prepareOutFile('DynamicLocations'+HydroStr+'Om'+str(omHz)+'LocsF'+str(nFib)+'C'+str(nCL)+'.txt');\r\nallFibers.writeFiberLocations(of);\r\nsaveCurvaturesAndStrains(omHz,nFib,nCL,allFibers,CLNet,HydroStr,wora='w')\r\n\r\n# Run to steady state\r\nstopcount = int(tf/dt+1e-10);\r\nLamstress = np.zeros(stopcount); \r\nElstress = np.zeros(stopcount); \r\nCLstress = np.zeros(stopcount);\r\nfor iT in range(stopcount): \r\n wr=0;\r\n if ((iT % saveEvery) == (saveEvery-1)):\r\n print('Time %1.2E' %(float(iT)*dt));\r\n wr=1;\r\n maxX = TIntegrator.updateAllFibers(iT,dt,stopcount,Dom,Ewald,grav/Lf,of,write=wr);\r\n Lamstress[iT],Elstress[iT],CLstress[iT] = TIntegrator.computeStress(Dom,iT,dt)\r\n if (iT==699):\r\n writeArray('Lambdas'+HydroStr+'.txt',TIntegrator._allFibers.getLambdas());\r\n if (wr==1):\r\n saveCurvaturesAndStrains(omHz,nFib,nCL,allFibers,CLNet,HydroStr)\r\n print('Max x: %f' %(maxX));\r\n\r\nwriteArray('NewLambdaStress'+HydroStr+'Om'+str(omHz)+'F'+str(nFib)+'C'+str(nCL)+'.txt',Lamstress)\r\nwriteArray('NewElasticStress'+HydroStr+'Om'+str(omHz)+'F'+str(nFib)+'C'+str(nCL)+'.txt',Elstress)\r\nwriteArray('NewCLStress'+HydroStr+'Om'+str(omHz)+'F'+str(nFib)+'C'+str(nCL)+'.txt',CLstress)\r\n\r\n# Destruction and cleanup\r\nof.close();\r\ndel Dom;\r\ndel Ewald;\r\ndel fibDisc;\r\ndel allFibers;\r\ndel TIntegrator; \r\n\r\n\r\n\r\n","sub_path":"Python/Examples/StrainingCrossLinkedNetwork.py","file_name":"StrainingCrossLinkedNetwork.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"50723777","text":"import xml.etree.cElementTree as ET\r\nfrom xml.dom import minidom\r\nimport time\r\nimport os\r\nimport serial\r\n\r\nTSData = ET.Element(\"TSData\")\r\nHeader = ET.SubElement(TSData, \"Header\")\r\n\r\nArduinoSerial = serial.Serial('COM3',9600) \r\ntime.sleep(2)\r\n\r\nprint('KUAL TECH MANUAL CARD INSERT v1.1')\r\n\r\n\r\n\r\ndef main():\r\n new = ET.Element('TSdata')\r\n Header = ET.SubElement(new, 'Header')\r\n Version = ET.SubElement(Header, 'Version')\r\n Version.text = 'HSHIF25'\r\n Issuer = ET.SubElement(Header, 'Issuer')\r\n Issuer.text = '1'\r\n Receiver = ET.SubElement(Header, 'Receiver')\r\n Receiver.text = '1'\r\n ID = ET.SubElement(Header, 'ID')\r\n ID.text = '{}' .format(int(time.time()))\r\n WLR = ET.SubElement(new, 'WhitelistRecord')\r\n UTID = ET.SubElement(WLR, 'UTID')\r\n \r\n \r\n print('Введите номер карты:')\r\n def 
read():\r\n        p = str(ArduinoSerial.readline().rstrip().decode(\"utf-8\"))\r\n        txt = p\r\n        print(txt)\r\n        return txt\r\n    UTID.text = read()\r\n    Coding = ET.SubElement(WLR, 'Coding')\r\n    Coding.text = '33554432'\r\n    Action = ET.SubElement(WLR, 'Action')\r\n    Action.text = 'U'\r\n    Permission = ET.SubElement(WLR, 'Permission')\r\n    UPID = ET.SubElement(Permission, 'UPID')\r\n    UPID.text = UTID.text\r\n    TSPTICKET = ET.SubElement(Permission, 'TSProperty', Type=\"TICKETTYPE\")\r\n    TID = ET.SubElement(TSPTICKET, \"ID\")\r\n    TID.text = '1'\r\n    TSPAREA = ET.SubElement(Permission, 'TSProperty', Type=\"AREA\")\r\n    AID = ET.SubElement(TSPAREA, \"ID\")\r\n    def choose_zone():\r\n        print('Choose an access zone')\r\n        print('1 first floor')\r\n        print('2 second floor')\r\n        print('3 third floor')\r\n        print('4 fourth floor')\r\n        AID.text = input()\r\n        if AID.text not in ('1', '2', '3', '4'):\r\n            print('Invalid zone')\r\n            return choose_zone()\r\n        \r\n        else:\r\n            print('Access granted')\r\n            print('Saved. Card number: ' + str(UTID.text))\r\n    \r\n    choose_zone() \r\n\r\n\r\n    TSPEVENT = ET.SubElement(Permission, 'TSProperty', Type=\"EVENT\")\r\n    EID = ET.SubElement(TSPEVENT, \"ID\")\r\n    EID.text = '1'\r\n    TSPERSON = ET.SubElement(Permission, 'TSProperty', Type=\"PERSONCATEGORY\")\r\n    PID = ET.SubElement(TSPERSON, \"ID\")\r\n    PID.text = '1'\r\n    \r\n    print()\r\n    print()\r\n    print()\r\n\r\n    \r\n    save_xml('whitelist_{}.txt' .format(int(time.time())), new)\r\n\r\n\r\ndef save_xml(filename, xml_code):\r\n    xml_string = ET.tostring(xml_code).decode()\r\n    \r\n    xml_prettyxml = minidom.parseString(xml_string).toprettyxml()\r\n    with open(filename, 'w') as xml_file:\r\n        xml_file.write(xml_prettyxml)\r\n\r\n\r\n\r\n    \r\nif __name__ == '__main__':\r\n    while True:\r\n        main()\r\n","sub_path":"test_XML_v2_1_reader_Arduino.py","file_name":"test_XML_v2_1_reader_Arduino.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"254828015","text":"from .track import Track\nfrom .colors import colors\nfrom .worker import Worker\nfrom .abnormal_det import MovingAverage\n\nfrom io import BytesIO\nfrom threading import Thread\nfrom queue import Queue\nfrom time import sleep\n\nimport numpy as np\nimport requests\nimport cv2\nimport os\n\nos.environ[\"OPENCV_FFMPEG_CAPTURE_OPTIONS\"] = \"rtsp_transport:udp\"\n# from termcolor import colored\n\nPAR = True\nINTEVAL = 5 # det every $INTEVAL frames\nREFRESH_INTEVAL = 3\n\nATTRIBUTES = ['Female', 'Front', 'Side', 'Back', 'Hat',\n 'Glasses', 'Hand Bag', 'Shoulder Bag', 'Backpack',\n 'Hold Objects in Front', 'Short Sleeve', 'Long Sleeve',\n 'Long Coat', 'Trousers', 'Skirt & Dress']\n\n\n\n\n\ndef _nd2file(img_nd):\n    return BytesIO(cv2.imencode('.jpg', img_nd)[1])\n\n\ndef _cvt_ltrb2ltwh(boxes):\n    boxes_ = []\n    for b in boxes['dets']:\n        boxes_.append(b['x1y1x2y2'])\n    boxes = np.array(boxes_)\n    boxes[:, 2: 4] -= boxes[:, :2]\n    return boxes[:, :4]\n\n\ndef _crop(frame, trk_box):\n    H, W, _ = frame.shape\n    left, t, w, h = map(int, trk_box)\n    left = max(left, 0)\n    t = max(t, 0)\n    r = min(left + w, W)\n    b = min(t + h, H)\n    crop = frame[t: b, left: r, :]\n    return cv2.resize(crop, (128, 256))\n\n\nclass Agent:\n\n    \"\"\"\n    interact with `display_queue` and `control_queue`\n    display_queue: rendered images as output\n    control_queue: (x, y) coordinate pairs as input\n    \"\"\"\n\n    def __init__(self, source, host='localhost'):\n        try:\n            source = int(source)\n        except ValueError:\n            # expanduser only applies to string paths; an int stays a camera index\n            source = os.path.expanduser(source)\n        self.source = source\n        
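# the source may be a device index, a file path or an RTSP URL; the CAP_PROP_* settings below are hints that some OpenCV capture backends silently ignore\n        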
self.cap = cv2.VideoCapture(self.source)\n self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)\n self.cap.set(cv2.CAP_PROP_FPS, 12)\n # self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 704)\n # self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n self.display_queue = Queue(32)\n self.control_queue = Queue(1)\n self.q_reg = Queue(32) # register queue\n self.frame_count = 0\n self.api_calls = {k: 0 for k in ['register', 'detection', 'feature',\n 'query', 'refresh', 'attributes']}\n # HOST = '192.168.1.100' # 192.168.20.122\n # HOST = '192.168.1.253' # 192.168.20.122\n # HOST = '192.168.20.191' # 192.168.20.122\n\n DET_URL = 'http://%s:6666/det' % host\n EXT_URL = 'http://%s:6667/ext' % host\n PAR_URL = 'http://%s:6668/par' % host\n CMP_URL = 'http://%s:6669/{}' % host\n\n def det(img_file, api_calls):\n api_calls['detection'] += 1\n response = requests.post(DET_URL, files={'img': img_file})\n return response.json()\n\n def ext(img_file, api_calls):\n api_calls['feature'] += 1\n response = requests.post(EXT_URL, files={'img': img_file})\n return response.json()\n\n def par(img_file, api_calls):\n api_calls['attributes'] += 1\n # print(img_file)\n response = requests.post(PAR_URL, files={'img': img_file})\n # print(response)\n return np.array(response.json()['predictions'], dtype=np.uint8)\n\n def up(identity, feature, api_calls):\n api_calls['register'] += 1\n response = requests.post(CMP_URL.format('update'),\n json={'id': identity, 'feature': feature})\n response.json()\n\n def query(feature, api_calls):\n api_calls['query'] += 1\n response = requests.post(CMP_URL.format('query'),\n json={'id': '', 'feature': feature})\n return response.json()\n\n self.CMP_URL = CMP_URL\n self.ext = ext\n self.up = up\n self.w_det = Worker(lambda x: (x, det(_nd2file(x), self.api_calls)))\n self.w_ext = Worker(lambda i, x: (i, ext(_nd2file(x), self.api_calls)))\n self.w_cmp = Worker(lambda i, x: (i, query(x, self.api_calls)))\n self.w_par = Worker(lambda i, x: (i, par(_nd2file(x), self.api_calls)))\n self.matches = {}\n # self.sim_ema = {}\n\n class _Track(Track):\n ALL = set()\n current_id = 0\n\n self.Track = _Track\n self.running = True\n self.suspend = False\n \n #self.th = Thread(target=self.loop)\n #self.th.start()\n\n def loop(self):\n while self.running:\n # sleep(0.1)\n if self.suspend == True:\n sleep(0.5)\n ret, frame = self.cap.read()\n # ret, frame = self.cap.read()\n # ret, frame = self.cap.read()\n\n if not ret or frame is None:\n self.cap = cv2.VideoCapture(self.source)\n # print('renewed', self.source)\n continue\n # frame = cv2.resize(frame, (0, 0), fx=.5, fy=.5) # down-sampling\n frame_ = frame.copy()\n self.Track.step(frame_)\n if self.frame_count % INTEVAL == 0:\n self.w_det.put(frame_)\n self.Track.decay()\n self._post_det_procedure()\n self._post_ext_procedure()\n self._post_cmp_procedure(frame_)\n self._post_reg_procedure()\n if PAR:\n self._post_par_procedure()\n if not self.control_queue.empty():\n x, y = self.control_queue.get()\n self.click_handle(frame_, x, y)\n self._render(frame)\n # print(self.display_queue.qsize())\n # print(self.w_cmp.p.qsize(), self.w_cmp.q.qsize())\n self.display_queue.put(frame[...,::-1]) # give RGB\n self.frame_count += 1\n self._kill_workers()\n\n def reset(self):\n print('sending reset request')\n response = requests.post(self.CMP_URL.format('reset'), json={})\n return response.json()\n\n def save(self):\n response = requests.post(self.CMP_URL.format('save'), json={})\n return response.json()\n\n def stop(self):\n self.running = False\n self.th.join(.1)\n\n def 
click_handle(self, frame, x, y):\n        H, W, _ = frame.shape\n        x *= W\n        y *= H\n        # print(x, y, 'agent', self.source)\n        for trk in self.Track.ALL:\n            l, t, w, h = trk.box\n            if l < x < l + w and t < y < t + h:\n                def work():\n                    img_roi = _crop(frame, trk.box)\n                    trk.feature = self.ext(_nd2file(img_roi), self.api_calls)\n                    self.up(str(trk.id), trk.feature, self.api_calls)\n                    self.q_reg.put(1)\n\n                th = Thread(target=work)\n                th.start()\n                th.join()\n                break # only match once\n\n    def _post_det_procedure(self):\n        if self.w_det.has_feedback():\n            frame_, boxes = self.w_det.get()\n            if len(boxes):\n                boxes = _cvt_ltrb2ltwh(boxes)\n                self.Track.update(frame_, boxes)\n                for t in self.Track.ALL:\n                    if t.visible:\n                        if isinstance(t.id, int):\n                            if t.age % REFRESH_INTEVAL == 0:\n                                if t.age // REFRESH_INTEVAL:\n                                    self.api_calls['refresh'] += 1\n                                img_roi = _crop(frame_, t.box)\n                                self.w_ext.put(t, img_roi)\n            else:\n                for t in self.Track.ALL:\n                    t.visible = False\n                    t.health -= 1 if t.age > self.Track.PROBATION else 9999\n\n    def _post_ext_procedure(self):\n        if not self.w_ext.p.empty():\n            t, feature = self.w_ext.get()\n            t.feature = feature\n            self.w_cmp.put(t, feature)\n\n    def _post_cmp_procedure(self, frame_):\n        if not self.w_cmp.p.empty():\n            t, ret = self.w_cmp.get()\n            i = ret.get('id')\n            # assert isinstance(i, str)\n            c = colors[ret.get('idx')]\n            if i is not None and i != -1:\n                t.similarity = ret.get('similarity')\n                # sim_ema = self.sim_ema.setdefault(i, MovingAverage(\n                #     t.similarity, conf_band=2.5))\n                if PAR:\n                    self.w_par.put(t, _crop(frame_, t.box))\n                if t.similarity > .94:\n                    # if sim_ema(t.similarity):\n                    if i in self.matches:\n                        f = self.matches[i]\n                        if t > f:\n                            f.color = Track.color\n                            f.id = int(f.id)\n                            f.similarity = 0\n                            self.matches[i] = t\n                    else:\n                        self.matches[i] = t\n                    self.matches[i].color = c\n                    self.matches[i].id = i\n\n    def _post_reg_procedure(self):\n        if not self.q_reg.empty():\n            self.q_reg.get()\n            for t in self.Track.ALL:\n                if t.feature is not None:\n                    self.w_cmp.put(t, t.feature)\n\n    def _post_par_procedure(self):\n        if not self.w_par.p.empty():\n            t, att = self.w_par.get() # person attributes\n            setattr(t, 'par', att)\n\n    def _render(self, frame):\n        self.Track.render(frame)\n        for t in self.Track.ALL:\n            if hasattr(t, 'par') and t.visible:\n                x, y, w, h = map(int, t.box)\n                y += h//4\n                for a, m in zip(ATTRIBUTES, t.par):\n                    if a == 'Female' and not m:\n                        a = 'Male'\n                        m = True\n                    if m:\n                        cv2.putText(frame, a, (x + w + 3, y),\n                                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, t.color, 2)\n                        y += h//8\n        for i, kv in enumerate(self.api_calls.items()):\n            cv2.putText(frame, '{:<10}'.format(kv[0]), (10, i*20 + 60),\n                        cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 0), 1)\n            cv2.putText(frame, '{:>6}'.format(kv[1]), (100, i*20 + 60),\n                        cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 0), 1)\n\n    def _kill_workers(self):\n        for w in [self.w_ext, self.w_det, self.w_cmp, self.w_par]:\n            w.suicide()\n","sub_path":"scr/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":10570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"237602249","text":"# -*- coding:utf-8 -*-\n# Solution approach:\n# 1 What counts as a space that can hold water: a bar followed by a dip, i.e. the next bar is lower than the current one, with another rise somewhere behind it\n# 2 Once such a space is found, compute how much water it holds: 1 unit of width * 1 unit of height is one unit, and any bars already occupying the dip must be subtracted\n# 3 Use one stack and two pointers: the first pointer points at the first element, the second walks over the elements after it\n# 4 when the current bar is lower than the stack top, push it; when it is taller, pop the top and settle the water between it and the new top:\n#   min(height[current], height[st[top]]) * (current - st[top] - 1)\n
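#   e.g. height = [2,0,2]: push index 0, push index 1; at i = 2 pop index 1 (h = 0): width = 2 - 0 - 1 = 1, bounded height = min(2, 2) - 0 = 2, so water += 1 * 2 = 2\n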
# take the shorter of the two bounding walls as the pool height and subtract any bars heights[i] > 0 sitting in between,\n# \n\n\nclass Solution(object):\n    def trap(self, height):\n        \"\"\"\n        :type height: List[int]\n        :rtype: int\n        \"\"\"\n        if len(height) < 3:\n            return 0\n        \n        sum = 0 # total trapped water\n        current = 0 # current index\n        st = [] # stack \n        \n\n        for current in range(0, len(height)):\n            # keep looping while the stack is non-empty and the current bar is taller than the stack top\n            while len(st) > 0 and height[current] > height[st[0]]:\n                h = height[st[0]]; # height of the bar about to be popped\n                st.pop(0); # pop it\n                if (len(st) == 0): # stack empty, nothing left to bound the water\n                    break; \n                \n                distance = current - st[0] - 1 # distance between the two walls\n                minVal = min(height[st[0]], height[current])\n                sum = sum + distance * (minVal - h)\n            \n            st.insert(0, current) # push the current wall\n        \n        return sum\n\nif __name__ == \"__main__\":\n    obj = Solution() \n    # print(obj.trap([0,1,0,2,0,1,0,1,3,2,1,2,1]))\n    # print(obj.trap([1,2,3,4,0,5])) \n    # print(obj.trap([2,0,2]))\n    print(obj.trap([9,2,9,3,2,2,1,4,8]))\n    ","sub_path":"Week_01/G20200343030585/LeetCode_42_585.py","file_name":"LeetCode_42_585.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"210711091","text":"import os\nimport errno\nimport stat\nimport socket\n\nimport yu.ssh.ssh as ssh\nfrom yu.network.Location import Location\n\n\nclass RemoteNode(object):\n    def __init__(self, ip_address):\n        self.location = Location(ip_address)\n        self.m_connected = False\n        self.m_connected_as_root = False\n        self.m_sshSession = ssh.Session(self.location.address)\n        self.connected_username = None\n        self.password = None\n        self.ssh_key = None\n        self.configured_hostname = None\n        self.connectivity_status = None\n\n    def set_ssh_key(self, ssh_key_path):\n        if not os.path.isfile(ssh_key_path):\n            raise IOError(ssh_key_path + \" is not a valid key\")\n        self.ssh_key = ssh_key_path\n\n    def connect(self, username, password=None):\n        if self.ssh_key is None and password is None:\n            raise RuntimeError(\"No authentication provided\")\n\n        if self.ssh_key:\n            if not os.path.isfile(self.ssh_key):\n                raise IOError(self.ssh_key + \" is not a valid key\")\n            self.m_sshSession.connect(username, ssh_key=self.ssh_key)\n        else:\n            pwd = password\n            if password is None:\n                pwd = self.password\n            self.m_sshSession.connect(username, pwd)\n\n        self.m_connected = True\n        self.connected_username = username\n        if username == \"root\":\n            self.m_connected_as_root = True\n\n    def reconnect(self, username=None, password=None):\n        self.m_sshSession.close()\n        if username is None:\n            if self.connected_username is not None:\n                username = str(self.connected_username)\n                self.connected_username = None\n            else:\n                raise RuntimeError(\"No username has been provided for the reconnect\")\n        self.connect(username, password)\n\n    def command(self, command, timeout=None, shell=False):\n        \"\"\"\n        Performs a command on this node\n\n        :param command: the command, as a string, to perform on the remote node\n        :param timeout: the maximum amount of time to allow the command to run\n        :param shell: whether or not to execute the command in a shell\n                      If your command needs to use shell operations like \"~\" for home dir or you want\n                      to use pipes (|) and redirects (< or >) then you need to set this to True\n\n        :return: a tuple of result code and result string\n        :raises: socket.timeout if the command exceeds the provided timeout\n        \"\"\"\n        if not self.m_connected:\n            return 1, \"Node session to \" + self.location.address + \" not connected\"\n\n        try:\n            
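# a dropped session surfaces as a RuntimeError from the ssh wrapper; the handler below retries once\n            return 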
self.m_sshSession.exec_command(command, timeout=timeout, shell=shell)\n\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            try:\n                self.reconnect()\n                return self.m_sshSession.exec_command(command, timeout=timeout, shell=shell)\n            except socket.timeout as e:\n                raise e\n            except Exception:\n                return 1, \"Node session to \" + self.location.address + \" not connected (attempted one retry)\"\n\n    def copy_file_to(self, path_to_file_to_copy, destination_filename=None, destination_dir=None):\n        \"\"\"\n        Copy a file to this node from the local node\n        You can optionally choose a new filename for the file and what directory the file will be copied to\n        The file will be copied to the home directory of the connection (root by default) and use the filename\n        of the original file\n\n        :param path_to_file_to_copy: The file to copy\n        :param destination_filename: The name to give the file on the remote node (Optional)\n                                     Will default to the original filename provided by path_to_file_to_copy\n        :param destination_dir: The destination on the remote node to copy to (Optional)\n                                Will default to the home dir of the user if omitted\n        :raises RuntimeError if the copy fails\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            self.m_sshSession.copy_file_to(path_to_file_to_copy, destination_filename, destination_dir)\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            self.reconnect()\n            self.m_sshSession.copy_file_to(path_to_file_to_copy, destination_filename, destination_dir)\n\n    def copy_dir_to(self, local_dir_to_copy, destination_dir=None):\n        \"\"\"\n        Copy a directory to this RTDB node from the local node\n        You can optionally choose the base directory you want to copy the directory to.\n        e.g. 
when copying the directory \"lab_results\" and providing destination_dir=\"/home/ignaz\" the directory\n        will be created as /home/ignaz/lab_results on the remote node\n        If no destination directory is provided then the directory will be copied to the home directory\n        of the user of the connection (root by default)\n\n        :param local_dir_to_copy: the path to the directory you want to copy\n        :param destination_dir: the destination root on the remote node that you want to copy the directory to\n        :raises RuntimeError if the copy fails\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            self.m_sshSession.copy_dir_to(local_dir_to_copy, destination_dir)\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            self.reconnect()\n            self.m_sshSession.copy_dir_to(local_dir_to_copy, destination_dir)\n\n    def copy_file_from(self, path_to_file_on_remote, destination_filename=None, destination_dir=None):\n        \"\"\"\n        Copy a file from this RTDB node to the local node\n        You can optionally choose a new filename for the copied file and what directory the file will be copied to\n        The file will be copied to the current working directory and use the filename of the original file by default\n\n        :param path_to_file_on_remote: The file on the remote node to copy\n        :param destination_filename: The name to give the copied file\n        :param destination_dir: The destination to copy the file to locally\n        :raises RuntimeError if the copy fails\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            self.m_sshSession.copy_file_from(path_to_file_on_remote, destination_filename, destination_dir)\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            self.reconnect()\n            self.m_sshSession.copy_file_from(path_to_file_on_remote, destination_filename, destination_dir)\n\n    def copy_dir_from(self, path_to_dir_on_remote, destination_dir=None):\n        \"\"\"\n        Copy a directory from this RTDB node to the local node\n        You can optionally choose a new directory name for the copied dir\n        The directory will be copied to the current working directory with the same directory name as on the remote\n        by default\n\n        :param path_to_dir_on_remote: The dir on the remote node to copy\n        :param destination_dir: The destination to copy the directory to\n        :raises RuntimeError if the copy fails\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            self.m_sshSession.copy_dir_from(path_to_dir_on_remote, destination_dir)\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            self.reconnect()\n            self.m_sshSession.copy_dir_from(path_to_dir_on_remote, destination_dir)\n\n    def delete_file(self, remote_path, error_if_not_exists=True):\n        \"\"\"\n        Delete the provided file from this node\n\n        :param remote_path: The path to the file to delete\n        :param error_if_not_exists: when set to True; if the file doesn't exist before this operation then an IOError\n                                    will be raised.\n                                    When set to False the file not existing will be ignored and will be considered a\n                                    success case.\n                                    Optional: Default is True\n        :raises RuntimeError if the delete operation failed\n        :raises IOError if the file did not exist before the delete and error_if_not_exists is True\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            self.m_sshSession.delete_file(remote_path, error_if_not_exists)\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            self.reconnect()\n            self.m_sshSession.delete_file(remote_path, error_if_not_exists)\n\n    def delete_dir(self, remote_directory, contents_only=False):\n        \"\"\"\n        Delete the provided directory from this node.\n        Note: if the directory does not exist then this function will consider this a success case\n\n        :param remote_directory: The directory to remove\n        :param contents_only: if this is set to True then the provided directory will not be deleted itself;\n                              its contents will be deleted but the directory itself will remain after the operation\n                              i.e. delete_dir(\"/opt/sdl/journal\", contents_only=True)\n                              would delete everything in /opt/sdl/journal but the directory would still exist\n                              (optional) default = False\n\n        :raises RuntimeError if the delete operation failed\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            self.m_sshSession.delete_dir(remote_directory, contents_only=contents_only)\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            self.reconnect()\n            try:\n                self.m_sshSession.delete_dir(remote_directory, contents_only=contents_only)\n            except IOError as e:\n                if e.errno == errno.ENOENT:\n                    return\n                raise RuntimeError(\n                    \"The delete of \" + remote_directory + \" on \" + self.location.address + \" failed: \" + str(e))\n\n    def mkdir(self, new_dir_path):\n        \"\"\"\n        Create the provided directory on the remote node.\n        Note: The directory must be a leaf directory. i.e. if you provide /not_a_path/new_dir and \"not_a_path\" doesn't\n              already exist then this command will fail with an IOError\n\n        :param new_dir_path: the new directory to create\n        :raises: RuntimeError if the creation fails\n        :raises: IOError if the directory to be created would not be a leaf directory\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            base_dir, dir_name = os.path.split(new_dir_path)\n            if not self.is_dir(base_dir):\n                raise IOError(\"Cannot create \" + new_dir_path + \" on \" + self.location.address + \": It is not a \"\n                              \"leaf directory\")\n            self.m_sshSession.mkdir(new_dir_path)\n        except RuntimeError as e:\n            self.m_connected = False\n            # Have one go at reconnecting\n            self.reconnect()\n            try:\n                self.m_sshSession.mkdir(new_dir_path)\n            except Exception as e:\n                raise RuntimeError(\n                    \"The creation of \" + new_dir_path + \" on \" + self.location.address + \" failed: \" + str(e))\n\n    def exists(self, remote_path, follow_symlinks=True):\n        \"\"\"\n        Check if the remote_path exists on this node.\n\n        :param remote_path: the remote path to check for existence\n        :param follow_symlinks: behaviour to take if remote path is a symlink\n        :return: True if remote_path exists; False otherwise\n        \"\"\"\n        if not self.m_connected:\n            raise RuntimeError(\"Node session to \" + self.location.address + \" not connected\")\n\n        try:\n            self.m_sshSession.stat(remote_path, follow_symlinks)\n            return True\n        except IOError:\n            return False\n\n    def is_file(self, remote_path, follow_symlinks=True):\n        \"\"\"\n        Check if the remote_path exists and is a file on this node.\n\n        :param remote_path: the remote path to check\n        :param follow_symlinks: behaviour to take if remote path is a symlink\n        :return: True if remote_path exists and is a file; False otherwise\n        \"\"\"\n        if not self.m_connected:\n            raise 
RuntimeError(\"Node session to \" + self.location.address + \"not connected\")\n\n try:\n stat_info = self.m_sshSession.stat(remote_path, follow_symlinks)\n return stat.S_ISREG(stat_info.st_mode)\n except IOError:\n return False\n\n def is_dir(self, remote_path, follow_symlinks=True):\n \"\"\"\n Check if the remote_path exists and is a directory on this node.\n\n :param remote_path: the remote path to check\n :param follow_symlinks: behaviour to take if remote path is a symlink\n :return: True if remote_path exists and is a directory; False otherwise\n \"\"\"\n if not self.m_connected:\n raise RuntimeError(\"Node session to \" + self.location.address + \"not connected\")\n\n try:\n stat_info = self.m_sshSession.stat(remote_path, follow_symlinks)\n return stat.S_ISDIR(stat_info.st_mode)\n except IOError:\n return False\n\n def extract_tar(self, path_to_tar):\n \"\"\"\n Extract a tar.gz file on this node\n\n :param path_to_tar: the remote path of the file to extract\n :raises: RuntimeError if the extract fails\n \"\"\"\n directory, filename = os.path.split(path_to_tar)\n self.perform_command_on_host(\"cd \" + directory + \"; tar -xvzf \" + filename)\n\n def is_connected_as_root(self):\n return self.m_connected_as_root\n\n def close(self):\n self.m_sshSession.close()\n\n def get_connectivity_status(self):\n return self.connectivity_status\n\n def get_location(self):\n return self.location\n\n def get_host_to_connect_to(self):\n return self.location.address\n\n def get_configured_hostname(self):\n if self.configured_hostname:\n return self.configured_hostname\n\n cat_command = 'cat /proc/sys/kernel/hostname'\n result_code, result_string = self.perform_command_on_host(cat_command)\n if result_code is not 0:\n # TODO raise an exception\n return None\n\n self.configured_hostname = result_string.strip()\n return self.configured_hostname\n","sub_path":"src/yu/network/RemoteNode.py","file_name":"RemoteNode.py","file_ext":"py","file_size_in_byte":15388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"154439292","text":"\nimport argparse\n\nfrom cfcall.utils import cliutils\narg = cliutils.arg\n\n\nclass cfcSubCmd(object):\n \"\"\"Fake docs?.\"\"\"\n\n def __init__(self, prog, actmodule):\n self.prog = prog\n self.actions = actmodule\n\n def get_base_parser(self):\n parser = argparse.ArgumentParser(\n prog=self.prog,\n # description=__doc__.strip(),\n add_help=False,\n )\n\n # Global arguments\n parser.add_argument('-h', '--help',\n action='store_true',\n help=argparse.SUPPRESS,\n )\n return parser\n\n def do_bash_completion(self, _args):\n \"\"\"\n Prints all of the commands and options to stdout so that the\n nova.bash_completion script doesn't have to hard code them.\n \"\"\"\n commands = set()\n options = set()\n for sc_str, sc in self.subcommands.items():\n commands.add(sc_str)\n for option in sc._optionals._option_string_actions.keys():\n options.add(option)\n\n commands.remove('bash-completion')\n commands.remove('bash_completion')\n print(' '.join(commands | options))\n\n def _add_bash_completion_subparser(self, subparsers):\n subparser = subparsers.add_parser('bash_completion',\n add_help=False,\n )\n self.subcommands['bash_completion'] = subparser\n subparser.set_defaults(func=self.do_bash_completion)\n\n def get_subcommand_parser(self):\n self.parser = self.get_base_parser()\n\n self.subcommands = {}\n subparsers = self.parser.add_subparsers(metavar='')\n\n self._find_actions(subparsers, self.actions)\n self._find_actions(subparsers, 
self)\n\n        self._add_bash_completion_subparser(subparsers)\n\n        return self.parser\n\n    def _find_actions(self, subparsers, actions_module):\n        for attr in (a for a in dir(actions_module) if a.startswith('do_')):\n            # Commands are exposed hyphen-separated; underscores in method names map to hyphens.\n            command = attr[3:].replace('_', '-')\n            callback = getattr(actions_module, attr)\n            desc = callback.__doc__ or ''\n            action_help = desc.strip()\n            arguments = getattr(callback, 'arguments', [])\n\n            subparser = subparsers.add_parser(command,\n                                              help=action_help,\n                                              description=desc,\n                                              add_help=False,\n                                              )\n            subparser.add_argument('-h', '--help',\n                                   action='help',\n                                   help=argparse.SUPPRESS,\n                                   )\n            self.subcommands[command] = subparser\n            for (args, kwargs) in arguments:\n                subparser.add_argument(*args, **kwargs)\n            subparser.set_defaults(func=callback)\n\n    @arg('command', metavar='<subcommand>', nargs='?',\n         help='Display help for <subcommand>')\n    def do_help(self, args):\n        if args.command:\n            if args.command in self.subcommands:\n                self.subcommands[args.command].print_help()\n            else:\n                raise ValueError(\"'%s' is not a valid subcommand\" %\n                                 args.command)\n        else:\n            self.parser.print_help()\n","sub_path":"config-all/cfcall/utils/clisubcmd.py","file_name":"clisubcmd.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"272592328","text":"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import load_model\nimport h5py\n\nprint(tf.__version__)\n\nfrom sklearn.model_selection import train_test_split\n\n\n# train = pd.read_csv('train.csv',header=None,index_col=None)\n# train.head()\n\n\n# np.save('train.npy', train)\ntrain=np.load('train.npy')\n# print(train.shape)\n\ny_data=train[:,0]\n# print(y_data.shape)\n\nx_data=train[:,1:]\n\nx_data=x_data.reshape([462,257,395,1])\n\n# do train test split \nx_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.15, random_state=22)\n\n# x_train.shape\n# y_train.shape\n\ninp_shape=(257,395,1)\n\n# yy=tf.keras.layers.Conv2D(32,(3,3),2,input_shape=inp_shape)(x_train)\n# yy.shape\n# zz=tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='valid', data_format=None)(yy)\n# zz.shape\n# tt=tf.keras.layers.Conv2D(8, (3,3), 2)(zz)\n# tt.shape\n# kk=tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='valid', data_format=None)(tt)\n# kk.shape\n# tf.keras.layers.Flatten()(kk).shape\n\n\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Conv2D(32, (3,3), 2,activation='relu',input_shape=inp_shape),\n    tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='valid', data_format=None),\n    tf.keras.layers.Conv2D(8, (3,3), 2,activation='relu'),\n    tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='valid', data_format=None),\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(1028, activation='relu'),\n    tf.keras.layers.Dense(128, activation='relu'),\n    tf.keras.layers.Dense(16, activation='relu'),\n    tf.keras.layers.Dropout(0.25),\n    tf.keras.layers.Dense(2, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n\nnum_epochs=40\nfor ii in range(0,num_epochs):\n    model.fit(x_train, y_train, epochs=1)\n    model.evaluate(x_test, y_test, verbose=2)\n\n\n
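# Keras' HDF5 (.h5) format stores the architecture, weights and optimizer state in a single file\nmodel.save('C:/Users/dgnhk_000/Downloads/ARSU 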
2017/20170330_Uhu/Waldschnepfe_recog/my_model_epochs36_v2.h5')\n\ny_predict=model.predict(x_test)\n\n\ny_pred = np.argmax(y_predict, axis=1)\ny_pred\n\n\nfrom sklearn.metrics import confusion_matrix\nconfusion_mtx = confusion_matrix(y_test, y_pred) \nprint(confusion_mtx)\n\n\nmodel.save('C:/Users/dgnhk_000/Downloads/ARSU 2017/20170330_Uhu/Waldschnepfe_recog/my_model_epochs20.h5')\n\n","sub_path":"04_Model_Training.py","file_name":"04_Model_Training.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"476692379","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import http\nfrom openerp.http import request\n\n\nclass FrstZgruppeController(http.Controller):\n\n    @http.route('/frst/group/approve', type='http', auth=\"public\", website=True)\n    def frst_group_approve(self, group_approve_fson_zgruppedetail_id=None, **post):\n        try:\n            zgruppedetail_id = int(group_approve_fson_zgruppedetail_id)\n        except Exception as e:\n            zgruppedetail_id = None\n        if zgruppedetail_id:\n            zgruppedetail = request.env['frst.zgruppedetail'].sudo().search([('id', '=', zgruppedetail_id)])\n        else:\n            zgruppedetail = None\n        return request.website.render('fso_frst_groups_email.frst_group_approve', {'zgruppedetail': zgruppedetail})\n","sub_path":"addons-own/fso_frst_groups_email/controller/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"384365384","text":"# -*- coding:utf-8 -*-\r\nclass Ex13:\r\n    \"\"\"Print the transpose of a matrix.\"\"\"\r\n    def t(self, array):\r\n        m = len(array)\r\n        n = len(array[0])\r\n        new_array = []\r\n        for i in range(n):\r\n            new_row = []\r\n            for j in range(m):\r\n                new_row.append(array[j][i])\r\n            new_array.append(new_row)\r\n        return new_array\r\n\r\nif __name__ == '__main__':\r\n    ex = Ex13()\r\n\r\n    array = [[*range(10)]]*5\r\n    print(array)\r\n\r\n    array_t = ex.t(array)\r\n    print(array_t)\r\n\r\n\r\n","sub_path":"chapter1_1/ex13.py","file_name":"ex13.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"537262584","text":"from django.db import models\nfrom ..user_app.models import User\nfrom django.contrib import messages\nfrom ..user_app.utils import existe, mayor_que, solo_letras\n\n\nclass BookManager(models.Manager):\n    def validar_libro(self, postData):\n        errors = {}\n        mayor_que(postData['title'], 2, 'título')\n        existe(postData['title'], 'título')\n\n        return errors\n\nclass AuthorManager(models.Manager):\n    def valida_autor(self, postData):\n        errors = {}\n        solo_letras(postData['nuevo_autor'], 'autor')\n        mayor_que(postData['nuevo_autor'], 2, 'autor')\n        existe(postData['nuevo_autor'], 'autor')\n\n        return errors\n\n\nclass Author(models.Model):\n    author_name = models.CharField(max_length=100)\n    author_lastname = models.CharField(max_length=100)\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n    objects = AuthorManager()\n\n    def __str__(self):\n        return self.author_name\n\nclass Book(models.Model):\n    title = models.CharField(max_length=100)\n    author = models.ForeignKey(Author, related_name='books', on_delete=models.CASCADE)\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n    objects = BookManager()\n\n    def __str__(self):\n        return 
self.title\n\n","sub_path":"apps/book_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"380411216","text":"import json\nfrom matplotlib import pyplot as plt\n\nwith open('data_boring.json', 'r') as fp:\n data = json.load(fp)\n\nf_no = []\nhappiness = []\nsadness = []\nsurprise = []\n\nfor fr in data:\n\tf_no.append(int(fr))\n\nf_no = sorted(f_no)\n\nfor fr in sorted(f_no):\n\tprint(fr)\n\t# f_no.append(fr)\n\thp = 0\n\tsd = 0\n\tsr = 0\n\tcount = 0.0\n\tfor face in data[str(fr)]:\n\t\thp += face[\"faceAttributes\"][\"emotion\"][\"happiness\"]\n\t\tsd += face[\"faceAttributes\"][\"emotion\"][\"sadness\"]\n\t\tsr += face[\"faceAttributes\"][\"emotion\"][\"surprise\"]\n\t\tcount += 1\n\thappiness.append(hp/count)\n\tsadness.append(sd/count)\n\tsurprise.append(sr/count)\n\nplt.plot(f_no, happiness, color ='green', linewidth = 2)\nplt.plot(f_no, sadness, color = 'red', linewidth = 2)\nplt.plot(f_no, surprise, color = 'blue', linewidth = 2)\nplt.show()","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"605329988","text":"def any_lowercase1(s):\n for c in s:\n if c.islower():\n return True # returns True if first letter is lowercase\n else:\n return False # returns False if first letter is not lowercase\n\ndef any_lowercase2(s):\n for c in s:\n if \"c\".islower(): # This will always return the string 'True' b/c the letter 'c' is lowercase\n return 'True'\n else:\n return 'False'\n\ndef any_lowercase3(s):\n for c in s:\n flag = c.islower() # The flag will change with every letter and stay with the value of the last character\n return flag # only returns the flag for the final character\n\ndef any_lowercase4(s):\n flag = False\n for c in s:\n flag = flag or c.islower() # If c is uppercase, the flag changes to true and remains that way\n return flag\n\ndef any_lowercase5(s):\n for c in s:\n if not c.islower():\n return False # will return False if any letter is uppercase\n return True # will return True if every letter was lowercase \n","sub_path":"ch8/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"427546111","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport jinja2\nfrom jinja2.runtime import StrictUndefined\n\n\ndef load_file(path):\n with open(path) as f:\n return f.read()\n\n\ndef template_string(string, variables, templates_path):\n jinja_env = jinja2.Environment(\n trim_blocks=True,\n undefined=StrictUndefined,\n loader=jinja2.FileSystemLoader(templates_path)\n )\n\n try:\n template = jinja_env.from_string(string)\n except jinja2.exceptions.TemplateSyntaxError as e:\n raise ValueError(u\"Template error: {}\".format(e))\n\n try:\n return template.render(variables)\n except jinja2.exceptions.UndefinedError as e:\n raise ValueError(u\"Variable {} in '{}'\".format(e, string))\n\n\ndef main(template_name):\n variables = {\n key.replace('DM_', '').lower(): value\n for key, value in os.environ.items()\n if key.startswith('DM_')\n }\n\n variables.update({\n 'static_files_root': '/usr/share/nginx/html'\n })\n\n with open(template_name, 'r') as template_file:\n return template_string(\n template_file.read(),\n variables=variables,\n 
templates_path=os.path.dirname(template_name)\n )\n\n\nif __name__ == \"__main__\":\n print(main(sys.argv[1]))\n","sub_path":"scripts/render-template.py","file_name":"render-template.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"132583213","text":"import Networking.Client\nimport Objects.Zone\n# _________________\n# ___/ Module Imports \\________________________________________________________\n#System\nimport time\nimport thread\n\n#PyOnline\nimport Video\nimport Video.PyGame\nimport Video.FPS\nimport Video.Rendering\nimport Events.Handle\nimport Maps.Tiled\nimport Camera.Manage\nimport Player.MainPlayer\nimport Dialog.Popup\nimport Dialog.Global\n\n#PyGame\nimport pygame\nfrom pygame.locals import *\n\n# ________________________\n# ___/ Variable Declarations \\_________________________________________________\nConsole_Surface = None\nConsole_Background_Surface = None\nConsole_Data_Surfaces = []\nConsole_Input_Prompt_Surface = None\nConsole_Input_Text = None\nConsole_Input_Text_Surface = []\nConsole_Font = None\n\nConsole_XPOS = None\nConsole_YPOS = None\nConsole_Focused = None\n\nScreen_Message = None #Surface\nScreen_Message_Str = None #Message\n\n\n# ________________________\n# ___/ Function Declarations \\_________________________________________________\ndef Init():\n global Console_Surface\n global Console_Background_Surface\n global Console_Data_Surfaces\n global Console_Input_Prompt_Surface\n global Console_Input_Text\n global Console_Input_Text_Surface\n global Console_Font\n global Console_XPOS\n global Console_YPOS\n global Console_Focused\n\n Console_Data_Surfaces = []\n\n Console_Font = pygame.font.Font(\"data/fonts/VeraMoBd.ttf\", 10)\n \n #Load Console Background Surface\n Console_Background_Surface = pygame.image.load(\"data/images/Console_Background.png\")\n\n Console_Surface = pygame.Surface((Console_Background_Surface.get_width(), Console_Background_Surface.get_height()), SRCALPHA)\n\n Console_Input_Prompt_Surface = Console_Font.render(\">\", False, (255,255,255))\n Console_Input_Text = \"\"\n Console_Input_Text_Surface = []\n\n Console_XPOS = Video.PyGame.Screen_Width - Console_Background_Surface.get_width()\n Console_YPOS = Video.PyGame.Screen_Height - Console_Background_Surface.get_height()\n Console_Focused = False\n\n#Updates Console Surfaces With Current Data\ndef Update():\n global Console_Surface\n global Console_Background_Surface\n global Console_Data_Surfaces\n global Console_Input_Prompt_Surface\n global Console_Input_Text\n global Console_Input_Text_Surface\n global Console_Font\n global Console_XPOS\n global Console_YPOS\n global Console_Focused\n\n #Make sure the game window is focused\n if pygame.mouse.get_focused() == True:\n #Get mouse x,y\n x, y = pygame.mouse.get_pos()\n\n #Get mouse buttons\n b1, b2, b3 = pygame.mouse.get_pressed()\n\n #Mouse Click (Button1)\n if b1 == True:\n #Check X,Y\n if x >= Console_XPOS and y >= Console_YPOS:\n #Focus Console for event input\n Console_Focused = True\n #Change key input speed(good speed for typing)\n pygame.key.set_repeat(200,200)\n else:\n #UnFocus / Keep Unfocued\n Console_Focused = False\n #Change key input speed(default for player movement)\n pygame.key.set_repeat(1,1)\n\n #Clear Console_Surface\n Console_Surface.fill((0,0,0, 150))\n\n Console_Input_Text_Surface = []\n\n #Reverse Console_Data_Surfaces\n #tmp = Console_Data_Surfaces\n #tmp.reverse()\n tmp = []\n\n for i in 
range(len(Console_Data_Surfaces), 0, -1):\n tmp.append(Console_Data_Surfaces[i-1])\n \n #Loop through recent Console Data\n for i in range(0, len(tmp)):\n #Only render recent 13 elements\n if i < 13:\n #Render so most recent element is lowest, while older elements are higher\n Console_Surface.blit(tmp[i], (0, (Console_Background_Surface.get_height()-28)-(i * 14)))\n else:\n #Render nothing else, break from loop\n break\n\n #Render Console Input Prompt\n Console_Surface.blit(Console_Input_Prompt_Surface, (0, Console_Background_Surface.get_height()-14))\n\n #Render Input Data To Memory\n char_index = 0\n for char in Console_Input_Text:\n #25 chars only\n if char_index < 36:\n InputWrite(char, (255,255,255))\n char_index += 1\n\n #Check if there is any console input data to render\n if len(Console_Input_Text_Surface) > 0:\n for i in range(0, len(Console_Input_Text_Surface)):\n Console_Surface.blit(Console_Input_Text_Surface[i], (14+(i*6), Console_Background_Surface.get_height()-14))\n\n#Renders Console\ndef Render():\n global Console_Surface\n global Console_XPOS\n global Console_YPOS\n\n #Update Console XPOS, YPOS -- To be safe (Incase fullscreen or resize of screen)\n Console_XPOS = Video.PyGame.Screen_Width - Console_Background_Surface.get_width()\n Console_YPOS = Video.PyGame.Screen_Height - Console_Background_Surface.get_height()\n \n Video.PyGame.Screen.blit(Console_Surface, (Console_XPOS, Console_YPOS))\n\n\n#Adds Line of Text\ndef Write(string, color):\n global Console_Surface\n global Console_Background_Surface\n global Console_Data_Surfaces\n global Console_Font\n\n #Surface array for each charater in \"string\"\n Char_Surface = []\n\n #Create a surface for each character\n for i in range(0, len(string)):\n Char_Surface.append(Console_Font.render(str(string[i]), False, color))\n\n #Render Message To Console Accordingly\n count = 0\n tmpSurface = pygame.Surface((Console_Background_Surface.get_width(), 14), SRCALPHA)\n\n #Go through each character\n for i in range(0, len(Char_Surface)):\n #39 char per line max\n if count <= 39:\n #Blit Surface Character\n tmpSurface.blit(Char_Surface[i], (count*6,0))\n\n #Increase count\n count += 1\n else:\n #Blit Surface Character\n tmpSurface.blit(Char_Surface[i], (count*6,0))\n \n #Create new Data element\n Console_Data_Surfaces.append(tmpSurface)\n\n #Reset char count\n count = 0\n\n #Reset tmpSurface\n tmpSurface = pygame.Surface((Console_Background_Surface.get_width(), 14), SRCALPHA)\n \n #Create new Data element\n Console_Data_Surfaces.append(tmpSurface)\n\n#Adds Line of Text (Chat Message)\ndef ChatMessage(string, color):\n global Console_Surface\n global Console_Background_Surface\n global Console_Data_Surfaces\n global Console_Font\n\n #Create new string with message data, and player name data\n tmpStr = str(\"[\" + str(Player.MainPlayer.Player_Name) + \"]:\" + str(string))\n\n #Surface array for each character in \"string\"\n Char_Surface = []\n\n #Create a surface for each character\n for i in range(0, len(tmpStr)):\n Char_Surface.append(Console_Font.render(str(tmpStr[i]), False, color))\n\n #Render Message To Console Accordingly\n count = 0\n tmpSurface = pygame.Surface((Console_Background_Surface.get_width(), 14), SRCALPHA)\n\n #Go through each character\n for i in range(0, len(Char_Surface)):\n #36 char per line max\n if count <= 36:\n #Blit Surface Character\n tmpSurface.blit(Char_Surface[i], (count*6,0))\n\n #Increase count\n count += 1\n else:\n #Blit Surface Character\n tmpSurface.blit(Char_Surface[i], (count*6,0))\n\n #Create new 
Data element\n Console_Data_Surfaces.append(tmpSurface)\n\n #Reset char count\n count = 0\n\n #Reset tmpSurface\n tmpSurface = pygame.Surface((Console_Background_Surface.get_width(), 14), SRCALPHA)\n\n #Create new Data element\n Console_Data_Surfaces.append(tmpSurface)\n\n #Send to server\n Networking.Client.SendData(str(\"cht_msg:\" + str(string)) + \":\")\n\n#Writes Text To Console Input Area\ndef InputWrite(char, color):\n global Console_Surface\n global Console_Background_Surface\n global Console_Data_Surfaces\n global Console_Font\n global Console_Input_Text_Surface\n\n Console_Input_Text_Surface.append(pygame.Surface((Console_Background_Surface.get_width(), 14), SRCALPHA))\n Console_Input_Text_Surface[len(Console_Input_Text_Surface)-1] = Console_Font.render(str(char), False, color)\n\n\n#Handles Input To Console\ndef InputHandle():\n global Console_Surface\n global Console_Background_Surface\n global Console_Data_Surfaces\n global Console_Input_Prompt_Surface\n global Console_Input_Text\n global Console_Input_Text_Surface\n global Console_Font\n global Console_XPOS\n global Console_YPOS\n global Console_Focused\n\n #Set Local Variables\n Input = Console_Input_Text\n\n #Reset Other Variables\n Console_Input_Text = \"\"\n Console_Input_Text_Surface = []\n\n #Was the input a slash command?\n if Input[0:1] == \"/\":\n #Extract command data\n Command_Data = Input[1:len(Input)]\n\n #Extract command and parameter(s)\n SpaceLoc = Command_Data.find(\" \")\n Command = Command_Data[0:SpaceLoc]\n Parameters = Command_Data[SpaceLoc+1:len(Command_Data)]\n\n #Write(str(\"COMMAND: \" + Command + \" PARA: \" + Parameters), (0,255,0))\n\n #Commands\n if Command == \"get\":\n if Parameters == \"zone\":\n Write(str(Objects.Zone.Last_Triggered_Zone_Name),(130,250,123))\n elif Parameters == \"cords\":\n Write(str(\"Your current cords: \" + str(Player.MainPlayer.XPOS) + \",\" + str(Player.MainPlayer.YPOS)), (250,130,128))\n elif Parameters == \"name\":\n Write(str(\"Current Player Name: \" + str(Player.MainPlayer.Player_Name)), (255,0,0))\n elif Parameters == \"speed\":\n Write(str(\"Current Player Speed: \" + str(Player.MainPlayer.Move_Speed)), (255,0,0))\n else:\n Write(\"zone - Gets current zone info\", (255,134,0))\n Write(\"cords - Gets current player (x,y)\", (255,134,0))\n Write(\"name - Gets player name\", (255,134,0))\n Write(\"speed - Gets player movement speed\", (255,134,0))\n \n if Command == \"set\":\n if Parameters == \"speed-up\":\n Player.MainPlayer.Move_Speed += 1\n Write(str(\"Player Speed is now: \" + str(Player.MainPlayer.Move_Speed)), (255,0,0))\n elif Parameters == \"speed-down\":\n Player.MainPlayer.Move_Speed -= 1\n Write(str(\"Player Speed is now: \" + str(Player.MainPlayer.Move_Speed)), (255,0,0))\n else:\n Write(\"speed-up - Increases player speed by one\", (255,134,0))\n Write(\"speed-down - Decreases player speed by one\", (255,134,0))\n \n else:\n #Echo back to the console\n ChatMessage(str(Input), (61,255,255))\n \n#Writes text to screen message buffer\ndef ScreenWrite(message):\n global Screen_Message_Str\n #Set Message Buffer\n Screen_Message_Str = str(message)\n \n #Start Thread\n thread.start_new_thread(Screen_Write_Thread, ())\n \n#Used by the ScreenWrite Function (Threaded)\ndef Screen_Write_Thread():\n global Screen_Message\n global Screen_Message_Str\n \n #Load our font\n Font = pygame.font.Font(\"data/fonts/VeraMoBd.ttf\", 18)\n\n #Render Zone_Name\n Screen_Message = Font.render(str(Screen_Message_Str), False, (255,134,0))\n\n #Allow the Zone_Name_Surface to be 
displayed for one second\n time.sleep(1)\n\n #Reset Zone_Name_Surface Data\n Screen_Message = None\n\n#Handles Events when console is focused\ndef EventHandle(event):\n global Console_Surface\n global Console_Background_Surface\n global Console_Data_Surfaces\n global Console_Input_Prompt_Surface\n global Console_Input_Text\n global Console_Input_Text_Surface\n global Console_Font\n global Console_XPOS\n global Console_YPOS\n global Console_Focused\n \n #Keydown Events\n if event.type == KEYDOWN:\n if event.key == pygame.K_BACKSPACE:\n Console_Input_Text = Console_Input_Text[0:len(Console_Input_Text)-1]\n elif event.key == pygame.K_RETURN:\n InputHandle()\n elif int(event.key) > -1 and int(event.key) < 256:\n Console_Input_Text += chr(event.key)\n","sub_path":"Client/src/Console/Manage.py","file_name":"Manage.py","file_ext":"py","file_size_in_byte":12178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"201912270","text":"import os\nimport unittest\n\ntgt_dir = '\\\\tests'\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nloader = unittest.TestLoader()\nstart_dir = dir_path + tgt_dir\n\nsuite = loader.discover(start_dir)\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner()\n runner.run(suite)\n","sub_path":"temporencUtils/components/___testsuite.py","file_name":"___testsuite.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"119464974","text":"import random, time\n\nwith open('art.txt') as art_file:\n logos = art_file.read().split('------')\nlogos = dict([logo.strip().split('\\n', 1) for logo in logos])\n\nteams = []\nwhile True:\n print(chr(27) + \"[2J\")\n name = input('Enter your name: ')\n if name.lower() == 'exit':\n break\n if teams == []:\n teams = list(logos.keys())\n random.shuffle(teams)\n assigned_team = teams.pop()\n print(\"Congrats, you are now part of the team: %s!\" % assigned_team)\n print(logos[assigned_team])\n with open('groupings.txt', 'a') as groupings:\n groupings.write('%s : %s\\n' % (name, assigned_team))\n time.sleep(3)\n\nwith open('groupings.txt', 'r') as groupings, open('groupings_sorted.txt', 'w') as groupings_sorted:\n gs = groupings.read().strip().split('\\n')\n gs = [team + '\\n' + '\\n'.join([g.split(' : ')[0] for g in gs if team in g]) + '\\n' for team in logos]\n groupings_sorted.write('\\n'.join(gs))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"278817977","text":"import os\nimport os.path as osp\n\n\nprefix = '/data/chaizh/anti-spoof-0430/'\nlst_set = ['train.csv', 'test.csv']\n#prefix = '/data/chaizh/anti-spoof-0430/crop224x224'\n#lst_set = ['real_test.csv']\n\n\nfor lst in lst_set:\n file_lst = lst.replace('.csv', '.lst')\n fp = open(file_lst, 'w')\n lst = osp.join(prefix, lst)\n for line in open(lst).readlines():\n wds = line.strip().split(',') \n fname,label = wds[0],wds[1]\n fname = osp.join(prefix, fname)\n fp.write('%s,%s\\n'%(fname, label))\n fp.close()\n","sub_path":"datasets/anti-spoof/addpre.py","file_name":"addpre.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"373779043","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\nfrom datetime import datetime, timedelta, date\nfrom 
dateutil.relativedelta import *\r\nimport xlwt\r\nfrom xlwt import easyxf\r\nimport logging\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\ntry:\r\n    import xlsxwriter\r\nexcept ImportError:\r\n    _logger.debug('Cannot `import xlsxwriter`.')\r\n\r\ntry:\r\n    from cStringIO import StringIO\r\nexcept ImportError:\r\n    _logger.debug('Cannot `import cStringIO`.')\r\n\r\ntry:\r\n    import base64\r\nexcept ImportError:\r\n    _logger.debug('Cannot `import base64`.')\r\n\r\n\r\nclass WizardReportRequestDetails(models.TransientModel):\r\n    _name = 'wizard.report.request.transaction'\r\n    _description = 'Request Transaction Report'\r\n\r\n    @api.model\r\n    def get_request_details(self):\r\n        \"\"\" Serialise the orders of the day information\r\n            params: date_start, date_stop string representing the datetime of order\r\n        \"\"\"\r\n\r\n        # startdate = datetime.strptime(self.start_date, \"%Y-%m-%d %H:%M:%S\") - relativedelta(hours=7)\r\n        # startdate = datetime.strptime(self.start_date, \"%Y-%m-%d %H:%M:%S.%f\") - relativedelta(hours=7)\r\n        # enddate = datetime.strptime(self.end_date, \"%Y-%m-%d %H:%M:%S\") - relativedelta(hours=7)\r\n\r\n        _logger.info(self.start_date)\r\n\r\n        tglawal = datetime.strptime(self.start_date, \"%Y-%m-%d\").date()\r\n        str_start_date = str(tglawal.year) + \"-\" + str(tglawal.month).zfill(2) + \"-\" + str(tglawal.day).zfill(\r\n            2) + \" 00:00:00\"\r\n\r\n        _logger.info(str_start_date)\r\n        tglawal = datetime.strptime(str_start_date, \"%Y-%m-%d %H:%M:%S\") - relativedelta(hours=7)\r\n        tglawal = tglawal.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\r\n        #=================================\r\n\r\n        tglakhir = datetime.strptime(self.end_date, \"%Y-%m-%d\").date()\r\n        _logger.info(tglakhir)\r\n        str_end_date = str(tglakhir.year) + \"-\" + str(tglakhir.month).zfill(2) + \"-\" + str(tglakhir.day).zfill(\r\n            2) + \" 23:59:59\"\r\n\r\n        # tglakhir = str_end_date.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\r\n        _logger.info(tglakhir)\r\n\r\n        args = []\r\n        args.append(('tanggal', '>=', tglawal))\r\n        args.append(('tanggal', '<=', str_end_date))\r\n\r\n        if self.billing_status in ('billing', 'non_billing'):\r\n            args.append(('cara_bayar', '=', self.billing_status))\r\n\r\n        if self.transaction_status in ('done', 'payment', 'cancel'):\r\n            args.append(('state', '=', self.transaction_status))\r\n\r\n        if self.jenis_member == '1st':\r\n            args.append(('jenis_member', '=', self.jenis_member))\r\n        elif self.jenis_member == '2nd':\r\n            args.append(('jenis_member', '!=', '1st'))\r\n\r\n        if self.jenis_transaksi:\r\n            args.append(('jenis_transaksi', '=', self.jenis_transaksi))\r\n\r\n\r\n\r\n        #args = [('tanggal', '>=', self.start_date),('tanggal', '<=', self.end_date), ('state', '=', self.transaction_status), ('cara_bayar', operator, billing_status)]\r\n\r\n        transstikers = self.env['request.transstiker'].sudo().search(args)\r\n\r\n        # _logger.info(transstikers)\r\n\r\n        transstiker_datas = []\r\n\r\n        for stiker in transstikers:\r\n            vals = {}\r\n            vals.update({'notrans': stiker.notrans})\r\n            vals.update({'unit_kerja': stiker.unit_kerja.kode})\r\n            vals.update({'name': stiker.name})\r\n            tanggal = fields.Datetime.from_string(stiker.tanggal) + relativedelta(hours=7)\r\n            vals.update({'tanggal': str(tanggal)})\r\n            vals.update({'cara_bayar': stiker.cara_bayar})\r\n            if stiker.jenis_transaksi == 'stop':\r\n                vals.update({'jenis_transaksi': 'stop_billing'})\r\n            else:\r\n                vals.update({'jenis_transaksi': stiker.jenis_transaksi})\r\n            if stiker.baru:\r\n                if stiker.awal:\r\n                    awal = 
datetime.strptime(stiker.awal, \"%Y-%m-%d %H:%M:%S\") + relativedelta(hours=7)\n vals.update({'start_date': awal.strftime(\"%Y-%m-%d %H:%M:%S\")})\n else:\n vals.update({'start_date': False})\n\n if stiker.akhir:\n akhir = datetime.strptime(stiker.akhir, \"%Y-%m-%d %H:%M:%S\") + relativedelta(hours=7)\n vals.update({'end_date': akhir.strftime(\"%Y-%m-%d %H:%M:%S\")})\n else:\n vals.update({'end_date': False})\n vals.update({'duration': stiker.duration})\n vals.update({'nopol': stiker.nopol})\n else:\n vals.update({'start_date': ''})\n vals.update({'end_date': ''})\n vals.update({'duration': ''})\n vals.update({'nopol': ''})\n\n vals.update({'state': stiker.state})\n vals.update({'val_harga': stiker.val_harga})\n vals.update({'harga_beli_stiker': stiker.harga_beli_stiker})\n vals.update({'harga_kartu_hilang': stiker.harga_kartu_hilang})\n vals.update({'harga_ganti_nopol': stiker.harga_ganti_nopol})\n vals.update({'amount': stiker.amount})\n\n transstiker_datas.append(vals)\n\n return {\n \"transstikers\": transstiker_datas\n }\n\n start_date = fields.Date(required=True, )\n end_date = fields.Date(required=True, )\n jenis_transaksi = fields.Selection(string=\"Jenis Transaksi\",\n selection=[('langganan_baru', 'LANGGANAN BARU'),\n ('perpanjang_baru', 'PERPANJANG BARU'), ('perpanjang', 'PERPANJANG'),\n ('stop', 'STOP BILLING'), ],\n required=False, readonly=False)\n billing_status = fields.Selection(string=\"Billing Status\", selection=[('billing', 'Billing'), ('non_billing', 'Non Billing'), ('billing_non_billing', 'Billing & Non Billing'), ], required=False, default='billing')\n transaction_status = fields.Selection(string=\"Transaction Status\", selection=[('done', 'Done'),('payment', 'Waiting for Payment'),('cancel', 'Cancel')], required=False, default='done')\n jenis_member = fields.Selection(string=\"Mobil ke\", selection=[('1st', '1st'), ('2nd', '>= 2nd') ], required=False, readonly=False)\n report_filename = fields.Char('Filename', size=100, readonly=True, default='ReportTransaction.xlsx')\n report_file = fields.Binary('File', readonly=True)\n report_printed = fields.Boolean('Report Printed', default=False, readonly=True)\n\n @api.multi\n def generate_report_excel(self):\n for wizard in self:\n\n fp = StringIO()\n workbook = xlsxwriter.Workbook(fp)\n column_heading_style = easyxf('font:height 200;font:bold True;')\n\n request_details = self.get_request_details()\n worksheet = workbook.add_worksheet('Request Details')\n worksheet.write(0, 0, _('ID #'))\n worksheet.write(0, 1, _('UNIT'))\n worksheet.write(0, 2, _('NAMA'))\n worksheet.write(0, 3, _('TANGGAL'))\n worksheet.write(0, 4, _('BILLING STATUS'))\n worksheet.write(0, 5, _('JENIS TRANSAKSI'))\n worksheet.write(0, 6, _('START DATE'))\n worksheet.write(0, 7, _('END DATE'))\n worksheet.write(0, 8, _('DURASI'))\n worksheet.write(0, 9, _('NOPOL'))\n worksheet.write(0, 10, _('STATUS'))\n worksheet.write(0, 11, _('HARGA KONTRIBUSI'))\n worksheet.write(0, 12, _('HARGA STIKER'))\n worksheet.write(0, 13, _('HARGA KARTU PARKIR'))\n worksheet.write(0, 14, _('HARGA GANTI NOPOL'))\n worksheet.write(0, 15, _('TOTAL'))\n row = 1\n for order in request_details['transstikers']:\n worksheet.write(row, 0, order['notrans'])\n worksheet.write(row, 1, order['unit_kerja'])\n worksheet.write(row, 2, order['name'])\n worksheet.write(row, 3, order['tanggal'])\n worksheet.write(row, 4, order['cara_bayar'])\n worksheet.write(row, 5, order['jenis_transaksi'])\n if order['start_date']:\n startdate = order['start_date']\n else:\n startdate = ''\n 
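# start/end datetimes were already shifted to local time (UTC+7) and stringified in get_request_details, so they are written as plain text cells\n                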
worksheet.write(row, 6, startdate)\n if order['end_date']:\n enddate = order['end_date']\n else:\n enddate = ''\n worksheet.write(row, 7, enddate)\n worksheet.write(row, 8, order['duration'])\n worksheet.write(row, 9, order['nopol'])\n worksheet.write(row, 10, order['state'])\n worksheet.write(row, 11, order['val_harga'])\n worksheet.write(row, 12, order['harga_beli_stiker'])\n worksheet.write(row, 13, order['harga_kartu_hilang'])\n worksheet.write(row, 14, order['harga_ganti_nopol'])\n worksheet.write(row, 15, order['amount'])\n row += 1\n\n workbook.close()\n excel_file = base64.encodestring(fp.getvalue())\n wizard.report_file = excel_file\n wizard.report_printed = True\n fp.close()\n\n return {\n 'view_mode': 'form',\n 'res_id': wizard.id,\n 'res_model': 'wizard.report.request.transaction',\n 'view_type': 'form',\n 'type': 'ir.actions.act_window',\n 'context': self.env.context,\n 'target': 'new',\n }\n\n # @api.onchange('start_date')\n # def _onchange_start_date(self):\n # if self.start_date and self.end_date and self.end_date < self.start_date:\n # self.end_date = self.start_date\n #\n # @api.onchange('end_date')\n # def _onchange_end_date(self):\n # if self.end_date and self.end_date < self.start_date:\n # self.start_date = self.end_date\n\n @api.multi\n def generate_report(self):\n data = {\n 'date_start': self.start_date,\n 'date_stop': self.end_date,\n 'billing': self.billing_status,\n 'transaction': self.transaction_status,\n }\n return self.env['report'].get_action([], 'paymentmodule.report_requestdetails', data=data)\n","sub_path":"wizard/wizard_report_request_transaction.py","file_name":"wizard_report_request_transaction.py","file_ext":"py","file_size_in_byte":10531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"508471528","text":"import geocoder\nimport requests\n\ng = geocoder.arcgis('Redlands, CA')\nprint(g.latlng) # latlng is a tuple with a length of 2.\n\ndest = [\n'Space Needle',\n'Crater Lake',\n'Golden Gate Bridge',\n'Yosemite National Park',\n'Las Vegas, Nevada',\n'Grand Canyon National Park',\n'Aspen, Colorado',\n'Mount Rushmore',\n'Yellowstone National Park',\n'Sandpoint, Idaho',\n'Banff National Park',\n'Capilano Suspension Bridge'\n]\n\nfor point in dest:\n loc = geocoder.arcgis(point)\n print(f\"{point} Located at {loc.latlng}\")","sub_path":"wk7/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"151865819","text":"from pathlib import Path\nfrom typing import BinaryIO, Iterable, Sized, Union\n\nimport bpy\nimport numpy as np\nfrom mathutils import Vector, Matrix, Euler\n\nfrom .flex_expressions import *\nfrom .mdl_file import Mdl\nfrom .structs.header import StudioHDRFlags\nfrom .structs.model import Model\nfrom .vertex_animation_cache import VertexAnimationCache\nfrom ..vtx.structs.mesh import Mesh as VtxMesh\nfrom ..vtx.structs.model import ModelLod as VtxModel\nfrom ..vtx.vtx import Vtx\nfrom ..vvd.vvd import Vvd\nfrom ...bpy_utilities.logging import BPYLoggingManager\nfrom ...bpy_utilities.material_loader.material_loader import Source1MaterialLoader\nfrom ...bpy_utilities.utils import get_material, get_new_unique_collection\nfrom ...source_shared.content_manager import ContentManager\nfrom ...source_shared.model_container import Source1ModelContainer\n\nlog_manager = BPYLoggingManager()\nlogger = log_manager.get_logger('mdl_loader')\n\n\ndef merge_strip_groups(vtx_mesh: 
VtxMesh):\n indices_accumulator = []\n vertex_accumulator = []\n vertex_offset = 0\n for strip_group in vtx_mesh.strip_groups:\n indices_accumulator.append(np.add(strip_group.indexes, vertex_offset))\n vertex_accumulator.append(strip_group.vertexes['original_mesh_vertex_index'].reshape(-1))\n vertex_offset += sum(strip.vertex_count for strip in strip_group.strips)\n return np.hstack(indices_accumulator), np.hstack(vertex_accumulator), vertex_offset\n\n\ndef merge_meshes(model: Model, vtx_model: VtxModel):\n vtx_vertices = []\n acc = 0\n mat_arrays = []\n indices_array = []\n for n, (vtx_mesh, mesh) in enumerate(zip(vtx_model.meshes, model.meshes)):\n\n if not vtx_mesh.strip_groups:\n continue\n\n vertex_start = mesh.vertex_index_start\n indices, vertices, offset = merge_strip_groups(vtx_mesh)\n indices = np.add(indices, acc)\n mat_array = np.full(indices.shape[0] // 3, mesh.material_index)\n mat_arrays.append(mat_array)\n vtx_vertices.extend(np.add(vertices, vertex_start))\n indices_array.append(indices)\n acc += offset\n\n return vtx_vertices, np.hstack(indices_array), np.hstack(mat_arrays)\n\n\ndef get_slice(data: [Iterable, Sized], start, count=None):\n if count is None:\n count = len(data) - start\n return data[start:start + count]\n\n\ndef create_armature(mdl: Mdl, collection, scale=1.0):\n model_name = Path(mdl.header.name).stem\n armature = bpy.data.armatures.new(f\"{model_name}_ARM_DATA\")\n armature_obj = bpy.data.objects.new(f\"{model_name}_ARM\", armature)\n armature_obj.show_in_front = True\n collection.objects.link(armature_obj)\n\n armature_obj.select_set(True)\n bpy.context.view_layer.objects.active = armature_obj\n\n bpy.ops.object.mode_set(mode='EDIT')\n bl_bones = []\n for bone in mdl.bones:\n bl_bone = armature.edit_bones.new(bone.name)\n bl_bones.append(bl_bone)\n\n for bl_bone, s_bone in zip(bl_bones, mdl.bones):\n if s_bone.parent_bone_index != -1:\n bl_parent = bl_bones[s_bone.parent_bone_index]\n bl_bone.parent = bl_parent\n bl_bone.tail = (Vector([0, 0, 1]) * scale) + bl_bone.head\n\n bpy.ops.object.mode_set(mode='POSE')\n for se_bone in mdl.bones:\n bl_bone = armature_obj.pose.bones.get(se_bone.name)\n pos = Vector(se_bone.position) * scale\n rot = Euler(se_bone.rotation)\n mat = Matrix.Translation(pos) @ rot.to_matrix().to_4x4()\n bl_bone.matrix_basis.identity()\n\n bl_bone.matrix = bl_bone.parent.matrix @ mat if bl_bone.parent else mat\n bpy.ops.pose.armature_apply()\n bpy.ops.object.mode_set(mode='OBJECT')\n\n return armature_obj\n\n\ndef import_model(mdl_file: BinaryIO, vvd_file: BinaryIO, vtx_file: BinaryIO, scale=1.0,\n create_drivers=False, parent_collection=None, disable_collection_sort=False, re_use_meshes=False):\n if parent_collection is None:\n parent_collection = bpy.context.scene.collection\n mdl = Mdl(mdl_file)\n mdl.read()\n vvd = Vvd(vvd_file)\n vvd.read()\n vtx = Vtx(vtx_file)\n vtx.read()\n\n container = Source1ModelContainer(mdl, vvd, vtx)\n\n desired_lod = 0\n all_vertices = vvd.lod_data[desired_lod]\n model_name = Path(mdl.header.name).stem + '_MODEL'\n\n master_collection = get_new_unique_collection(model_name, parent_collection)\n container.collection = master_collection\n static_prop = mdl.header.flags & StudioHDRFlags.STATIC_PROP != 0\n armature = None\n if mdl.flex_names:\n vac = VertexAnimationCache(mdl, vvd)\n vac.process_data()\n\n if not static_prop:\n armature = create_armature(mdl, master_collection, scale)\n container.armature = armature\n\n for vtx_body_part, body_part in zip(vtx.body_parts, mdl.body_parts):\n if 
disable_collection_sort:\n body_part_collection = master_collection\n else:\n body_part_collection = get_new_unique_collection(body_part.name, master_collection)\n\n for vtx_model, model in zip(vtx_body_part.models, body_part.models):\n\n if model.vertex_count == 0:\n continue\n mesh_name = f'{body_part.name}_{model.name}'\n used_copy = False\n if re_use_meshes and static_prop:\n mesh_obj_original = bpy.data.objects.get(mesh_name, None)\n mesh_data_original = bpy.data.meshes.get(f'{mesh_name}_MESH', False)\n if mesh_obj_original and mesh_data_original:\n mesh_data = mesh_data_original.copy()\n mesh_obj = mesh_obj_original.copy()\n mesh_obj['skin_groups'] = mesh_obj_original['skin_groups']\n mesh_obj['active_skin'] = mesh_obj_original['active_skin']\n mesh_obj['model_type'] = 's1'\n mesh_obj.data = mesh_data\n used_copy = True\n else:\n mesh_data = bpy.data.meshes.new(f'{mesh_name}_MESH')\n mesh_obj = bpy.data.objects.new(mesh_name, mesh_data)\n mesh_obj['skin_groups'] = {str(n): group for (n, group) in enumerate(mdl.skin_groups)}\n mesh_obj['active_skin'] = '0'\n mesh_obj['model_type'] = 's1'\n else:\n mesh_data = bpy.data.meshes.new(f'{mesh_name}_MESH')\n mesh_obj = bpy.data.objects.new(mesh_name, mesh_data)\n mesh_obj['skin_groups'] = {str(n): group for (n, group) in enumerate(mdl.skin_groups)}\n mesh_obj['active_skin'] = '0'\n mesh_obj['model_type'] = 's1'\n body_part_collection.objects.link(mesh_obj)\n if not static_prop:\n modifier = mesh_obj.modifiers.new(\n type=\"ARMATURE\", name=\"Armature\")\n modifier.object = armature\n mesh_obj.parent = armature\n\n container.objects.append(mesh_obj)\n\n if used_copy:\n continue\n\n model_vertices = get_slice(all_vertices, model.vertex_offset, model.vertex_count)\n vtx_vertices, indices_array, material_indices_array = merge_meshes(model, vtx_model.model_lods[desired_lod])\n\n indices_array = np.array(indices_array, dtype=np.uint32)\n vertices = model_vertices[vtx_vertices]\n\n mesh_data.from_pydata(vertices['vertex'] * scale, [], np.flip(indices_array).reshape((-1, 3)).tolist())\n mesh_data.update()\n\n mesh_data.polygons.foreach_set(\"use_smooth\", np.ones(len(mesh_data.polygons)))\n mesh_data.normals_split_custom_set_from_vertices(vertices['normal'])\n mesh_data.use_auto_smooth = True\n\n material_remapper = np.zeros((material_indices_array.max()+1,), dtype=np.uint32)\n for mat_id in np.unique(material_indices_array):\n mat_name = mdl.materials[mat_id].name\n material_remapper[mat_id] = get_material(mat_name[-63:], mesh_obj)\n\n mesh_data.polygons.foreach_set('material_index', material_remapper[material_indices_array[::-1]].tolist())\n\n mesh_data.uv_layers.new()\n uv_data = mesh_data.uv_layers[0].data\n\n vertex_indices = np.zeros((len(mesh_data.loops, )), dtype=np.uint32)\n mesh_data.loops.foreach_get('vertex_index', vertex_indices)\n uvs = vertices['uv']\n uvs[:, 1] = 1 - uvs[:, 1]\n uv_data.foreach_set('uv', uvs[vertex_indices].flatten())\n\n if not static_prop:\n weight_groups = {bone.name: mesh_obj.vertex_groups.new(name=bone.name) for bone in mdl.bones}\n\n for n, (bone_indices, bone_weights) in enumerate(zip(vertices['bone_id'], vertices['weight'])):\n for bone_index, weight in zip(bone_indices, bone_weights):\n if weight > 0:\n bone_name = mdl.bones[bone_index].name\n weight_groups[bone_name].add([n], weight, 'REPLACE')\n flex_names = []\n for mesh in model.meshes:\n if mesh.flexes:\n flex_names.extend([mdl.flex_names[flex.flex_desc_index] for flex in mesh.flexes])\n if flex_names:\n mesh_obj.shape_key_add(name='base')\n for 
flex_name in flex_names:\n shape_key = mesh_data.shape_keys.key_blocks.get(flex_name, None) or mesh_obj.shape_key_add(\n name=flex_name)\n vertex_animation = vac.vertex_cache[flex_name]\n\n model_vertices = get_slice(vertex_animation, model.vertex_offset, model.vertex_count)\n flex_vertices = model_vertices[vtx_vertices] * scale\n\n shape_key.data.foreach_set(\"co\", flex_vertices.reshape(-1))\n\n if create_drivers:\n create_flex_drivers(mesh_obj, mdl)\n if mdl.attachments:\n attachment_collection = get_new_unique_collection(f'{model_name}_attachments', master_collection)\n create_attachments(mdl, armature if not static_prop else container.objects[0], scale, attachment_collection)\n\n return container\n\n\ndef create_flex_drivers(obj, mdl: Mdl):\n all_exprs = mdl.rebuild_flex_rules()\n for controller in mdl.flex_controllers:\n obj.shape_key_add(name=controller.name)\n\n def parse_expr(expr: Union[Value, Expr, Function], driver, shape_key_block):\n if expr.__class__ in [FetchController, FetchFlex]:\n expr: Value = expr\n logger.info(f\"Parsing {expr} value\")\n if driver.variables.get(expr.value, None) is not None:\n return\n var = driver.variables.new()\n var.name = expr.value\n var.targets[0].id_type = 'KEY'\n var.targets[0].id = shape_key_block\n var.targets[0].data_path = \"key_blocks[\\\"{}\\\"].value\".format(expr.value)\n\n elif issubclass(expr.__class__, Expr):\n expr: Expr = expr\n parse_expr(expr.right, driver, shape_key_block)\n parse_expr(expr.left, driver, shape_key_block)\n elif issubclass(expr.__class__, Function):\n expr: Function = expr\n for var in expr.values:\n parse_expr(var, driver, shape_key_block)\n\n for target, expr in all_exprs.items():\n shape_key_block = obj.data.shape_keys\n shape_key = shape_key_block.key_blocks.get(target, obj.shape_key_add(name=target))\n\n shape_key.driver_remove(\"value\")\n fcurve = shape_key.driver_add(\"value\")\n fcurve.modifiers.remove(fcurve.modifiers[0])\n\n driver = fcurve.driver\n driver.type = 'SCRIPTED'\n parse_expr(expr, driver, shape_key_block)\n driver.expression = str(expr)\n logger.debug(f'{target} {expr}')\n\n\ndef create_attachments(mdl: Mdl, armature: bpy.types.Object, scale, parent_collection: bpy.types.Collection):\n for attachment in mdl.attachments:\n empty = bpy.data.objects.new(attachment.name, None)\n parent_collection.objects.link(empty)\n pos = Vector(attachment.pos) * scale\n rot = Euler(attachment.rot)\n empty.matrix_basis.identity()\n empty.scale *= scale\n empty.parent = armature\n if armature.type == 'ARMATURE':\n bone = armature.data.bones.get(mdl.bones[attachment.parent_bone].name)\n empty.parent_type = 'BONE'\n empty.parent_bone = bone.name\n empty.location = pos\n empty.rotation_euler = rot\n\n\ndef import_materials(mdl):\n content_manager = ContentManager()\n for material in mdl.materials:\n if bpy.data.materials.get(material.name[-63:], False):\n if bpy.data.materials[material.name[-63:]].get('source1_loaded', False):\n logger.info(f'Skipping loading of {material.name[-63:]} as it already loaded')\n continue\n material_path = None\n for mat_path in mdl.materials_paths:\n material_path = content_manager.find_material(Path(mat_path) / material.name)\n if material_path:\n break\n if material_path:\n new_material = Source1MaterialLoader(material_path, material.name[-63:])\n new_material.create_material()\n","sub_path":"source1/mdl/import_mdl.py","file_name":"import_mdl.py","file_ext":"py","file_size_in_byte":13254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
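The merge_strip_groups/merge_meshes pair above hinges on one numpy idiom: shifting each group's local indices by a running vertex offset before concatenating. A minimal standalone sketch of that idiom, with made-up strip data:

    import numpy as np

    # (local triangle indices, vertex_count) per strip group - hypothetical values
    strip_groups = [(np.array([0, 1, 2]), 3), (np.array([0, 2, 1]), 3)]

    indices_accumulator = []
    vertex_offset = 0
    for local_indices, vertex_count in strip_groups:
        # Rebase this group's indices into the merged vertex numbering.
        indices_accumulator.append(np.add(local_indices, vertex_offset))
        vertex_offset += vertex_count

    merged = np.hstack(indices_accumulator)
    print(merged)  # [0 1 2 3 5 4]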
+{"seq_id":"260745217","text":"import pandas as pd\n\n\ndef project_earnings(selected_project):\n '''\n\n '''\n\n # selected_project = selected_project\n\n # importing file\n dfProjects = pd.read_csv('static/munck_textfiles/df/df_projects.txt',\n sep=',',\n header=0,\n low_memory=False,\n encoding=\"ISO-8859-1\",\n parse_dates=['date'],\n index_col=[0])\n\n dfProject = dfProjects.loc[(dfProjects.index == selected_project)]\n\n # Creating invoice variables\n invoiced_sum = dfProject.inv_line.sum()\n invoiced_hours_sum = dfProject[dfProject.text.str.startswith('Timer')].inv_line.sum()\n invoiced_items = invoiced_sum - invoiced_hours_sum\n invoiced_hours_qty = dfProject[dfProject.text.str.startswith('Timer')].qty.sum()\n\n # ==================== CALCULATING TOTAL HOURS SPENT + COST OF HOURS ====================\n dfProject_hours = pd.read_csv('static/munck_textfiles/ProTransProjEmplTrans.txt',\n sep=\";\",\n header=None,\n dayfirst=True,\n low_memory=False,\n encoding=\"ISO-8859-1\",\n parse_dates=[4])\n\n dfProject_hours.drop([0, 1, 3, 8], axis=1, inplace=True)\n dfProject_hours.columns = ['project_id', 'date', 'text', 'qty', 'cost_per_h']\n\n dfProject_hours.set_index('project_id', inplace=True)\n dfProject_hours['emp_cost'] = dfProject_hours['qty'] * dfProject_hours['cost_per_h']\n dfProject_hours['cost_per_h'] = dfProject_hours['cost_per_h'].astype('int')\n dfProject_hours['emp_cost'] = dfProject_hours['emp_cost'].astype('int')\n\n # Calculating total hours spent on project\n dfProject_hours = dfProject_hours.loc[(dfProject_hours.index == selected_project)]\n project_hours_total = dfProject_hours.qty.sum()\n project_hours_total_cost = dfProject_hours.emp_cost.sum()\n\n # ==================== CALCULATING OTHER PROJECT RELATED COSTS ====================\n dfProject_costs = pd.read_csv('static/munck_textfiles/ProTransProjCostTrans.txt',\n sep=\";\",\n header=None,\n dayfirst=True,\n low_memory=False,\n encoding=\"ISO-8859-1\")\n\n dfProject_costs.drop([0, 1, 3, 4, 6, 8], axis=1, inplace=True)\n dfProject_costs.columns = ['project_id', 'text', 'cost']\n\n dfProject_costs.set_index('project_id', inplace=True)\n dfProject_costs['cost'] = dfProject_costs['cost'].astype('int')\n\n dfProject_costs = dfProject_costs.loc[(dfProject_costs.index == selected_project)]\n\n # Creating variables for projects costs\n project_other_costs_sum = dfProject_costs.cost.sum()\n\n # ==================== PROJECT EARNINGS ====================\n invoice_minus_costs = invoiced_sum - project_hours_total_cost - project_other_costs_sum\n\n project_earnings_dict = {\n 'invoiced_sum': invoiced_sum,\n 'invoiced_hours_sum': invoiced_hours_sum,\n 'invoiced_items': invoiced_items,\n 'invoiced_hours_qty': invoiced_hours_qty,\n 'project_hours_total': project_hours_total,\n 'project_hours_total_cost': project_hours_total_cost,\n 'project_other_costs_sum': project_other_costs_sum,\n 'invoice_minus_costs': invoice_minus_costs,\n }\n\n return project_earnings_dict\n","sub_path":"src/dashboard/pandas/project_analysis.py","file_name":"project_analysis.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"254111266","text":"import pygame\npygame.init() #초기화\n\n#화면크기 설정\nscreen_width = 480 #가로(너비)\nscreen_height = 640 #세로\nscreen = pygame.display.set_mode((screen_width,screen_height))\n\n#화면 타이틀 설정\npygame.display.set_caption(\"민서 최애게임\")#게임이름 설정\n#FPS(화면속도 설정_Frame Per Seconds)\nclock = pygame.time.Clock()#대문자로 안써주면 atrribte error 남\n#배경 이미지 
불러오기\nbackground = pygame.image.load(\"C://Users//김민서//OneDrive - 한국외국어대학교//바탕 화면//게임//Pygame//gunchim_background.png\")\n#캐릭터(스프라이트) 불러오기\ncharacter = pygame.image.load(\"C://Users//김민서//OneDrive - 한국외국어대학교//바탕 화면//게임//Pygame//gunchim_character.png\")\ncharacter = pygame.transform.scale(character, (70, 70))\ncharacter_size = character.get_rect().size#이미지 크기를 가져옴\ncharacter_width = character_size[0]#캐릭터 가로크기\ncharacter_height = character_size[1]#캐릭터 가로크기\n\n#캐릭터는 기본위치\ncharacter_x_pos = (screen_width / 2) - (character_width/2) #화면 가로의 절반크기에 해당하는 곳에 위치하도록(가로)\ncharacter_y_pos = screen_height - character_height #화면 하단부에 위치하도록(세로)\n#이동할 좌표\nto_x = 0\nto_y = 0\n#이동속도\ncharacter_speed = 0.6#속도 설정\n\n########################################\n\n#적 캐릭터\nenemy = pygame.image.load(\"C://Users//김민서//OneDrive - 한국외국어대학교//바탕 화면//게임//Pygame//hunbal_enemy.png\")\nenemy = pygame.transform.scale(enemy, (70, 70))\nenemy_size = enemy.get_rect().size#이미지 크기를 가져옴\nenemy_width = enemy_size[0]#캐릭터 가로크기\nenemy_height = enemy_size[1]#캐릭터 가로크기\n\n#캐릭터는 기본위치\nenemy_x_pos = (screen_width / 2) - (enemy_width/2) #화면 가로의 절반크기에 해당하는 곳에 위치하도록(가로)\nenemy_y_pos = (screen_height/2) - (enemy_height/2) #화면 하단부에 위치하도록(세로)\n\n#텍스트_ 폰트\ngame_font = pygame.font.Font(None,40)#None이라고 쓰면 default폰트로 쓰임\n\n#텍스트_게임제한시간을 표시해보자#\n\ntotal_time = 10 #제한시간\nstart_ticks = pygame.time.get_ticks()#시작 시간 정보(tick)을 받아옴\n\n\nrunning = True #게임 진행중?\nwhile running:\n dt = clock.tick(60)#게임화면 초당 프레임수 설정\n\n #캐릭터가 100만큼 이동해야한다 치자\n #10프레��: 1초동안 10번 동작 -> 10만큼 이동하면됨\n #20 프레임: 1초동안 20번 동작 -> 5만큼 이동해야함\n\n for event in pygame.event.get():# 어떠한 이벤트가 발생하였는가?\n if event.type == pygame.QUIT: #창을 닫을때(x표시)\n running = False #그때 창을 종료하자\n if event.type == pygame.KEYDOWN:#키가 눌러졌냐?\n if event.key == pygame.K_LEFT:#캐릭터 왼쪽으로\n to_x -= character_speed\n elif event.key ==pygame.K_RIGHT:#캐릭터 오른쪽으로\n to_x +=character_speed\n elif event.key == pygame.K_UP:#캐릭터 위쪽으로\n to_y -= character_speed\n elif event.key == pygame.K_DOWN:#캐릭터 아래쪽으로\n to_y += character_speed\n #방향키를 땠을때(누르지않을때는 멈추도록(=이동변수가 없도록))\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n to_x=0\n elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n to_y=0\n character_x_pos += to_x * dt\n character_y_pos += to_y * dt\n #가로경계값(캐릭터가 화면밖에 나가지 못하도록)\n if character_x_pos <0:\n character_x_pos =screen_width - character_width\n character_x_pos =screen_width - character_width\n #세로경계값\n\n if character_y_pos < 0:\n character_y_pos = 0\n elif character_y_pos>screen_height-character_height:\n character_y_pos = screen_height\n\n #enemy와 충돌처리\n\n character_rect =character.get_rect()\n character_rect.left = character_x_pos\n character_rect.top = character_y_pos\n\n enemy_rect =enemy.get_rect()\n enemy_rect.left = enemy_x_pos#enemy가 고정은 되어있지만rect에 저장해준적이없어서\n enemy_rect.top = enemy_y_pos#해당 코드를 적어준다\n\n #충돌체크\n if(character_rect.colliderect(enemy_rect)):\n print(\"꽝\")\n running = False\n screen.blit(background,(0,0))#배경그리기_(0,0)은 background 나타나는 위치 \n screen.blit(character,(character_x_pos, character_y_pos))#캐릭터 그리기\n screen.blit(enemy,(enemy_x_pos,enemy_y_pos))#적 그리기\n\n #타이머를 출력해보자\n #경과시간을 어떻게 계산할까?\n elapsed_time = (pygame.time.get_ticks()-start_ticks) / 1000#경과시간\n timer = game_font.render(str(int(total_time-elapsed_time)), True, (255,255,255))\n #render 함수: 출력해준다\n screen.blit(timer,(10,10))\n #0초아래면 게임이 종료되도록\n if total_time -elapsed_time <=0:\n print(\"타임아웃\")\n running = False\n\n pygame.display.update()#게임화면 다시그리기_이걸로 계속 업데이트됨 
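# Aside: the two mechanics this loop leans on, as a tiny self-contained
# sketch with made-up numbers (demo_* names are illustrative) -
# frame-rate-independent movement scaled by the clock.tick() delta, and
# box collision via pygame.Rect.colliderect().
import pygame
demo_clock = pygame.time.Clock()
demo_speed = 0.6                    # pixels per millisecond, as above
demo_x = 0.0
for _ in range(3):
    demo_dt = demo_clock.tick(60)   # milliseconds since the previous tick
    demo_x += demo_speed * demo_dt  # same on-screen speed at any frame rate
demo_a = pygame.Rect(0, 0, 70, 70)
demo_b = pygame.Rect(50, 50, 70, 70)
assert demo_a.colliderect(demo_b)   # overlapping rectangles collide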
\n\npygame.time.delay(2000)#2초 후에 게임종료(너무 빨리 종료되니까)\n#pygame 종료\npygame.quit()\n","sub_path":"pygame_basic/Oracsil.py","file_name":"Oracsil.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"153497812","text":"\"\"\"\ncreated by ldolin\n\"\"\"\n\"\"\"\n使用抓包工具抓取信息,模拟有道词典\n\n\n\"\"\"\nimport requests\nimport json\n\nword = input('请输入要翻译的内容:')\ndata ={\n 'i': word,\n 'from': 'AUTO',\n 'to': 'AUTO',\n 'smartresult': 'dict',\n 'client': 'fanyideskweb',\n 'salt': '15650605695226',\n 'sign': '3e8da78d7d97caa873f560d90dbe615d',\n 'ts': '1565060569522',\n 'bv': '53539dde41bde18f4a71bb075fcf2e66',\n 'doctype': 'json',\n 'version': '2.1',\n 'keyfrom': 'fanyi.web',\n 'action': 'FY_BY_REALTlME'\n}\n\nurl = \"http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule\"\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}\nres = requests.post(url, data=data, headers=headers)\nres.encoding = 'utf-8'\nresult = res.text\n\n# 将json格式的字符串转为python的字典\n# 使用json模块中的loads方法:json格式字符串——》python字典\nresult_dict = json.loads(result)\n# print(result_dict)\nr = result_dict[\"translateResult\"][0][0]['tgt']\nprint('翻译之后是:%s' % r)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","sub_path":"day13/youdao_demo.py","file_name":"youdao_demo.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"302528928","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nget_ipython().system('pip list #내장 모듈 리스트 확인 방법')\n\n\n# In[ ]:\n\n\nhelp('sys') #help() 내장함수를 이용하여 모듈 사용 도움말 확인\n\n\n# In[ ]:\n\n\n\"\"\"\nimport[모듈명]\nimport[모듈명]as[Alias명]\nfrom[패키지명]import[모듈명]\nfrom[모듈명]import[클레스명/함수명]\n\"\"\"\n\n\n# In[4]:\n\n\nimport os #여러가지 운영체제를 활용하기 위한 기본 모듈\n\n\n# In[5]:\n\n\nos.getcwd() #실행되는 디렉토리 위치 경로 확인방법\n\n\n# In[6]:\n\n\nos.listdir()\n\n\n# In[ ]:\n\n\n\n\n\n# In[7]:\n\n\nimport numpy as np\n\n\n# In[8]:\n\n\nnp.absolute(-3) #절대값 계산하는 함수\n\n\n# In[9]:\n\n\nnp.sqrt(16) #루트씌운 값 계산하는 함수\n\n\n# In[ ]:\n\n\n\n\n\n# In[10]:\n\n\nfrom scipy import stats \n#from [패키지명] import [모듈명] 싸이파이 패키지안에 스탯츠라는 모듈을 사용하겠다!\n\n\n# In[11]:\n\n\nstats.hmean([1,2,3]) \n\n\n# In[21]:\n\n\nstats.variation([1,2,3]) #분산값 계산하는 함수\n\n\n# In[14]:\n\n\nfrom datetime import datetime\n#from [모듈명] import [클래스명/함수명]\n#datetime이라는 모듈안에 datetime이라는 클래스를 사용하는것\n\n\n# In[15]:\n\n\nnow = datetime.now()\n\n\n# In[17]:\n\n\nnow.year\n\n\n# In[18]:\n\n\nnow.month\n\n\n# In[ ]:\n\n\n\"\"\"\no 모듈의 물리적 위치\n -파이썬에서 모듈 import시 해당 모듈의 물리적 위치 탐색 순서\n 1) 현재 디렉토리\n 2) 환경변수 PYTHONPATH에 지정된 경로\n 3) Python이 설치된 경로 및 하위 라이브러리 디렉토리 경로\n\"\"\"\n\n\n# In[19]:\n\n\nimport sys \n\n\n# In[20]:\n\n\nsys.path #import를 할때 물리적 위치를 찾는 경로를 보여줌\n\n\n# # 11.3 파이썬 사용자 정의 모듈\n\n# In[ ]:\n\n\n\"\"\"\n사용자 정의 모듈\n현재 디렉토리에 pyprint.py로 저장\nmyprint.py 모듈 import 및 tap키로 함수 확인\n\"\"\"\n\n\n# In[23]:\n\n\nimport myprint\n\n\n# In[24]:\n\n\nhello = 'Hello World Python'\n\n\n# In[ ]:\n\n\n\n\n\n# In[25]:\n\n\nmyprint.print1(hello)\n\n\n# In[26]:\n\n\nmyprint.print2(hello)\n\n\n# In[31]:\n\n\nfrom mymodule import myprint2 \n#현재 디렉토리밑에 mymodule이라는 디렉토리 밑에 있는 myprint2라는 패키지를 사용하는것임.\n\n\n# In[32]:\n\n\nmyprint2.print3(hello)\n\n\n# In[33]:\n\n\nmyprint2.print4(hello)\n\n\n# In[ 
]:\n\n\n\n\n","sub_path":"0707실습(5).py","file_name":"0707실습(5).py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"169142479","text":"from odoo import models, api, fields\nfrom odoo.addons.base.models import ir_ui_view\nfrom odoo.tools.safe_eval import safe_eval\nfrom odoo.tools import ConstantMapping\n\nsuper_transfer_node_to_modifiers = ir_ui_view.transfer_node_to_modifiers\n\ndef inherit_transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):\n super_transfer_node_to_modifiers(node, modifiers, context=context, in_tree_view=in_tree_view)\n if context.get(\"DynamicOdo\", False):\n for a in ('invisible', 'readonly', 'required'):\n if node.get(a):\n v = bool(safe_eval(node.get(a), {'context': context or {}}))\n if in_tree_view and a == 'invisible':\n a = \"column_invisible\"\n modifiers[a] = v\n\nir_ui_view.transfer_node_to_modifiers = inherit_transfer_node_to_modifiers\n\n\nclass IrUiView(models.Model):\n _inherit = \"ir.ui.view\"\n\n def _apply_group(self, model, node, modifiers, fields):\n groups = node.get('groups')\n res = super(IrUiView, self)._apply_group(model, node, modifiers, fields)\n if self.env.context.get(\"from_odo_studio\", False) and groups:\n node.set('groups', groups)\n return res\n\n def read_combined(self, fields=None):\n from_odo_studio = self.env.context.get(\"from_odo_studio\", False)\n res = super(IrUiView, self.with_context(inherit_branding=True) if from_odo_studio else self).read_combined(fields=fields)\n return res\n\n def read(self, fields=None, load='_classic_read'):\n report_id = self.env.context.get(\"REPORT_ID\", False)\n res = super(IrUiView, self).read(fields=fields, load=load)\n if len(self) == 1 and self.type == \"qweb\" and report_id:\n template = self.env['odo.studio.report'].search([['view_id', '=', self.id], ['report_id', '=', report_id]], limit=1)\n if len(template):\n for view in res:\n view['arch'] = template.xml\n return res\n\n def _pop_view_branding(self, element):\n from_odo_studio = self.env.context.get(\"from_odo_studio\", False)\n if from_odo_studio:\n movable_branding = ['data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-xpath', 'data-oe-source-id']\n distributed_branding = dict(\n (attribute, element.get(attribute)) for attribute in movable_branding if element.get(attribute))\n return distributed_branding\n else:\n return super(IrUiView, self)._pop_view_branding(element)\n\n\nIrUiView()\n\n\nclass IrUiMenu(models.Model):\n _inherit = 'ir.ui.menu'\n\n model_id = fields.Many2one(string=\"Model\", comodel_name=\"ir.model\")\n\n @api.model\n def create_new_app(self, values):\n app_name = values.get(\"app_name\", False)\n menu_root = values.get(\"root_name\", False)\n if app_name:\n parent_menu = self.create({'name': app_name, 'parent_id': False, 'sequence': 100})\n if menu_root:\n parent_menu = self.create({'name': menu_root, 'parent_id': parent_menu.id, 'sequence': 1})\n values['parent_id'] = parent_menu.id\n values['sequence'] = 1\n if values.get(\"model_name\", False):\n self.create_new_model(values)\n else:\n self.create(values)\n\n\n @api.model\n def create_new_model(self, values):\n model_description = values.get(\"model_description\", False)\n model_name = values.get(\"model_name\", False)\n menu_name = values.get(\"name\", False)\n menu_parent = values.get(\"parent_id\", False)\n if model_name:\n # create new model\n model_values = {'name': model_description, 'model': model_name, 'state': 'manual',\n 
'is_mail_thread': True, 'is_mail_activity': True,\n 'access_ids': [(0, 0, {'name': 'Group No One', 'group_id':\n self.env.ref('base.group_no_one').id, \"perm_read\": True, \"perm_write\": True, \"perm_create\": True, \"perm_unlink\": True})]}\n self.env['ir.model'].create(model_values)\n # create action window\n action_window_values = {'name': 'New Model', 'res_model': model_name,\n 'view_mode': \"tree,form\", 'target': 'current', 'view_id': False}\n action_id = self.env['ir.actions.act_window'].create(action_window_values)\n # create tree view\n view_data = {\"arch\": \"\", \"model\": model_name,\n \"name\": \"{model}.tree \".format(model=model_name)}\n view_id = self.env['odo.studio'].create_new_view(\n {'view_mode': 'tree', 'action_id': action_id.id, \"data\": view_data})\n # create form view\n view_data = {\"arch\": \"
\", \"model\": model_name,\n \"name\": \"{model}.form \".format(model=model_name)}\n self.env['odo.studio'].create_new_view({'view_mode': 'form', 'action_id': action_id.id, \"data\": view_data})\n # create menu\n self.create({'name': menu_name, 'parent_id': menu_parent, 'action': '%s,%s' % ('ir.actions.act_window', action_id.id)})\n # create model data\n self.env['ir.model.data'].create({\n 'module': 'ye_studio',\n 'name': view_data['name'],\n 'model': 'ir.ui.view',\n 'res_id': view_id.id,\n })\n\n @api.model_create_multi\n def create(self, values):\n for value in values:\n if 'new_view' in value:\n del value['new_view']\n res = super(IrUiMenu, self).create(values)\n if res.model_id and not res.action:\n model_action = self.env['ir.actions.act_window'].search([('res_model', '=', res.model_id.model)])\n # , ('view_id', '!=', False)])\n if len(model_action):\n has_view = model_action.filtered(lambda x: x.view_id != False)\n if len(has_view):\n have_tree = has_view.filtered(lambda x: (x.view_mode or \"\").find(\"tree\") >= 0)\n model_action = have_tree if len(have_tree) else has_view\n res.write({'action': '%s,%s' % ('ir.actions.act_window', model_action[0].id)})\n\n return res\n\n @api.model\n def get_form_view_id(self, view_type=None):\n if view_type == 'edit':\n return self.env.ref('ye_dynamic_odoo.edit_menu_form_view').id\n if view_type == \"create\":\n return self.env.ref('ye_dynamic_odoo.create_menu_form_view').id\n\n @api.model\n def prepare_data(self, menu):\n parent_id = menu['parent_id']\n return {\n 'name': menu['name'],\n 'sequence': menu['sequence'],\n 'parent_id': parent_id[0] if parent_id else parent_id,\n }\n\n @api.model\n def update_menu(self, data):\n data_delete = data.get('_delete', False)\n if data_delete:\n self.browse(data_delete).unlink()\n new_ids = {}\n for menu in data['_new']:\n new_ids[menu['id']] = self.create(self.prepare_data(menu)).id\n while len(data['_newAll']) > 0:\n list_create = []\n list_wait = []\n for menu in data['_newAll']:\n list_create.append(menu) if menu['parent_id'][0] in new_ids else list_wait.append(menu)\n data['_newAll'] = list_wait\n for menu in list_create:\n values = self.prepare_data(menu)\n values['parent_id'] = new_ids[menu['parent_id'][0]]\n new_ids[menu['id']] = self.create(values).id\n for menu in data['_parent']:\n values = self.prepare_data(menu)\n values['parent_id'] = new_ids[menu[\"parent_id\"][0]]\n self.browse(menu[\"id\"]).write(values)\n for menu in data['_old']:\n values = self.prepare_data(menu)\n self.browse(menu[\"id\"]).write(values)\n return True\n\n\n\nIrUiMenu()\n","sub_path":"addons/ye_dynamic_odoo/models/ir_ui_view.py","file_name":"ir_ui_view.py","file_ext":"py","file_size_in_byte":8062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"188704600","text":"gid_to_type = {\n 1 : 'All Pick',\n 2 : \"Captain's Mode\",\n 3 : 'Random Draft',\n 4 : 'Single Draft',\n 5 : 'All Random'}\n\ncid_to_region = {\n 111 : 'US West',\n 121 : 'US East',\n 122 : 'US East',\n 131 : 'EUR West',\n 132 : 'EUR West',\n 133 : 'EUR West',\n 151 : 'SE Asia',\n 152 : 'SE Asia',\n 161 : 'China',\n 163 : 'China',\n 171 : 'AUS',\n 181 : 'RUS',\n 182 : 'RUS',\n 191 : 'EUR East',\n 200 : 'South America'}\n\nhid_to_str = {\n 1 : 'Antimage',\n 2 : 'Axe',\n 3 : 'Bane',\n 4 : 'Blood Seeker',\n 5 : 'Crystal Maiden',\n 6 : 'Drow Ranger',\n 7 : 'Earthshaker',\n 8 : 'Juggernaut',\n 9 : 'Mirana',\n 10 : 'Morphling',\n 11 : 'Nevermore',\n 12 : 'Phantom Lancer',\n 13 : 'Puck',\n 14 : 'Pudge',\n 15 : 
'Razor',\n 16 : 'Sand King',\n 17 : 'Storm Spirit',\n 18 : 'Sven',\n 19 : 'Tiny',\n 20 : 'Vengeful Spirit',\n 21 : 'Windrunner',\n 22 : 'Zeus',\n 23 : 'Kunkka',\n 24 : '',\n 25 : 'Lina',\n 26 : 'Lion',\n 27 : 'Shadow Shaman',\n 28 : 'Slardar',\n 29 : 'Tidehunter', \n 30 : 'Witch Doctor',\n 31 : 'Lich',\n 32 : 'Riki',\n 33 : 'Enigma',\n 34 : 'Tinker',\n 35 : 'Sniper',\n 36 : 'Necrolyte',\n 37 : 'Warlock',\n 38 : 'Beastmaster',\n 39 : 'Queen of Pain',\n 40 : 'Venomancer',\n 41 : 'Faceless Void',\n 42 : 'Skeleton King',\n 43 : 'Death Propher',\n 44 : 'Phantom Assassin',\n 45 : 'Pugna',\n 46 : 'Templar Assassin',\n 47 : 'Viper',\n 48 : 'Luna',\n 49 : 'Dragon Knight',\n 50 : 'Dazzle',\n 51 : 'Rattletrap',\n 52 : 'Leshrac',\n 53 : 'Furion',\n 54 : 'Lifestealer',\n 55 : 'Dark Seer',\n 56 : 'Clinkz',\n 57 : 'Omniknight',\n 58 : 'Enchantress',\n 59 : 'Huskar',\n 60 : 'Night Stalker',\n 61 : 'Broodmother',\n 62 : 'Bounty Hunter',\n 63 : 'Weaver',\n 64 : 'Jakiro',\n 65 : 'Batrider',\n 66 : 'Chen',\n 67 : 'Spectre',\n 68 : 'Ancient Apparition',\n 69 : 'Doom Bringer',\n 70 : 'Ursa',\n 71 : 'Spirit Breaker',\n 72 : 'Gryocopter',\n 73 : 'Alchemist',\n 74 : 'Invoker',\n 75 : 'Silencer',\n 76 : 'Obsidian Destroyer',\n 77 : 'Lycan',\n 78 : 'Brewmaster',\n 79 : 'Shadow Demon',\n 80 : 'Lone Druid',\n 81 : 'Chaos Knight',\n 82 : 'Meepo',\n 83 : 'Treant Protector',\n 84 : 'Ogre Magi',\n 85 : 'Undying',\n 86 : 'Rubick',\n 87 : 'Disruptor',\n 88 : 'Nyx Assassin',\n 89 : 'Naga Siren',\n 90 : 'Keeper of the Light',\n 91 : 'Wisp',\n 92 : 'Visage',\n 93 : 'Slark',\n 94 : 'Medusa',\n 95 : 'Troll Warlord',\n 96 : 'Centaur Warchief',\n 97 : 'Magnus',\n 98 : 'Shredder Timbersaw',\n 99 : 'Bristleback',\n 100 : 'Tusk',\n 101 : 'Skywrath Mage',\n 102 : 'Abaddon',\n 103 : 'Elder Titan',\n 104 : 'Legion Commander'}\n\niid_to_str = {\n 0 : '',\n 1 : 'Blink Dagger',\n 2 : 'Blades of Attack',\n 3 : 'Broadswoard',\n 4 : 'Chainmail',\n 5 : 'Claymore',\n 6 : 'Helm of Iron Will',\n 7 : 'Javelin',\n 8 : 'Mithril Hammer',\n 9 : 'Platemail',\n 10 : 'Quarterstaff',\n 11 : 'Quelling Blade',\n 12 : 'Ring of Protection',\n 13 : 'Gauntlets',\n 14 : 'Slippers',\n 15 : 'Mantle',\n 16 : 'Branches',\n 17 : 'Belt of Strength',\n 18 : 'Band of Elvenskin',\n 19 : 'Robe',\n 20 : 'Circlet',\n 21 : 'Ogre Axe',\n 22 : 'Blades of Alacrity',\n 23 : 'Staff of Wizardry',\n 24 : 'Ultimate Orb',\n 25 : 'Gloves',\n 26 : 'Lifesteal',\n 27 : 'Ring of Regen',\n 28 : 'Sobi Mask',\n 29 : 'Boots',\n 30 : 'Gem',\n 31 : 'Cloak',\n 32 : 'Talisman of Evasion',\n 33 : 'Cheese',\n 34 : 'Magic Stick',\n 35 : 'Recipe : Magic Wand',\n 36 : 'Magic wand',\n 37 : 'Ghost',\n 38 : 'Clarity',\n 39 : 'Flask',\n 40 : 'Dust',\n 41 : 'Bottle',\n 42 : 'Observer Wards',\n 43 : 'Sentry Wards',\n 44 : 'Tango',\n 45 : 'Courier',\n 46 : 'Teleport Scroll',\n 47 : 'Recipe : Boots of Travel',\n 48 : 'Boots of Travel',\n 49 : 'Recipe : Phase Boots',\n 50 : 'Phase Boots',\n 51 : 'Demon Edge',\n 52 : 'Eagle Horn',\n 53 : 'Reaver',\n 54 : 'Sacred Relic',\n 55 : 'Hyperstone',\n 56 : 'Ring of Health',\n 57 : 'Void Stone',\n 58 : 'Mystic Staff',\n 59 : 'Energy Booster',\n 60 : 'Point Booster',\n 61 : 'Vitality Booster',\n 62 : 'Recipe : Power Treads',\n 63 : 'Power Treads',\n 64 : 'Recipe : Hand of Midas',\n 65 : 'Hand of Midas',\n 66 : 'Recipe : Oblivion Staff',\n 67 : 'Oblivion Staff',\n 68 : 'Recipe : Perseverence',\n 69 : 'Perseverence',\n 70 : \"Recipe : Poor Man's Shield\",\n 71 : \"Poor Man's Shield\",\n 72 : 'Recipe : Bracer',\n 73 : 'Bracer',\n 74 : 'Recipe : Wraith Band',\n 75 : 'Wraith 
Band',\n 76 : 'Recipe : Null Talisman',\n 77 : 'Null Talisman',\n 78 : 'Recipe : Mekansm',\n 79 : 'Mekansm',\n 80 : 'Recipe : Vladimir',\n 81 : 'Vladimir',\n 82 : '',\n 83 : '',\n 84 : 'Flying Courier',\n 85 : 'Recipe : Buckler',\n 86 : 'Buckler',\n 87 : 'Recipe : Ring of Basilius',\n 88 : 'Ring of Basilius',\n 89 : 'Recipe : Pipe',\n 90 : 'Pipe',\n 91 : 'Recipe : Urn of Shadows',\n 92 : 'Urn of Shadows',\n 93 : 'Recipe : Headdress',\n 94 : 'Headdress',\n 95 : 'Recipe : Sheepstick',\n 96 : 'Sheepstick',\n 97 : 'Recipe : Orchid',\n 98 : 'Orchid',\n 99 : 'Recipe : Cyclone',\n 100 : 'Cyclone',\n 101 : 'Recipe : Force Staff',\n 102 : 'Force Staff',\n 103 : 'Recipe : Dagon',\n 104 : 'Dagon',\n 105 : 'Recipe : Necronomicon',\n 106 : 'Necronomicon',\n 107 : 'Recipe : Ultimate Scepter',\n 108 : 'Ultimate Scepter',\n 109 : 'Recipe : Refresher',\n 110 : 'Refresher',\n 111 : 'Recipe : Assault Curiass',\n 112 : 'Assault Curiass',\n 113 : 'Recipe : Heart',\n 114 : 'Heart',\n 115 : 'Recipe : Black King Bar',\n 116 : 'Black King Bar',\n 117 : 'Aegis',\n 118 : \"Recipe : Shiva's Gaurd\",\n 119 : \"Shiva's Gaurd\",\n 120 : 'Recipe : Bloodstone',\n 121 : 'Bloodstone',\n 122 : \"Recipe : Linken's Sphere\",\n 123 : \"Linken's Sphere\",\n 124 : 'Recipe : Vangaurd',\n 125 : 'Vangaurd',\n 126 : 'Recipe : Blademail',\n 127 : 'Blademail',\n 128 : 'Recipe : Soul Booster',\n 129 : 'Soul Booster',\n 130 : 'Recipe : Hood of Defiance',\n 131 : 'Hood of Defiance',\n 132 : 'Recipe : Rapier',\n 133 : 'Rapier',\n 134 : 'Recipe : Monkey King Bar',\n 135 : 'Monkey King Bar',\n 136 : 'Recipe : Radiance',\n 137 : 'Radiance',\n 138 : 'Recipe : Butterfly',\n 139 : 'Butterfly',\n 140 : 'Recipe : Daedalus',\n 141 : 'Daedalus',\n 142 : 'Recipe : Basher',\n 143 : 'Basher',\n 144 : 'Recipe : Battlefury',\n 145 : 'Battlefury',\n 146 : 'Recipe : Mantastyle',\n 147 : 'Mantastyle',\n 148 : 'Recipe : Crystalys',\n 149 : 'Crystalys',\n 150 : 'Recipe : Armlet',\n 151 : 'Armlet',\n 152 : 'Shadowblade',\n 153 : 'Recipe : Sange and Yasha',\n 154 : 'Sange and Yasha',\n 155 : 'Recipe : Satanic',\n 156 : 'Satanic',\n 157 : 'Recipe : Mjollnir',\n 158 : 'Mjollnir',\n 159 : 'Recipe : Skadi',\n 160 : 'Skadi',\n 161 : 'Recipe : Sange',\n 162 : 'Sange',\n 163 : 'Recipe : Helm of the Dominator',\n 164 : 'Helm of the Dominator',\n 165 : 'Recipe : Maelstrom',\n 166 : 'Maelstrom',\n 167 : 'Recipe : Desolator',\n 168 : 'Desolator',\n 169 : 'Recipe : Yasha',\n 170 : 'Yasha',\n 171 : 'Recipe : Mask of Madness',\n 172 : 'Mask of Madness',\n 173 : 'Recipe : Diffusal Blade',\n 174 : 'Diffusal Blade',\n 175 : 'Recipe : Ethereal Blade',\n 176 : 'Ethereal Blade',\n 177 : 'Recipe : Soul Ring',\n 178 : 'Soul Ring',\n 179 : 'Recipe : Arcane Boots',\n 180 : 'Arcane Boots',\n 181 : 'Orb of Venom',\n 182 : 'Stout Shield', \n 183 : 'Recipe : Shadowblade',\n 184 : 'Recipe : Drums',\n 185 : 'Drums',\n 186 : 'Recipe : Medallion of Courage',\n 187 : 'Medallion of Courage',\n 188 : 'Smoke of Deceit',\n 189 : 'Recipe : Veil of Discord',\n 190 : 'Veil of Discord',\n 191 : 'Recipe : Necronomicon 2',\n 192 : 'Necronomicon 2',\n 193 : 'Recipe : Necronomicon 3',\n 194 : 'Necronomicon 3',\n 195 : 'Recipe : Diffusal Blade 2',\n 196 : 'Diffusal Blade 2',\n 197 : 'Recipe : Dagon 2',\n 198 : 'Recipe : Dagon 3',\n 199 : 'Recipe : Dagon 4',\n 200 : 'Recipe : Dagon 5',\n 201 : 'Dagon 2',\n 202 : 'Dagon 3',\n 203 : 'Dagon 4',\n 204 : 'Dagon 5',\n 205 : 'Recipe : Rod of Atos',\n 206 : 'Rod of Atos',\n 207 : 'Recipe : Abyssal Blade',\n 208 : 'Abyssal Blade',\n 209 : \"Recipe : Heaven's 
Halbred\",\n 210 : \"Heaven's Halbred\",\n 211 : 'Recipe : Ring of Aquila',\n 212 : 'Ring of Aquila',\n 213 : 'Recipe : Tranquil Boots',\n 214 : 'Tranquil Boots',\n 215 : 'Shadow Amulet',\n 216 : 'Halloween Candy Corn',\n 217 : 'Mystery Hook',\n 218 : 'Mystery Arrow',\n 219 : 'Mystry Missle',\n 220 : 'Mystery Toss',\n 221 : 'Mystry Vacuum',\n 226 : 'Halloween Rapier',\n 227 : 'Present',\n 228 : 'Greevil Whistle',\n 229 : 'Winter Stocking',\n 230 : 'Winter Skates',\n 231 : 'Winter Cake',\n 232 : 'Winter Cookie',\n 233 : 'Winter CoCo',\n 234 : 'Winter Ham',\n 235 : 'Toggleable Whistle',\n 236 : 'Winter Kringle',\n 237 : 'Winter Mushroom',\n 238 : 'Winter Greevil Treat',\n 239 : 'Winter Greevil Garbage',\n 240 : 'Winter Greevil Chewy'}\n","sub_path":"dota2_ids.py","file_name":"dota2_ids.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"552679353","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nThis is a scrip used to test while recycle.\n'''\n\n__author__ = 'leanna li'\n\ndef main():\n sandwich_orders = ['tuan', 'bacon', 'sausage', 'omelette']\n finished_sandwiches = []\n while sandwich_orders:\n temp = sandwich_orders.pop()\n print('I made your ' + temp + ' sandwich.')\n finished_sandwiches.append(temp)\n print(finished_sandwiches)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"7-8.py","file_name":"7-8.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"238668981","text":"from django.shortcuts import render\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UploadFileForm\nfrom .store_file import store_file_to_azure\nfrom .logic import handle_uploaded_file\nfrom .charts import charts, datasummary\nimport os\nfrom datetime import datetime, timedelta\n\n@login_required\ndef index(request):\n template = loader.get_template(\"uploads/index.html\")\n context = {'show_upload_status': False, 'uploaded_file_url': False, 'form': UploadFileForm()}\n\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n fs = FileSystemStorage()\n filename = fs.save('{}.xlsx'.format('_'.join([request.POST['tipo'],str(request.user),\\\n request.POST['periodo']])),request.FILES['archivo'])\n url = fs.url(filename)\n\n upload_file_handle, errors = handle_uploaded_file(request.FILES['archivo'],request.POST['tipo'], ' '.join([str(request.POST['periodo']).title(),str(datetime.today().year)]))\n\n if upload_file_handle:\n\n store_file_to_azure(url)\n\n script, div = charts(request.FILES['archivo'], request.POST['tipo'])\n\n summary = datasummary(request.FILES['archivo'])\n\n context['resumen'] = summary\n context['show_upload_status'] = True\n context['uploaded_file_success'] = True\n context['script'] = script\n context['div'] = div\n\n return render(request, 'uploads/index.html', context)\n else:\n errors_list = '
&nbsp;&nbsp;&nbsp;&nbsp;'\n for key, value in errors.items():\n if not value:\n errors_list += '<br/>&nbsp;&nbsp;&bull; ' + key + '<br/>&nbsp;&nbsp;&bull; '\n errors_list += '<br/>
'\n context['show_upload_status'] = True\n context['uploaded_file_success'] = False\n context['error_list'] = errors_list\n os.remove(url)\n return render(request, 'uploads/index.html', context)\n else:\n return HttpResponse(\"Form not valid\")\n else:\n return render(request, 'uploads/index.html', context)\n\n@login_required\ndef get_user_profile(request,username):\n user = User.objects.get(username=username)\n return render(request, 'uploads')\n","sub_path":"mysite/mysite/mysite/uploads/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"78997913","text":"__author__ = 'Irwan Fathurrahman '\n__date__ = '27/08/20'\n\nfrom django.contrib.gis.db import models\nfrom gwml2.models.general import Quantity\nfrom gwml2.models.term import TermReferenceElevationType\n\n\nclass Geology(models.Model):\n \"\"\" Geology\n \"\"\"\n total_depth = models.OneToOneField(\n Quantity, on_delete=models.SET_NULL,\n null=True, blank=True,\n help_text='Depth of the well below the ground surface.'\n )\n reference_elevation = models.ForeignKey(\n TermReferenceElevationType, on_delete=models.SET_NULL,\n null=True, blank=True\n )\n\n class Meta:\n db_table = 'geology'\n","sub_path":"models/geology.py","file_name":"geology.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"10954691","text":"from PyQt5 import QtWidgets\nfrom gui import main\nimport controls\nimport sys\n\n\nclass mainwindow(QtWidgets.QMainWindow):\n def __init__(self):\n super(mainwindow, self).__init__()\n self.Form = QtWidgets.QWidget()\n self.ui = main.Ui_Form()\n self.ui.setupUi(self.Form)\n self.mmc = None\n self.epiShutter = 0\n self.diaShutter = 0\n self.stateLive = 0\n self.ui.epiToggleBtn.clicked.connect(self.epi_toggle)\n self.ui.diaToggleBtn.clicked.connect(self.dia_toggle)\n self.ui.loadMicroscopeBtn.clicked.connect(self.load_microscope)\n self.ui.unloadBtn.clicked.connect(self.unload_microscope)\n self.ui.eyepieceRadBtn.clicked.connect(self.eyepiece_toogle)\n self.ui.snapshotBtn.clicked.connect(self.snap_image)\n self.ui.liveToggleBtn.clicked.connect(self.live)\n\n def snap_image(self):\n img = controls.snap_image(self.mmc, exposure_time=200)\n self.display_image(img)\n\n def display_image(self, img):\n self.ui.widget.canvas.ax.imshow(img, cmap=\"gray\")\n self.ui.widget.canvas.draw()\n\n def live(self):\n if self.stateLive is 0:\n self.stateLive = 1\n self._live()\n else:\n self.stateLive = 0\n\n def _live(self):\n self.mmc.setCircularBufferMemoryFootprint(100)\n self.mmc.startContinuousSequenceAcquisition(1)\n while self.mmc.isSequenceRunning():\n img = self.mmc.getLastImage()\n if self.mmc.getRemainingImageCount() > 0:\n img = self.mmc.getLastImage()\n self.display_image(img)\n\n if stateLive is 0:\n self._stop_live()\n break\n\n def _stop_live(self):\n self.mmc.stopSequenceAcquisition()\n\n def load_microscope(self):\n if self.mmc is None:\n self.mmc = controls.loadDevices(\"\\configs\\Bright_Star.cfg\")\n self.mmc.initializeAllDevices()\n self.ui.loadMicroscopeBtn.setEnabled(False)\n self.ui.unloadBtn.setEnabled(True)\n\n def unload_microscope(self):\n self.mmc.reset()\n self.mmc = None\n self.ui.unloadBtn.setEnabled(False)\n self.ui.loadMicroscopeBtn.setEnabled(True)\n\n def epi_toggle(self):\n if self.epiShutter is 0:\n self.epiShutter = 1\n controls.shutter_control(self.mmc, \"epi\", self.epiShutter)\n 
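# Aside: a minimal standalone sketch (hypothetical names) of the flag-flip
# pattern used by epi_toggle/dia_toggle. Note that `state == 0` is the safe
# comparison; `state is 0` only works because CPython caches small integers.
def demo_toggle(state):
    """Flip a 0/1 shutter flag and return it with the implied action."""
    if state == 0:
        return 1, 'open'
    return 0, 'close'

demo_state = 0
demo_state, demo_action = demo_toggle(demo_state)  # -> 1, 'open'
demo_state, demo_action = demo_toggle(demo_state)  # -> 0, 'close'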
self.ui.epiToggleBtn.setText(\"ON\")\n\n else:\n self.epiShutter = 0\n controls.shutter_control(self.mmc, \"epi\", self.epiShutter)\n self.ui.epiToggleBtn.setText(\"EPI\")\n\n def dia_toggle(self):\n if self.diaShutter is 0:\n self.diaShutter = 1\n controls.shutter_control(self.mmc, \"dia\", self.diaShutter)\n self.ui.diaToggleBtn.setText(\"ON\")\n\n else:\n self.diaShutter = 0\n controls.shutter_control(self.mmc, \"dia\", self.diaShutter)\n self.ui.diaToggleBtn.setText(\"DIA\")\n\n def eyepiece_toogle():\n pass\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n application = mainwindow()\n application.Form.show()\n sys.exit(app.exec_())\n","sub_path":"deepthought/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"436065968","text":"import sys\n\n#find the mininmum excluded number of any list of numbers\n#iterate through until you get all the way through without a member of the list \n#equaling minex, if that happens you have your minex\ndef mex(lst):\n\t\n\tminex = 0\n\ttraversed = False\n\n\twhile traversed != True:\n\t\ttraversed = True\n\t\tfor i in range(len(lst)):\n\t\t\tif lst[i] == minex:\n\t\t\t\ttraversed = False\n\t\t\t\tminex += 1\n\t\t\t\tbreak\n\n\treturn minex\n\n#finds the grundy number for n, and also updates the grundynumbers table in the \n#process so we don't have to recalculate values\ndef grundy(n):\n\tif(grundynumbers[n] == None): # if we haven't seen this before\n\t\tif n == 0:\n\t\t\tgrundynumbers[n] = 0 #base case, update table\n\t\telif n == 1:\n\t\t\tgrundynumbers[n] = 1 #base case\n\t\telif n == 2:\n\t\t\tgrundynumbers[n] = 0 #base case\n\t\telse:\n\t\t\tarraytomex = []\n\n\t\t\tarray = [1] * n\n\t\t\tfor i in range(n-2): #create pin representation\n\t\t\t\tarray[i] = 0\n\t\t\t\tarray[i+1] = 0\n\n\t\t\t\tcount = 0\n\t\t\t\tlst = []\n\n\t\t\t\tfor j in range(len(array)): #count consecutive pins then add to list\n\t\t\t\t\tif array[j] == 0:\n\t\t\t\t\t\tif count != 0:\n\t\t\t\t\t\t\tlst.append(count)\n\t\t\t\t\t\t\tcount = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tcount +=1\n\t\t\t\tif count != 0:\n\t\t\t\t\tlst.append(count)\n\n\t\t\t\tnimber = 0\n\t\t\t\tfor k in lst:\n\t\t\t\t\tnimber = nimber ^ grundy(k) #XOR is commutative, XOR to find grundy number \n\t\t\t\tarraytomex.append(nimber) #to be added to array and mexed\n\n\t\t\t\tarray[i] = 1\n\t\t\tgrundynumbers[n] = mex(arraytomex) #update table with new value\n\t\n\treturn grundynumbers[n] #return grundy number for n\n\n#method to break apart and analyze the pin representation\ndef decipher(s):\n\t\n\tlst = []\n\tcount = 0\n\tfor i in s:\n\t\tif i == '.':\n\t\t\tif count != 0: #if we have seen some x's before, add them to our list\n\t\t\t\tlst.append(count)\n\t\t\t\tcount = 0\n\t\telse:\n\t\t\tcount +=1\n\tif count != 0: #if we run off the end of the string before seeing another '.'\n\t\tlst.append(count)\n\tnimber = 0\n\tfor k in lst:\n\t\tnimber = grundy(nimber) ^ grundy(k)\n\tif nimber <= 0: #a non-positive grundy number indicates loss\n\t\treturn ('LOSS')\n\n\telse: #if n position then we need to show move to p position\n\t\t\tcopy = ''\n\t\t\tfor i in range(len(s)):\n\t\t\t\tif s[i] == 'x' and (i == len(s)-1 or s[i+1] == '.') and (i==0 or s[i-1] == '.'):\n\t\t\t\t\tcopy = s[:i] + '.' 
+ s[i+1:]\n\t\t\t\t\tif (decipher(copy) == 'LOSS'):\n\t\t\t\t\t\treturn copy\n\n\n#if given the empty set, that is a p position and you lose\nif len(sys.argv) == 1:\n\tprint('LOSS') \n\t\nelif len(sys.argv) == 2:\n\tfor i in sys.argv[1]:\n\t\tif i != 'x' and i != '.':\n\t\t\tprint('wrong input!')\n\t\t\texit(0)\n\n\t#initialize table\n\tgrundynumbers = [None] * (len(sys.argv[1])+1)\n\t\n\t#if given string results in a loss, print loss otherwise indicate winning move\n\tresult = decipher(sys.argv[1])\n\tprint(result)\n\t\nelif len(sys.argv) == 3:\n\t#number to find the grundy of n\n\n\tif sys.argv[1] != 'grundy':\n\t\tprint('wrong input!')\n\t\texit(0)\n\ttry:\n\t\tint(sys.argv[2])\n\t\tn = int(sys.argv[2])\n\texcept ValueError:\n\t\tprint('You\\'re argument was not a number!')\n\t\texit(0)\n\n\t#initialize table\n\tgrundynumbers = [None] * (n+1)\n\n\t#print list of grundy numbers\n\tfor i in range(n+1):\n\t\tgrundynumbers[i] = grundy(i)\n\tprint(grundynumbers)\n\nelse:\n\tprint('Too many arguments!')\n\texit(0)","sub_path":"grundy.py","file_name":"grundy.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"140810841","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 22 19:40:15 2018\r\n\r\n@author: v-tabhav\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\ndata = pd.read_csv(\"train.csv\", index_col=\"Loan_ID\")\r\ndata = pd.read_csv(\"C:\\\\Users\\\\v-tabhav\\\\Desktop\\\\Python Traun\\\\train.csv\") #Reading the dataset in a dataframe using Pandas\r\n\r\n#1 – Boolean Indexing\r\n#What do you do, if you want to filter values of a column based on conditions from another set of columns? \r\n\r\ndata.loc[(data[\"Gender\"]==\"Female\") & (data[\"Education\"]==\"Not Graduate\") & (data[\"Loan_Status\"]==\"Y\"), [\"Gender\",\"Education\",\"Loan_Status\"]]\r\n\r\n\r\n#2 – Apply Function\r\n#It is one of the commonly used functions for playing with data and creating new variables. Apply returns some value after passing each row/column of a data frame with some function. The function can be both default or user-defined. For instance, here it can be used to find the #missing values in each row and column.\r\n\r\n#Create a new function:\r\ndef num_missing(x):\r\n return sum(x.isnull())\r\n\r\n#Applying per column:\r\nprint (\"Missing values per column:\")\r\nprint (data.apply(num_missing, axis=0)) #axis=0 defines that function is to be applied on each column\r\n\r\n#Applying per row:\r\nprint (\"\\nMissing values per row:\")\r\nprint (data.apply(num_missing, axis=1).head()) \r\n#axis=1 defines that function is to be applied on each row","sub_path":"Panda work hacks.py","file_name":"Panda work hacks.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"552527098","text":"\"\"\"\nviews.py\nWhere the display rendering code goes\n\nThe Hello World app places the string \"Hello World!\" at the center of the\nscreen. Arrow key presses will move the text in the corresponding direction\none space only i.e. the text can take one of 5 positions: up, down, left, right,\nand center. 
A space bar press will randomly change the color of the text.\n\"\"\"\nfrom core import UnlockView\n\n\nclass HelloWorldView(UnlockView):\n def __init__(self, canvas, model):\n super(HelloWorldView, self).__init__(canvas, model)\n self.label = self.create_label(\"Hello World!\")\n\n def render(self):\n if self.model.is_dirty():\n self.label.x = self.canvas.width*self.model.text_position[0]\n self.label.y = self.canvas.height*self.model.text_position[1]\n self.label.color = self.model.text_color\n","sub_path":"plugins/apps/helloworld/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"564248023","text":"def partition(li,s,e):\r\n #target of partition is to place pivot at its correct position and all elements less than pviot before pivot\r\n #and all greater than pivot after after pivot\r\n \r\n pivot=li[s]\r\n c=0\r\n for z in range(s,e+1):\r\n if li[z]=pivot:\r\n j=j-1\r\n else:\r\n li[i],li[j]=li[j],li[i]\r\n i=i+1\r\n j=j-1\r\n\r\n return s+c\r\ndef quickSort(arr, start, end):\r\n if start >= end:\r\n return\r\n pivot_index=partition(arr,start,end)\r\n quickSort(arr,start,pivot_index-1)\r\n quickSort(arr,pivot_index+1,end)\r\n\r\nn=int(input())\r\narr=list(int(i) for i in input().strip().split(' '))\r\nquickSort(arr, 0, n-1)\r\nprint(*arr)\r\n","sub_path":"recursionQuickSort.py","file_name":"recursionQuickSort.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"640596955","text":"#Write a function, which takes a non-negative integer (seconds) as input and returns the time in a\n#human-readable format (HH:MM:SS) Test.assert_equals(make_readable(86399), \"23:59:59\")\nnum = 86399\n#num = 5\n\nif num >= 3600:\n rtrnLst=(num//3600,(num%3600)//60,(num%3600)%60)\n\nelif num >= 60:\n rtrnLst=('00:'+str(num//60)+':'+str((num%3600)%60))\n\nelif num < 60 and num >= 10:\n rtrnLst=('00:00:'+str(num))\n #rtrnLst.append(num)\nelse:\n rtrnLst=('00:00:0'+str(num))\n\nprint(rtrnLst)\n","sub_path":"staticClock.py","file_name":"staticClock.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"37301454","text":"# Author: Sam Youles\n# 31/5/19\n\nimport numpy as np\nimport healpy as hp\nimport sys\nimport glob\nimport os\nimport fitsio\nfrom kappa_lya import *\nimport argparse\n\n\n#-- Input arguments\n\nif __name__=='__main__':\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Compute the convergence (kappa) between qsos and deltas.')\n\n parser.add_argument('--mapdir', required=True, type=str, \\\n help='folder containing input maps')\n parser.add_argument('--mapnumber', required=False, type=int, default=1, \\\n help='index number of input map')\n args, unknown = parser.parse_known_args()\n\n mapdir = args.mapdir\n mapnumber = args.mapnumber\n mapname = '{}/kappa_input{}.fits'.format(mapdir, mapnumber)\n\n #-- Create angular power spectrum of kappa\n theory = Theory()\n ell, cell = theory.get_cl_kappa(2.1, kmax=100., nz=100, lmax=10000)\n\n #-- Create kappa map\n nside=1024\n npix=nside**2*12\n seed=int(mapnumber)\n np.random.seed(seed)\n kappa = create_gaussian_kappa(ell, cell, nside=nside, seed=seed)\n hp.fitsfunc.write_map(mapname, kappa.A, column_names='I', fits_IDL=False, 
overwrite=True)\n\n","sub_path":"bin/make_input_maps.py","file_name":"make_input_maps.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"553751460","text":"from elasticsearch import Elasticsearch, helpers\n\nclass ElasticsearchHelper:\n def __init__(self, es_host, name, analyzer='my_analyzer'):\n self.__es = Elasticsearch(hosts = [es_host])\n self.__name = name\n self.__analyzer = analyzer\n\n def setIndex(self, name):\n self.__name = name\n\n def exists(self):\n return self.__es.indices.exists(self.__name)\n\n def delete(self):\n if self.exists():\n return self.__es.indices.delete(index=self.__name)\n\n def create(self, delete=False, **args):\n if delete:\n self.delete()\n if not self.exists():\n return self.__es.indices.create(index = self.__name, **args)\n\n def bulk(self, **args):\n return self.__es.bulk(index = self.__name, **args)\n\n def streaming_bulk(self, **args):\n return helpers.streaming_bulk(self.__es, index=self.__name, **args)\n\n def search(self, **args):\n return self.__es.search(index = self.__name, **args)\n\n def get(self, **args):\n return self.__es.get(index=self.__name, **args)\n\n def mtermvectors(self, ids, **args):\n i=0\n batch = 200\n while i 0:\n args[\"body\"]={\"ids\":ids[:1000]}\n ids = ids[1000:]\n res.extend(self.__es.mget(index=self.__name, **args)['docs'])\n return res\n\n","sub_path":"project/data_parser/storage/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"159276560","text":"'''\nDjango Generic View Extensions\n\nFilterset extensions\n\ndjango-url-filter is a great package that parses GET parameters into Django filter arguments. \n\n http://django-url-filter.readthedocs.io/en/latest/\n\nAlas it does not have a nice way to pretty print the filter for reporting it on views, \nnor for extracting the filter options cleanly for reconstructing a URL or QuerySet.\n'''\n# Python imports\nimport urllib.parse\nimport datetime\nimport re\n\n# Django imports\nfrom django.utils.formats import localize\nfrom django.utils.safestring import mark_safe\n\noperation_text = {\n \"exact\" : \" = \",\n \"iexact\" : \" = \",\n \"contains\" : \" contains \",\n \"icontains\" : \" contains \",\n \"startswith\" : \" starts with \",\n \"istartswith\" : \" starts with \",\n \"endswith\" : \" ends with \",\n \"iendswith\" : \" ends with \",\n \"range\" : \" is between \",\n \"isnull\" : \" is NULL \",\n \"regex\" : \" matches \",\n \"iregex\" : \" matches \",\n \"in\" : \" is in \",\n \"gt\" : \" > \",\n \"gte\" : \" >= \",\n \"lt\" : \" < \",\n \"lte\" : \" <= \",\n # Date modifiers, probably not relevant in filters? 
{"seq_id":"159276560","text":"'''\nDjango Generic View Extensions\n\nFilterset extensions\n\ndjango-url-filter is a great package that parses GET parameters into Django filter arguments. \n\n    http://django-url-filter.readthedocs.io/en/latest/\n\nAlas it does not have a nice way to pretty print the filter for reporting it on views, \nnor for extracting the filter options cleanly for reconstructing a URL or QuerySet.\n'''\n# Python imports\nimport urllib.parse\nimport datetime\nimport re\n\n# Django imports\nfrom django.utils.formats import localize\nfrom django.utils.safestring import mark_safe\n\noperation_text = {\n    \"exact\" : \" = \",\n    \"iexact\" : \" = \",\n    \"contains\" : \" contains \",\n    \"icontains\" : \" contains \",\n    \"startswith\" : \" starts with \",\n    \"istartswith\" : \" starts with \",\n    \"endswith\" : \" ends with \",\n    \"iendswith\" : \" ends with \",\n    \"range\" : \" is between \",\n    \"isnull\" : \" is NULL \",\n    \"regex\" : \" matches \",\n    \"iregex\" : \" matches \",\n    \"in\" : \" is in \",\n    \"gt\" : \" > \",\n    \"gte\" : \" >= \",\n    \"lt\" : \" < \",\n    \"lte\" : \" <= \",\n    # Date modifiers, probably not relevant in filters? If so, they may need some special handling.\n    # \"date\" : \"__date\",\n    # \"year\" : \"__year\",\n    # \"quarter\" : \"__quarter\",\n    # \"month\" : \"__month\",\n    # \"day\" : \"__day\",\n    # \"week\" : \"__week\",\n    # \"week_day\" : \"__weekday\",\n    # \"time\" : \"__time\",\n    # \"hour\" : \"__hour\",\n    # \"minute\" : \"__minute\",\n    # \"second\" : \"__second\",\n    } \n\ndef fix(obj):\n    '''\n    There's a sad, known round-trip problem with date_times in Python 3.6 and earlier.\n    The str() value of a datetime for timezone-aware date_times produces a timezone \n    format of ±[hh]:[mm], but the django DateTimeField does not recognize this format\n    when it validates inputs (because it uses datetime.strptime()); this is a known\n    round-trip bug discussed for years here:\n    \n    https://bugs.python.org/issue15873\n    \n    fix() is like str() except that, for datetime objects only, it removes that offending colon\n    so the datetime can be parsed with a strptime format of '%Y-%m-%d %H:%M:%S%z' (which\n    must be added to Django's DATETIME_INPUT_FORMATS if it's going to support round-\n    tripping on DateTimeFields). \n    '''\n    if isinstance(obj, datetime.datetime):\n        return re.sub(r'(\\+|\\-)(\\d\\d):(\\d\\d)$',r'\\1\\2\\3',str(obj))\n    else: \n        return str(obj)\n    \ndef get_field(model, components, component=0):\n    '''\n    Gets a field given the components of a filterset specification. \n    :param model: The model in which the identified component is expected to be a field \n    :param components: A list of components\n    :param component: An index into that list identifying the component to consider\n    '''\n    def model_field(model, field_name):\n        for field in model._meta.fields:\n            if field.attname == field_name:\n                return field\n        return None\n    \n    field_name = components[component]\n    field = getattr(model, field_name, None)\n    \n    # To Many fields \n    if hasattr(field, \"rel\"):\n        if component+1 < len(components):\n            if field.rel.many_to_many:\n                field = get_field(field.field.related_model, components, component+1) \n            elif field.rel.one_to_many:\n                field = get_field(field.field.model, components, component+1) \n    \n    # To One fields \n    elif hasattr(field, \"field\"): \n        if component+1 < len(components):\n            field = get_field(field.field.related_model, components, component+1)\n    \n    # local model field\n    else:\n        field = model_field(model, field_name)\n    \n    return field\n\ndef is_filter_field(model, field):\n    # For now, just split into components. This does not in fact generalise if\n    # the filter has an operation at its end, like __gt or such. I've stepped into\n    # the filterset code to see how it builds components, but it's a slow job and I\n    # bailed for now.\n    #\n    # TODO: work out how filtersets build components, as we could really see there\n    # how it both ignores the operation at the end of the name, and also seems to take one\n    # step further to the id field in relations.\n    #\n    # For now this serves our purposes fine, as we aren't using it on any filters\n    # with operations (yet) and the last-tier trace to id is not important to\n    # establishing if it's a valid field to filter on.\n    components = field.split(\"__\")\n    filter_field = get_field(model, components)\n    return filter_field is not None\n\ndef format_filterset(filterset, as_text=False):\n    '''\n    Returns a list of filter criteria that can be used in a URL construction, \n    or, if as_text is True, a pretty-formatted string version of same.\n    \n    :param filterset: A filterset as produced by url_filter\n    :param as_text: Returns a list if False, or a formatted string if True \n    ''' \n    result = []\n\n    try:\n        # get_specs raises an Empty exception if there are no specs, and a ValidationError if a value is illegal \n        specs = filterset.get_specs()\n        \n        for spec in specs:\n            field = get_field(filterset.queryset.model, spec.components)\n            if len(spec.components) > 1 and spec.lookup == \"exact\":\n                Os = field.model.objects.filter(**{\"{}__{}\".format(field.attname, spec.lookup):spec.value})\n                O = Os[0] if Os.count() > 0 else None\n                \n                if as_text:\n                    if field.primary_key:\n                        field_name = field.model._meta.object_name\n                        field_value = str(O)\n                    else:\n                        field_name = \"{} {}\".format(field.model._meta.object_name, spec.components[-1])\n                        field_value = spec.value\n                else:\n                    if field.primary_key:\n                        field_name = \"__\".join(spec.components[:-1])\n                        field_value = O.pk\n                    else:\n                        field_name = \"__\".join(spec.components)\n                        field_value = spec.value\n            else:\n                if as_text:\n                    field_name = field.verbose_name\n                else:\n                    field_name = \"__\".join(spec.components)\n\n                # DateTimeFields are a tad special.\n                # In as_text mode, localize them. In normal mode, fix the str representation.\n                # One for convenience and nicety, the other to get around a round-trip bug in Python!\n                field_value = (localize(spec.value) if isinstance(spec.value, datetime.datetime) else str(spec.value)) if as_text else urllib.parse.quote_plus(fix(spec.value))\n            \n            if as_text and spec.lookup in operation_text:\n                op = operation_text[spec.lookup]\n            elif spec.lookup != \"exact\":\n                op = \"__{}=\".format(spec.lookup)\n            else:\n                op = \"=\"\n            \n            result += [\"{}{}{}\".format(field_name, op, field_value)]\n\n        if as_text:\n            result = mark_safe(\" and \".join(result))\n        \n        return result\n    except:\n        return \"\" if as_text else []\n    ","sub_path":"django_generic_view_extensions/filterset.py","file_name":"filterset.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
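The round-trip problem that `fix()` in `filterset.py` above works around is easy to demonstrate: `str()` on an aware datetime emits a `+HH:MM` offset, while `strptime`'s `%z` on Python 3.6 and earlier only accepts `+HHMM`. A short, self-contained illustration:

```python
from datetime import datetime, timezone
import re

dt = datetime(2020, 1, 1, 12, 30, tzinfo=timezone.utc)
s = str(dt)                                      # '2020-01-01 12:30:00+00:00'
fixed = re.sub(r'(\+|\-)(\d\d):(\d\d)$', r'\1\2\3', s)
print(fixed)                                     # '2020-01-01 12:30:00+0000'
# The colon-free form round-trips on any Python version:
parsed = datetime.strptime(fixed, '%Y-%m-%d %H:%M:%S%z')
assert parsed == dt
```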
{"seq_id":"377068568","text":"from minimogrify.fractions_compat import Fraction\nfrom collections import namedtuple\n\nBBox = namedtuple(\"BBox\", ['l', 't', 'r', 'b'])\n\n\ndef center(img_width, img_height, box):\n    l, t, r, b = to_origin(box)\n\n    w, h = size(box)\n    left = Fraction(img_width - w, 2)\n    top = Fraction(img_height - h, 2)\n    right = left + w\n    bottom = top + h\n    return BBox(left, top, right, bottom)\n\n\ndef to_origin(box):\n    l, t, r, b = box\n    return BBox(0, 0, r-l, b-t)\n\n\ndef size(box):\n    l, t, r, b = box\n    return (r - l, b - t)\n","sub_path":"minimogrify/bbox.py","file_name":"bbox.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"69340539","text":"\"\"\"\nProblem:\nGiven an input string, print all permutations of its characters in lexicographic order.\nFor example, for the input string abc, print every string that can be formed by\narranging the characters a, b and c.\n\"\"\"\nclass Solution:\n\tdef Permutation(self,ss):\n\t\tres=[]\n\t\tdef Traversal(ss,join_ss=''):\n\t\t\tif ss:\n\t\t\t\tfor i,s in enumerate(ss):\n\t\t\t\t\tsub_ss=ss[:i]+ss[i+1:]\n\t\t\t\t\tTraversal(sub_ss,join_ss+s)\n\t\t\telif join_ss and join_ss not in res:\n\t\t\t\tres.append(join_ss)\n\t\tif ss:\n\t\t\tTraversal(ss)\n\t\treturn res\nS=Solution()\nres=S.Permutation('eea')\nprint(res)","sub_path":"Test二十七.py","file_name":"Test二十七.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
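The recursive `Permutation` in the record above (docstring translated from Chinese) deduplicates as it recurses. A quick self-contained cross-check against the standard library, which yields duplicate arrangements for repeated characters, hence the set:

```python
from itertools import permutations

expected = sorted({"".join(p) for p in permutations("eea")})
print(expected)  # ['aee', 'eae', 'eea'] — same strings Permutation('eea') collects
```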
{"seq_id":"382076092","text":"#!/usr/bin/env python\n\nimport subprocess\nimport optparse\n\n\ndef get_arguments():\n    parser = optparse.OptionParser()\n    parser.add_option(\"-i\", \"--interface\", dest=\"interface\", help=\"Interface whose MAC address should be changed\")\n    parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\", help=\"New MAC address in the format 00:00:00:00:00:00\")\n\n    (options, arguments) = parser.parse_args()\n    if not options.interface:\n        parser.error(\"[-] Please specify an interface, use --help for more information\")\n    elif not options.new_mac:\n        parser.error(\"[-] Please specify a MAC address, use --help for more information\")\n    return options\n\n\ndef change_mac(interface, new_mac):\n    print(\"[+] Changing the MAC address for interface \" + interface)\n\n    subprocess.call([\"ifconfig\", interface, \"down\"])\n    subprocess.call([\"ifconfig\", interface, \"hw\", \"ether\", new_mac])\n    subprocess.call([\"ifconfig\", interface, \"up\"])\n\n\noptions = get_arguments()\nchange_mac(options.interface, options.new_mac)\n","sub_path":"mac_changer.py","file_name":"mac_changer.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
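`mac_changer.py` above (strings translated from Italian) uses `optparse`, which has been deprecated since Python 2.7. A minimal `argparse` equivalent of its option handling, as a sketch rather than a drop-in replacement; the explicit argument list stands in for real command-line input:

```python
import argparse

parser = argparse.ArgumentParser(description="Change the MAC address of a network interface")
parser.add_argument("-i", "--interface", required=True,
                    help="Interface whose MAC address should be changed")
parser.add_argument("-m", "--mac", dest="new_mac", required=True,
                    help="New MAC address in the format 00:00:00:00:00:00")
options = parser.parse_args(["-i", "eth0", "-m", "00:11:22:33:44:55"])  # demo input
print(options.interface, options.new_mac)
```

With `required=True`, argparse enforces both options itself, so the manual `parser.error` checks in the original become unnecessary.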
{"seq_id":"503009866","text":"import numpy as np\nimport math\nepoches=2000\nbatch_size=2000\n\nif __name__==\"__main__\":\n    # the source input\n    x=np.linspace(-math.pi,math.pi,batch_size)\n    y=np.sin(x)\n\n    # randomly initialize the weights\n    a=np.random.randn()\n    b=np.random.randn()\n    c=np.random.randn()\n    d=np.random.randn()\n    print(\"{},{},{},{}\".format(a,b,c,d))\n    loss_aver=0\n    for i in range(epoches):\n        pre_y=a+b*x+c*x**2+d*x**3\n        loss=np.square(y-pre_y).sum()\n        loss_aver=loss_aver+loss\n        if (i+1)%10==0:\n            print(\"{},loss:{:.1f}\".format(i,loss_aver/10))\n            loss_aver=0\n        learn_rate= 1e-6\n        a-=2*(pre_y-y).sum()*learn_rate\n        b-=2*((pre_y-y)*x).sum()*learn_rate\n        c-=2*((pre_y-y)*x**2).sum()*learn_rate\n        d-=2*((pre_y-y)*x**3).sum()*learn_rate\n    print(f'Result: y = {a} + {b} x + {c} x^2 + {d} x^3')\n    x=math.pi/6\n    res=a+b*x+c*x**2+d*x**3\n    print(f\"result of sin(pi/6): {res}\")\n\n\n\n","sub_path":"utils/poly_sine/poly_sine_numpy.py","file_name":"poly_sine_numpy.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"82946931","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport seaborn as sns\nfrom matplotlib import cm \nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy import linalg\nfrom collections import OrderedDict\n\ndef meanList(data):\n    means = np.mean(data)\n    return means\n\n    \ndef stdList(data):\n    stdevs = np.std(data) \n    return stdevs\n\n\ndef minList(data):\n    mins = np.min(data)\n    return mins\n\n\ndef maxList(data):\n    maxs = np.max(data)\n    return maxs\n\n\n\ndataset = pd.read_csv('nucleardata.csv')\n\npowersensors = []\npowersens1 = dataset['Power_range_sensor_1'];powersensors.append(powersens1)\npowersens2 = dataset['Power_range_sensor_2'];powersensors.append(powersens2)\npowersens3 = dataset['Power_range_sensor_3'];powersensors.append(powersens3) \npowersens4 = dataset['Power_range_sensor_4'];powersensors.append(powersens4)\n\npressuresensors = []\npressuresens1 = dataset['Pressure _sensor_1'];pressuresensors.append(pressuresens1)\npressuresens2 = dataset['Pressure _sensor_2'];pressuresensors.append(pressuresens2)\npressuresens3 = dataset['Pressure _sensor_3'];pressuresensors.append(pressuresens3)\npressuresens4 = dataset['Pressure _sensor_4'];pressuresensors.append(pressuresens4)\n\nvibesensors = []\nvibesens1 = dataset['Vibration_sensor_1'];vibesensors.append(vibesens1)\nvibesens2 = dataset['Vibration_sensor_2'];vibesensors.append(vibesens2)\nvibesens3 = dataset['Vibration_sensor_3'];vibesensors.append(vibesens3)\nvibesens4 = dataset['Vibration_sensor_4'];vibesensors.append(vibesens4)\n\n\npowermeans = []\nfor sensor in powersensors:\n    meanofsens = meanList(sensor)\n    powermeans.append(meanofsens)\n\npressuremeans = [] \nfor sensor in pressuresensors:\n    meanofsens = meanList(sensor) \n    pressuremeans.append(meanofsens)\n\nvibemeans = [] \nfor sensor in vibesensors:\n    meanofsens = meanList(sensor) \n    vibemeans.append(meanofsens)\n\n\n\npowerstd = []\nfor sensor in powersensors:\n    sensStd = stdList(sensor)\n    powerstd.append(sensStd)\n\npressurestd = [] \nfor sensor in pressuresensors:\n    sensStd = stdList(sensor) \n    pressurestd.append(sensStd)\n\nvibestd = [] \nfor sensor in vibesensors:\n    sensStd = stdList(sensor) \n    vibestd.append(sensStd)\n\n\n\npowermin = []\nfor sensor in powersensors:\n    sensMin = minList(sensor)\n    powermin.append(sensMin)\n\npressuremin = [] \nfor sensor in pressuresensors:\n    sensMin = minList(sensor) \n    pressuremin.append(sensMin)\n\nvibemin = [] \nfor sensor in vibesensors:\n    sensMin = minList(sensor) \n    
vibemin.append(sensMin)\n\n\n\npowermax = []\nfor sensor in powersensors:\n sensMax = maxList(sensor)\n powermax.append(sensMax)\n\npressuremax = [] \nfor sensor in pressuresensors:\n sensMax = maxList(sensor) \n pressuremax.append(sensMax)\n\nvibemax = [] \nfor sensor in vibesensors:\n sensMax = maxList(sensor) \n vibemax.append(sensMax)\n\n\n'''\nprint('PowerMeans');print(powermeans)\nprint('PressMeans');print(pressuremeans)\nprint('VibeMeans');print(vibemeans)\n\nprint('PowerStd');print(powerstd)\nprint('PressStd');print(pressurestd)\nprint('VibeStd');print(vibestd)\n\nprint('PowerMin');print(powermin)\nprint('PressMin');print(pressuremin)\nprint('VibeMin');print(vibemin)\n\nprint('PowerMax');print(powermax)\nprint('PressMax');print(pressuremax)\n\nprint('VibeMax');print(vibemax)\n'''\n\nsummary = dataset.describe()\nprint(summary)\n\n\n\ndf = pd.DataFrame(dataset, columns=['Status','Vibration_sensor_1'])\ndf2 = pd.DataFrame(dataset, columns=['Status','Vibration_sensor_2'])\n\n\n#Remove Whitespace from the Headers\n#if whitespace remove\n\n#Boxplot\ndf.boxplot(by='Status')\nplt.ylabel('Vibration Sensor 1 Values')\nplt.show() \nplt.close()\n\n\n#Density Plot\ndf2.groupby('Status').plot(kind='kde', ax=plt.gca())\nplt.xlabel('Vibration Sensor 2 Values')\nplt.title('Density Plot for Vibration Sensor 2')\nplt.legend(('Abnormal', 'Normal'), loc = 'upper right')\n\nplt.show()\nplt.close() \n\n\n\n\n\n\n\n\n\n'''\n\ncategoricalConv = {'Normal':0,'Abnormal':1}\ndf = df.replace(categoricalConv)\n\n\nabs = df.index[df['Status'] == 1].tolist()\nfirstab = abs[0]\n\nrowtotal = df.shape[0]\n\nnormals = df.iloc[0:firstab]\nabnormals = df.iloc[firstab:rowtotal]\n'''\n'''\nboxplot = df.boxplot(column=['Status', 'Vibration_sensor_1'])\nplt.show()\n\ncategoricalConv = {'Normal':0,'Abnormal':1}\ndf = df.replace(categoricalConv)\n\nabs = df.index[df['Status'] == 1].tolist()\nfirstab = abs[0]\nrowtotal = df.shape[0]\n\n\nnormals = df2.iloc[0:firstab]\nabnormals1 = df2.iloc[firstab:rowtotal]\nabnormals = abnormals1.rename(columns={'Vibration_sensor_2': 'Vibration_sensor_2a'})\n\ndat1 = normals.loc[:, ['Vibration_sensor_2']]\ndat2 = abnormals.loc[:, ['Vibration_sensor_2a']]\ndensdata = pd.concat([dat1, dat2.reset_index()], axis=1)\ndensdata = densdata.drop('index', 1)\n\nprint('datadens:');print(densdata)\ndensdata.plot.kde()\n\n\n\nplt.show() \nplt.close()\n\n\n'''\n\n\n\n\n\n","sub_path":"Python Files/ANN_and_RFC/old/measures.py","file_name":"measures.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"119854987","text":"import requests\nimport json\n\nURL = 'https://www.sms4india.com/api/v1/sendCampaign'\n\nwith open('att.json') as json_data:\n data = json.load(json_data)\n\ndef sendPostRequest(reqUrl, apiKey, secretKey, useType, person_name,person_age, phoneNo, senderId, textMessage):\n req_params = {\n 'apikey':'GONH3SSENF7LFSKCH2CYO25AHUYGITU5',\n 'secret':'U6OGI5LEL57EYWSZ',\n 'usetype':'stage',\n 'phone': phoneNo,\n 'message':\"{} Age: {}, U have not been attending the medical checkups regularly kindly visit the nearest Poshan-Camp to avoid any further medical complications. 
\".format(person_name, person_age),\n 'senderid': 'SMSIND'\n }\n return requests.post(reqUrl, req_params)\n\nfor person in data:\n person_name = person[1]+person[2]\n person_age = person[3]\n person_phoneNo = person[5]\n response = sendPostRequest(URL, 'provided-api-key', 'provided-secret', 'prod/stage', person_name, person_age, person_phoneNo, 'active-sender-id', 'message-text' )\n print(response.text)\n","sub_path":"sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"365148488","text":"import asyncio\r\n\r\nfrom gd.abstractuser import AbstractUser, LevelRecord\r\nfrom gd.comment import Comment\r\nfrom gd.errors import ClientException, NothingFound\r\nfrom gd.friend_request import FriendRequest\r\nfrom gd.level import Level\r\nfrom gd.level_packs import Gauntlet, MapPack\r\nfrom gd.logging import get_logger\r\nfrom gd.message import Message\r\nfrom gd.rewards import Chest, Quest\r\nfrom gd.session import Session\r\nfrom gd.song import ArtistInfo, Author, Song\r\nfrom gd.typing import (\r\n Any,\r\n Client,\r\n Coroutine,\r\n Dict,\r\n Iterable,\r\n List,\r\n Optional,\r\n Sequence,\r\n Tuple,\r\n Type,\r\n Union,\r\n)\r\nfrom gd.user import User, UserStats\r\n\r\nfrom gd.events.listener import (\r\n TimelyLevelListener,\r\n RateLevelListener,\r\n MessageOrRequestListener,\r\n LevelCommentListener,\r\n)\r\n\r\nfrom gd.utils.converter import Converter\r\nfrom gd.utils.decorators import check_logged, impl_sync\r\nfrom gd.utils.enums import (\r\n CommentPolicyType,\r\n CommentStrategy,\r\n DemonDifficulty,\r\n FriendRequestPolicyType,\r\n IconType,\r\n LeaderboardStrategy,\r\n LevelLeaderboardStrategy,\r\n LevelLength,\r\n RewardType,\r\n MessagePolicyType,\r\n)\r\nfrom gd.utils.filters import Filters\r\nfrom gd.utils.http_request import HTTPClient\r\nfrom gd.utils.indexer import Index\r\nfrom gd.utils.parser import ExtDict\r\nfrom gd.utils.text_tools import make_repr\r\n\r\nfrom gd.utils.crypto.coders import Coder\r\n\r\nfrom gd import api, utils\r\n\r\nlog = get_logger(__name__)\r\n\r\n\r\ndef excluding(*args: Tuple[Type[BaseException]]) -> Tuple[Type[BaseException]]:\r\n return args\r\n\r\n\r\nDEFAULT_EXCLUDE: Tuple[Type[BaseException]] = excluding(NothingFound)\r\nDAILY, WEEKLY = -1, -2\r\n\r\n\r\ndef figure_type_and_special(item: Union[Comment, Level]) -> Optional[Tuple[int, int]]:\r\n # figure out typeid and \"special\" value for given item.\r\n # returns a pair: (typeid, special)\r\n if isinstance(item, Level):\r\n return 1, 0\r\n\r\n elif isinstance(item, Comment):\r\n if not item.type.value:\r\n return 2, item.level_id\r\n else:\r\n return 3, item.id\r\n\r\n else:\r\n return # if we are here, that means invalid entity was provided.\r\n\r\n\r\ndef construct_levels(\r\n lvdata: Iterable[ExtDict], cdata: Iterable[ExtDict], sdata: Iterable[ExtDict], client: Client\r\n) -> List[Level]:\r\n # construct levels from level data, creator data and song data\r\n creators = list(AbstractUser(**c, client=client) for c in cdata)\r\n songs = list(Song.from_data(s, client=client) for s in sdata)\r\n levels = []\r\n\r\n for data in lvdata:\r\n song = utils.get(songs, id=data.getcast(Index.LEVEL_SONG_ID, 0, int))\r\n if song is None:\r\n song = Song(\r\n **Converter.to_normal_song(data.getcast(Index.LEVEL_AUDIO_TRACK, 0, int)),\r\n client=client,\r\n )\r\n\r\n creator_id = data.getcast(Index.LEVEL_CREATOR_ID, 0, int)\r\n creator = utils.get(creators, id=creator_id)\r\n if creator is 
None:\r\n creator = AbstractUser(id=creator_id, name=\"unknown\", account_id=0, client=client)\r\n\r\n levels.append(Level.from_data(data, creator, song, client=client))\r\n\r\n return levels\r\n\r\n\r\nasync def is_alive_mock(level: Level) -> bool:\r\n # mock Level's is_alive method if the level was deleted.\r\n return False\r\n\r\n\r\n@impl_sync\r\nclass Client:\r\n r\"\"\"A main class in the gd.py library, used for interacting with the servers of Geometry Dash.\r\n\r\n Parameters\r\n ----------\r\n loop: Optional[:class:`asyncio.AbstractEventLoop`]\r\n The :class:`asyncio.AbstractEventLoop` to use for asynchronous operations.\r\n Defaults to ``None``, in which case the default event loop is used\r\n via :func:`.utils.acquire_loop`.\r\n\r\n load_after_post: :class:`bool`\r\n Whether to load comments/messages/requests after sending them.\r\n\r\n .. note::\r\n\r\n Defaults to ``True``, in which case the following method calls will return objects:\r\n\r\n - :meth:`.Client.send_message`;\r\n - :meth:`.Client.send_friend_request`;\r\n - :meth:`.Client.comment_level`;\r\n - :meth:`.Client.post_comment`.\r\n\r\n Otherwise, if ``False`` or not found (extremely rarely), these methods will return ``None``.\r\n\r\n \\*\\*http_args\r\n Arguments to pass to :class:`.HTTPClient` constructor.\r\n\r\n Attributes\r\n ----------\r\n account_id: :class:`int`\r\n Account ID of the client. If not logged in, defaults to ``0``.\r\n id: :class:`int`\r\n ID (Player ID) of the client. If not logged in, defaults to ``0``.\r\n name: :class:`str`\r\n Name of the client. If not logged in, default is ``None``.\r\n password: :class:`str`\r\n Password of the client. ``None`` if not logged in.\r\n encodedpass: :class:`str`\r\n Encoded Password of the client. ``None`` on init as well.\r\n db: Optional[:class:`.api.Database`]\r\n Database/Save API. If not loaded, has empty parts inside.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n loop: Optional[asyncio.AbstractEventLoop] = None,\r\n load_after_post: bool = True,\r\n **http_args,\r\n ) -> None:\r\n if loop is None:\r\n loop = utils.acquire_loop()\r\n\r\n self.session = Session(**http_args)\r\n self.load_after_post = load_after_post\r\n self.listeners = list()\r\n self.loop = loop\r\n self._set_to_defaults()\r\n\r\n def __repr__(self) -> str:\r\n info = {\r\n \"is_logged\": self.is_logged(),\r\n \"account_id\": self.account_id,\r\n \"id\": self.id,\r\n \"name\": self.name,\r\n \"password\": \"...\",\r\n }\r\n return make_repr(self, info)\r\n\r\n def __json__(self) -> Dict[str, Optional[Union[int, str]]]: # pragma: no cover\r\n return dict(\r\n account_id=self.account_id,\r\n id=self.id,\r\n name=self.name,\r\n password=None, # for safety reasons\r\n )\r\n\r\n def _set_to_defaults(self) -> None:\r\n self.db = api.Database()\r\n self.account_id = 0\r\n self.id = 0\r\n self.name = None\r\n self.password = None\r\n self.encodedpass = None\r\n\r\n def _upd(self, attr: str, value: Any) -> None: # pragma: no cover\r\n setattr(self, attr, value)\r\n # update encodedpass if password was updated\r\n if attr == \"password\":\r\n self.encodedpass = Coder.encode(type=\"accountpass\", string=self.password)\r\n\r\n def edit(self, **attrs) -> Client:\r\n \"\"\"Update attributes given by ``attrs`` of ``self``.\r\n\r\n This could be used to manually set credentials, for example:\r\n\r\n .. 
code-block:: python3\r\n\r\n client = gd.Client()\r\n client.edit(name='nekitdev', id=17876467, account_id=5509312, password='secret')\r\n \"\"\"\r\n for attr, value in attrs.items():\r\n self._upd(attr, value)\r\n return self\r\n\r\n @property\r\n def http(self) -> HTTPClient:\r\n \"\"\":class:`.HTTPClient`: HTTP Client bound to that Client. Same as ``self.session.http``.\"\"\"\r\n return self.session.http\r\n\r\n def is_logged(self) -> bool:\r\n \"\"\":class:`bool`: Indicates whether the Client is logged in.\"\"\"\r\n checks = (\r\n self.name is not None,\r\n self.password is not None,\r\n self.account_id > 0,\r\n self.id > 0,\r\n )\r\n return all(checks)\r\n\r\n async def ping_server(self) -> float:\r\n \"\"\"|coro|\r\n\r\n Pings ``boomlings.com/database`` and returns the time taken.\r\n\r\n Returns\r\n -------\r\n :class:`float`\r\n Server ping, in milliseconds.\r\n \"\"\"\r\n return await self.session.ping_server(\"http://boomlings.com/database/\")\r\n\r\n async def get_artist_info(self, song_id: int = 0) -> ArtistInfo:\r\n \"\"\"|coro|\r\n\r\n Retrieves artist info about for a song with a particular ID.\r\n\r\n Parameters\r\n ----------\r\n song_id: :class:`int`\r\n An ID of the song whose info to fetch.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to fetch the song info.\r\n\r\n Returns\r\n -------\r\n :class:`.ArtistInfo`\r\n Info regarding the artist.\r\n \"\"\"\r\n data = await self.session.test_song(song_id)\r\n return ArtistInfo(**data, client=self)\r\n\r\n async def get_song(self, song_id: int = 0) -> Song:\r\n \"\"\"|coro|\r\n\r\n Fetches a song from Geometry Dash server.\r\n\r\n Parameters\r\n ----------\r\n song_id: :class:`int`\r\n An ID of the song to fetch.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Song under given ID was not found or does not exist.\r\n\r\n :exc:`.SongRestrictedForUsage`\r\n Song was not allowed to use.\r\n\r\n Returns\r\n -------\r\n :class:`.Song`\r\n The song from the ID.\r\n \"\"\"\r\n data = await self.session.get_song(song_id)\r\n return Song.from_data(data, client=self)\r\n\r\n async def get_ng_song(self, song_id: int = 0) -> Song:\r\n \"\"\"|coro|\r\n\r\n Fetches a song from Newgrounds.\r\n\r\n This function is in most cases might be slower than :meth:`.Client.get_song`,\r\n but it does not raise errors if a song is banned on GD Server.\r\n\r\n Parameters\r\n ----------\r\n song_id: :class:`int`\r\n An ID of the song to fetch.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Requested song is deleted from Newgrounds or does not exist.\r\n\r\n Returns\r\n -------\r\n :class:`.Song`\r\n The song found under given ID.\r\n \"\"\"\r\n data = await self.session.get_ng_song(song_id)\r\n return Song(**data, client=self)\r\n\r\n async def search_page_songs(self, query: str, page: int = 0) -> List[Song]:\r\n \"\"\"|coro|\r\n\r\n Search for songs on Newgrounds.\r\n\r\n Parameters\r\n ----------\r\n query: :class:`str`\r\n Query to search for.\r\n\r\n page: :class:`int`\r\n Page to look songs on.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Song`]\r\n A list of Songs, containing attributes ``id``, ``name`` and ``author``.\r\n \"\"\"\r\n data = await self.session.search_page_songs(query=query, page=page)\r\n return utils.unique(Song(**part, client=self) for part in data)\r\n\r\n async def search_songs(self, query: str, pages: Iterable[int] = range(10)) -> List[Song]:\r\n \"\"\"|coro|\r\n\r\n Search for songs on Newgrounds.\r\n\r\n Parameters\r\n ----------\r\n query: :class:`str`\r\n Query to search for.\r\n\r\n pages: 
Iterable[:class:`int`]\r\n Pages to look songs on.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Song`]\r\n A list of Songs, containing attributes ``id``, ``name`` and ``author``.\r\n \"\"\"\r\n data = await self.session.search_songs(query=query, pages=pages)\r\n return utils.unique(Song(**part, client=self) for part in data)\r\n\r\n async def search_page_users(self, query: str, page: int = 0) -> List[Author]:\r\n \"\"\"|coro|\r\n\r\n Search for users on Newgrounds.\r\n\r\n Parameters\r\n ----------\r\n query: :class:`str`\r\n Query to search for.\r\n\r\n page: :class:`int`\r\n Page to look users on.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Author`]\r\n A list of Authors, containing attributes ``name`` and ``link``.\r\n \"\"\"\r\n data = await self.session.search_page_users(query=query, page=page)\r\n return utils.unique(Author(**part, client=self) for part in data)\r\n\r\n async def search_users(self, query: str, pages: Iterable[int] = range(10)) -> List[Author]:\r\n \"\"\"|coro|\r\n\r\n Search for users on Newgrounds.\r\n\r\n Parameters\r\n ----------\r\n query: :class:`str`\r\n Query to search for.\r\n\r\n pages: Iterable[:class:`int`]\r\n Pages to look users on.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Author`]\r\n A list of Authors, containing attributes ``name`` and ``link``.\r\n \"\"\"\r\n data = await self.session.search_users(query=query, pages=pages)\r\n return utils.unique(Author(**part, client=self) for part in data)\r\n\r\n async def get_page_user_songs(self, user: Union[str, Author], page: int = 0) -> List[Song]:\r\n \"\"\"|coro|\r\n\r\n Search for songs by a user on Newgrounds.\r\n\r\n Parameters\r\n ----------\r\n query: :class:`str`\r\n Query to search for.\r\n\r\n page: :class:`int`\r\n Page to look songs on.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Song`]\r\n A list of Songs, containing attributes ``id``, ``name`` and ``author``.\r\n \"\"\"\r\n data = await self.session.get_page_user_songs(user, page=page)\r\n return utils.unique(Song(**part, client=self) for part in data)\r\n\r\n async def get_user_songs(\r\n self, user: Union[str, Author], pages: Iterable[int] = range(10)\r\n ) -> List[Song]:\r\n \"\"\"|coro|\r\n\r\n Search for songs by a user on Newgrounds.\r\n\r\n Parameters\r\n ----------\r\n query: :class:`str`\r\n Query to search for.\r\n\r\n pages: Iterable[:class:`int`]\r\n Page to look songs on.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Song`]\r\n A list of Songs, containing attributes ``id``, ``name`` and ``author``.\r\n \"\"\"\r\n name = user.name if isinstance(user, Author) else user\r\n\r\n data = await self.session.get_user_songs(name, pages=pages)\r\n\r\n return list(Song(**part, client=self) for part in data)\r\n\r\n async def get_user(self, account_id: int = 0) -> User:\r\n \"\"\"|coro|\r\n\r\n Gets a user from Geometry Dash server.\r\n\r\n Parameters\r\n ----------\r\n account_id: :class:`int`\r\n An account ID of the user to fetch.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n User with given account ID was not found.\r\n\r\n Returns\r\n -------\r\n :class:`.User`\r\n The user from the ID.\r\n \"\"\"\r\n data = await self.session.get_user(account_id, return_only_stats=False)\r\n return User.from_data(data, client=self)\r\n\r\n async def fetch_user(\r\n self, account_id: int = 0, *, stats: bool = False\r\n ) -> Union[AbstractUser, UserStats]:\r\n \"\"\"|coro|\r\n\r\n This is almost like :meth:`.Client.get_user`, except that it returns\r\n either :class:`.UserStats` or :class:`.AbstractUser` object.\r\n\r\n Parameters\r\n 
----------\r\n account_id: :class:`int`\r\n An account ID of the user to fetch stats of.\r\n\r\n stats: :class:`bool`\r\n Whether to return :class:`.UserStats` or :class:`.AbstractUser`.\r\n By default returns :class:`.AbstractUser`.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n User with given account ID was not found, so fetching stats failed or user can not be returned.\r\n\r\n Returns\r\n -------\r\n Union[:class:`.UserStats`, :class:`.AbstractUser`]\r\n Abstract user or User stats from the ID. (if ID != -1)\r\n \"\"\"\r\n data = await self.session.get_user(account_id, return_only_stats=True)\r\n user_stats = UserStats.from_data(data, client=self)\r\n # return UserStats if needed, and AbstractUser otherwise.\r\n return user_stats if stats else user_stats.as_user()\r\n\r\n async def search_user(self, query: Union[int, str]) -> User:\r\n \"\"\"|coro|\r\n\r\n Searches for a user on Geometry Dash servers.\r\n\r\n Parameters\r\n ----------\r\n query: Union[:class:`int`, :class:`str`]\r\n A query to search for user with. Either Player ID or Name.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Nothing was found.\r\n\r\n Returns\r\n -------\r\n Union[:class:`.User`]\r\n A User found when searching with the query.\r\n \"\"\"\r\n data = await self.session.search_user(query, return_abstract=False)\r\n return User.from_data(data, client=self)\r\n\r\n async def find_user(self, query: Union[int, str]) -> AbstractUser:\r\n \"\"\"|coro|\r\n\r\n Fetches a user on Geometry Dash servers by given query.\r\n\r\n Works almost like :meth:`.Client.search_user`, except the fact that\r\n it returns :class:`.AbstractUser`.\r\n\r\n Parameters\r\n ----------\r\n query: Union[:class:`int`, :class:`str`]\r\n A query to search for user with.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n No user was found.\r\n\r\n Returns\r\n -------\r\n Union[:class:`.AbstractUser`]\r\n An AbstractUser corresponding to the query.\r\n \"\"\"\r\n data = await self.session.search_user(query, return_abstract=True)\r\n return AbstractUser.from_data(data, client=self)\r\n\r\n async def get_daily(self) -> Level:\r\n \"\"\"|coro|\r\n\r\n Gets current daily level.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Nothing found or invalid response was received.\r\n Returns\r\n -------\r\n :class:`.Level`\r\n Current daily level.\r\n \"\"\"\r\n return await self.get_level(DAILY)\r\n\r\n async def get_weekly(self) -> Level:\r\n \"\"\"|coro|\r\n\r\n Gets current weekly demon.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Nothing found or invalid response was received.\r\n Returns\r\n -------\r\n :class:`.Level`\r\n Current weekly demon.\r\n \"\"\"\r\n return await self.get_level(WEEKLY)\r\n\r\n async def get_level(self, level_id: int = 0, get_data: bool = True) -> Level:\r\n \"\"\"|coro|\r\n\r\n Fetches a level from Geometry Dash servers.\r\n\r\n Parameters\r\n ----------\r\n level_id: :class:`int`\r\n An ID of the level to fetch.\r\n\r\n .. 
note::\r\n\r\n If the given ID is *n*, and *0 > n >= -2*,\r\n this function will search for daily/weekly levels, however,\r\n it is not recommended to use since it can cause confusion.\r\n Use :meth:`.Client.get_daily` and :meth:`.Client.get_weekly`\r\n for better understanding.\r\n\r\n get_data: :class:`bool`\r\n Whether to download the level data or not.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Level with given ID was not found.\r\n\r\n Returns\r\n -------\r\n :class:`.Level`\r\n The level corresponding to given id.\r\n \"\"\"\r\n if get_data:\r\n level_data, creator_data, song_data = await self.session.get_level_info(level_id)\r\n else:\r\n return (await self.search_levels_on_page(query=level_id))[0]\r\n\r\n return Level.from_data(level_data, creator_data, song_data, client=self)\r\n\r\n async def get_many_levels(self, *level_ids: Sequence[int]) -> List[Level]:\r\n r\"\"\"|coro|\r\n\r\n Fetches many levels.\r\n\r\n Parameters\r\n ----------\r\n \\*level_ids: Sequence[:class:`int`]\r\n IDs of levels to fetch. This function returns all the levels that it is able to find.\r\n\r\n Example:\r\n\r\n .. code-block:: python3\r\n\r\n await client.get_many_levels(30029017, 44622744)\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Levels were not found.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Level`]\r\n A list of all levels found.\r\n \"\"\"\r\n filters = Filters.setup_search_many()\r\n query = \",\".join(map(str, level_ids))\r\n\r\n return await self.search_levels_on_page(query=query, filters=filters)\r\n\r\n async def get_gauntlets(self) -> List[Gauntlet]:\r\n \"\"\"|coro|\r\n\r\n Fetches *The Lost Gauntlets*.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Gauntlet`]\r\n All gauntlets retrieved, as list.\r\n \"\"\"\r\n data = await self.session.get_gauntlets()\r\n return list(Gauntlet.from_data(part, client=self) for part in data)\r\n\r\n async def get_page_map_packs(\r\n self, page: int = 0, *, exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE\r\n ) -> List[MapPack]:\r\n \"\"\"|coro|\r\n\r\n Fetches map packs on given page.\r\n\r\n Parameters\r\n ----------\r\n page: :class:`int`\r\n Page to look for map packs on.\r\n\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. 
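As a hedged aside on `get_many_levels` documented above: its query construction is simply the level IDs comma-joined into a single search string before being handed to `search_levels_on_page`, as this self-contained sketch mirrors.

```python
# Mirrors the line `query = ",".join(map(str, level_ids))` in get_many_levels.
level_ids = (30029017, 44622744)
query = ",".join(map(str, level_ids))
print(query)  # '30029017,44622744'
```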
By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.MapPack`]\r\n List of map packs retrieved.\r\n\r\n Raises\r\n ------\r\n :exc:`.NothingFound`\r\n No map packs were found at the given page.\r\n \"\"\"\r\n data = await self.session.get_page_map_packs(page=page, exclude=exclude)\r\n return list(MapPack.from_data(part, client=self) for part in data)\r\n\r\n async def get_map_packs(self, pages: Optional[Iterable[int]] = range(10)) -> List[MapPack]:\r\n \"\"\"|coro|\r\n\r\n Gets map packs on given ``pages``.\r\n\r\n Parameters\r\n ----------\r\n pages: Iterable[:class:`int`]\r\n Pages to search map packs on.\r\n\r\n Returns\r\n -------\r\n List[:class:`.MapPack`]\r\n List of map packs found.\r\n \"\"\"\r\n data = await self.session.get_map_packs(pages=pages)\r\n return list(MapPack.from_data(part, client=self) for part in data)\r\n\r\n async def unsafe_login(self, user: str, password: str) -> None:\r\n \"\"\"|coro|\r\n\r\n Login into account, without validating credentials.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`str`\r\n A username of the account to log into.\r\n\r\n password: :class:`str`\r\n A password of the account to log into.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Could not find account by given ``user``.\r\n \"\"\"\r\n self_user = await self.find_user(user)\r\n self.edit(name=user, password=password, account_id=self_user.account_id, id=self_user.id)\r\n log.info(\"Logged in? as %r, with password %r.\", user, password)\r\n\r\n async def login(self, user: str, password: str) -> None:\r\n \"\"\"|coro|\r\n\r\n Tries to login with given parameters.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`str`\r\n A username of the account to log into.\r\n\r\n password: :class:`str`\r\n A password of the account to log into.\r\n\r\n Raises\r\n ------\r\n :exc:`.LoginFailure`\r\n Given account credentials are not correct.\r\n \"\"\"\r\n account_id, player_id = await self.session.login(user=user, password=password)\r\n self.edit(name=user, password=password, account_id=account_id, id=player_id)\r\n\r\n log.info(\"Logged in as %r, with password %r.\", user, password)\r\n\r\n @check_logged\r\n async def upload_level(\r\n self,\r\n name: str = \"Unnamed\",\r\n id: int = 0,\r\n version: int = 1,\r\n length: Union[int, str, LevelLength] = 0,\r\n track: int = 0,\r\n song_id: int = 0,\r\n is_auto: bool = False,\r\n original: int = 0,\r\n two_player: bool = False,\r\n objects: Optional[int] = None,\r\n coins: int = 0,\r\n star_amount: int = 0,\r\n unlisted: bool = False,\r\n friends_only: bool = False,\r\n ldm: bool = False,\r\n password: Optional[Union[int, str]] = None,\r\n copyable: bool = False,\r\n data: Union[bytes, str] = \"\",\r\n description: str = \"\",\r\n *,\r\n load: bool = True,\r\n ) -> Level:\r\n \"\"\"|coro|\r\n\r\n Upload a level.\r\n\r\n Parameters\r\n ----------\r\n name: :class:`str`\r\n A name of the level.\r\n id: :class:`int`\r\n An ID of the level. ``0`` if uploading a new level,\r\n non-zero when attempting to update already existing level.\r\n version: :class:`int`\r\n A version of the level.\r\n length: Union[:class:`int`, :class:`str`, :class:`.LevelLength`]\r\n A length of the level. See :class:`.LevelLength` for more info.\r\n track: :class:`int`\r\n A normal track to set, e.g. 
``0 - Stereo Madness, 1 - Back on Track, ...``.\r\n song_id: :class:`int`\r\n An ID of the custom song to set.\r\n is_auto: :class:`bool`\r\n Indicates if the level is auto.\r\n original: :class:`int`\r\n An ID of the original level.\r\n two_player: :class:`bool`\r\n Indicates whether the level has enabled Two Player mode.\r\n objects: :class:`int`\r\n The amount of objects in the level. If not provided, the amount\r\n is being calculated from the ``data`` parameter.\r\n coins: :class:`int`\r\n An amount of coins the level has.\r\n star_amount: :class:`int`\r\n The amount of stars to request.\r\n unlisted: :class:`bool`\r\n Indicates whether the level should be unlisted.\r\n friends_only: :class:`bool`\r\n Whether the level should be seen by friends only.\r\n ldm: :class:`bool`\r\n Indicates if the level has LDM mode.\r\n password: Union[:class:`int`, :class:`str`]\r\n The password to apply.\r\n Either a natural number or a string representing a natural number.\r\n If ``None``, depending on what ``copyable`` is,\r\n either indicates whether a level is free to copy or not copyable at all.\r\n copyable: :class:`bool`\r\n Indicates whether the level should be copyable.\r\n data: Union[:class:`bytes`, :class:`str`]\r\n The data of the level, as a stream.\r\n description: :class:`str`\r\n The description of the level.\r\n load: :class:`bool`\r\n Indicates whether the newly uploaded level should be loaded and returned.\r\n If false, the ``gd.Level(id=id, client=self)`` is returned.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to upload a level.\r\n\r\n Returns\r\n -------\r\n :class:`.Level`\r\n Newly uploaded level.\r\n \"\"\"\r\n if objects is None:\r\n objects = len(utils.object_split(data))\r\n\r\n length = LevelLength.from_value(length)\r\n\r\n level_id = await self.session.upload_level(\r\n data=data,\r\n name=name,\r\n level_id=id,\r\n version=version,\r\n length=length,\r\n audio_track=track,\r\n song_id=song_id,\r\n is_auto=is_auto,\r\n original=original,\r\n two_player=two_player,\r\n objects=objects,\r\n coins=coins,\r\n stars=star_amount,\r\n unlisted=unlisted,\r\n friends_only=friends_only,\r\n ldm=ldm,\r\n password=password,\r\n copyable=copyable,\r\n desc=description,\r\n client=self,\r\n )\r\n\r\n if load:\r\n return await self.get_level(level_id)\r\n else:\r\n return Level(id=level_id, client=self)\r\n\r\n @check_logged\r\n async def get_page_levels(\r\n self, page: int = 0, *, exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE\r\n ) -> List[Level]:\r\n \"\"\"|coro|\r\n\r\n Gets levels of a client from a server.\r\n\r\n .. note::\r\n\r\n This method requires client to be logged in.\r\n\r\n Parameters\r\n ----------\r\n page: :class:`int`\r\n Page to look levels at.\r\n\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Level`]\r\n All levels found, as list. Might be an empty list.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n No levels were found.\r\n \"\"\"\r\n filters = Filters.setup_by_user()\r\n return await self.search_levels_on_page(filters=filters, exclude=exclude)\r\n\r\n @check_logged\r\n async def get_levels(self, pages: Optional[Iterable[int]] = range(10)) -> List[Level]:\r\n \"\"\"|coro|\r\n\r\n Searches for levels on given pages.\r\n\r\n .. 
note::\r\n\r\n This method requires authorised client.\r\n\r\n Parameters\r\n ----------\r\n pages: Iterable[:class:`int`]\r\n Pages to look for levels at.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Level`]\r\n All levels found, as list, which might be empty.\r\n \"\"\"\r\n filters = Filters.setup_by_user()\r\n return await self.search_levels(pages=pages, filters=filters)\r\n\r\n @check_logged\r\n async def load(self) -> None:\r\n \"\"\"|coro|\r\n\r\n Loads save from a server and parses it.\r\n Sets :attr:`.Client.save` to :class:`.Save` namedtuple ``(completed, followed)``.\r\n \"\"\"\r\n db = await self.session.load_save(client=self)\r\n\r\n if db is None: # pragma: no cover\r\n log.warning(\"Failed to load a save.\")\r\n else:\r\n self.edit(db=db)\r\n log.info(\"Successfully loaded a save.\")\r\n\r\n @check_logged\r\n async def backup(self, save_data: Optional[Sequence[Union[bytes, str]]] = None) -> None:\r\n \"\"\"|coro|\r\n\r\n Back up (save) the data of the client.\r\n\r\n Parameters\r\n ----------\r\n save_data: Union[:class:`bytes`, :class:`str`]\r\n Save data to backup.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to do a backup.\r\n \"\"\"\r\n if save_data is None and self.db is None:\r\n return log.warning(\"No data was provided.\")\r\n\r\n if not save_data:\r\n data = await api.save.to_string_async(self.db, connect=False, xor=False)\r\n else:\r\n data = save_data\r\n\r\n await self.session.do_save(client=self, data=data)\r\n\r\n save = backup\r\n\r\n def close(self, message: Optional[str] = None) -> None:\r\n \"\"\"*Closes* client.\r\n\r\n Basically sets its password and username to ``None``, which\r\n actually implies that client logs out.\r\n\r\n Parameters\r\n ----------\r\n message: :class:`str`\r\n A message to print after closing.\r\n \"\"\"\r\n self._set_to_defaults()\r\n\r\n log.info(\"Has logged out with message: %r\", message)\r\n\r\n def temp_login(self, user: str, password: str) -> Any:\r\n \"\"\"Async context manager, used for temporarily logging in.\r\n\r\n Typical usage can be, as follows:\r\n\r\n .. 
code-block:: python3\r\n\r\n async with client.temp_login('Name', 'Password'):\r\n await client.post_comment('Hey there from gd.py!')\r\n \"\"\"\r\n return LoginSession(self, user, password)\r\n\r\n @check_logged\r\n async def like(self, entity: Union[Comment, Level]) -> None:\r\n \"\"\"|coro|\r\n\r\n Like an entity (either a comment or a level).\r\n\r\n Parameters\r\n ----------\r\n entity: Union[:class:`.Comment`, :class:`.Level`]\r\n An entity to like.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to like an entity.\r\n \"\"\"\r\n typeid, special = figure_type_and_special(entity)\r\n await self.session.like(entity.id, typeid, special, dislike=False, client=self)\r\n\r\n @check_logged\r\n async def dislike(self, entity: Union[Comment, Level]) -> None:\r\n \"\"\"|coro|\r\n\r\n Dislike an entity (either a comment or a level).\r\n\r\n Parameters\r\n ----------\r\n entity: Union[:class:`.Comment`, :class:`.Level`]\r\n An entity to like.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to dislike an entity.\r\n \"\"\"\r\n typeid, special = figure_type_and_special(entity)\r\n await self.session.like(entity.id, typeid, special, dislike=True, client=self)\r\n\r\n @check_logged\r\n async def delete_comment(self, comment: Comment) -> None:\r\n \"\"\"|coro|\r\n\r\n Delete a comment.\r\n\r\n Parameters\r\n ----------\r\n comment: :class:`.Comment`\r\n A comment to delete.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Server did not return 1, which means comment was not deleted.\r\n \"\"\"\r\n await self.session.delete_comment(comment.type, comment.id, comment.level_id, client=self)\r\n\r\n @check_logged\r\n async def read_friend_request(self, request: FriendRequest) -> None:\r\n \"\"\"|coro|\r\n\r\n Read a friend request.\r\n\r\n Parameters\r\n ----------\r\n request: :class:`.FriendRequest`\r\n A friend request to read.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to read a request.\r\n \"\"\"\r\n await self.session.read_friend_request(request.id, client=self)\r\n request.options.update(is_read=True)\r\n\r\n @check_logged\r\n async def delete_friend_request(self, request: FriendRequest) -> None:\r\n \"\"\"|coro|\r\n\r\n Delete a friend request.\r\n\r\n Parameters\r\n ----------\r\n request: :class:`.FriendRequest`\r\n A friend request to delete.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to delete a friend request.\r\n \"\"\"\r\n await self.session.delete_friend_request(\r\n request.type, request.author.account_id, client=self\r\n )\r\n\r\n @check_logged\r\n async def accept_friend_request(self, request: FriendRequest) -> None:\r\n \"\"\"|coro|\r\n\r\n Accept a friend request.\r\n\r\n Parameters\r\n ----------\r\n request: :class:`.FriendRequest`\r\n A friend request to accept.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to accept a friend request.\r\n \"\"\"\r\n await self.session.accept_friend_request(\r\n request.type, request.id, request.author.account_id, client=self\r\n )\r\n\r\n @check_logged\r\n async def read_message(self, message: Message) -> str:\r\n \"\"\"|coro|\r\n\r\n Read a message.\r\n\r\n Parameters\r\n ----------\r\n message: :class:`.Message`\r\n A message to read.\r\n\r\n Returns\r\n -------\r\n :class:`str`\r\n The content of the message.\r\n \"\"\"\r\n body = await self.session.read_message(message.type, message.id, client=self)\r\n message.body = body\r\n message.options.update(is_read=True)\r\n return body\r\n\r\n @check_logged\r\n async def delete_message(self, message: 
Message) -> None:\r\n \"\"\"|coro|\r\n\r\n Delete a message.\r\n\r\n Parameters\r\n ----------\r\n message: :class:`.Message`\r\n A message to delete.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to delete a message.\r\n \"\"\"\r\n await self.session.delete_message(message.type, message.id, client=self)\r\n\r\n @check_logged\r\n async def send_message(self, user: AbstractUser, subject: str, body: str) -> Optional[Message]:\r\n \"\"\"|coro|\r\n\r\n Send a message.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`.AbstractUser`\r\n User to send a message to.\r\n subject: :class:`str`\r\n Subject of a new message.\r\n body: :class:`str`\r\n Body of a new message.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to send a message.\r\n\r\n Returns\r\n -------\r\n Optional[:class:`.Message`]\r\n Sent message.\r\n \"\"\"\r\n await self.session.send_message(user.account_id, subject=subject, body=body, client=self)\r\n\r\n if self.load_after_post:\r\n messages = await self.get_page_messages(\"sent\")\r\n message = utils.get(messages, subject=subject, recipient=user)\r\n\r\n if message is None:\r\n return\r\n\r\n message.body = body\r\n return message\r\n\r\n @check_logged\r\n async def block(self, user: AbstractUser) -> None:\r\n \"\"\"|coro|\r\n\r\n Block a user.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`.AbstractUser`\r\n User to block.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to block a user.\r\n \"\"\"\r\n await self.session.block_user(user.account_id, unblock=False, client=self)\r\n\r\n @check_logged\r\n async def unblock(self, user: AbstractUser) -> None:\r\n \"\"\"|coro|\r\n\r\n Unblock a user.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`.AbstractUser`\r\n User to unblock.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to unblock a user.\r\n \"\"\"\r\n await self.session.block_user(user.account_id, unblock=True, client=self)\r\n\r\n @check_logged\r\n async def unfriend(self, user: AbstractUser) -> None:\r\n \"\"\"|coro|\r\n\r\n Unfriend a user.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`.AbstractUser`\r\n User to unfriend.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to unfriend a user.\r\n \"\"\"\r\n await self.session.unfriend_user(user.account_id, client=self)\r\n\r\n @check_logged\r\n async def send_friend_request(\r\n self, user: AbstractUser, message: str = \"\"\r\n ) -> Optional[FriendRequest]:\r\n \"\"\"|coro|\r\n\r\n Send a friend request.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`.AbstractUser`\r\n User to send a request to.\r\n\r\n message: :class:`str`\r\n Body of friend request message.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to send a friend request to user.\r\n\r\n Returns\r\n -------\r\n Optional[:class:`.FriendRequest`]\r\n Sent friend request.\r\n \"\"\"\r\n await self.session.send_friend_request(user.account_id, message=message, client=self)\r\n\r\n if self.load_after_post:\r\n requests = await self.get_page_friend_requests(\"sent\")\r\n return utils.get(requests, recipient=user)\r\n\r\n async def retrieve_page_comments(\r\n self,\r\n user: AbstractUser,\r\n type: str = \"profile\",\r\n page: int = 0,\r\n strategy: Union[int, str, CommentStrategy] = 0,\r\n exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE,\r\n ) -> List[Comment]:\r\n \"\"\"|coro|\r\n\r\n Retrieve comments of a user.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`.AbstractUser`\r\n User to retrieve comments of.\r\n type: :class:`str`\r\n Type of 
comments to look for.\r\n Either ``profile`` or ``level``.\r\n page: :class:`int`\r\n Page to look comments on.\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. By default includes only :exc:`.NothingFound`.\r\n strategy: Union[:class:`int`, :class:`str`, :class:`.CommentStrategy`]\r\n Strategy to use. ``recent`` or ``most_liked``.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Comment`]\r\n Retrieved comments.\r\n \"\"\"\r\n strategy = CommentStrategy.from_value(strategy)\r\n data = await self.session.retrieve_page_comments(\r\n user.account_id, user.id, type=type, page=page, exclude=exclude, strategy=strategy,\r\n )\r\n return list(Comment.from_data(part, user, client=self) for part in data)\r\n\r\n async def retrieve_comments(\r\n self,\r\n user: AbstractUser,\r\n type: str = \"profile\",\r\n pages: Optional[Iterable[int]] = range(10),\r\n strategy: Union[int, str, CommentStrategy] = 0,\r\n ) -> List[Comment]:\r\n \"\"\"|coro|\r\n\r\n Retrieve comments of a user.\r\n\r\n Parameters\r\n ----------\r\n user: :class:`.AbstractUser`\r\n User to retrieve comments of.\r\n type: :class:`str`\r\n Type of comments to look for.\r\n Either ``profile`` or ``level``.\r\n pages: Iterable[:class:`int`]\r\n Pages to look comments on.\r\n strategy: Union[:class:`int`, :class:`str`, :class:`.CommentStrategy`]\r\n Strategy to use. ``recent`` or ``most_liked``.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Comment`]\r\n Retrieved comments.\r\n \"\"\"\r\n strategy = CommentStrategy.from_value(strategy)\r\n data = await self.session.retrieve_comments(\r\n user.account_id, user.id, type=type, pages=pages, strategy=strategy\r\n )\r\n return list(Comment.from_data(part, user, client=self) for part in data)\r\n\r\n async def report_level(self, level: Level) -> None:\r\n \"\"\"|coro|\r\n\r\n Report a level.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to report.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to report a level.\r\n \"\"\"\r\n await self.session.report_level(level.id)\r\n\r\n @check_logged\r\n async def delete_level(self, level: Level) -> None:\r\n \"\"\"|coro|\r\n\r\n Delete a level.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to delete.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to delete a level.\r\n \"\"\"\r\n await self.session.delete_level(level.id, client=self)\r\n level.is_alive = is_alive_mock\r\n\r\n @check_logged\r\n async def update_level_description(self, level: Level, content: str) -> None:\r\n \"\"\"|coro|\r\n\r\n Update description a level.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to update description of.\r\n\r\n content: :class:`str`\r\n Content of a new description.\r\n \"\"\"\r\n await self.session.update_level_desc(level.id, content, client=self)\r\n level.options.update(description=content)\r\n\r\n @check_logged\r\n async def rate_level(self, level: Level, stars: int = 1) -> None:\r\n \"\"\"|coro|\r\n\r\n Rate a level.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to rate.\r\n\r\n stars: :class:`int`\r\n Amount of stars to rate a level with.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to rate a level.\r\n \"\"\"\r\n await self.session.rate_level(level.id, stars, client=self)\r\n\r\n @check_logged\r\n async def rate_demon(\r\n self,\r\n level: Level,\r\n demon_difficulty: Union[int, str, DemonDifficulty] = 1,\r\n as_mod: bool = False,\r\n ) -> None:\r\n \"\"\"|coro|\r\n\r\n Rate a level 
as demon.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to rate.\r\n demon_difficulty: Union[:class:`int`, :class:`str`, :class:`.DemonDifficulty`]\r\n Demon difficulty to rate a level with.\r\n as_mod: :class:`bool`\r\n Whether to attempt to rate a level as moderator.\r\n \"\"\"\r\n demon_difficulty = DemonDifficulty.from_value(demon_difficulty)\r\n\r\n success = await self.session.rate_demon(level.id, demon_difficulty, mod=as_mod, client=self)\r\n\r\n if success:\r\n log.info(\"Successfully demon-rated level: %s.\", level)\r\n else:\r\n log.warning(\"Failed to rate demon difficulty for level: %s.\", level)\r\n\r\n @check_logged\r\n async def send_level(self, level: Level, stars: int = 1, featured: bool = True) -> None:\r\n \"\"\"|coro|\r\n\r\n Send a level to RobTop.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to send.\r\n\r\n stars: :class:`int`\r\n Amount of stars to send a level with.\r\n\r\n featured: :class:`bool`\r\n Whether to send a level for feature.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Missing required moderator permissions.\r\n \"\"\"\r\n await self.session.send_level(level.id, stars, featured=featured, client=self)\r\n\r\n @check_logged\r\n async def comment_level(\r\n self, level: Level, content: str, percentage: int = 0\r\n ) -> Optional[Comment]:\r\n \"\"\"|coro|\r\n\r\n Comment a level.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to comment.\r\n\r\n content: :class:`str`\r\n Content of a comment to post.\r\n\r\n precentage: :class:`int`\r\n Percentage to put a comment with.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to post a level comment.\r\n\r\n Returns\r\n -------\r\n Optional[:class:`.Comment`]\r\n Sent comment.\r\n \"\"\"\r\n await self.session.comment_level(level.id, content, percentage, client=self)\r\n\r\n if self.load_after_post:\r\n comments = await level.get_comments()\r\n return utils.get(comments, author=self.as_user(), body=content)\r\n\r\n @check_logged\r\n async def get_level_leaderboard(\r\n self, level: Level, strategy: Union[int, str, LevelLeaderboardStrategy]\r\n ) -> List[LevelRecord]:\r\n \"\"\"|coro|\r\n\r\n Get leaderboard of a level.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to fetch a leaderboard of.\r\n\r\n strategy: Union[:class:`int`, :class:`str`, :class:`.LevelLeaderboardStrategy`]\r\n Strategy to use when fetching a leaderboard.\r\n\r\n Returns\r\n -------\r\n List[:class:`.LevelRecord`]\r\n Level records that were found.\r\n \"\"\"\r\n strategy = LevelLeaderboardStrategy.from_value(strategy)\r\n data = await self.session.get_leaderboard(level.id, strategy=strategy, client=self)\r\n return list(LevelRecord.from_data(part, strategy=strategy, client=self) for part in data)\r\n\r\n async def get_level_comments(\r\n self,\r\n level: Level,\r\n strategy: Union[int, str, CommentStrategy] = 0,\r\n amount: int = 20,\r\n exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE,\r\n ) -> List[Comment]:\r\n \"\"\"|coro|\r\n\r\n Get comments of a level.\r\n\r\n Parameters\r\n ----------\r\n level: :class:`.Level`\r\n A level to fetch comments of.\r\n\r\n strategy: Union[:class:`int`, :class:`str`, :class:`.CommentStrategy`]\r\n Strategy to use when fetching a leaderboard.\r\n\r\n amount: :class:`int`\r\n Amount of comments to fetch. When lower than *0*, adds *2^31* to the amount.\r\n (meaning to fetch all the comments)\r\n\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. 
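The `amount` convention described in the `get_level_comments` docstring above is worth spelling out: a negative amount has `2**31` added to it, which in practice requests every comment.

```python
amount = -1
if amount < 0:        # same normalization the client applies
    amount += 2 ** 31
print(amount)         # 2147483647, effectively "all comments"
```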
By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Comment`]\r\n Comments that were found.\r\n \"\"\"\r\n if amount < 0:\r\n amount += 2 ** 31 # 2,147,483,648 is enough?! ~ nekit\r\n\r\n data = await self.session.get_level_comments(\r\n level_id=level.id,\r\n strategy=CommentStrategy.from_value(strategy),\r\n amount=amount,\r\n exclude=exclude,\r\n )\r\n return list(Comment.from_data(part, user_data, client=self) for (part, user_data) in data)\r\n\r\n @check_logged\r\n async def get_blocked_users(\r\n self, exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE\r\n ) -> List[AbstractUser]:\r\n \"\"\"|coro|\r\n\r\n Get all users blocked by a client.\r\n\r\n Parameters\r\n ----------\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.AbstractUser`]\r\n All blocked users retrieved, as list.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to fetch blocked users of a client.\r\n\r\n :exc:`.NothingFound`\r\n No blocked users were found. Cool.\r\n \"\"\"\r\n data = await self.session.get_user_list(type=1, exclude=exclude, client=self)\r\n return list(AbstractUser.from_data(part, client=self) for part in data)\r\n\r\n @check_logged\r\n async def get_friends(\r\n self, exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE\r\n ) -> List[AbstractUser]:\r\n \"\"\"|coro|\r\n\r\n Get all friends of a client.\r\n\r\n Parameters\r\n ----------\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.AbstractUser`]\r\n All friends retrieved, as list.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to fetch friends of a client.\r\n\r\n :exc:`.NothingFound`\r\n No friends were found. Sadly...\r\n \"\"\"\r\n data = await self.session.get_user_list(type=0, exclude=exclude, client=self)\r\n return list(AbstractUser.from_data(part, client=self) for part in data)\r\n\r\n @check_logged\r\n async def to_user(self) -> User:\r\n \"\"\"|coro|\r\n\r\n Gets user with :attr:`.Client.account_id`,\r\n which means that client should be logged in.\r\n\r\n Returns\r\n -------\r\n :class:`.User`\r\n User corresponding to :attr:`.Client.account_id`.\r\n \"\"\"\r\n return await self.get_user(self.account_id)\r\n\r\n @check_logged\r\n async def get_page_messages(\r\n self,\r\n sent_or_inbox: str = \"inbox\",\r\n page: int = 0,\r\n *,\r\n exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE,\r\n ) -> List[Message]:\r\n \"\"\"|coro|\r\n\r\n Gets messages on a specified page.\r\n\r\n Requires logged in client.\r\n\r\n Parameters\r\n ----------\r\n sent_or_inbox: :class:`str`\r\n The type of messages to look for. Either *inbox* or *sent*.\r\n\r\n page: :class:`int`\r\n Number of page to look at.\r\n\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Message`]\r\n List of messages found. 
Can be empty.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to get messages.\r\n\r\n :exc:`.NothingFound`\r\n No messages were found.\r\n \"\"\"\r\n data = await self.session.get_page_messages(\r\n sent_or_inbox=sent_or_inbox, page=page, exclude=exclude, client=self\r\n )\r\n return list(Message.from_data(part, self.get_parse_dict(), client=self) for part in data)\r\n\r\n @check_logged\r\n async def get_messages(\r\n self, sent_or_inbox: str = \"inbox\", pages: Optional[Iterable[int]] = range(10)\r\n ) -> List[Message]:\r\n \"\"\"|coro|\r\n\r\n Retrieves messages from given ``pages``.\r\n\r\n Parameters\r\n ----------\r\n sent_or_inbox: :class:`str`\r\n Type of messages to retrieve. Either `'sent'` or `'inbox'`.\r\n Defaults to the latter.\r\n\r\n pages: Iterable[:class:`int`]\r\n Pages to look at, represented as a finite sequence, so iterations can be performed.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Message`]\r\n List of messages found. Can be an empty list.\r\n \"\"\"\r\n data = await self.session.get_messages(\r\n sent_or_inbox=sent_or_inbox, pages=pages, client=self\r\n )\r\n\r\n return list(Message.from_data(part, self.get_parse_dict(), client=self) for part in data)\r\n\r\n @check_logged\r\n async def get_page_friend_requests(\r\n self,\r\n sent_or_inbox: str = \"inbox\",\r\n page: int = 0,\r\n *,\r\n exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE,\r\n ) -> List[FriendRequest]:\r\n \"\"\"|coro|\r\n\r\n Gets friend requests on a specified page.\r\n\r\n Requires logged in client.\r\n\r\n Parameters\r\n ----------\r\n sent_or_inbox: :class:`str`\r\n The type of friend requests to look for. Either *inbox* or *sent*.\r\n\r\n page: :class:`int`\r\n Number of page to look at.\r\n\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.FriendRequest`]\r\n List of friend requests found. Can be empty.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to get friend requests.\r\n\r\n :exc:`.NothingFound`\r\n No friend requests were found.\r\n \"\"\"\r\n data = await self.session.get_page_friend_requests(\r\n sent_or_inbox=sent_or_inbox, page=page, exclude=exclude, client=self\r\n )\r\n return list(\r\n FriendRequest.from_data(part, self.get_parse_dict(), client=self) for part in data\r\n )\r\n\r\n @check_logged\r\n async def get_friend_requests(\r\n self, sent_or_inbox: str = \"inbox\", pages: Optional[Iterable[int]] = range(10)\r\n ) -> List[FriendRequest]:\r\n \"\"\"|coro|\r\n\r\n Retrieves friend requests from given ``pages``.\r\n\r\n Parameters\r\n ----------\r\n sent_or_inbox: :class:`str`\r\n Type of friend requests to retrieve. Either `'sent'` or `'inbox'`.\r\n Defaults to the latter.\r\n\r\n pages: Iterable[:class:`int`]\r\n Pages to look at, represented as a finite sequence, so iterations can be performed.\r\n\r\n Returns\r\n -------\r\n List[:class:`.FriendRequests`]\r\n List of friend requests found. 
Can be an empty list.\r\n \"\"\"\r\n data = await self.session.get_friend_requests(\r\n sent_or_inbox=sent_or_inbox, pages=pages, client=self\r\n )\r\n return list(\r\n FriendRequest.from_data(part, self.get_parse_dict(), client=self) for part in data\r\n )\r\n\r\n @check_logged\r\n def get_parse_dict(self) -> ExtDict:\r\n return ExtDict({k: getattr(self, k) for k in (\"name\", \"id\", \"account_id\")})\r\n\r\n @check_logged\r\n def as_user(self) -> AbstractUser:\r\n return AbstractUser(**self.get_parse_dict(), client=self)\r\n\r\n async def get_top(\r\n self, strategy: Union[int, str, LeaderboardStrategy] = 0, *, count: int = 100\r\n ) -> List[UserStats]:\r\n \"\"\"|coro|\r\n\r\n Fetches user top by given strategy.\r\n\r\n Example:\r\n\r\n .. code-block:: python3\r\n\r\n # getting top 10 creators\r\n top10_creators = await client.get_top('creators', count=10)\r\n\r\n .. note::\r\n\r\n Players Top 100 has stopped refreshing in 2.1 version of Geometry Dash.\r\n However, you can fetch it by searching using ``'relative'`` strategy\r\n and giving huge ``count`` argument.\r\n\r\n Also, please note that searching with ``'friends'`` and ``'relative'`` strategies\r\n requires logged in client.\r\n\r\n Parameters\r\n ----------\r\n strategy: Union[:class:`int`, :class:`str`, :class:`.LeaderboardStrategy`]\r\n Strategy to apply when searching.\r\n\r\n Returns\r\n -------\r\n List[:class:`.UserStats`]\r\n \"\"\"\r\n strategy = LeaderboardStrategy.from_value(strategy)\r\n data = await self.session.get_top(strategy=strategy, count=count, client=self)\r\n return list(UserStats.from_data(part, client=self) for part in data)\r\n\r\n async def get_leaderboard(\r\n self, strategy: Union[int, str, LeaderboardStrategy] = 0, *, count: int = 100\r\n ) -> List[UserStats]:\r\n \"\"\"|coro|\r\n\r\n This is an alias for :meth:`.Client.get_top`.\r\n \"\"\"\r\n return await self.get_top(strategy, count=count)\r\n\r\n @check_logged\r\n async def post_comment(self, content: str) -> Optional[Comment]:\r\n \"\"\"|coro|\r\n\r\n Post a profile comment.\r\n\r\n Parameters\r\n ----------\r\n content: :class:`str`\r\n The content of a comment.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to post a comment.\r\n\r\n Returns\r\n -------\r\n Optional[:class:`.Comment`]\r\n Posted comment.\r\n \"\"\"\r\n await self.session.post_comment(content, client=self)\r\n\r\n log.debug(\"Posted a comment. Content: %r\", content)\r\n\r\n if self.load_after_post:\r\n user = self.as_user()\r\n comments = await user.get_page_comments()\r\n return utils.get(comments, author=user, body=content)\r\n\r\n @check_logged\r\n async def update_profile(\r\n self,\r\n stars: Optional[int] = None,\r\n demons: Optional[int] = None,\r\n diamonds: Optional[int] = None,\r\n has_glow: Optional[bool] = None,\r\n icon_type: Optional[Union[int, str, IconType]] = None,\r\n icon: Optional[int] = None,\r\n color_1: Optional[int] = None,\r\n color_2: Optional[int] = None,\r\n coins: Optional[int] = None,\r\n user_coins: Optional[int] = None,\r\n cube: Optional[int] = None,\r\n ship: Optional[int] = None,\r\n ball: Optional[int] = None,\r\n ufo: Optional[int] = None,\r\n wave: Optional[int] = None,\r\n robot: Optional[int] = None,\r\n spider: Optional[int] = None,\r\n explosion: Optional[int] = None,\r\n special: int = 0,\r\n set_as_user: Optional[User] = None,\r\n ) -> None:\r\n \"\"\"|coro|\r\n\r\n Updates the profile of a client.\r\n\r\n .. note::\r\n\r\n gd.py developers are not responsible for any effects that calling this function\r\n may cause. 
Use this method at your own risk.\r\n\r\n        Parameters\r\n        ----------\r\n        stars: :class:`int`\r\n            An amount of stars to set.\r\n        demons: :class:`int`\r\n            An amount of completed demons to set.\r\n        diamonds: :class:`int`\r\n            An amount of diamonds to set.\r\n        has_glow: :class:`bool`\r\n            Indicates whether a user should have the glow outline.\r\n        icon_type: :class:`int`\r\n            Icon type that should be used. See :class:`.IconType` for info.\r\n        icon: :class:`int`\r\n            Icon ID that should be used.\r\n        color_1: :class:`int`\r\n            Index of a color to use as the main color.\r\n        color_2: :class:`int`\r\n            Index of a color to use as the secondary color.\r\n        coins: :class:`int`\r\n            An amount of secret coins to set.\r\n        user_coins: :class:`int`\r\n            An amount of user coins to set.\r\n        cube: :class:`int`\r\n            An index of a cube icon to apply.\r\n        ship: :class:`int`\r\n            An index of a ship icon to apply.\r\n        ball: :class:`int`\r\n            An index of a ball icon to apply.\r\n        ufo: :class:`int`\r\n            An index of a ufo icon to apply.\r\n        wave: :class:`int`\r\n            An index of a wave icon to apply.\r\n        robot: :class:`int`\r\n            An index of a robot icon to apply.\r\n        spider: :class:`int`\r\n            An index of a spider icon to apply.\r\n        explosion: :class:`int`\r\n            An index of an explosion to apply.\r\n        special: :class:`int`\r\n            The purpose of this parameter is unknown.\r\n        set_as_user: :class:`.User`\r\n            Passing this parameter allows copying the profile of the given user.\r\n        \"\"\"\r\n        if set_as_user is None:\r\n            set_as_user = await self.to_user()\r\n\r\n        user, iconset = set_as_user, set_as_user.icon_set\r\n\r\n        stats_dict = {\r\n            \"stars\": value_or(stars, user.stars),\r\n            \"demons\": value_or(demons, user.demons),\r\n            \"diamonds\": value_or(diamonds, user.diamonds),\r\n            \"color1\": value_or(color_1, iconset.color_1.index),\r\n            \"color2\": value_or(color_2, iconset.color_2.index),\r\n            \"coins\": value_or(coins, user.coins),\r\n            \"user_coins\": value_or(user_coins, user.user_coins),\r\n            \"special\": special,\r\n            \"icon\": value_or(icon, iconset.main),\r\n            \"icon_type\": IconType.from_value(value_or(icon_type, iconset.main_type)).value,\r\n            \"acc_icon\": value_or(cube, iconset.cube),\r\n            \"acc_ship\": value_or(ship, iconset.ship),\r\n            \"acc_ball\": value_or(ball, iconset.ball),\r\n            \"acc_bird\": value_or(ufo, iconset.ufo),\r\n            \"acc_dart\": value_or(wave, iconset.wave),\r\n            \"acc_robot\": value_or(robot, iconset.robot),\r\n            \"acc_spider\": value_or(spider, iconset.spider),\r\n            \"acc_explosion\": value_or(explosion, iconset.explosion),\r\n            \"acc_glow\": int(value_or(has_glow, iconset.has_glow_outline())),\r\n        }\r\n\r\n        await self.session.update_profile(stats_dict, client=self)\r\n\r\n    @check_logged\r\n    async def update_settings(\r\n        self,\r\n        *,\r\n        message_policy: Optional[Union[int, str, MessagePolicyType]] = None,\r\n        friend_request_policy: Optional[Union[int, str, FriendRequestPolicyType]] = None,\r\n        comment_policy: Optional[Union[int, str, CommentPolicyType]] = None,\r\n        youtube: Optional[str] = None,\r\n        twitter: Optional[str] = None,\r\n        twitch: Optional[str] = None,\r\n    ) -> None:\r\n        \"\"\"|coro|\r\n\r\n        Updates profile settings of a client.\r\n\r\n        .. note::\r\n\r\n            For each parameter, if it is ``None`` or omitted,\r\n            it will be set to the current policy/link of the user corresponding\r\n            to this client; that implies that running the following:\r\n\r\n        .. 
code-block:: python3\r\n\r\n await client.update_settings()\r\n\r\n will cause no effect on profile settings.\r\n\r\n Parameters\r\n ----------\r\n message_policy: Union[:class:`int`, :class:`str`, :class:`.MessagePolicyType`]\r\n New message policy.\r\n friend_request_policy: Union[:class:`int`, :class:`str`, :class:`.FriendRequestPolicyType`]\r\n New friend request policy.\r\n comment_policy: Union[:class:`int`, :class:`str`, :class:`.CommentPolicyType`]\r\n New comment history policy.\r\n youtube: :class:`str`\r\n New youtube channel string. (not link)\r\n twitter: :class:`str`\r\n New twitter profile name.\r\n twitch: :class:`str`\r\n New twitch profile name.\r\n\r\n Raises\r\n ------\r\n :exc:`.MissingAccess`\r\n Failed to update profile.\r\n \"\"\"\r\n user = await self.to_user()\r\n\r\n profile_dict = {\r\n \"message_policy\": MessagePolicyType.from_value(\r\n value_or(message_policy, user.message_policy)\r\n ),\r\n \"friend_request_policy\": FriendRequestPolicyType.from_value(\r\n value_or(friend_request_policy, user.friend_request_policy)\r\n ),\r\n \"comment_policy\": CommentPolicyType.from_value(\r\n value_or(comment_policy, user.comment_policy)\r\n ),\r\n \"youtube\": value_or(youtube, user.youtube),\r\n \"twitter\": value_or(twitter, user.twitter),\r\n \"twitch\": value_or(twitch, user.twitch),\r\n }\r\n\r\n await self.session.update_settings(**profile_dict, client=self)\r\n\r\n @check_logged\r\n async def get_quests(self) -> List[Quest]:\r\n \"\"\"|coro|\r\n\r\n Get quests of a user.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Quest`]\r\n Quests of a user.\r\n \"\"\"\r\n data = await self.session.get_quests(client=self)\r\n return list(Quest(**part, client=self) for part in data)\r\n\r\n @check_logged\r\n async def get_chests(self, reward_type: Union[int, str, RewardType] = 0) -> List[Chest]:\r\n \"\"\"|coro|\r\n\r\n Get chests of a user.\r\n\r\n Parameters\r\n ----------\r\n reward_type: Union[:class:`int`, :class:`str`, :class:`.RewardType`]\r\n Reward type. If given and non-zero, \"opens\" a chest.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Chest`]\r\n Chests of a user.\r\n \"\"\"\r\n reward_type = RewardType.from_value(reward_type)\r\n\r\n data = await self.session.get_chests(reward_type, client=self)\r\n\r\n return list(Chest(**part, client=self) for part in data)\r\n\r\n async def search_levels_on_page(\r\n self,\r\n page: int = 0,\r\n query: Union[str, int] = \"\",\r\n filters: Optional[Filters] = None,\r\n user: Optional[Union[int, AbstractUser]] = None,\r\n gauntlet: Optional[Union[Gauntlet, int]] = None,\r\n *,\r\n exclude: Tuple[Type[BaseException]] = DEFAULT_EXCLUDE,\r\n ) -> List[Level]:\r\n \"\"\"|coro|\r\n\r\n Searches levels on given page by given query, applying filters as well.\r\n\r\n Parameters\r\n ----------\r\n page: :class:`int`\r\n A page to search levels on.\r\n\r\n query: Union[:class:`str`, :class:`int`]\r\n A query to search with.\r\n\r\n filters: :class:`.Filters`\r\n Filters to apply, as an object.\r\n\r\n user: Union[:class:`int`, :class:`.AbstractUser`]\r\n A user to search levels by. (if :class:`.Filters` has parameter ``strategy``\r\n equal to :class:`.SearchStrategy` ``BY_USER``. Can be omitted, then\r\n logged in client is required.)\r\n\r\n gauntlet: Union[:class:`int`, :class:`.Gauntlet`]\r\n A gauntlet to get levels in.\r\n\r\n exclude: Tuple[Type[:exc:`BaseException`]]\r\n Exceptions to ignore. 
By default includes only :exc:`.NothingFound`.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Level`]\r\n Levels found on given page.\r\n \"\"\"\r\n if isinstance(user, AbstractUser):\r\n user = user.id\r\n\r\n if isinstance(gauntlet, Gauntlet):\r\n gauntlet = gauntlet.id\r\n\r\n lvdata, cdata, sdata = await self.session.search_levels_on_page(\r\n page=page,\r\n query=query,\r\n filters=filters,\r\n user_id=user,\r\n gauntlet=gauntlet,\r\n exclude=exclude,\r\n client=self,\r\n )\r\n\r\n return construct_levels(lvdata, cdata, sdata, client=self)\r\n\r\n async def search_levels(\r\n self,\r\n query: Union[str, int] = \"\",\r\n filters: Optional[Filters] = None,\r\n user: Optional[Union[int, AbstractUser]] = None,\r\n gauntlet: Optional[Union[int, Gauntlet]] = None,\r\n pages: Optional[Iterable[int]] = range(10),\r\n ) -> List[Level]:\r\n \"\"\"|coro|\r\n\r\n Searches levels on given pages.\r\n\r\n Parameters\r\n ----------\r\n query: Union[:class:`str`,:class:`int`]\r\n A query to search with.\r\n\r\n filters: :class:`.Filters`\r\n Filters to apply, as an object.\r\n\r\n user: Union[:class:`int`, :class:`.AbstractUser`]\r\n A user to search levels by. (if :class:`.Filters` has parameter ``strategy``\r\n equal to :class:`.SearchStrategy` ``BY_USER``. Can be omitted, then\r\n logged in client is required.)\r\n\r\n pages: Iterable[:class:`int`]\r\n Pages to look at, represented as a finite sequence, so iterations can be performed.\r\n\r\n Returns\r\n -------\r\n List[:class:`.Level`]\r\n List of levels found. Can be an empty list.\r\n \"\"\"\r\n if isinstance(user, AbstractUser):\r\n user = user.id\r\n\r\n if isinstance(gauntlet, Gauntlet):\r\n gauntlet = gauntlet.id\r\n\r\n lvdata, cdata, sdata = await self.session.search_levels(\r\n query=query, filters=filters, user_id=user, gauntlet=gauntlet, pages=pages, client=self\r\n )\r\n\r\n return utils.unique(construct_levels(lvdata, cdata, sdata, client=self))\r\n\r\n async def on_new_daily(self, level: Level) -> Any:\r\n \"\"\"|coro|\r\n\r\n This is an event that is fired when a new daily level is set.\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n pass\r\n\r\n async def on_new_weekly(self, level: Level) -> Any:\r\n \"\"\"|coro|\r\n\r\n This is an event that is fired when a new weekly demon is assigned.\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n pass\r\n\r\n async def on_level_rated(self, level: Level) -> Any:\r\n \"\"\"|coro|\r\n\r\n This is an event that is fired when a new level is rated.\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n pass\r\n\r\n async def on_level_unrated(self, level: Level) -> Any:\r\n \"\"\"|coro|\r\n\r\n This is an event that is fired when a level is unrated.\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n pass\r\n\r\n async def on_message(self, message: Message) -> Any:\r\n \"\"\"|coro|\r\n\r\n This is an event that is fired when a logged in client gets a message.\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n pass\r\n\r\n async def on_friend_request(self, friend_request: FriendRequest) -> Any:\r\n \"\"\"|coro|\r\n\r\n This is an event that is fired when a logged in client gets a friend request.\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n pass\r\n\r\n async def on_level_comment(self, level: Level, comment: Comment) -> Any:\r\n \"\"\"|coro|\r\n\r\n This is an event that is fired when a comment is posted on some level.\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n pass\r\n\r\n def listen_for(\r\n self, type: str, entity_id: Optional[int] = None, delay: 
Optional[float] = None\r\n ) -> None:\r\n \"\"\"Function for enabling listeners of events.\r\n\r\n .. code-block:: python3\r\n\r\n client.listen_for(\"daily\")\r\n\r\n @client.event\r\n async def on_new_daily(level: gd.Level) -> None:\r\n print(f\"New daily: {level.name} (ID: {level.id}).\")\r\n\r\n See :ref:`events` for more info.\r\n \"\"\"\r\n lower = str(type).lower()\r\n\r\n kwargs = {\"client\": self}\r\n\r\n if delay is not None:\r\n kwargs[\"delay\"] = delay\r\n\r\n if lower in {\"daily\", \"weekly\"}:\r\n listener = TimelyLevelListener(timely_type=lower, **kwargs)\r\n\r\n elif lower in {\"rate\", \"unrate\"}:\r\n listener = RateLevelListener(listen_to_rate=(lower == \"rate\"), **kwargs)\r\n\r\n elif lower in {\"friend_request\", \"message\"}:\r\n listener = MessageOrRequestListener(listen_messages=(lower == \"message\"), **kwargs)\r\n\r\n elif lower in {\"level_comment\"}:\r\n if entity_id is None:\r\n raise ClientException(f\"Entity ID is required for type: {lower!r}.\")\r\n\r\n listener = LevelCommentListener(level_id=entity_id, **kwargs)\r\n\r\n else:\r\n raise ClientException(f\"Invalid listener type: {type!r}.\")\r\n\r\n self.listeners.append(listener)\r\n\r\n return self.event # allow using as a decorator\r\n\r\n async def dispatch(self, event_name: str, *args, **kwargs) -> Any:\r\n r\"\"\"|coro|\r\n\r\n Dispatch an event given by ``event_name`` with ``*args`` and ``**kwargs``.\r\n\r\n Parameters\r\n ----------\r\n event_name: :class:`str`\r\n Name of event to dispatch, e.g. ``\"new_daily\"``.\r\n\r\n \\*args\r\n Args to call handler with.\r\n\r\n \\*\\*kwargs\r\n Keyword args to call handler with.\r\n\r\n Returns\r\n -------\r\n Any\r\n Whatever handler returns.\r\n \"\"\"\r\n name = \"on_\" + event_name\r\n\r\n log.info(f\"Dispatching event {name!r}, client: {self!r}\")\r\n\r\n try:\r\n method = getattr(self, name)\r\n\r\n except AttributeError:\r\n return\r\n\r\n return await utils.maybe_coroutine(method, *args, **kwargs)\r\n\r\n def run(self, coro: Coroutine, *, debug: bool = False) -> Any:\r\n \"\"\"A handy shortcut for :func:`.utils.run`.\r\n\r\n This is equivalent to:\r\n\r\n .. code-block:: python3\r\n\r\n gd.utils.run(coro, loop=self.loop, debug=debug)\r\n \"\"\"\r\n return utils.run(coro, loop=self.loop, debug=debug)\r\n\r\n def event(self, coro: Coroutine) -> Coroutine:\r\n \"\"\"A decorator that registers an event to listen to.\r\n\r\n Events must be a |coroutine_link|_, if not, :exc:`TypeError` is raised.\r\n\r\n Example\r\n -------\r\n\r\n .. 
code-block:: python3\r\n\r\n @client.event\r\n async def on_level_rated(level):\r\n print(level.name)\r\n\r\n Raises\r\n ------\r\n :exc:`TypeError`\r\n The coroutine passed is not actually a coroutine.\r\n \"\"\"\r\n\r\n if not asyncio.iscoroutinefunction(coro):\r\n raise TypeError(\"event registered must be a coroutine function.\")\r\n\r\n setattr(self, coro.__name__, coro)\r\n log.debug(\"%s has been successfully registered as an event.\", coro.__name__)\r\n\r\n return coro\r\n\r\n\r\nclass LoginSession:\r\n \"\"\"Small wrapper around Client.login method.\r\n Allows to temporarily login and execute\r\n a block of code in an statement.\r\n \"\"\"\r\n\r\n def __init__(self, client: Client, username: str, password: str) -> None:\r\n self._client = client\r\n self._name = username\r\n self._pass = password\r\n\r\n async def __aenter__(self) -> Client:\r\n await self._client.login(self._name, self._pass)\r\n return self._client\r\n\r\n async def __aexit__(self, *exc) -> None:\r\n self._client.close()\r\n\r\n\r\ndef value_or(value: Any, default: Any) -> Any:\r\n return default if value is None else value\r\n","sub_path":"gd/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":73763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"549625042","text":"import socket\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n address, port = input(\"Please enter a host and port number, separated by a space: \").split()\n dest_address = (address, int(port))\n quote_request(sock, dest_address)\n\ndef quote_request(_sock, _addr):\n qotd_request = \"Please send me a quote\"\n _sock.sendto(qotd_request.encode('ascii'), _addr)\n data, address = _sock.recvfrom(4096)\n print(\"The QOTD from\", address[0], \"is\", data.decode('ascii'))\n\n num = int(input(\"\\nInput 1, 2, or 3, then press enter:\\n1 - Request another quote from the same server\\n2 - Enter a different host address and/or port number\\n3 - Terminate session \\n\"))\n if num == 1:\n quote_request(_sock, _addr)\n elif num == 2:\n main()\n else:\n _sock.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"udp_qotd_client.py","file_name":"udp_qotd_client.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"632582911","text":"#coding:utf-8\nfrom selenium import webdriver\n\n\nurl = 'https://qd.lianjia.com/'\n\ndriver = webdriver.Chrome()\n\n\ndriver.get(url)\n\njs = 'scrollTo(0,500)'\n\ndriver.execute_script(js)\n\n\ndriver.find_element_by_xpath('//*[@id=\"ershoufanglist\"]/div/ul/li[1]/a/div/span[1]').click()","sub_path":"spider/阶段11-爬虫开发/代码以及其他/04.selenium/code/9.execute_js.py","file_name":"9.execute_js.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"589039451","text":"\"\"\"\nThe Python standard library's 'calendar' module allows you to\nrender a calendar to your terminal.\nhttps://docs.python.org/3.6/library/calendar.html\n\nWrite a program that accepts user input of the form\n `14_cal.py [month] [year]`\nand does the following:\n - If the user doesn't specify any input, your program should\n print the calendar for the current month. 
The 'datetime'\n module may be helpful for this.\n - If the user specifies one argument, assume they passed in a\n month and render the calendar for that month of the current year.\n - If the user specifies two arguments, assume they passed in\n both the month and the year. Render the calendar for that\n month and year.\n - Otherwise, print a usage statement to the terminal indicating\n the format that your program expects arguments to be given.\n Then exit the program.\n\nNote: the user should provide argument input\n(in the initial call to run the file) and not\nprompted input. Also, the brackets around year are to\ndenote that the argument is optional, as this is a common convention in\ndocumentation.\n\nThis would mean that from the command line you would call `python3 14_cal.py 4 2015` to\nprint out a calendar for April in 2015, but if you omit either the year or both values,\nit should use today’s date to get the month and year.\n\"\"\"\n\nimport sys\nimport calendar\nfrom datetime import datetime\n\nif __name__ == \"__main__\":\n\n CURRENT_DATE = datetime.today()\n MONTH_INTS = [x for x in range(1, 13)]\n args = sys.argv[1:]\n\n print(args)\n # Instantiate the calendar class\n cal = calendar.TextCalendar()\n\n # Set a year and month variable\n # These will be our defaults\n year = CURRENT_DATE.year\n month = CURRENT_DATE.month\n\n\n # package integer arguments into a separate list\n date_params = [int(x) for x in args]\n\n if len(date_params) == 0:\n # No Arguments are supplied.\n cal.prmonth(year, month)\n elif len(date_params) == 1 and date_params[0] in MONTH_INTS:\n # Only the month is supplied\n cal.prmonth(year, date_params[0])\n elif len(date_params) == 2 and len(str(date_params[1])) == 4: # O(log(n))\n # The month and year are supplied.\n cal.prmonth(date_params[1], date_params[0])\n else:\n # Correct arguments not supplied.\n print(\n \"\"\"\n Arguments not understood. Expected one of [None], [Month],\n or [Month] and [Year]. 
If only 1 argument is passed, it must be a\n            month!\n            \"\"\"\n        )\n","sub_path":"src/14_cal.py","file_name":"14_cal.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"602147838","text":"import io\nimport re\nimport os\nimport warnings\nimport unicodedata\nfrom typing import List\n\nimport contractions\nimport inflect\nfrom colorama import Fore, Style\nfrom nltk import word_tokenize\nfrom nltk.stem.snowball import SnowballStemmer\nfrom bs4 import BeautifulSoup\n\nwarnings.filterwarnings('ignore', category=UserWarning, module='bs4')\n\n\ndef strip_html(text):\n    soup = BeautifulSoup(text, \"html.parser\")\n    return soup.get_text()\n\n\ndef remove_between_square_brackets(text):\n    return re.sub(r'\\[[^]]*\\]', '', text)\n\n\ndef denoise_text(text):\n    text = strip_html(text)\n    text = remove_between_square_brackets(text)\n    return text\n\n\ndef remove_non_ascii(words: List[str]) -> List[str]:\n    new_words: List[str] = []\n\n    for word in words:\n        new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n        new_words.append(new_word)\n\n    return new_words\n\n\ndef to_lowercase(words: List[str]) -> List[str]:\n    new_words: List[str] = []\n\n    for word in words:\n        new_word = word.lower()\n        new_words.append(new_word)\n\n    return new_words\n\n\ndef remove_punctuation(words: List[str]) -> List[str]:\n    new_words: List[str] = []\n\n    for word in words:\n        new_word = re.sub(r'[^\\w\\s]', '', word)\n\n        if new_word != '':\n            new_words.append(new_word)\n\n    return new_words\n\n\ndef replace_numbers(words: List[str]) -> List[str]:\n    p = inflect.engine()\n    new_words: List[str] = []\n\n    for word in words:\n        if word.isdigit():\n            new_word = p.number_to_words(word)\n            new_words.append(new_word)\n        else:\n            new_words.append(word)\n\n    return new_words\n\n\ndef stem_words(words: List[str]) -> List[str]:\n    stemmer = SnowballStemmer('english')\n    stems: List[str] = []\n\n    for word in words:\n        stem = stemmer.stem(word)\n        stems.append(stem)\n\n    return stems\n\n\ndef normalize(words: List[str]) -> List[str]:\n    words = remove_non_ascii(words)\n    words = to_lowercase(words)\n    words = remove_punctuation(words)\n    words = stem_words(words)\n    try:\n        words = replace_numbers(words)\n    except Exception as e:\n        print('Cannot convert number to string. Number too big. 
', str(e))\n\n return words\n\n\ndef preprocess_documents(documents, base_path):\n preprocessed_documents: List[str] = []\n\n for file_id in documents:\n with io.open(fr'{base_path}\\\\{file_id}', mode='r', encoding='utf-8-sig') as file_reader:\n with io.open(fr'normalized_files\\\\{os.path.splitext(file_id)[0]}.txt', mode='w') as file_writer:\n print(f'{Fore.BLUE}Preprocessing {file_reader.name}{Style.RESET_ALL}...')\n\n for line in file_reader:\n line = denoise_text(line)\n line = contractions.fix(line)\n words = word_tokenize(line)\n norm_words = normalize(words)\n\n if len(norm_words) == 0:\n continue\n\n file_writer.write(f\"{'|'.join(norm_words)}\\n\")\n norm_words.clear()\n\n preprocessed_documents.append(fr'normalized_files\\\\{os.path.splitext(file_id)[0]}.txt')\n\n print(f'{Fore.GREEN}Finished preprocessing {file_reader.name}{Style.RESET_ALL}')\n\n return preprocessed_documents\n","sub_path":"task_6/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"159904951","text":"import os, sys, io\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8') \nfrom file2number import process\n\nfolder_path = \"./20_newsgroup/\"\n\nfor folder in os.listdir(folder_path):\n folder_name = folder_path+folder\n\n res_file = open(folder_name+\".number\",'w', encoding=\"utf-8\")\n res_write = \"\"\n\n file_path = folder_name+\"/\"\n for file in os.listdir(file_path):\n file_name = file_path + file\n res_write += process(file_name) + '\\n'\n\n res_file.write(res_write)\n res_file.close()","sub_path":"NLP/word embedding comparison/data/data2result.py","file_name":"data2result.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"501017788","text":"import admin_q_book_author\nimport admin_q_borrower\nimport admin_q_due_date\nimport admin_q_publisher\nimport admin_q_library\n\nclass Admin:\n def __init__(self):\n self.choice = ''\n self.choices_id_matrix = {}\n self.error_message = ''\n self.error = False\n self.complete = False\n self.store = {}\n\n def grabId(self):\n return self.choices_id_matrix[self.choice]\n\n def __call__(self):\n self.complete = False\n print(\"What would you like to do?\\n\")\n self.choice = input(\"1) Add/Update/Delete Book and Author\\n2) Add/Update/Delete Publishers\\n3) Add/Update/Delete Library Branches\\n4) Add/Update/Delete Borrowers\\n5) Over-ride Due Date for a Book loan\\n\")\n self.engine()\n \n def engine(self):\n if self.complete == False and self.error == False:\n if self.choice == \"1\":\n admin_q_book_author.start(self)\n elif self.choice == \"2\":\n admin_q_publisher.start(self)\n elif self.choice == \"3\":\n admin_q_library.start(self)\n elif self.choice == \"4\":\n admin_q_borrower.start(self)\n elif self.choice == \"5\":\n admin_q_due_date.start(self)\n self.reset()\n\n def reset(self):\n self.choice = input(\"Continue program as Administrator?\\n1) Yes\\n2)No (terminate program)\\n\")\n if self.choice == \"1\":\n self.__call__()\n elif self.choice == \"2\":\n print(\"Thanks for using Gold Coast Solutions\")\n\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"479389844","text":"#!/usr/bin/env python\nfrom jinja2 import Template, Environment, FileSystemLoader\nfrom subprocess import 
call\nfrom os import remove\n\nuser='llparse'\nversions=['2.4.0-centos-7', '2.4.0-ubuntu-14.04']\n\nenv = Environment(\n loader=FileSystemLoader('./templates'),\n trim_blocks=True)\n\nfor version in versions:\n image='{0}/chronos:{1}'.format(user, version)\n dockerfile='Dockerfile.{0}'.format(version)\n\n with open(dockerfile, 'w') as f:\n template = env.get_template('Dockerfile.j2')\n f.write(template.render(version=version))\n \n call(['docker', 'build', '-f', dockerfile, '-t', image, '.'])\n call(['docker', 'push', image])\n remove(dockerfile)\n","sub_path":"images/chronos/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"108551619","text":"import json\nfrom urllib import parse as urlparse\nimport base64\nfrom functools import lru_cache\nimport math\nimport hmac\nimport hashlib\nimport os\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom boto3.dynamodb.conditions import Key\nimport urllib3\n\n# Get the service resource.\ndynamodb = boto3.resource('dynamodb')\n\n# set environment variable\nTABLE_NAME = os.environ['TABLE_NAME']\nOAUTH_TOKEN = os.environ['OAUTH_TOKEN']\n\ntable = dynamodb.Table(TABLE_NAME)\nhttp = urllib3.PoolManager()\n\ndef get_body(event):\n return base64.b64decode(str(event['body'])).decode('ascii')\n\n@lru_cache(maxsize=60)\ndef explain(acronym):\n\n results = table.query(KeyConditionExpression=Key(\"Acronym\").eq(acronym))\n\n try:\n item = results['Items'][0]\n \n retval = item['Acronym'] + \" - \" + item['Definition'] + \"\\n---\\n*Meaning*: \" + item['Meaning'] + \"\\n*Notes*: \" + item['Notes']\n \n except:\n retval = f'{acronym} is not defined.'\n\n return retval\n \n@lru_cache(maxsize=60)\ndef define(acronym, definition, meaning, notes, response_url):\n \n results = table.put_item(\n Item={\n 'Acronym': acronym,\n 'Definition': definition,\n 'Meaning': meaning,\n 'Notes': notes\n }\n )\n\n print(str(results))\n \n result = results['ResponseMetadata']['HTTPStatusCode']\n print(\"Result: \" + str(result))\n \n headers = {\n 'Content-Type': 'application/plain-text',\n 'Authorization': 'Bearer ' + OAUTH_TOKEN\n }\n print(\"headers: \" + str(headers))\n \n \n if result == 200:\n \n body={\n \"response_type\": \"in_channel\",\n \"text\": acronym + ' successfully defined.',\n \"attachments\": [\n {\n \"text\": explain(acronym)\n }\n ]\n }\n print(\"body: \" + str(body))\n \n response = http.request('POST', response_url, body=json.dumps(body), headers=headers)\n print(\"response: \" + str(response.status) + \" \" + str(response.data))\n else:\n body={\n \"response_type\": \"in_channel\",\n \"text\": 'Error (' + str(result) + ') defining ' + acronym,\n }\n print(\"body: \" + str(body))\n \n response = http.request('POST', response_url, body=json.dumps(body), headers=headers)\n print(\"response: \" + str(response.status) + \" \" + str(response.data))\n\n return result\n\n\ndef lambda_handler(event, context):\n print(\"add_meaning: \" + str(event))\n \n if check_hash(event) == False:\n print('Signature check failed')\n print('event: ' + str(event))\n return\n \n body = dict(urlparse.parse_qsl(get_body(event))) # data comes b64 and also urlencoded name=value& pairs\n print(\"Body: \" + str(body))\n \n payload = json.loads(body.get('payload',\"no-payload\"))\n \n print(\"Payload: \" + str(payload) )\n \n acronym = payload['view']['state']['values']['acronym_block']['acronym_input']['value']\n print(\"acronym: \" + acronym)\n \n 
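# Slack modal submissions expose each input at view.state.values[block_id][action_id]['value'];\n    # the block and action ids used in these lookups must match the modal definition that opened the view.\n    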
definition = payload['view']['state']['values']['definition_block']['definition_input']['value']\n print(\"definition: \" + definition)\n \n meaning = payload['view']['state']['values']['meaning_block']['meaning_input']['value']\n \n if meaning is not None:\n print(\"meaning: \" + meaning)\n else:\n print(\"no meaning\")\n meaning = \"\"\n \n notes = payload['view']['state']['values']['notes_block']['notes_input']['value']\n \n if notes is not None:\n print(\"notes: \" + notes)\n else:\n print(\"no notes\")\n notes = \"\"\n \n return_url = payload['response_urls'][0]['response_url']\n \n status_code = define(acronym,definition,meaning,notes,return_url)\n \n return {\n \"statusCode\" : status_code\n }\n\ndef check_hash(event):\n slack_signing_secret = os.environ['SLACK_SIGNING_SECRET']\n body = get_body(event)\n timestamp = event[\"headers\"]['x-slack-request-timestamp']\n sig_basestring = 'v0:' + timestamp + ':' + body\n my_signature = 'v0=' + hmac.new(\n bytes(slack_signing_secret, 'UTF-8'),\n msg=bytes(sig_basestring, 'UTF-8'),\n digestmod=hashlib.sha256\n ).hexdigest()\n print(\"Generated signature: \" + my_signature)\n\n slack_signature = event['headers']['x-slack-signature']\n print(\"Slack signature: \" + slack_signature)\n\n return hmac.compare_digest(my_signature, slack_signature)","sub_path":"lambda/add_meaning.py","file_name":"add_meaning.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"69987954","text":"import cherrypy\nimport logging\nimport threading\n\nfrom .api import FlexbotApiClient\nfrom .bot import Bot, NoEligibleUsersException\nfrom .logger_factory import LoggerFactory\nfrom .manager import UserManager\nfrom . import util\nfrom .web import FlexbotWebServer\n\nclass Server(object):\n def __init__(self, configuration, workout_logger=None, **kwargs):\n self.logger = logging.getLogger(__name__)\n self.configuration = configuration\n if workout_logger == None:\n logger_factory = LoggerFactory(self.configuration)\n self.workout_logger = logger_factory.get_logger()\n else:\n self.workout_logger = workout_logger\n\n if 'slack_api' in kwargs:\n self.slack_api = kwargs['slack_api']\n else:\n self.slack_api = FlexbotApiClient(configuration, token=self.configuration.slack_token())\n\n if 'user_manager' in kwargs:\n self.user_manager = kwargs['user_manager']\n else:\n self.user_manager = UserManager(self.slack_api, self.configuration, self.workout_logger)\n\n if 'bot' in kwargs:\n self.bot = kwargs['bot']\n else:\n self.bot = Bot(self.slack_api, self.configuration, self.user_manager)\n\n if 'web_server' in kwargs:\n self.web_server = kwargs['web_server']\n else:\n self.web_server = FlexbotWebServer(self.user_manager, self.user_manager, configuration)\n\n def start(self):\n self.logger.debug('Starting workout loop')\n workout_loop_thread = threading.Thread(target=self.workout_loop)\n workout_loop_thread.daemon = False\n workout_loop_thread.start()\n # Start the webserver\n self.logger.debug('Starting webserver')\n cherrypy.config.update({'server.socket_host': '0.0.0.0',\n 'server.socket_port': self.configuration.webserver_port(),\n 'log.screen': True,\n })\n cherrypy.quickstart(self.web_server)\n\n def workout_loop(self):\n \"\"\"\n Runs the workout bot in an infinite loop.\n \"\"\"\n was_office_hours = False\n\n while True:\n was_office_hours = self.workout_step(was_office_hours)\n\n def workout_step(self, was_office_hours):\n \"\"\"\n Runs a step of the workout bot, handling 
exceptions. Returns True iff is_office_hours\n returns true before the current workout.\n \"\"\"\n is_office_hours = self.bot.is_office_hours()\n try:\n self._workout_step(was_office_hours, is_office_hours)\n except NoEligibleUsersException:\n self.logger.info(\"No eligible users currently, waiting 5 minutes\")\n util.sleep(minutes=5)\n return is_office_hours\n\n def _workout_step(self, was_office_hours, is_office_hours):\n if is_office_hours:\n # Clear the previous day's history if this is the first workout of the day\n if not was_office_hours:\n self.logger.debug(\"Clearing users\")\n self.user_manager.clear_users()\n\n # Get an exercise to do\n exercise, reps, mins_to_exercise = self.bot.select_exercise_and_start_time()\n util.sleep(minutes=mins_to_exercise)\n\n # Assign the exercise to someone\n self.bot.assign_exercise(exercise, reps)\n else:\n # Show some stats if the final workout has just passed\n if was_office_hours:\n self.slack_api.post_flex_message(self.user_manager.stats())\n\n # Sleep for a bit\n if not self.configuration.debug():\n util.sleep(minutes=5) # Sleep 5 minutes\n else:\n # If debugging, check again in 5 seconds\n util.sleep(seconds=5)\n\n","sub_path":"flexbot/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"477977788","text":"'''\nCreated by: Joseph Song (JLS731)\nCreated on: 4/10/2015\nAssignment 9\nDescription: Loads in a csv file and returns a DataFrame dataset.\nNote: this function was taken from the previous assignment and modified for assignment 9. \n'''\n\nimport pandas as pd\n\ndef loadcsvdata(filename, variables=''):\n ''' Loads csv file and returns a DataFrame. Takes in a variable list to use to extract specific columns.'''\n if variables =='':\n data = pd.read_csv(filename)\n else:\n data = pd.read_csv(filename,usecols = variables)\n\n data['GRADE DATE'] = pd.to_datetime(data['GRADE DATE'])\n data['INSPECTION DATE'] = pd.to_datetime(data['INSPECTION DATE'])\n return data\n\n\n","sub_path":"JLS731/loaddata.py","file_name":"loaddata.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"331312949","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\n\n\n\"\"\"\nThis is the original MNIST example code provided by PyTorch, modified with\ncode from the \"Training a Classifier\" PyTorch tutorial due to the recent\nunavailability of the MNIST dataset.\nAll original code can be found here: \nhttps://github.com/pytorch/examples/blob/master/mnist/main.py\nhttps://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py\n\"\"\"\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\ndef train(args, model, device, train_loader, optimizer, criterion, epoch):\n for 
batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef test(args, model, device, test_loader):\n total = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n _, predicted = torch.max(output.data, 1)\n total += target.size(0)\n correct += (predicted == target).sum().item()\n accuracy = 100 * correct / total\n print('Test set: Accuracy on {} images: {:.0f} %'.format(total, accuracy))\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=14, metavar='N',\n help='number of epochs to train (default: 14)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.001)')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='momentum step size (default: 0.9)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', type=bool, default=False,\n help='For Saving the current Model')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n cifar_transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('../data', train=True, download=True,\n transform=cifar_transform),\n batch_size=args.batch_size, shuffle=True, **kwargs\n )\n test_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('../data', train=False, transform=cifar_transform),\n batch_size=args.test_batch_size, shuffle=True, **kwargs\n )\n\n model = Net().to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(1, args.epochs + 1):\n running_loss = 0.0\n train(args, model, device, train_loader, optimizer, criterion, epoch)\n test(args, model, device, test_loader)\n\n if args.save_model:\n torch.save(model.state_dict(), \"model.pth\")\n\n\nif __name__ == '__main__':\n main()","sub_path":"notebooks/scripts/original_pytorch_cifar10.py","file_name":"original_pytorch_cifar10.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"275297604","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\"\"\"funktioner för ryggsäck\"\"\"\n\n\ndef readinfo():\n \"\"\"read file\"\"\"\n fhand = open('inv.data', 'r')\n data = fhand.read()\n count = len(open('inv.data').readlines())\n\n print('Ryggsäcken')\n print(\"===================\" + \"\\n\")\n print(\"Det finns %s blommor i ryggsäcken \" %count)\n print('\\n' + data + '\\n')\n\n\ndef pick(pickflower):\n \"\"\"pick a flower\"\"\"\n flowerList = list()\n flowerList.append(pickflower)\n count = len(open('inv.data').readlines())\n for eachitem in flowerList:\n fhand = open('inv.data', 'a')\n if count < 4:\n fhand.write(str(eachitem)+'\\n')\n fhand.close()\n print(\"E-G tog upp %s \" %pickflower)\n else:\n print(\"Ryggsäcken är full! E-G kan ej bära mer gejer!\")\n\ndef drop(dropflower):\n \"\"\"drop a flower\"\"\"\n fhand = open(\"inv.data\", \"r\")\n lines = fhand.readlines()\n fhand.close()\n fhand = open('inv.data', 'w')\n for line in lines:\n if line != dropflower + \"\\n\":\n fhand.write(line)\n fhand.close()\n print(\"E-G släppte %s \" %dropflower)\n\ndef dropAll(files):\n \"\"\"drop all flower\"\"\"\n with open(files, 'w'):\n pass\n print(\"E-G har släppt alla blommor!\")\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"361999414","text":"from math import factorial\nfrom collections import Counter\n\n\ndef combinations_count(n, r):\n if n >= r:\n return factorial(n) // (factorial(n - r) * factorial(r))\n else:\n return 0\n\nc = Counter()\nn = int(input())\nA = list(map(int, input().split()))\nfor i in A:\n c[i] += 1\nans = 0\nfor k, v in c.items():\n ans += combinations_count(v, 2)\n\nfor i in A:\n print(ans-c[i]+1)\n","sub_path":"Beginner-Contest/ABC159/ABC159_D.py","file_name":"ABC159_D.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38684282","text":"import autofit as af\nimport autolens as al\nfrom test_autolens.integration.tests.imaging import runner\n\ntest_type = \"grid_search\"\ntest_name = \"multinest_grid__subhalo\"\ndata_name = \"lens_sie__source_smooth\"\ninstrument = \"vro\"\n\n\ndef make_pipeline(name, folders, search=af.DynestyStatic()):\n\n lens = al.GalaxyModel(redshift=0.5, mass=al.mp.EllipticalIsothermal)\n\n lens.mass.centre_0 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)\n lens.mass.centre_1 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)\n lens.mass.einstein_radius = af.UniformPrior(lower_limit=1.55, upper_limit=1.65)\n\n source = al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic)\n\n source.light.centre_0 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)\n source.light.centre_1 = af.UniformPrior(lower_limit=-0.01, upper_limit=0.01)\n source.light.intensity = af.UniformPrior(lower_limit=0.35, upper_limit=0.45)\n source.light.effective_radius = af.UniformPrior(lower_limit=0.45, upper_limit=0.55)\n source.light.sersic_index = af.UniformPrior(lower_limit=0.9, upper_limit=1.1)\n\n phase1 = al.PhaseImaging(\n phase_name=\"phase_1\",\n folders=folders,\n galaxies=dict(lens=lens, source=source),\n search=search,\n settings=al.SettingsPhaseImaging(),\n )\n\n class GridPhase(af.as_grid_search(phase_class=al.PhaseImaging, parallel=False)):\n @property\n def grid_priors(self):\n return [\n self.model.galaxies.subhalo.mass.centre_0,\n self.model.galaxies.subhalo.mass.centre_1,\n ]\n\n subhalo = al.GalaxyModel(redshift=0.5, 
mass=al.mp.SphericalTruncatedNFWMCRLudlow)\n\n subhalo.mass.mass_at_200 = af.LogUniformPrior(lower_limit=1.0e6, upper_limit=1.0e11)\n\n subhalo.mass.centre_0 = af.UniformPrior(lower_limit=-2.5, upper_limit=2.5)\n subhalo.mass.centre_1 = af.UniformPrior(lower_limit=-2.5, upper_limit=2.5)\n\n phase2 = GridPhase(\n phase_name=\"phase_2\",\n folders=folders,\n galaxies=dict(\n lens=af.last.instance.galaxies.lens,\n subhalo=subhalo,\n source=af.last.instance.galaxies.source,\n ),\n search=search,\n settings=al.SettingsPhaseImaging(),\n number_of_steps=2,\n )\n\n phase3 = al.PhaseImaging(\n phase_name=\"phase_3__subhalo_refine\",\n folders=folders,\n galaxies=dict(\n lens=af.last[-1].model.galaxies.lens,\n subhalo=phase2.result.model.galaxies.subhalo,\n source=af.last[-1].instance.galaxies.source,\n ),\n settings=al.SettingsPhaseImaging(),\n search=af.DynestyStatic(),\n )\n\n return al.PipelineDataset(name, phase1, phase2, phase3)\n\n\nif __name__ == \"__main__\":\n import sys\n\n runner.run(sys.modules[__name__])\n","sub_path":"test_autolens/integration/tests/features/subhalo.py","file_name":"subhalo.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"338957702","text":"import datetime, os, signal, subprocess, sys, time, unittest\n\ndef run(command, stdin = None, timeout = 30):\n \"\"\"\n Runs the specified command using specified standard input (if any) and\n returns the output on success. If the command doesn't return within the\n specified time (in seconds), \"__TIMEOUT__\" is returned.\n \"\"\"\n\n start = datetime.datetime.now()\n process = subprocess.Popen(command.split(),\n stdin = subprocess.PIPE, \n stdout = subprocess.PIPE,\n stderr = subprocess.STDOUT)\n if not stdin is None:\n process.stdin.write(stdin)\n process.stdin.close()\n while process.poll() is None:\n time.sleep(0.1)\n now = datetime.datetime.now()\n if (now - start).seconds > timeout:\n os.kill(process.pid, signal.SIGKILL)\n os.waitpid(-1, os.WNOHANG)\n return \"__TIMEOUT__\"\n return process.stdout.read().strip()\n\nclass Problem1(unittest.TestCase):\n \n def test1(self):\n command = \"python course_prep.py\"\n got = run(command)\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertFalse(\"...\" in got)\n\nclass Problem2(unittest.TestCase):\n \n def test1(self):\n command = \"python picobot.py -e env1.txt -b 10,10 -r north_south.pb\"\n sought = \"\"\"bot at: (24, 10), cells left: 506\nBot stopped!\"\"\"\n got = run(command)\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertTrue(got.endswith(sought))\n\nclass Problem3(unittest.TestCase):\n\n def test1(self):\n command = \"python picobot.py -e env1.txt -b 10,10 -r south_east.pb\"\n sought = \"\"\"bot at: (24, 24), cells left: 500\nBot stopped!\"\"\"\n got = run(command)\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertTrue(got.endswith(sought))\n\nclass Problem4(unittest.TestCase):\n\n def test1(self):\n command = \"python hmmmSimulator.py -f sum_of_squares.b -n\"\n sought = \"\"\"346\"\"\"\n got = run(command, \"11 15\")\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertEquals(got, sought)\n\nclass Problem5(unittest.TestCase):\n \n def test1(self):\n command = \"python hmmmSimulator.py -f max.b -n\"\n sought = \"\"\"5\"\"\"\n got = run(command, \"1 5\")\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertEquals(got, sought)\n\n def test2(self):\n command = \"python hmmmSimulator.py -f max.b -n\"\n sought = \"\"\"7\"\"\"\n got = run(command, 
\"7 2\")\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertEquals(got, sought)\n\n def test3(self):\n command = \"python hmmmSimulator.py -f max.b -n\"\n sought = \"\"\"3\"\"\"\n got = run(command, \"3 3\")\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertEquals(got, sought)\n\nclass Problem6(unittest.TestCase):\n \n def test1(self):\n command = \"python hmmmSimulator.py -f countdown.b -n\"\n sought = \"\"\"5\n4\n3\n2\n1\n0\"\"\"\n got = run(command, \"5\")\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertEquals(got, sought)\n\n def test2(self):\n command = \"python hmmmSimulator.py -f countdown.b -n\"\n sought = \"\"\"\"\"\"\n got = run(command, \"-5\")\n self.assertNotEquals(got, \"__TIMEOUT__\")\n self.assertEquals(got, sought)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"cs110/homework1/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"44593978","text":"import pandas as pd\nimport numpy as np\nfrom read_data import *\nfrom anomaly_detect import *\nfrom factor_form import *\n# \nif __name__ == '__main__':\n \"\"\"\n 日期参数\n input_date: 指标观察日期;\n base_interval: 以 n 天前作为基线日期,该值为 1,即环比昨天,该值为 7 即周同比上周;\n hist_interval: 往前追溯多少天\n \"\"\"\n input_date = '20210421'\n base_interval = 7\n hist_interval = 7\n\n\n \"\"\"\n mstag: MySQL 连接标识符\n tbl_name: 连接表名\n dt_col: 日期字段名称\n \"\"\"\n\n mstag = 'test'\n tbl_name = 'demo_onecloud'\n dt_col = 'dayno'\n stat_metric = ['req_nums', 'req_fill_nums', 'expose_nums', 'acc_nums', 'acc_cost']\n comp_metric = ['req_nums', 'req_fill_rate', 'expose_rate', 'ctr', 'ppc']\n\n obs_metric = 'acc_cost'\n comp_metric.append(obs_metric)\n dim_col = ['ad_show_pos', 'ad_owner_id']\n\n stat_date, base_date, hist_date = date_gen(input_date, base_interval, hist_interval, False)\n\n path = '/Users/davidaclee/Desktop/data_coding/data_project/AnomalyContrib/data/test.csv' \n sql1= 'select * from {} where 1 = 1 and dt between \"{}\" and \"{}\"'.format(tbl_name, hist_date, stat_date)\n # df_ = load_from_mysql(mstag, sql1)\n df_ = load_from_csv(path)\n # logging.info(df_)\n df_[dt_col] = df_[dt_col].astype(str)\n df_[dim_col] = df_[dim_col].astype(str)\n \n df = df_.query('{} in [\"{}\", \"{}\"]'.format(dt_col, stat_date, base_date))\n df_hist = df_.groupby(by = dt_col)[obs_metric].sum().reset_index()\n \n\n logging.info(df)\n logging.info(df.info())\n logging.info(df_hist)\n \n detect_info_dod = dod_threshold_detect(df_hist, dt_col, stat_date, base_date, obs_metric)\n detect_info_ksigma = k_sigma(3, df_hist, dt_col, stat_date, base_date, obs_metric)\n detect_info_boxplot = boxplot_detect(df_hist, dt_col, stat_date, base_date, obs_metric)\n detect_info_zscore = zscore_detect(df_hist, dt_col, stat_date, base_date, obs_metric)\n\n # # logging.info([detect_info_dod['detect_result'], detect_info_ksigma['detect_result'], detect_info_boxplot['detect_result'], detect_info_zscore['detect_result']])\n logging.info([detect_info_dod, detect_info_ksigma, detect_info_boxplot, detect_info_zscore])\n\n # df_factor_form_t = get_factor_form(df, dt_col, stat_date, base_date, obs_metric, stat_metric, comp_metric)\n # logging.info('df_factor_form_t:\\n{}'.format(df_factor_form_t))\n # # df_factor_form_t = df_factor_form.T\n # # df_factor_form_t['impact_' + obs_metric] = 0\n # # df_factor_form_t['diff_pct'] = df_factor_form_t[stat_date]/df_factor_form_t[base_date]\n # # df_factor_form_t['diff_value'] = 
\n\n # # logging.info('df_factor_form_t:\\n', df_factor_form_t)","sub_path":"anomalyContrib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"266829343","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 25 18:45:03 2017\n\n@author: dell\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nres = requests.get('http://news.sina.com.cn/')\nres.encoding = 'utf-8'\n#print(res.text)\n\nsoup = BeautifulSoup(res.text,'html.parser')\n\n#print(soup)\nfor news in soup.select('.blk_72'):\n# print(news.select('li'))\n if len(news.select('li'))>0:\n# print(news.select('li')[1].text)\n h2 = news.select('li')[1].text\n# time = news.select('.time')[1].text\n a = news.select('a')[1]['href']\n print(h2,a)\n\n\n#soup = BeautifulSoup(html_sample,'html.parser')\n#print(soup.text)\n\n\n#html =\n\"\"\"\n<html><head><title>The Dormouse's story</title></head>\n<body>\n<p class=\"title\"><b>The Dormouse's story</b></p>\n\n<p class=\"story\">Once upon a time there were three little sisters; and their names were\n<a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\"><!-- Elsie --></a>,\n<a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and\n<a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>;\nand they lived at the bottom of a well.</p>\n\n<p class=\"story\">...</p>\n</body></html>
\n\"\"\"\n#a = ' i amd a link'\n#soup = BeautifulSoup(a,'html.parser')\n##print(soup.prettify())\n#print(soup.select('a')[0]['abc'])\n","sub_path":"html/else/html_study2.py","file_name":"html_study2.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"617929357","text":"import pickle\nimport gzip\nimport numpy as np\n\n\n\ndef vectorized_result(j):\n \"\"\"Return a 10-dimensional unit vector with a 1.0 in the jth\n position and zeroes elsewhere. This is used to convert a digit\n (0...9) into a corresponding desired output from the neural\n network.\"\"\"\n e = np.zeros((10, 1))\n e[j] = 1.0\n return e\n\n\nwith open(\"data/train_img.pickle\", \"rb\") as f:\n train_set2 = pickle.load(f, encoding='iso-8859-1')\n\nprint(train_set2[0][0])\n#\nwith gzip.open(\"data/mnist.pkl.gz\", \"rb\") as f:\n train_set, valid_set, test_set = pickle.load(f, encoding='iso-8859-1')\nprint(\"**************************************\")\nprint(len(train_set))\nprint(train_set[0][0]) # [0] -> kodirane slike , [1] -> števka ki označuje zapis\nprint(train_set[1][0])\nprint(len(train_set[0][0]))\n#\n#\n\n\n\n# training_inputs = np.reshape(train_set[0][0], (784, 1))\n# training_inputs = [np.reshape(x, (784, 1)) for x in train_set[0]] # converts into a array\n# print(training_inputs)\n# training_results = [vectorized_result(y) for y in train_set[1]]\n# training_results = vectorized_result(train_set[0][1])\n# print(training_results)\n# print(training_results)\n\n# training_data = list(zip(training_inputs, training_results))\n# print(training_data)\n\n\n# with gzip.open(\"data/mnist.pkl.gz\", \"rb\") as f:\n# alla = pickle.load(f, encoding='iso-8859-1')\n#\n# print(type(alla))\n#\n","sub_path":"my_pickle.py","file_name":"my_pickle.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"382623828","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport inspect\n\nclass Logger:\n\n def __init__(self, WS_PATH):\n log_file = WS_PATH + os.path.splitext(os.path.basename(__file__))[0] + \".log\"\n if (os.path.isfile(log_file)):\n os.remove(log_file)\n self.logger = logging.getLogger(\"at\")\n self.logger.setLevel(logging.INFO)\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n fmt = logging.Formatter(fmt='[%(asctime)s] %(message)s',datefmt='%Y-%m-%d %H:%M:%S')\n fh.setFormatter(fmt)\n ch.setFormatter(fmt)\n self.logger.addHandler(fh)\n self.logger.addHandler(ch)\n\n def log(self, msg):\n class_name = inspect.currentframe().f_back.f_locals['self'].__class__.__name__\n func_name = inspect.currentframe().f_back.f_code.co_name\n file_name = inspect.currentframe().f_back.f_code.co_filename\n line_number = inspect.currentframe().f_back.f_code.co_firstlineno\n self.logger.info((\" %s [%s::%s][%s:%i]\") % (msg, class_name, func_name, file_name, line_number))\n","sub_path":"AT/utility/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"119437804","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.optim.lr_scheduler import *\nimport torchvision\nimport torch.utils.data as Data\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport 
torchvision.models as models\nfrom torchvision import utils\nfrom tensorboardX import SummaryWriter\nimport math  # needed below for the He-style init: math.sqrt(2. / n)\n\nnum_epoches = 150\nbatch_size = 64\nnum_classes = 65\nlr = 0.1\nstep_size = 30\n\ndevice = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu')\n\nbase_path = '/workspace/fubo/'\ntrain_path = base_path + 'train/'\nvalid_path = base_path + 'valid/'\n\ntrain_transform = transforms.Compose([\n transforms.Resize(224),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n transforms.RandomAffine(degrees=10, translate=(0.1, 0.1), scale=(0.9, 1.1)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n])\n\nvalid_transform = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n])\n\ndataset_train = datasets.ImageFolder(root=train_path, transform=train_transform)\ndataset_valid = datasets.ImageFolder(root=valid_path, transform=valid_transform)\n\ntrain_loader = torch.utils.data.DataLoader(\n dataset=dataset_train,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4,\n)\n\nvalid_loader = torch.utils.data.DataLoader(\n dataset=dataset_valid,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4,\n)\n\n\nclass ResNet50Fe(nn.Module):\n def __init__(self):\n super(ResNet50Fe, self).__init__()\n model_resnet50 = models.resnet50(pretrained=False)\n self.conv1 = model_resnet50.conv1\n self.bn1 = model_resnet50.bn1\n self.relu = model_resnet50.relu\n self.maxpool = model_resnet50.maxpool\n self.layer1 = model_resnet50.layer1\n self.layer2 = model_resnet50.layer2\n self.layer3 = model_resnet50.layer3\n self.layer4 = model_resnet50.layer4\n self.avgpool = model_resnet50.avgpool\n self.__in_features = model_resnet50.fc.in_features\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n return x\n\n def output_num(self):\n return self.__in_features\n\n\nclass ResNet50(nn.Module):\n def __init__(self):\n super(ResNet50, self).__init__()\n self.features = ResNet50Fe()\n self.classifier = nn.Linear(self.features.output_num(), num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n init.kaiming_normal_(m.weight.data)\n init.normal_(m.bias.data, std=0.02)\n\n def forward(self, x):\n x = self.features(x)\n x = self.classifier(x)\n return x\n\n\nif __name__ == '__main__':\n resnet50 = ResNet50().to(device)\n\n optimizer = torch.optim.SGD(resnet50.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005, nesterov=True)\n scheduler = StepLR(optimizer, step_size=step_size, gamma=0.5)\n loss_func = torch.nn.CrossEntropyLoss()\n\n summary_writer = SummaryWriter()\n dump_input = torch.rand(1, 3, 224, 224).to(device)\n summary_writer.add_graph(resnet50, (dump_input,), verbose=False)\n\n for epoch in range(num_epoches):\n\n resnet50.train()\n running_loss = 0.0\n running_acc = 0.0\n for step, (batch_x, batch_y) in enumerate(train_loader): # 每一步 loader 释放一小批数据用来学习\n batch_x, batch_y = batch_x.to(device), batch_y.to(device)\n\n out = resnet50(batch_x)\n loss = loss_func(out, batch_y)\n running_loss += loss.data.item() * batch_y.size(0)\n _, pred = torch.max(out, 1)\n num_correct = (pred == batch_y).sum()\n running_acc += num_correct.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print('Finish {} epoch, Loss: {:.6f}, Accuracy of Train: {:.6f}'.format(\n epoch + 1, running_loss / (len(dataset_train)), running_acc / (len(dataset_train))))\n\n scheduler.step()\n for param_group in optimizer.param_groups:\n print(param_group['lr'])\n\n summary_writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)\n summary_writer.add_scalar('train_loss', running_loss / (len(dataset_train)), epoch)\n summary_writer.add_scalar('train_acc', running_acc / (len(dataset_train)), epoch)\n\n resnet50.eval()\n correct = 0\n total = 0\n for data in valid_loader:\n images, labels = data\n images, labels = images.to(device), labels.to(device)\n outputs = resnet50(images)\n _, predicted = torch.max(outputs, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n accuracy = 100 * correct / total\n summary_writer.add_scalar('top1', accuracy, epoch)\n print('Accuracy of Test: %.2f %%' % (accuracy))\n\n torch.save(resnet50, \"resnet50.pth\")\n","sub_path":"hw2/resnet50.py","file_name":"resnet50.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"452215821","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# django-pyvows extensions\n# https://github.com/rafaelcaricio/django-pyvows\n\n# Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license\n# Copyright (c) 2011 Rafael Caricio rafael@caricio.com\n\nfrom threading import current_thread\n\nclass SettingsTracker(object):\n\n def install(self):\n actual_import = __builtins__['__import__']\n if actual_import != self._import:\n self.real_import = actual_import\n __builtins__['__import__'] = self._import\n\n def _import(self, name, globals=None, locals=None, fromlist=[], level=-1):\n result = apply(self.real_import, (name, globals, locals, fromlist, level))\n fromlist = (fromlist or [])\n if name == 'django.conf' and 'settings' in fromlist:\n if type(result.settings) != VowsSettings:\n result.settings = VowsSettings(result.settings)\n elif name == 'django' and 'conf' in fromlist:\n if type(result.conf.settings) != VowsSettings:\n result.conf.settings = VowsSettings(result.conf.settings)\n return result\n\nclass VowsSettings(object):\n\n def __init__(self, 
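Splitting `ResNet50` above into a `ResNet50Fe` backbone plus a separate `nn.Linear` head, with the feature width exposed through `output_num()`, is what keeps head swaps cheap. A small sketch of reusing the backbone for a different, hypothetical class count:

import torch
import torch.nn as nn

backbone = ResNet50Fe()                      # feature extractor defined above
head = nn.Linear(backbone.output_num(), 10)  # new 10-class head (illustrative)

x = torch.rand(2, 3, 224, 224)
logits = head(backbone(x))                   # shape: (2, 10)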
original_settings):\n self.original_settings = original_settings\n\n def __getattr__(self, attr_name):\n thread = current_thread()\n if hasattr(thread, 'settings'):\n if attr_name in thread.settings:\n return thread.settings[attr_name]\n return getattr(self.original_settings, attr_name)\n\nsettings_tracker = SettingsTracker()\n","sub_path":"django_pyvows/settings_manager.py","file_name":"settings_manager.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"110419669","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('trackit_api', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PricedropDetails',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('drop_value', models.FloatField()),\n ('product', models.ForeignKey(to='trackit_api.ProductData')),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='UserDetails',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('username', models.CharField(max_length=40)),\n ('email', models.EmailField(max_length=60)),\n ('password', models.CharField(max_length=20)),\n ],\n ),\n ]\n","sub_path":"trackit_api/migrations/0002_pricedropdetails_userdetails.py","file_name":"0002_pricedropdetails_userdetails.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"492095144","text":"from time import sleep\nimport threading\n\n\nclass MockRobotEncoder(object):\n def __init__(self, speed_error):\n self._speed_error = speed_error\n self._value = 0\n self._total = 0\n self.speed = 0.5\n\n self._t = threading.Thread(\n target=self._mock_encoder,\n args=(0.1,))\n self._t.start()\n\n def reset(self):\n self._value = 0\n\n def _mock_encoder(self, interval):\n while True:\n self._increment()\n # sleep differing amounts based on the speed and error introduced\n sleep(interval * (2 - self._speed) * self._speed_error)\n\n def _increment(self):\n self._value += 1\n self._total += 1\n\n @property\n def value(self):\n return int(self._value)\n\n @property\n def speed(self):\n return self._speed\n\n @property\n def total(self):\n return int(self._total)\n\n @speed.setter\n def speed(self, value):\n self._speed = value\n\n\nKP = 2\nKD = 0\nKI = 0\nTARGET = 0\n\nmotor_speed = 0.5\ne1 = MockRobotEncoder(1.13) # 엔코더값\ne1_prev_error = 0\ne1_sum_error = 0\n\nwhile True:\n e1_error = TARGET - e1.value\n e1_sum_error += e1_error\n print(e1.speed)\n print(e1.value)\n\n e1_adj = (e1_error * KP) + (e1_prev_error * KD) + (e1_sum_error * KI)\n e1_adj = round(e1_adj,2)\n\n e1_prev_error = e1_error\n\n motor_speed += e1_adj\n motor_speed = round(motor_speed,2)\n\n e1.speed = motor_speed\n\n print(\"********************************\")\n print(\"error1 :: {} \".format(e1_error))\n print(\"adj1 :: {}\".format(e1_adj))\n print(\"e1 :: {}\".format(e1.value))\n print(\"m1 :: {}\".format(motor_speed))\n print(\"********************************\")\n\n e1.reset()\n\n 
sleep(1)","sub_path":"src/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"264270378","text":"import datetime\n\nimport pytest\nfrom django.test import TestCase\n\nfrom zenslackchat.models import ZenSlackChat\nfrom zenslackchat.models import NotFoundError\n\n\nUTC = datetime.timezone.utc\n\n\nISSUE_FIXTURES = [\n # Old open issue on 31 Dec 2019\n dict(\n chat_id=\"slack-chat-0\", \n ticket_id=\"zendesk-ticket-0\", \n opened=datetime.datetime(2019, 12, 28, 13, 51, tzinfo=UTC),\n closed=None,\n ),\n\n # On 1st Jan 2020\n #\n # Two closed issues \n # Three open issues\n #\n # closed on 1 Jan 2020\n dict(\n chat_id=\"slack-chat-1\", \n ticket_id=\"zendesk-ticket-1\", \n opened=datetime.datetime(2020, 1, 1, 12, 30, tzinfo=UTC),\n closed=datetime.datetime(2020, 1, 1, 13, 42, tzinfo=UTC),\n ),\n dict(\n chat_id=\"slack-chat-2\", \n ticket_id=\"zendesk-ticket-2\", \n opened=datetime.datetime(2020, 1, 1, 12, 30, tzinfo=UTC),\n closed=datetime.datetime(2020, 1, 1, 17, 11, tzinfo=UTC),\n ),\n # open 1 Jan 2020\n dict(\n chat_id=\"slack-chat-3\", \n ticket_id=\"zendesk-ticket-3\", \n opened=datetime.datetime(2020, 1, 1, 8, 7, tzinfo=UTC),\n closed=None\n ),\n dict(\n chat_id=\"slack-chat-4\", \n ticket_id=\"zendesk-ticket-4\", \n opened=datetime.datetime(2020, 1, 1, 22, 44, tzinfo=UTC),\n closed=None\n ),\n dict(\n chat_id=\"slack-chat-5\", \n ticket_id=\"zendesk-ticket-5\", \n opened=datetime.datetime(2020, 1, 1, 12, 30, tzinfo=UTC),\n closed=None\n )\n]\n\n\ndef test_daily_summary_data(log, db):\n \"\"\"Test the output of the daily report.\n \"\"\"\n workspace_uri = 'https://s.l.a.c.k'\n\n # Generate fixtures for report run:\n for issue in ISSUE_FIXTURES:\n ZenSlackChat.open(\n channel_id='some_channel_id', \n chat_id=issue['chat_id'], \n ticket_id=issue['ticket_id'], \n opened=issue['opened']\n )\n if issue['closed']:\n ZenSlackChat.resolve(\n channel_id='some_channel_id', \n chat_id=issue['chat_id'], \n closed=issue['closed']\n )\n\n # Its the 2 Jan 2020, report on what happened on 1 Jan. 
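The loop above is a hand-rolled PID update on the encoder count. The same update as a compact function — note it mirrors the script in applying `KD` to the previous error, rather than the textbook derivative `error - prev_error`:

def pid_step(target, measured, prev_error, sum_error, kp=2.0, kd=0.0, ki=0.0):
    # One PID iteration: proportional, (script-style) derivative, integral.
    error = target - measured
    sum_error += error
    adjustment = error * kp + prev_error * kd + sum_error * ki
    return adjustment, error, sum_error

# adj, e1_prev_error, e1_sum_error = pid_step(TARGET, e1.value, e1_prev_error, e1_sum_error)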
This should count\n # all open issues, but only count closed issues for 1 Jan.\n #\n now = datetime.datetime(2020, 1, 2, 0, 0, 0, tzinfo=UTC)\n\n report = ZenSlackChat.daily_summary(workspace_uri, when=now)\n\n # 3 on 1 Jan + 1 still open on 31 Dec:\n assert len(report['open']) == 4\n\n # oldest issue first\n assert report['open'][0] == (\n 'https://s.l.a.c.k/some_channel_id/pslack-chat-4'\n )\n\n # Only the 2 closed issues on 1 Jan:\n assert report['closed'] == 2\n\n # Its the 3 Jan 2020, report on what happened on 2 Jan.\n #\n now = datetime.datetime(2020, 1, 3, 0, 0, 0, tzinfo=UTC)\n\n report = ZenSlackChat.daily_summary(workspace_uri, when=now)\n\n # 3 on 1 Jan + 1 still open on 31 Dec:\n assert len(report['open']) == 4\n\n # No closed issues on 2 Jan:\n assert report['closed'] == 0\n\n\ndef test_daily_report_plaintext(log, db):\n \"\"\"Test the text output that could be sent as a daily report.\n \"\"\"\n report = dict(\n open=[\n \"https://s.l.a.c.k/chat-id-1\", \n \"https://s.l.a.c.k/chat-id-2\", \n \"https://s.l.a.c.k/chat-id-3\", \n \"https://s.l.a.c.k/chat-id-4\", \n ],\n closed=3\n )\n\n plain_text = ZenSlackChat.daily_report(report)\n\n expected = \"\"\"\n📊 Daily WebOps SRE Issue Report\n\nClosed 🤘: 3\n\nUnresolved 🔥: 4\n- https://s.l.a.c.k/chat-id-1\n- https://s.l.a.c.k/chat-id-2\n- https://s.l.a.c.k/chat-id-3\n- https://s.l.a.c.k/chat-id-4\n\nCheers,\n\n🤖 ZenSlackChat\n \"\"\".strip()\n\n assert plain_text == expected ","sub_path":"tests/test_bot_reports.py","file_name":"test_bot_reports.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"624727793","text":"class Player():\n def __init__(self, name):\n self.name = name\n self.HP = 100\n self.power = 100\n self.score = 3\n self.med_list = []\n\n def set_name(self, new_name):\n self.name = new_name\n\n def get_name(self):\n return self.name\n\n def attack(self,a):\n ATTACK_DAMAGE = 10\n if a.HP < ATTACK_DAMAGE:\n self.die(a)\n a.HP -= ATTACK_DAMAGE\n self.score += 20\n print(self.name, \"hits\", a.name, \"causing \", ATTACK_DAMAGE, \"damage\")\n\n def big_attack(self,a):\n BIG_ATTACK_POWER_COST = 10\n if self.power < BIG_ATTACK_POWER_COST:\n print(\"Not enough power, failt to use big attack\")\n return\n BIG_ATTACK_DAMAGE = 25\n if a.HP < BIG_ATTACK_DAMAGE:\n self.die(a)\n a.HP -= BIG_ATTACK_DAMAGE\n self.power -= BIG_ATTACK_POWER_COST\n self.score += 30\n print(self.name, \"hits\", a.name, \"causing \", BIG_ATTACK_DAMAGE, \"damage\")\n\n # As long as valid MedType is passed in as argument\n # No matter what it is, just find the first item\n # that is instance of this class, drop it, and \n # increase HP accordingly\n def take_med(self, MedType):\n first_small_med_index = -1 \n for i in range(len(self.med_list)):\n if isinstance(self.med_list[i], MedType):\n first_small_med_index = i\n break\n if first_small_med_index == -1:\n return\n self.HP += MedType.life\n self.med_list.pop(first_small_med_index)\n \n def buy_med(self, MedType):\n if self.score < MedType.price:\n print(\"Not enough score to buy med\")\n return\n self.score -= MedType.price\n self.med_list.append(MedType())\n # Called in attack() and big_attack() when opponent HP\n # is lower than damage\n def die(self, a):\n # When exit() is called, the program terminates.\n # When exit(\"xxx\") is called, program terminates with\n # message \"xxx\" shown on screen\n # Try it in python console\n exit(\"Player \"+a.name+\" died\")\n\nclass SmallMed: \n life = 10\n price = 
30\n\nclass BigMed:\n life = 30\n price = 50","sub_path":"lecture6/solution/game1/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"322176807","text":"import warnings\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nimport xarray as xr\nfrom _echopype_version import version as ECHOPYPE_VERSION\n\nfrom ..core import SONAR_MODELS\nfrom ..qc import coerce_increasing_time, exist_reversed_time\nfrom .echodata import EchoData\n\n\ndef union_attrs(datasets: List[xr.Dataset]) -> Dict[str, Any]:\n \"\"\"\n Merges attrs from a list of datasets.\n Prioritizes keys from later datsets.\n \"\"\"\n\n total_attrs = dict()\n for ds in datasets:\n total_attrs.update(ds.attrs)\n return total_attrs\n\n\ndef assemble_combined_provenance(input_paths):\n return xr.Dataset(\n data_vars={\n \"src_filenames\": (\"file\", input_paths),\n },\n attrs={\n \"conversion_software_name\": \"echopype\",\n \"conversion_software_version\": ECHOPYPE_VERSION,\n \"conversion_time\": datetime.utcnow().isoformat(timespec=\"seconds\")\n + \"Z\", # use UTC time\n },\n )\n\n\ndef combine_echodata(echodatas: List[EchoData], combine_attrs=\"override\") -> EchoData:\n \"\"\"\n Combines multiple `EchoData` objects into a single `EchoData` object.\n\n Parameters\n ----------\n echodatas: List[EchoData]\n The list of `EchoData` objects to be combined.\n combine_attrs: { \"override\", \"drop\", \"identical\", \"no_conflicts\", \"overwrite_conflicts\" }\n String indicating how to combine attrs of the `EchoData` objects being merged.\n This parameter matches the identically named xarray parameter\n (see https://xarray.pydata.org/en/latest/generated/xarray.combine_nested.html)\n with the exception of the \"overwrite_conflicts\" value.\n\n * \"override\": Default. skip comparing and copy attrs from the first `EchoData`\n object to the result.\n * \"drop\": empty attrs on returned `EchoData` object.\n * \"identical\": all attrs must be the same on every object.\n * \"no_conflicts\": attrs from all objects are combined,\n any that have the same name must also have the same value.\n * \"overwrite_conflicts\": attrs from all `EchoData` objects are combined,\n attrs with conflicting keys will be overwritten by later `EchoData` objects.\n\n Returns\n -------\n EchoData\n An `EchoData` object with all of the data from the input `EchoData` objects combined.\n\n Raises\n ------\n ValueError\n If `echodatas` contains `EchoData` objects with different or `None` `sonar_model` values\n (i.e., all `EchoData` objects must have the same non-None `sonar_model` value).\n ValueError\n If EchoData objects have conflicting source file names.\n\n Warns\n -----\n UserWarning\n If the `sonar_model` of the input `EchoData` objects is `\"EK60\"` and any `EchoData` objects\n have non-monotonically increasing `ping_time`, `location_time` or `mru_time` values,\n the corresponding values in the output `EchoData` object will be increased starting at the\n timestamp where the reversal occurs such that all values in the output are monotonically\n increasing. 
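`take_med` and `buy_med` above take the med *class* itself as the argument and read `life`/`price` straight off it, so a new med type only needs those two class attributes. Usage, with an inflated starting score for illustration:

p = Player('hero')
p.score = 100            # illustrative; a fresh Player starts with score 3
p.buy_med(SmallMed)      # costs SmallMed.price (30), stores a SmallMed()
p.take_med(SmallMed)     # consumes it, restoring SmallMed.life (10) HP
print(p.HP, p.score, len(p.med_list))  # 110 70 0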
Additionally, the original `ping_time`, `location_time` or `mru_time` values\n will be stored in the `Provenance` group, although this behavior may change in future\n versions.\n\n Warnings\n --------\n Changes in parameters between `EchoData` objects are not currently checked;\n however, they may raise an error in future versions.\n\n Notes\n -----\n * `EchoData` objects are combined by combining their groups individually.\n * Attributes from all groups before the combination will be stored in the provenance group,\n although this behavior may change in future versions.\n * The `source_file` and `converted_raw_path` attributes will be copied from the first\n `EchoData` object in the given list, but this may change in future versions.\n\n Examples\n --------\n >>> ed1 = echopype.open_converted(\"file1.nc\")\n >>> ed2 = echopype.open_converted(\"file2.zarr\")\n >>> combined = echopype.combine_echodata([ed1, ed2])\n \"\"\"\n\n result = EchoData()\n if len(echodatas) == 0:\n return result\n result.source_file = echodatas[0].source_file\n result.converted_raw_path = echodatas[0].converted_raw_path\n\n sonar_model = None\n for echodata in echodatas:\n if echodata.sonar_model is None:\n raise ValueError(\n \"all EchoData objects must have non-None sonar_model values\"\n )\n elif sonar_model is None:\n sonar_model = echodata.sonar_model\n elif echodata.sonar_model != sonar_model:\n raise ValueError(\n \"all EchoData objects must have the same sonar_model value\"\n )\n\n # ping time before reversal correction\n old_ping_time = None\n # ping time after reversal correction\n new_ping_time = None\n # location time before reversal correction\n old_location_time = None\n # location time after reversal correction\n new_location_time = None\n # mru time before reversal correction\n old_mru_time = None\n # mru time after reversal correction\n new_mru_time = None\n\n # all attributes before combination\n # { group1: [echodata1 attrs, echodata2 attrs, ...], ... 
}\n old_attrs: Dict[str, List[Dict[str, Any]]] = dict()\n\n for group in EchoData.group_map:\n group_datasets = [\n getattr(echodata, group)\n for echodata in echodatas\n if getattr(echodata, group) is not None\n ]\n if group in (\"top\", \"sonar\"):\n combined_group = getattr(echodatas[0], group)\n elif group == \"provenance\":\n combined_group = assemble_combined_provenance(\n [\n echodata.source_file\n if echodata.source_file is not None\n else echodata.converted_raw_path\n for echodata in echodatas\n ]\n )\n else:\n if len(group_datasets) == 0:\n setattr(result, group, None)\n continue\n\n concat_dim = SONAR_MODELS[sonar_model][\"concat_dims\"].get(\n group, SONAR_MODELS[sonar_model][\"concat_dims\"][\"default\"]\n )\n concat_data_vars = SONAR_MODELS[sonar_model][\"concat_data_vars\"].get(\n group, SONAR_MODELS[sonar_model][\"concat_data_vars\"][\"default\"]\n )\n combined_group = xr.combine_nested(\n group_datasets,\n [concat_dim],\n data_vars=concat_data_vars,\n coords=\"minimal\",\n combine_attrs=\"drop\"\n if combine_attrs == \"overwrite_conflicts\"\n else combine_attrs,\n )\n if combine_attrs == \"overwrite_conflicts\":\n combined_group.attrs.update(union_attrs(group_datasets))\n\n if group == \"beam\":\n if sonar_model == \"EK80\":\n combined_group[\"transceiver_software_version\"] = combined_group[\n \"transceiver_software_version\"\n ].astype(\" 1:\n old_attrs[group] = [group_dataset.attrs for group_dataset in group_datasets]\n if combined_group is not None:\n # xarray inserts this dimension when concating along multiple dimensions\n combined_group = combined_group.drop_dims(\"concat_dim\", errors=\"ignore\")\n setattr(result, group, combined_group)\n\n # save ping time before reversal correction\n if old_ping_time is not None:\n result.provenance[\"old_ping_time\"] = old_ping_time\n result.provenance.attrs[\"reversed_ping_times\"] = 1\n # save location time before reversal correction\n if old_location_time is not None:\n result.provenance[\"old_location_time\"] = old_location_time\n result.provenance.attrs[\"reversed_ping_times\"] = 1\n # save mru time before reversal correction\n if old_mru_time is not None:\n result.provenance[\"old_mru_time\"] = old_mru_time\n result.provenance.attrs[\"reversed_ping_times\"] = 1\n # TODO: possible parameter to disable original attributes and original ping_time storage\n # in provenance group?\n # save attrs from before combination\n for group in old_attrs:\n all_group_attrs = set()\n for group_attrs in old_attrs[group]:\n for attr in group_attrs:\n all_group_attrs.add(attr)\n echodata_filenames = []\n for ed in echodatas:\n if ed.source_file is not None:\n filepath = ed.source_file\n elif ed.converted_raw_path is not None:\n filepath = ed.converted_raw_path\n else:\n # unreachable\n raise ValueError(\"EchoData object does not have a file path\")\n filename = Path(filepath).name\n if filename in echodata_filenames:\n raise ValueError(\"EchoData objects have conflicting filenames\")\n echodata_filenames.append(filename)\n attrs = xr.DataArray(\n [\n [group_attrs.get(attr) for attr in all_group_attrs]\n for group_attrs in old_attrs[group]\n ],\n coords={\n \"echodata_filename\": echodata_filenames,\n f\"{group}_attr_key\": list(all_group_attrs),\n },\n dims=[\"echodata_filename\", f\"{group}_attr_key\"],\n )\n result.provenance = result.provenance.assign({f\"{group}_attrs\": attrs})\n\n # Add back sonar model\n result.sonar_model = sonar_model\n\n return 
result\n","sub_path":"echopype/echodata/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":12711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"215969741","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ncndaqiang\n\"\"\"\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\nplt.switch_backend('agg')\n\ndef getdos(inputfile):\n f=open(inputfile)\n rownum=len(f.readlines())\n ierror=f.seek(0,0)\n line=f.readline()\n EFermi=float(line.split()[-2])\n colnum=len(f.readline().split())\n dos=np.zeros([rownum,colnum])\n\n ierror=f.seek(0,0)\n ierror=f.readline()\n for row in np.arange(rownum):\n line=f.readline()\n if not line: #等价于if line == \"\":\n break\n dos[row,:]=[float(i) for i in line.split()]\n dos=dos[0:row,:]\n dos[:,0]=dos[:,0]-EFermi\n f.close()\n return dos\n\n\n\n\n# plt.plot(dos[:,0],dos[:,1],label=\"DOS\")\n# plt.plot(dos[:,0],dos[:,2],label=\"Integer DOS\")\n# plt.xlabel('E -Ef(eV)')\n# plt.ylabel('DOS')\n# plt.title('DOS'+inputfile)\n# plt.legend() #上图例,plt里面的label\n# plt.show() #该条命令画完图展示,关闭窗口会自动清空画板内容\n\n\nif(len(sys.argv) > 1):\n dosfile = sys.argv[1:]\nelse:\n dosfile=os.popen(\"ls *.dos\").readlines()\n for i in np.arange(len(dosfile)):\n dosfile[i]=dosfile[i][:-1]\n\nif( len(dosfile) > 1 ):\n fig, ax = plt.subplots(1,len(dosfile),sharex=True,sharey=False,figsize=(16,6))\n for i in np.arange(len(dosfile)):\n inputfile=dosfile[i]\n print(inputfile)\n dos=getdos(inputfile)\n ax[i].plot(dos[:,0],dos[:,1],label=\"UP\")\n ax[i].plot(dos[:,0],-1.0*dos[:,2],label=\"DOWN\")\n ax[i].fill_between(dos[:,0], 0, dos[:,1] ) #, facecolor='yellow', alpha=0.5)\n ax[i].fill_between(dos[:,0], 0, -dos[:,2]) #,, facecolor='red', alpha=0.5)\n ax[i].set_xlabel('E -Ef(eV)')\n ax[i].set_ylabel('DOS')\n ax[i].set_title('DOS'+inputfile)\n ax[i].legend() #上图例,plt里面的label\nelse:\n fig, ax = plt.subplots(1,1,sharex=True,sharey=False,figsize=(10,6))\n inputfile=dosfile[0]\n dos=getdos(inputfile)\n ax.plot(dos[:,0],dos[:,1],label=\"UP\")\n ax.plot(dos[:,0],-1.0*dos[:,2],label=\"DOWN\")\n ax.fill_between(dos[:,0], 0, dos[:,1] ) #, facecolor='yellow', alpha=0.5)\n ax.fill_between(dos[:,0], 0, -dos[:,2]) #,, facecolor='red', alpha=0.5)\n\n ax.set_xlabel('E -Ef(eV)')\n ax.set_ylabel('DOS')\n ax.set_title(inputfile+\".DOS\")\n ax.legend() #上图例,plt里面的label\n\n\nfigfile=inputfile+\".png\"\nplt.savefig(figfile,dpi=80)\n","sub_path":"DOC/material-for-ljubljana-qe-summer-school-master/Day-2/example4.functionals/ex1.DFT+U/dos_nspin.py","file_name":"dos_nspin.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"156514771","text":"\"\"\" rl<1 rh>1 DX is cutoff \"\"\"\nimport cv2\nimport numpy as np\n\n\ndef homo(img, rl, rh, DX):\n img = np.float32(img)\n rows, cols, dim = img.shape\n imgYCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n y, cr, cb = cv2.split(imgYCrCb)\n y_log = np.log(y + 1)\n y_fft = np.fft.fft2(y_log)\n G = np.ones((rows, cols))\n for i in range(rows):\n for j in range(cols):\n G[i][j] = ((rh - rl) * (-np.exp(-((i - rows / 2) ** 2 + (j - cols / 2) ** 2) / (DX ** 2)))) + rl\n\n result_filter = G * y_fft\n\n result_interm = np.exp(np.fft.ifft2(result_filter))\n\n result = np.real(result_interm)\n maxx = np.max(result)\n minn = np.min(result)\n\n pix_range = maxx - minn\n new_img = 255*(result - minn) / pix_range\n\n return 
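`coerce_increasing_time` (imported from echopype's `qc` module) is what repairs the reversed timestamps flagged by `exist_reversed_time`; its body is not shown here. A rough standalone illustration of the idea — an assumption about the approach, not echopype's actual implementation:

import numpy as np

def coerce_increasing_sketch(times, nudge=np.timedelta64(1, 'ns')):
    # Wherever a timestamp steps backwards, lift it just past its
    # predecessor so the coordinate ends up strictly increasing.
    out = times.copy()
    for i in range(1, len(out)):
        if out[i] <= out[i - 1]:
            out[i] = out[i - 1] + nudge
    return out

# t = np.array(['2020-01-01T00:00:02', '2020-01-01T00:00:01'], dtype='datetime64[ns]')
# coerce_increasing_sketch(t)  # second timestamp nudged just past the first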
np.uint8(new_img)\n","sub_path":"img_preprocess/homofilter.py","file_name":"homofilter.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"112072084","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport random\r\n\r\ntrainDir = \"../../traindata.txt\"\r\ntestDir = \"../../testdata.txt\"\r\n\r\nXtrain = np.loadtxt(trainDir)[:,0:10]\r\nYtrain = np.loadtxt(trainDir)[:,10].reshape((Xtrain.shape[0],1))\r\nXtest = np.loadtxt(testDir)[:,0:10]\r\nYtest = np.loadtxt(testDir)[:,10].reshape(Xtest.shape[0],1)\r\n\r\nMIndex = np.where(Ytrain==1)[0]\r\nFIndex = np.where(Ytrain==0)[0]\r\n\r\nMIndext = np.where(Ytest==1)[0]\r\nFIndext = np.where(Ytest==0)[0]\r\n\r\nyy_test = np.zeros((Ytest.shape[0], 2))\r\nyy_test[MIndext, 1] = 1\r\nyy_test[FIndext, 0] = 1\r\n\r\nsubN = 10\r\nminErr = 1\r\nmean = 0\r\nfor n in range(10):\r\n MIndexSub = MIndex[np.array(random.sample(range(MIndex.shape[0]), subN))]\r\n FIndexSub = FIndex[np.array(random.sample(range(FIndex.shape[0]), subN))]\r\n# FIndexSub = [0,1,2,3,4,5,6,7,8,9]\r\n# MIndexSub = [469,470,471,472,473,474,475,476,477,478]\r\n indexSub = np.concatenate((MIndexSub, FIndexSub))\r\n\r\n XtrainSub = Xtrain[indexSub]\r\n YtrainSub = Ytrain[indexSub]\r\n \r\n yy_train = np.zeros((YtrainSub.shape[0], 2))\r\n yy_train[range(10), 1] = 1\r\n yy_train[range(10,20), 0] = 1\r\n\r\n N = XtrainSub.shape[0]\r\n D = XtrainSub.shape[1]\r\n H = 5\r\n \r\n x = tf.placeholder(tf.float32, shape=(None,D))\r\n y = tf.placeholder(tf.float32, shape=(None,2))\r\n \r\n init = tf.contrib.layers.xavier_initializer()\r\n h = tf.layers.dense(inputs=x, units=H, activation=tf.nn.softmax,\r\n kernel_initializer=init)\r\n y_pred = tf.layers.dense(inputs=h, units=2, activation=tf.nn.sigmoid,\r\n kernel_initializer=init)\r\n \r\n loss = tf.losses.mean_squared_error(y_pred, y)\r\n \r\n optimizer = tf.train.RMSPropOptimizer(1e-3)\r\n updates = optimizer.minimize(loss)\r\n \r\n correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\r\n \r\n saver = tf.train.Saver()\r\n \r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n values = { x: XtrainSub,\r\n y: yy_train,\r\n }\r\n for t in range(5000):\r\n loss_val, _ = sess.run([loss, updates],\r\n feed_dict=values)\r\n if np.mod(t, 500) == 100:\r\n print(loss_val)\r\n \r\n err = 1 - sess.run(accuracy, feed_dict={x: Xtest, y:yy_test})\r\n print('err:', err)\r\n \r\n mean = mean + err\r\n \r\n if minErr > err:\r\n minErr = err\r\n minMIndex = MIndexSub\r\n minFIndex = FIndexSub\r\n save_path = saver.save(sess, \"model/20_10/20_10.ckpt\")\r\n print(\"Model saved in file: %s\" % save_path)\r\n else:\r\n pass\r\n \r\nprint('Mean Error Ratio:', mean/10) # 0.280487813056\r\nprint('Min Error Ratio:', minErr) # 0.118902444839\r\nprint('Male Index:', minMIndex) # [854 828 799 915 924 699 505 700 832 677]\r\nprint('Female Index:', minFIndex) # [ 37 453 266 173 404 400 364 342 134 462]\r\n","sub_path":"Pattern-Recognition/hw1-Linear-Classifier/scripts/neural-network/nn-20-10.py","file_name":"nn-20-10.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"559835058","text":"from __future__ import unicode_literals\n\nimport logging\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import six\nfrom djblets.webapi.decorators import 
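For the `homo` filter defined in homofilter.py above, `rl < 1` suppresses the low-frequency illumination component, `rh > 1` boosts the high-frequency reflectance, and `DX` sets the Gaussian cutoff. A usage sketch (the file path and parameter values are illustrative):

import cv2

img = cv2.imread('input.jpg')            # any BGR image
out = homo(img, rl=0.5, rh=2.0, DX=30)   # rl < 1 < rh, per the docstring
cv2.imwrite('homomorphic_out.jpg', out)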
(webapi_login_required,\n webapi_request_fields,\n webapi_response_errors)\nfrom djblets.webapi.errors import (DOES_NOT_EXIST,\n NOT_LOGGED_IN,\n PERMISSION_DENIED)\nfrom reviewboard.webapi.resources import resources, WebAPIResource\nfrom reviewboard.webapi.resources.user import UserResource\n\nfrom mozreview.models import get_profile\n\n\nclass LDAPAssociationResource(WebAPIResource):\n \"\"\"Resource for updating or retrieving the ldap username for a user.\"\"\"\n\n name = 'ldap_association'\n uri_object_key = 'username'\n uri_object_key_regex = r'[A-Za-z0-9@\\._\\-\\'\\+]+'\n allowed_methods = ('GET', 'PUT')\n fields = {\n 'user': {\n 'type': UserResource,\n 'description': 'The Review Board user',\n },\n 'ldap_username': {\n 'type': six.text_type,\n 'description': 'LDAP username authorized for use',\n },\n }\n\n def has_access_permissions(self, request, *args, **kwargs):\n return (\n request.user.is_authenticated() and (\n request.user.has_perm('mozreview.modify_ldap_association') or\n request.user.username == kwargs.get(self.uri_object_key)))\n\n def has_list_access_permissions(self, request, *args, **kwargs):\n # The list returns no information so we don't care who views it.\n return True\n\n def get_href(self, obj, request, *args, **kwargs):\n \"\"\"Return the uri to this item.\n\n In order to have Review Board's get_links machinary work properly\n we must pass a truthy `obj` into it. `obj` will only be used inside\n of get_href to find the url to this item, but since we're not using\n an actual model we just do what RB does when you don't have an `obj`,\n return the current url.\n \"\"\"\n return request.build_absolute_uri()\n\n def create_item_payload(self, request, user, profile, *args, **kwargs):\n \"\"\"Create an item payload for a given user and profile.\"\"\"\n return {\n 'links': self.get_links(self.item_child_resources, request=request,\n obj=True, *args, **kwargs),\n self.item_result_key: {\n # TODO: Once MozReview is using a djblets release containing\n # commit c33bd0d4a3a1, we should replace this dictionary\n # creation with: `self.serialize_link(user, *args, **kwargs)`.\n 'user': {\n 'method': 'GET',\n 'href': self.get_serializer_for_object(user).get_href(\n user, request, *args, **kwargs),\n 'title': user.username,\n },\n 'ldap_username': profile.ldap_username,\n },\n }\n\n @webapi_login_required\n @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)\n def get(self, request, *args, **kwargs):\n \"\"\"Return the ldap association for a particular user.\"\"\"\n if not self.has_access_permissions(request, *args, **kwargs):\n return PERMISSION_DENIED\n\n try:\n # Since this resources uri_object_key matches that of the\n # UserResource we should properly query for the requested\n # username.\n user = resources.user.get_object(request, *args, **kwargs)\n except ObjectDoesNotExist:\n # This shouldn't be leaking any information about the existence\n # of users because if they are querying for a user that is not\n # themselves a PERMISSION_DENIED has already been returned.\n #\n # This case should really only happen when a user with the\n # 'mozreview.modify_ldap.association' is querying for a user\n # and they are allowed to know all user existence.\n return DOES_NOT_EXIST\n\n return 200, self.create_item_payload(request, user, get_profile(user))\n\n @webapi_login_required\n @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)\n def get_list(self, request, *args, **kwargs):\n \"\"\"Handle retrieving the list resource.\n\n Never return 
any useful information as getting the RB pagination\n to work with a non model resource is tough and not worth the effort\n at the moment. Even though we don't give useful infromation, allowing\n GET on the list resource means the RBTools API can retrieve it and\n call `list.get_item()`.\n \"\"\"\n if not self.has_list_access_permissions(request, *args, **kwargs):\n return PERMISSION_DENIED\n\n return 200, {\n 'links': self.get_links(self.list_child_resources, request=request,\n *args, **kwargs),\n 'total_results': 0,\n self.list_result_key: [],\n }\n\n @webapi_login_required\n @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)\n @webapi_request_fields(\n required={\n 'ldap_username': {\n 'type': six.text_type,\n 'description': 'LDAP username to associate with this user',\n },\n },\n )\n def update(self, request, ldap_username, *args, **kwargs):\n \"\"\"Associate an ldap username with a user.\n\n The only users authorized to perform this operation are those with\n the `mozreview.modify_ldap_association` permission. Users are *not*\n allowed to update their own ldap_username association as it\n represents that the Review Board user has been proven to own the ldap\n account.\n \"\"\"\n if not request.user.has_perm('mozreview.modify_ldap_association'):\n return PERMISSION_DENIED\n\n try:\n user = resources.user.get_object(request, *args, **kwargs)\n except ObjectDoesNotExist:\n return DOES_NOT_EXIST\n\n mozreview_profile = get_profile(user)\n mozreview_profile.ldap_username = ldap_username\n mozreview_profile.save()\n\n logging.info('associating user: %s with ldap_username: %s' % (user,\n ldap_username))\n\n return 200, self.create_item_payload(request, user, mozreview_profile)\n\n\nldap_association_resource = LDAPAssociationResource()\n","sub_path":"pylib/mozreview/mozreview/ldap/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"293296709","text":"import numpy as np\n\nfrom se.index import Index\n\n# inutilizada\ndef score_document(query, doc):\n score = 0\n for word in doc:\n if word in query:\n score += 1\n return score\n\n\ndef score_document_tf_idf(query, doc_number, doc, index: Index):\n N = len(doc)\n tf_idf = 0\n for word in query:\n wordCnt = index.wordCount(word, str(doc_number))\n if wordCnt != 0:\n tf_idf += np.log2(1 + wordCnt) * np.log2(N / len(index.lookup(word)))\n return tf_idf\n\n\ndef rank_documents(query, docs, index_query, index: Index):\n ranked_index = []\n for doc_number in index_query:\n doc_number = int(doc_number)\n doc = docs[doc_number]\n score = score_document_tf_idf(query, doc_number, doc, index)\n ranked_index.append((score, doc_number))\n ranked_index = sorted(ranked_index, key=lambda x: -x[0])\n return [item[1] for item in ranked_index]\n","sub_path":"se/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"606413727","text":"from PyQt5 import QtWidgets,uic\nimport sys\nfrom threading import Thread\nimport speech_recognition as sr\nfrom voicecontrol import *\nfrom textout import startread\nimport subprocess as subp\nfrom wit import Wit\nimport time\nfrom weatherinfo import weather_api\nfrom firebase import firebase\nfrom textblob import TextBlob\nfrom baby.notify import notify_msg\n# from reminder import set_reminder\nfrom dateutil import parser\nimport datetime\nfrom 
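One thing to watch in `score_document_tf_idf` above: it sets `N = len(doc)` (the token count of a single document), while the standard TF-IDF weight uses N as the number of documents in the corpus. A reference formulation using the same `Index.wordCount`/`Index.lookup` interface, for comparison:

import numpy as np

def tf_idf_reference(query, doc_number, docs, index):
    N = len(docs)  # corpus size, not the length of one document
    score = 0.0
    for word in query:
        tf = index.wordCount(word, str(doc_number))
        df = len(index.lookup(word))
        if tf and df:
            score += np.log2(1 + tf) * np.log2(N / df)
    return score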
notification_sound.run import play_sound\nfrom pyddg import ddgs\nr = sr.Recognizer()\nclient = Wit(\"ZNO6FG754JWMCICDJ3SZXBKB7KERIBT5\")\nfirebaseref = firebase.FirebaseApplication('https://niku-1531031167486.firebaseio.com/')\n\ndef call_of_call_frm_here(data_from_other_fun_to_call_from_here):\n pass\n # call_frm_hre_thread = Thread(target=call_from_here(data_from_other_fun_to_call_from_here))\n # call_frm_hre_thread.start()\n\n\n\ndef speakit(data):\n subp.call(\"echo '{}' | festival --tts\".format(data),shell= True)\n\n\ndef set_reminder(text):\n entity =None\n value = None\n entity2=None\n value2 = None\n\n try:\n entity= list(text['entities'])[0]\n value =text['entities'][entity][0]['value']\n entity2 = list(text['entities'])[1]\n value2 =text['entities'][entity2][0]['value']\n # print(entity,value)\n # print(entity2,value2)\n if entity == 'datetime':\n dt = parser.parse('{}'.format(value))\n date,timeit = dt.date(),dt.time()\n subject = value2\n timeit= timeit.strftime('%H:%M')\n \n # datetime = date+time\n elif entity2 == 'datetime':\n dt = parser.parse('{}'.format(value2))\n date,timeit = dt.date(),dt.time()\n timeit = timeit.strftime('%H:%M')\n subject = value\n \n # datetime = date+time\n speakit('setting reminder at {}'.format(timeit))\n return timeit,subject\n\n \n\n\n\n except Exception as e:\n pass\n # print(e)\n\ndef check_reminder(timeit=0,subject=None):\n time_here = timeit\n subject_here = subject\n # time_here = time_here.append(time)\n # subject_here = subject_here.append(subject)\n # print(time_here,subject_here)\n print('reminder set at {} about {}'.format(timeit,subject))\n\n if time_here != 0:\n d = datetime.datetime.now()\n d= d.time()\n time_now=d.strftime('%H:%M')\n # for i in len(time_here):\n if time_now == time_here:\n subject_to_send = subject\n # time_here.remove(i)\n # subject_here.remove(i)\n time_here = 0\n speakit(\"you have a reminder about {}\".format(subject_to_send))\n print(\"you have a reminder about {}\".format(subject_to_send))\n \n \n else:\n # time.sleep(60)\n pass\n \n \n\n\n\n\ndef from_app():\n while 1:\n text = firebaseref.get('/Text',None)\n if text != '':\n os.system('aplay notification_sound/from_other.wav')\n firebaseref.put('/','Text','')\n print('from app:',text)\n notify_msg(message='Data from App')\n # call_of_call_frm_here(text)\n # call_frm_hre_thread = Thread(target=call_from_here(text))\n # call_frm_hre_thread.start()\n # call_frm_hre_thread.join()\n # call_correct(text)\n call_from_here(text)\n time.sleep(.01)\n # else:\n # print(\"waiting for function\")\ndef from_web_voice():\n while 1:\n \n text = firebaseref.get('/from_web_speech',None)\n if text != '':\n os.system('aplay notification_sound/from_other.wav')\n firebaseref.put('/','from_web_speech','')\n print('from web voice:',text)\n notify_msg(message='Data from web voice')\n\n call_from_here(text)\n time.sleep(.01)\ndef from_web_text():\n while 1:\n text = firebaseref.get('/from_web_app',None)\n if text != '':\n os.system('aplay notification_sound/from_other.wav')\n firebaseref.put('/','from_web_app','')\n print('from web app:',text)\n notify_msg(message='Data from web text')\n\n call_from_here(text)\n time.sleep(.01)\n\n\n\n\ndef show_print():\n data = \"\"\n\n while 1:\n\n with sr.Microphone() as source:\n # play_sound()\n os.system('mpg123 notification_sound/bubbling-up.mp3')\n print(\"Speak:\")\n r.adjust_for_ambient_noise(source)\n audio = r.listen(source,)\n # print('voice done')\n # recoginze_call(audio)\n try:\n 
print('######...........processsssssinggggggggg...............######')\n # text = r.recognize_wit(audio,key='ZNO6FG754JWMCICDJ3SZXBKB7KERIBT5')\n text= r.recognize_google(audio)\n text= text.lower()\n # print(text)\n if text != '':\n # call_correct(text)\n call_from_here(text)\n # call_frm_hre_thread = Thread(target=call_from_here(text))\n # call_frm_hre_thread.start()\n # call_frm_hre_thread.join()\n print(text)\n # call_of_call_frm_here(text)\n # niku.box.addItem(text)\n \n\n \n except sr.UnknownValueError:\n print(\"Could not understand audio \")\n # notify_msg(message=\"Could not understand audio \")\n # niku.box.addItem(\"Could not understand audio\")\n\n # text = nikubarut('press enter to continue (or) type your query:')\n # if text == '':\n # continue\n # else:\n # call_correct(text)\n except sr.RequestError as e:\n print(\"Could not request results; {0}\".format(e))\n # niku.box.addItem(\"some error it will noted in below \\n {}\".format(e))\n # text= nikubarut('enter query to work:')\n # call_correct(text)\n time.sleep(0.02)\n\n\n\n\n\n\ndef call_from_here(text):\n\n resp = client.message(text)\n # print(resp)\n # print('entity value is {}'.format(list(resp['entities'])[0]))\n entity =None\n value = None\n timeit =0\n subject = ''\n try:\n entity= list(resp['entities'])[0]\n value =resp['entities'][entity][0]['value']\n # print(entity,value)\n # time.sleep(.10)\n if entity != None and value != None:\n if entity == 'niku':\n # print('in niku')\n if value == 'read':\n print('reading')\n data = startread()\n speakit(data)\n if entity == 'translate':\n # print(entity,value)\n \n if value == 'tamil':\n data = startread()\n if data == '':\n os.system('aplay notification_sound/error.wav')\n speakit('sorry,no data selected')\n print('/n/n sorry,no data selected')\n else:\n speakit('trying to translate selected text in tamil')\n translate = TextBlob(data)\n try:\n translate_lang = translate.detect_language()\n # print('deteced language is ',translate_lang)\n if translate_lang == 'ta':\n speakit('the selected language is already in tamil')\n else:\n speakit('translating data from {}'.format(translate_lang))\n translated_data = translate.translate(from_lang=translate_lang,to='ta')\n translated_data = str(translated_data)\n # print(translated_data)\n with open('translated.txt','w') as result_file:\n result_file.write(translated_data)\n os.system('aplay notification_sound/translating.wav')\n speakit('the data was translated sucessfully')\n print('/n /n the data was translated sucessfully and saved in translated.txt /n/n')\n # niku.trans_text.setText(translated_data)\n except Exception as e:\n os.system('aplay notification_sound/error.wav')\n speakit('sorry some error happend while translating')\n print ('sorry some error happend while translating the error is',e)\n \n\n elif entity == 'datetime' or entity =='subject':\n timeit,subject = set_reminder(resp)\n \n\n \n elif entity == 'open':\n open_software(value)\n elif entity == 'show':\n window_movement(value)\n\n elif entity == 'weather':\n data = weather_api(value)\n speakit('the weather in {} is {}'.format(value,data))\n print('the weather in {} is {}'.format(value,data))\n elif entity == 'search_niku':\n ddgs(value)\n\n # \n # check_reminder(timeit,subject) \n\n # else:\n # print(text)\n # call_correct(text)\n \n except IndexError:\n # print(resp)\n # print(text)\n text= text.lower()\n call_correct(text)\n # print(e)\n\n\n\n\n# def from_desktop():\n# if niku.nikubar.text() == '':\n# pass\n# else:\n# text = niku.nikubar.text()\n# 
niku.box.addItem(niku.nikubar.text())\n# niku.box.scrollToBottom()\n# # print(niku.nikubar.text())\n# niku.nikubar.setText(\"\")\n \n# # call_frm_hre_thread = Thread(target=call_from_here(text))\n# # call_frm_hre_thread.start()\n# # call_frm_hre_thread.join()\n# call_of_call_frm_here(text)\n\n\n# # call_from_here(text)\n \n\n\n\n\n\n\n# def thr():\n# from_desktop_thread = Thread(target=from_desktop)\n# from_desktop_thread.start()\n# from_desktop_thread.join()\n\n\n\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication([])\n # niku = uic.loadUi('niku.ui') gui enable\n speakit('hello sanjay welcome back')\n \n speech_recon_thread = Thread(target=show_print)\n speech_recon_thread.start()\n\n # recoginze_call_thread = Thread(target=recoginze_call)\n # recoginze_call_thread.start()\n \n # call_of_call_frm_here()\n call_frm_hre_thread = Thread(target=call_from_here)\n call_frm_hre_thread.start()\n\n from_app_thread = Thread(target=from_app)\n from_app_thread.start()\n \n check_reminder_thread = Thread(target=check_reminder)\n check_reminder_thread.start()\n\n from_web_voice_thread = Thread(target=from_web_voice)\n from_web_voice_thread.start()\n \n from_web_app_thread = Thread(target=from_web_text)\n from_web_app_thread.start()\n\n from_app_thread.join()\n call_frm_hre_thread.join()\n # recoginze_call_thread.join()\n speech_recon_thread.join()\n check_reminder_thread.join()\n from_web_app_thread.join()\n from_web_voice_thread.join()\n\n # niku.nikubar.returnPressed.connect(thr)\n\n\n\n \n # niku.show()\n app.exec()\n sys.exit()\n","sub_path":"niku_1.0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"325603218","text":"import random\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\ndef seed_everything(seed=1234):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n \ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\ndef rand_bbox(size, lam):\n W = size[2]\n H = size[3]\n cut_rat = np.sqrt(1. 
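`mixup_data` above returns the blended batch together with both label sets and the mixing coefficient; the matching loss interpolates the criterion with the same `lam`. The canonical companion helper (the commented call shows intended use):

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    # Mixup loss: blend the criterion over both target sets.
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)

# mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0)
# loss = mixup_criterion(torch.nn.CrossEntropyLoss(), model(mixed_x), y_a, y_b, lam)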
- lam)\n cut_w = np.int(W * cut_rat)\n cut_h = np.int(H * cut_rat)\n\n # uniform\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n return bbx1, bby1, bbx2, bby2\n\ndef cutmix_data(x,y,beta = 1.0, use_cuda=True):\n # generate mixed sample\n lam = np.random.beta(beta, beta)\n rand_index = torch.randperm(x.size()[0]).cuda()\n y_a = y\n y_b = y[rand_index]\n bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)\n x[:, :, bbx1:bbx2, bby1:bby2] = x[rand_index, :, bbx1:bbx2, bby1:bby2]\n # adjust lambda to exactly match pixel ratio\n lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))\n return x, y_a, y_b, lam\n\ndef cross_entropy(weights = None):\n def _cross_entropy(input, target, size_average=True, weights = weights):\n \"\"\" Cross entropy that accepts soft targets\n Args:\n pred: predictions for neural network\n targets: targets, can be soft\n size_average: if false, sum is returned instead of mean\n Examples::\n input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])\n input = torch.autograd.Variable(out, requires_grad=True)\n target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])\n target = torch.autograd.Variable(y1)\n loss = cross_entropy(input, target)\n loss.backward()\n \"\"\"\n if weights is None:\n weights = torch.Tensor([1]*len(target.shape[1]))\n weights = torch.Tensor(weights)\n weights = weights / torch.sum(weights,dim = 0, keepdim=True)\n weights = weights.cuda()\n logsoftmax = nn.LogSoftmax()\n if size_average:\n return torch.mean(torch.sum(-weights*target * logsoftmax(input), dim=1))\n else:\n return torch.sum(torch.sum(-weights*target * logsoftmax(input), dim=1))\n return _cross_entropy\n\ndef to_onehot(label,num_classes=2):\n return np.eye(num_classes)[label]\n\ndef label_smoothing(onehot,eps = 0.01):\n assert onehot.ndim == 1\n d = len(onehot)\n return (1-eps)*onehot + (eps/(d-1))*(1-onehot)\n\ndef eval_metric(preds,trues,n_classes = 3):\n acc = 0\n Precisions = []\n Recalls = []\n \n for i in range(n_classes):\n tp,fp,tn,fn = 0,0,0,0\n for p,t in zip(preds,trues):\n if p == t:\n if t==i:\n tp +=1\n else:\n tn +=1\n acc +=1\n else:\n if t==i:\n fn +=1\n else:\n fp +=1\n Recalls.append(tp/(tp+fn))\n Precisions.append(tp/(tp+fp))\n \n acc = acc / (len(preds)*n_classes)\n precision = np.mean(Precisions)\n recall = np.mean(Recalls)\n f1 = 2*(precision*recall)/(precision+recall)\n return acc,precision,recall,f1","sub_path":"src/.ipynb_checkpoints/utils-checkpoint.py","file_name":"utils-checkpoint.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"614346433","text":"#!/usr/bin/env python3\n\"\"\"\nFilters segments from a corpus if they are shorter or longer than a given threshold.\nLength is measured as number of elements after splitting at whitespace.\n\nTakes four arguments:\n* path to the corpus file to be filtered\n* path to the output file\n* minimum number of elements per segment in the output file\n* maximum number of elements per segment in the output file\n\"\"\"\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"f_in\", type=str, help=\"Path to the corpus file\")\nparser.add_argument(\"f_out\", type=str, help=\"Path to the output file\")\nparser.add_argument(\"--min\", type=int, default=5, help=\"Minimum number of elements per 
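As a quick numeric check of `label_smoothing` above: the true class keeps 1 - eps and the remaining eps is spread evenly over the other d - 1 classes, so the vector still sums to 1:

import numpy as np

onehot = to_onehot(1, num_classes=3)      # array([0., 1., 0.])
print(label_smoothing(onehot, eps=0.01))  # [0.005 0.99  0.005]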
sentence\")\nparser.add_argument(\"--max\", type=int, default=30, help=\"Maximum number of elements per sentence\")\n\nargs = parser.parse_args()\nmin_threshold = args.min\nmax_threshold = args.max\n\nwith open(args.f_in) as f_in, open(args.f_out, 'w', encoding='UTF-8') as f_out:\n for segment in f_in:\n tokens = len(segment.split())\n if tokens < min_threshold or tokens > max_threshold:\n continue\n f_out.write(segment)\n","sub_path":"scripts/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"32552315","text":"#!usr/bin/python\n\n# fit.py\n#\n# fit. is a program (barely) that creates bf code.\n#\n# It was coded by Tim Leach (that's me, obviously), aka Billywhizz635.\n# (There's a long story behing the username.)\n#\n# Out of interest, \"fit.\" stands for \"f*** it.\", because that's\n# how I felt when trying to make particularly complicated\n# bf programs, hence the existence of this program.\n\nimport fitCodeGen\n\nkeys = 0 # How many key values are valid inputs.\ndisLen = 0 # How long the display section of the data tape is.\nvariables = [] # List containing all user-defined variables.\nchessboard = \" 1 2 3 4 5 6 7 8\\n\" + \\\n \" a #r #k #b #K #Q #b #k #r\\n\" + \\\n \" b #p #p #p #p #p #p #p #p\\n\" + \\\n \" c \\n\" + \\\n \" d \\n\" + \\\n \" e \\n\" + \\\n \" f \\n\" + \\\n \" g Xp Xp Xp Xp Xp Xp Xp Xp\\n\" + \\\n \" h Xr Xk Xb XK XQ Xb Xk Xr\\n\" + \\\n \" \"\n\ntestGame = fitCodeGen.FitGame()\ntestGame.genCode( \"test\" )","sub_path":"fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"520847915","text":"# Licensed to the .NET Foundation under one or more agreements.\n# The .NET Foundation licenses this file to you under the MIT license.\n# See the LICENSE file in the project root for more information.\n\nfrom enum import Enum\nfrom typing import Callable, Iterable, Mapping, Sequence\n\nfrom result import Err, Ok, Result\n\nfrom ..commonlib.collection_util import combine_mappings\nfrom ..commonlib.result_utils import flat_map_ok, map_ok\nfrom ..commonlib.type_utils import enum_value\nfrom ..commonlib.util import opt_max, opt_median\n\nfrom .clr import Clr\nfrom .clr_types import AbstractJoinInfoForHeap, AbstractJoinInfoForProcess, cs_result_to_result\nfrom .enums import GcJoinPhase, GcJoinStage, ServerGCState\nfrom .types import FailableFloat, PerHeapGetter, ProcessedHeap, ProcessInfo, SingleHeapMetric\n\n\n# From MoreAnalysis.cs\nclass Strictness(Enum):\n loose = 0\n strict = 1\n\n\ndef get_join_info_for_all_gcs(\n clr: Clr, proc: ProcessInfo\n) -> Result[str, AbstractJoinInfoForProcess]:\n if proc.uses_server_gc:\n return cs_result_to_result(\n clr.JoinAnalysis.AnalyzeAllGcs(\n clr.to_array(clr.TraceGC, proc.gcs), enum_value(Strictness.strict)\n )\n )\n else:\n return Err(\"Can't get join info for non-server GC\")\n\n\ndef _get_all_state_durations_for_heap(\n hp: ProcessedHeap, state: ServerGCState\n) -> Result[str, Iterable[float]]:\n return map_ok(\n hp.join_info,\n lambda h: (stage.MSecPerState[enum_value(state)] for stage in h.ForegroundStages),\n )\n\n\ndef _get_all_join_durations_for_heap(hp: ProcessedHeap) -> Result[str, Iterable[float]]:\n return _get_all_state_durations_for_heap(hp, ServerGCState.waiting_in_join)\n\n\ndef _median_or_empty(i: Iterable[float]) -> FailableFloat:\n m = opt_median(i)\n return Err(\"empty 
join durations\") if m is None else Ok(m)\n\n\ndef _max_or_empty(i: Iterable[float]) -> FailableFloat:\n m = opt_max(i)\n return Err(\"empty join durations\") if m is None else Ok(m)\n\n\ndef _sum_or_empty(i: Iterable[float]) -> FailableFloat:\n s = sum(i)\n return Err(\"empty join durations\") if s == 0 else Ok(s)\n\n\ndef _get_median_individual_join_msec(hp: ProcessedHeap) -> FailableFloat:\n return flat_map_ok(_get_all_join_durations_for_heap(hp), _median_or_empty)\n\n\ndef _get_max_individual_join_msec(hp: ProcessedHeap) -> FailableFloat:\n return flat_map_ok(_get_all_join_durations_for_heap(hp), _max_or_empty)\n\n\ndef _get_total_join_msec(hp: ProcessedHeap) -> FailableFloat:\n return flat_map_ok(_get_all_join_durations_for_heap(hp), _sum_or_empty)\n\n\nPER_HEAP_JOIN_GETTERS: Mapping[SingleHeapMetric, PerHeapGetter] = {\n SingleHeapMetric(\"MedianIndividualJoinMSec\"): _get_median_individual_join_msec,\n SingleHeapMetric(\"MaxIndividualJoinMSec\"): _get_max_individual_join_msec,\n SingleHeapMetric(\"TotalJoinMSec\"): _get_total_join_msec,\n}\n\n\ndef _per_heap_getter(cb: Callable[[AbstractJoinInfoForHeap], float]) -> PerHeapGetter:\n return lambda hp: map_ok(hp.join_info, cb)\n\n\ndef _getter_for_total_time_in_state(state: ServerGCState) -> PerHeapGetter:\n return _per_heap_getter(lambda i: i.TotalMSecInState(enum_value(state)))\n\n\ndef _getter_for_total_time_in_stage(stage: GcJoinStage) -> PerHeapGetter:\n return _per_heap_getter(lambda i: i.TotalMSecInStage(enum_value(stage)))\n\n\ndef _getter_for_total_time_in_phase(phase: GcJoinPhase) -> PerHeapGetter:\n return _per_heap_getter(lambda i: i.TotalMSecInPhase(enum_value(phase)))\n\n\nSTATE_TIMES_GETTERS: Mapping[SingleHeapMetric, PerHeapGetter] = {\n SingleHeapMetric(state.name): _getter_for_total_time_in_state(state) for state in ServerGCState\n}\n\nSTAGE_TIMES_GETTERS: Mapping[SingleHeapMetric, PerHeapGetter] = {\n SingleHeapMetric(stage.name): _getter_for_total_time_in_stage(stage)\n for stage in GcJoinStage\n if stage != GcJoinStage.restart\n}\n\nPHASE_TIMES_GETTERS: Mapping[SingleHeapMetric, PerHeapGetter] = {\n SingleHeapMetric(phase.name): _getter_for_total_time_in_phase(phase) for phase in GcJoinPhase\n}\n\nALL_GETTERS_FROM_JOIN_ANALYSIS: Mapping[SingleHeapMetric, PerHeapGetter] = combine_mappings(\n PER_HEAP_JOIN_GETTERS, STATE_TIMES_GETTERS, STAGE_TIMES_GETTERS, PHASE_TIMES_GETTERS\n)\n\n_PER_HEAP_JOIN_TIME_METRICS: Sequence[str] = [m.name for m in PER_HEAP_JOIN_GETTERS]\n_PER_HEAP_JOIN_STATE_METRICS: Sequence[str] = [m.name for m in STATE_TIMES_GETTERS]\n_PER_HEAP_JOIN_STAGE_METRICS: Sequence[str] = [m.name for m in STAGE_TIMES_GETTERS]\n_PER_HEAP_JOIN_PHASE_METRICS: Sequence[str] = [m.name for m in PHASE_TIMES_GETTERS]\n\nJOIN_PER_HEAP_METRICS_ALIASES: Mapping[str, Sequence[str]] = {\n \"joinTimes\": _PER_HEAP_JOIN_TIME_METRICS,\n \"states\": _PER_HEAP_JOIN_STATE_METRICS,\n \"stages\": _PER_HEAP_JOIN_STAGE_METRICS,\n \"phases\": _PER_HEAP_JOIN_PHASE_METRICS,\n}\n\n\ndef means(names: Sequence[str]) -> Sequence[str]:\n return [f\"{name}_Mean\" for name in names]\n\n\n_PER_GC_JOIN_STATE_METRICS = means(_PER_HEAP_JOIN_STATE_METRICS)\n_PER_GC_JOIN_STAGE_METRICS = means(_PER_HEAP_JOIN_STAGE_METRICS)\n_PER_GC_JOIN_PHASE_METRICS = means(_PER_HEAP_JOIN_PHASE_METRICS)\n\nJOIN_PER_GC_METRICS_ALIASES: Mapping[str, Sequence[str]] = {\n \"joinTimes\": (\"TotalJoinMSec_Max\", \"TotalJoinMSec_Mean\"),\n \"states\": _PER_GC_JOIN_STATE_METRICS,\n \"stages\": _PER_GC_JOIN_STAGE_METRICS,\n \"phases\": 
_PER_GC_JOIN_PHASE_METRICS,\n}\n\nJOIN_RUN_METRICS_ALIASES: Mapping[str, Sequence[str]] = {\n \"joinTimes\": (\"TotalJoinMSec_Max_Mean\", \"TotalJoinMSec_Mean_Mean\"),\n \"states\": means(_PER_GC_JOIN_STATE_METRICS),\n \"stages\": means(_PER_GC_JOIN_STAGE_METRICS),\n \"phases\": means(_PER_GC_JOIN_PHASE_METRICS),\n}\n","sub_path":"src/benchmarks/gc/src/analysis/join_analysis.py","file_name":"join_analysis.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"379125914","text":"# Connect to the database \nimport sqlite3\nimport numpy as np\nfrom sklearn import preprocessing\n\n\nconnection = sqlite3.connect('/Users/Dylan/Documents/Project/AudioFeatures/database.sqlite') #Connect to the database\n\n\nX = [] #Training feature set\nY = [] #Training class\nX_test = [] #Testing feature set\nY_true = [] #Testing class\nfor i, tables in enumerate(['Dinner_audio_features','Party_audio_features','Sleep_audio_features','Workout_audio_features']):\n cursor = connection.execute(\"SELECT * from \" + tables) #select the data from the database\n result = list(cursor)\n for row in result[:21]: #select the first 21 for Training the learning algorithm\n row = list(row[2:]) #remove the non-numerical data selected from the database\n X.append(row) #Add the feature set to the training data \n Y.append(i+1) #Add the class of each of the feature set to the class list\n for row in result[21:]:#select the remaining feature set for testing the learning model\n row = list(row[2:])#remove the non-numerical data selected from the database\n X_test.append(row)#Add the feature set to the testing data \n Y_true.append(i+1)#Add the class of each of the feature set to the true class list\n\nscaler = preprocessing.StandardScaler().fit(X)\nX = scaler.transform(X)\nX_test = scaler.transform(X_test)\nY = np.array(Y)\n# NOTE: do not close the connection here -- the Spotify-feature queries further down reuse it\n\n\n#Data visualization \n\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\npca = PCA(n_components=2) #Create a PCA object that reduces the dimensions to 2\nX_r = pca.fit(X).transform(X) #fit the data\nlda = LinearDiscriminantAnalysis(n_components=2)#Create a LDA object that reduces the dimensions to 2\nX_r2 = lda.fit(X, Y).transform(X) #fit the data\n\ncolors = ['navy', 'turquoise', 'darkorange', 'red']\nlw = 2\ntarget_names = ['Dinner','Party','Sleep','Workout']\nfor color, i, target_name in zip(colors, [1, 2, 3, 4], target_names):\n plt.scatter(X_r[Y == i, 0], X_r[Y == i, 1], color=color, alpha=.8, lw=lw,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('PCA of Audio feature dataset')\nplt.show()\n\nplt.figure()\nfor color, i, target_name in zip(colors, [1, 2, 3, 4], target_names):\n plt.scatter(X_r2[Y == i, 0], X_r2[Y == i, 1], alpha=.8, color=color,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('LDA of Audio Feature dataset')\nplt.show()\n\n\n##\n#Machine Learning\n\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\n\nsvmclf = SVC(C=1.55, decision_function_shape='ovr')\nsvmclf.fit(X, Y)\n\nMLclf = MLPClassifier(solver='lbfgs', alpha=1e-5,\n hidden_layer_sizes=(7,49), random_state=1, warm_start=True)\nfor i in range(5):\n MLclf.fit(X, Y)\n\nKnnclf = 
KNeighborsClassifier(n_neighbors=3)\nKnnclf.fit(X, Y)\n\neclf = VotingClassifier(estimators=[('svm', svmclf), ('nn', MLclf), ('knn', Knnclf)], voting='hard')\neclf.fit(X, Y)\n\nsvm_Predict = svmclf.predict(X_test)\nMLP_Predict = MLclf.predict(X_test)\nKnn_Predict = Knnclf.predict(X_test)\nvoting_Predict = eclf.predict(X_test)\n\nprint(\"kNN accuracy = {}% \".format(accuracy_score(Y_true, Knn_Predict) * 100))\nprint(\"SVM accuracy = {}% \".format(accuracy_score(Y_true, svm_Predict) * 100))\nprint(\"Neural Network accuracy = {}% \".format(accuracy_score(Y_true, MLP_Predict) * 100))\nprint(\"Votingaccuracy = {}%\".format(accuracy_score(Y_true, voting_Predict) * 100))\n\n\n#\n#Confusion Matrix of Various Classifiers\n\nfrom sklearn.metrics import confusion_matrix\nimport itertools\n\ndef plot_confusion_matrix(cm, classes=[\"dinner\", \"party\", \"sleep\", \"workout\"], title=\"\",\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title('Confusion matrix ' +title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\nsvm_cm = confusion_matrix(Y_true, svm_Predict)\nMLP_cm = confusion_matrix(Y_true, MLP_Predict)\nKnn_cm = confusion_matrix(Y_true, Knn_Predict)\nvoting_cm = confusion_matrix(Y_true, voting_Predict)\n\nplt.figure()\nplot_confusion_matrix(svm_cm, title = 'SVM')\nplt.figure()\nplot_confusion_matrix(MLP_cm, title = 'MLP')\nplt.figure()\nplot_confusion_matrix(Knn_cm, title = 'Knn')\nplt.figure()\nplot_confusion_matrix(voting_cm, title = 'Voting')\n\n\n#\n#Data visualization and spotify audio features data set 1\nX = [] #Training feature set\nY = [] #Training class\nX_test = [] #Testing feature set\nY_true = [] #Testing class\nfor i, tables in enumerate(['Dinner_spotify_features','Party_spotify_features','Sleep_spotify_features','Workout_spotify_features']):\n cursor = connection.execute(\"SELECT \\\"acousticness\\\",\\\"danceability\\\",\\\"energy\\\",\\\"instrumentalness\\\",\\\"speechiness\\\",\\\"tempo\\\",\\\"valence\\\",\\\"loudness\\\" from \"+tables) #select the data from the database\n result = list(cursor)\n for row in result[:150]: #select the first 21 for Training the learning algorithm\n row = list(row[4:]) #remove the non-numerical data selected from the database\n X.append(row) #Add the feature set to the training data \n Y.append(i+1) #Add the class of each of the feature set to the class list\n for row in result[151:230]:#select the remaning feature set for testing the learning model\n row = list(row[4:])#remove the non-numerical data selected from the database\n X_test.append(row)#Add the feature set to the testing data \n Y_true.append(i+1)#Add the class of each of the feature set to the true class list\n\nscaler = preprocessing.StandardScaler().fit(X)\nX = scaler.transform(X)\nX_test = scaler.transform(X_test)\nY = np.array(Y)\nY_true = np.array(Y_true)\n\n#Data visualization and spotify audio features data set 2\npca = PCA(n_components=2) #Create a PCA object that reduces the dimentions to 2\nX_r = pca.fit(X).transform(X) #fit the data\nlda = LinearDiscriminantAnalysis(n_components=2)#Create a LDA object that reduces the dimentions to 2\nX_r2 = lda.fit(X, 
Y).transform(X) #fit the data\n\ncolors = ['navy', 'turquoise', 'darkorange', 'red']\nlw = 2\ntarget_names = ['Dinner','Party','Sleep','Workout']\nfor color, i, target_name in zip(colors, [1, 2, 3, 4], target_names):\n plt.scatter(X_r[Y == i, 0], X_r[Y == i, 1], color=color, alpha=.8, lw=lw,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('PCA of Spotify feature dataset')\n\nplt.figure()\nfor color, i, target_name in zip(colors, [1, 2, 3, 4], target_names):\n plt.scatter(X_r2[Y == i, 0], X_r2[Y == i, 1], alpha=.8, color=color,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('LDA of Spotify Feature dataset')\n\n# Data visualization and spotify features data set 3\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n\nsvmclf = SVC(C=2.015, gamma=0.005, decision_function_shape='ovo')\nsvmclf.fit(X, Y)\n\nMLclf = MLPClassifier(activation='tanh',solver='adam', alpha=1e-5,\n hidden_layer_sizes=(25,25,45,35,45, 25), random_state=1, warm_start=True)\nfor i in range(5):\n MLclf.fit(X, Y)\n\nrfclf = RandomForestClassifier(n_estimators=100, criterion=\"entropy\", max_features='auto', random_state=1)\nrfclf.fit(X, Y)\n\netclf = ExtraTreesClassifier(criterion='entropy', n_estimators=100, max_features='auto')\netclf.fit(X, Y)\n\neclf = VotingClassifier(estimators=[('svm', svmclf), ('rf', rfclf), ('et', etclf)], voting='hard')\neclf.fit(X, Y)\n\nsvm_Predict = svmclf.predict(X_test)\nMLP_Predict = MLclf.predict(X_test)\nrfYpred = rfclf.predict(X_test)\netYpred = etclf.predict(X_test)\nvoting_Predict = eclf.predict(X_test)\n\nprint(\"RFaccuracy = {}% \".format(accuracy_score(Y_true, rfYpred) * 100))\nprint(\"ETaccuracy = {}% \".format(accuracy_score(Y_true, etYpred) * 100))\nprint(\"SVM accuracy = {}% \".format(accuracy_score(Y_true, svm_Predict) * 100))\nprint(\"Neural Network accuracy = {}% \".format(accuracy_score(Y_true, MLP_Predict) * 100))\nprint(\"Votingaccuracy = {}%\".format(accuracy_score(Y_true, voting_Predict) * 100))","sub_path":"AudioFeature1.py","file_name":"AudioFeature1.py","file_ext":"py","file_size_in_byte":8631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"228797877","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\nA coroutine demo built on gevent.\nIt makes use of time that would otherwise be spent waiting: while one task sleeps, that time is freed up for other work.\nWithout any waiting time, execution simply follows the normal sequential flow of the functions.\n\"\"\"\nimport time\nimport gevent\nCOUNT = 100\nWAIT_TIME = 0.5\n\ndef test_1(count):\n for i in range(count):\n print(f\"*{i}*****test1**********\")\n # time.sleep(WAIT_TIME) # a plain sleep; it does not yield, so it has no effect as a coroutine wait\n gevent.sleep(WAIT_TIME) # the real coroutine wait: it yields to the gevent hub\n\ndef test_2(count):\n for i in range(count):\n print(f\"@{i}@@@@@test2@@@@@@@@@\")\n # time.sleep(WAIT_TIME) # a plain sleep; it does not yield, so it has no effect as a coroutine wait\n gevent.sleep(WAIT_TIME)\n\ndef test_3(count):\n for i in range(count):\n print(f\"#{i}######test3########\")\n # time.sleep(WAIT_TIME)\n gevent.sleep(WAIT_TIME)\n\n\n\nif __name__ == \"__main__\":\n g1 = gevent.spawn(test_1, COUNT)\n g2 = gevent.spawn(test_2, COUNT)\n g3 = gevent.spawn(test_3, COUNT)\n\n g1.join()\n g2.join()\n g3.join()\n\n\n","sub_path":"002 进程+线程+协程/004 迭代/gevent实现并发9.py","file_name":"gevent实现并发9.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"392140796","text":"import json\n\nimport pyhf\n\n\ndef get_parameter_names(model):\n labels = []\n for parname in model.config.par_order:\n for i_par in 
range(model.config.param_set(parname).n_parameters):\n labels.append(\n \"{}[bin_{}]\".format(parname, i_par)\n if model.config.param_set(parname).n_parameters > 1\n else parname\n )\n return labels\n\n\ndef print_results(bestfit, uncertainty, labels):\n max_label_length = max([len(label) for label in labels])\n for i, label in enumerate(labels):\n l_with_spacer = label + \" \" * (max_label_length - len(label))\n print(f\"{l_with_spacer}: {bestfit[i]:.6f} +/- {uncertainty[i]:.6f}\")\n\n\ndef fit(spec):\n workspace = pyhf.Workspace(spec)\n model = workspace.model()\n data = workspace.data(model)\n\n pyhf.set_backend(\"numpy\", pyhf.optimize.minuit_optimizer(verbose=True))\n result = pyhf.infer.mle.fit(data, model, return_uncertainties=True)\n\n bestfit = result[:, 0]\n uncertainty = result[:, 1]\n labels = get_parameter_names(model)\n\n print_results(bestfit, uncertainty, labels)\n return bestfit, uncertainty, labels\n\n\nif __name__ == \"__main__\":\n with open(\"higgs4l_pyhf_workspace.json\") as f:\n ws = json.load(f)\n\n fit(ws)\n","sub_path":"fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"50850125","text":"import os\nimport torch\nimport pickle\nimport argparse\n\n# Path\nimport os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nbasedir = os.path.dirname(parentdir)\nsys.path.append(parentdir)\nsys.path.append(basedir)\n\n# Target\nfrom target import get_target, get_target_id, add_target_args\n\n# MCMC\nfrom ess import ESS\n\n###########\n## Setup ##\n###########\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp', type=str, required=True)\nparser.add_argument('--mcmc', type=str, required=True)\nparser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')\neval_args = parser.parse_args()\n\ntorch.manual_seed(0)\nexp_path = os.path.join('log', eval_args.exp)\npath_args = os.path.join(exp_path, 'args.pkl')\nmcmc_path = os.path.join(exp_path, eval_args.mcmc)\npath_chain = os.path.join(mcmc_path, 'chain.pt')\n\n###############\n## Load args ##\n###############\n\nwith open(path_args, 'rb') as f:\n args = pickle.load(f)\n\n####################\n## Specify target ##\n####################\n\ntarget = get_target(args).to(eval_args.device)\ntarget_id = get_target_id(args)\n\n####################\n## Compute energy ##\n####################\n\ntheta = torch.load(path_chain).to(eval_args.device)\nnum_chains, num_samples, num_dims = theta.shape\nenergy = torch.zeros(num_chains, num_samples).to(eval_args.device)\nfor i in range(num_samples):\n energy[:,i] = - target.log_prob(theta[:,i])\n print('{}/{}'.format(i+1, num_samples), end='\\r')\nmin, med, max = torch.min(energy).item(), torch.median(energy).item(), torch.max(energy).item()\nmean, std = torch.mean(energy).item(), torch.std(energy).item()\n\n##################\n## Save samples ##\n##################\n\nprint('Saving...')\n\n# Save ESS\nwith open(os.path.join(mcmc_path, 'energy.txt'), 'w') as f:\n f.write('min: {}\\n'.format(min))\n f.write('med: {}\\n'.format(med))\n f.write('max: {}\\n'.format(max))\n f.write('mean: {}\\n'.format(mean))\n f.write('std: {}\\n'.format(std))\n","sub_path":"bayesian_regression/flow/eval_energy.py","file_name":"eval_energy.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"119126940","text":"import tensorflow as tf\nimport numpy as np\nfrom CF_NADE import CF_NADE\nfrom Data_user_per_sample import Data_user\n\nflags = tf.app.flags\n\n\"\"\"\ndefine the parameters\n\"\"\"\nflags.DEFINE_float('time_transform_parameter', 0, 'for the weights decay through time')\nflags.DEFINE_integer('batch_size', 512, 'batch_size for the users')\nflags.DEFINE_integer('movie_dim', 3706, 'how many movies in the dataset')\nflags.DEFINE_integer('num_classes', 5, 'score range')\nflags.DEFINE_float('learning_rate', 0.001, 'learning_rate for Adam')\nflags.DEFINE_integer('hidden_dim', 500, 'dimension of hidden states')\nflags.DEFINE_boolean('train', False, 'whether to train model')\nflags.DEFINE_integer('epochs', 10, 'epochs to train')\nflags.DEFINE_float('weight_decay', 1, 'weight decay for regularization')\nFLAGS = flags.FLAGS\n\ndef main(_):\n\tmyData = Data_user('../ml-1m/ratings.dat') # Data_user is the class imported above; the bare name Data raised NameError\n\tmyData.split_sets({'train': 0.95, 'test': 0.05})\n\n\trun_config = tf.ConfigProto()\n\trun_config.gpu_options.allow_growth=True\n\n\twith tf.Session(config=run_config) as sess:\n\t\tcf_nade = CF_NADE(sess, FLAGS)\n\n\t\tif (FLAGS.train == True):\n\t\t\tcf_nade.train(myData, FLAGS)\n\n\n\nif __name__ == '__main__':\n\ttf.app.run()","sub_path":"new_implement/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"432851595","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\n\nimport pygame\nfrom pygame.locals import *\nimport time\n\nclass Quadrilateral(object):\n \"\"\" Represents a quadrilateral that can draw itself to a pygame window \"\"\"\n def __init__(self,x1,y1,x2,y2,x3,y3,x4,y4):\n self.x1 = x1\n self.y1 = y1\n self.x2 = x2\n self.y2 = y2\n self.x3 = x3\n self.y3 = y3\n self.x4 = x4\n self.y4 = y4\n\n def draw(self,screen):\n pygame.draw.line(screen, pygame.Color(0,0,0), (self.x1, self.y1), (self.x2, self.y2))\n pygame.draw.line(screen, pygame.Color(0,0,0), (self.x2, self.y2), (self.x3, self.y3))\n pygame.draw.line(screen, pygame.Color(0,0,0), (self.x3, self.y3), (self.x4, self.y4))\n pygame.draw.line(screen, pygame.Color(0,0,0), (self.x4, self.y4), (self.x1, self.y1))\n\t \nif __name__ == '__main__':\n pygame.init()\n\n size = (640,480)\n screen = pygame.display.set_mode(size)\n quad = Quadrilateral(100,100,200,90,200,300,100,300)\n running = True\n\n while running:\n screen.fill(pygame.Color(255,255,255))\n quad.draw(screen)\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n time.sleep(.01)\n pygame.display.update()\n\n pygame.quit()","sub_path":"in_class_excersizes/amon.py","file_name":"amon.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"445287882","text":"from jira import JIRA, Issue\n\n\nclass JIRAMod(JIRA):\n def incompleted_issues(self, board_id, sprint_id):\n \"\"\"\n Returns the incompleted issues for the sprint.\n\n Modified from the original to use the response\n field 'issuesNotCompletedInCurrentSprint' instead of the\n field 'incompletedIssues'.\n \"\"\"\n r_url = 'rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s'\n r_json = self._get_json(r_url % (board_id, sprint_id),\n base=self.AGILE_BASE_URL)\n issues = [Issue(self._options, self._session, raw_issues_json)\n for raw_issues_json\n in\n r_json['contents']['issuesNotCompletedInCurrentSprint']]\n\n return 
issues\n","sub_path":"spride/jira_mod.py","file_name":"jira_mod.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"80984880","text":"\"\"\"robotmesh.com/studio/64a33791a3776a661acced69.\"\"\"\n\n\nfrom vex import (\n Brain, Motor,\n Ports,\n SECONDS,\n wait,\n)\n\n\nbrain = Brain()\n\nmotor = Motor(Ports.PORT1)\n\n\nmotor_type = motor.type()\n\n\nbrain.screen.print_line(1, motor_type)\n\n\nwait(3, SECONDS)\n","sub_path":"test/V5/RobotMesh/Motor/.Test-Motor-type.V5.RobotMesh.py","file_name":".Test-Motor-type.V5.RobotMesh.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"613305698","text":"from __future__ import print_function\nfrom flask import Flask, request\nfrom flask.ext.api.decorators import set_parsers\nfrom flask.ext.api.parsers import MultiPartParser\nfrom PIL import Image\nimport requests\nimport os\nimport sys\nimport json\n\n\n# Settings\n\nUPLOAD_HOST = os.environ['IMAGE_PROCESSOR_UPLOAD_HOST']\n\napp = Flask(__name__)\napp.config['DEFAULT_PARSERS'] = [\n 'flask.ext.api.parsers.JSONParser',\n 'flask.ext.api.parsers.URLEncodedParser',\n 'flask.ext.api.parsers.MultiPartParser'\n]\n\n\ndef send_data_to_server(image_path):\n '''\n Upload file to https://github.com/giovanism/emptybox/ like service.\n Return the saved object's key\n '''\n image_filename = os.path.basename(image_path)\n\n values = {'type': 'file'}\n\n multipart_form_data = {\n 'file': (image_filename, open(image_path, 'rb')),\n }\n print(\"Sending File to S3 Server in port 18010\", file=sys.stderr)\n response = requests.post('http://%s/upload' % UPLOAD_HOST,\n data=values,\n files=multipart_form_data)\n print('Result from s3 server ', response.status_code, file=sys.stderr)\n\n return json.loads(response.content)['filename']\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\n@set_parsers(MultiPartParser)\ndef hello():\n upload = request.files.getlist(\"image\")[0]\n\n print(\"File name: {}\".format(upload.filename), file=sys.stderr)\n filename = upload.filename\n\n # file support verification\n ext = os.path.splitext(filename)[1]\n if (ext == \".jpg\") or (ext == \".png\") or (ext == \".bmp\"):\n print(\"File accepted\", file=sys.stderr)\n else:\n return \"Error\"\n\n # save file\n preprocess_file = \"/tmp/tmp_image\" + ext\n print(\"File saved to to:\", preprocess_file, file=sys.stderr)\n upload.save(preprocess_file)\n\n img = Image.open(preprocess_file)\n pixels = img.load()\n\n for i in range(img.size[0]):\n for j in range(img.size[1]):\n px = pixels[i, j]\n pixels[i, j] = (255 ^ px[0], 255 ^ px[1], 255 ^ px[2])\n\n processed_file = \"/tmp/tmp_processed_image\" + ext\n\n img.save(processed_file)\n\n key = send_data_to_server(processed_file)\n\n return key\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(debug=True, host='0.0.0.0', port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"141418288","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport pickle\nimport os\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\ndef generate_tfrecord(train, labels, output_path, output_name):\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n writer = 
tf.python_io.TFRecordWriter(os.path.join(output_path, output_name))\n for ind, (file, label) in enumerate(zip(train, labels)):\n img_raw = file.tobytes()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),\n \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))\n }))\n writer.write(example.SerializeToString()) # Serialize To String\n if ind != 0 and ind % 1000 == 0:\n print(\"%d num imgs processed\" % ind)\n writer.close()\n\ndef createTFrecord(dir,trainOrTest):\n path = dir+\"/{}\".format(trainOrTest)\n data = unpickle(path)\n real_data = data[b'data']\n x_data = real_data.reshape(real_data.shape[0], 3, 32, 32)\n x_data = x_data.transpose(0, 2, 3, 1)\n x_labels = data[b'fine_labels']\n if not os.path.exists(dir+'/tfrecord/{}.tfrecords'.format(trainOrTest)):\n generate_tfrecord(x_data, x_labels, dir+'/tfrecord/', '{}.tfrecords'.format(trainOrTest)) # was dir+'./tfrecord/', which wrote to a different directory than the existence check above\n\n\nif __name__ == \"__main__\":\n createTFrecord('../data/cifar-100-python','train')\n createTFrecord('../data/cifar-100-python','test')","sub_path":"chapter1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"318979785","text":"#! /usr/bin/env python\nfrom scapy.all import *\nfrom random import *\n\n# ports scanned on the destination host\nportDest=[20, 21, 22, 69, 80]\n\nfor port in portDest:\n srcPort = randint(49151, 65535)\n # sr() returns (answered, unanswered); the original bound them as uans, ans, which swapped the names\n ans, unans = sr(IP(dst='192.168.1.29', src='192.168.1.28')/TCP(sport=srcPort, dport=port, flags='S'), timeout=10000)\n\n if (ans[0][1].getlayer(TCP).flags == 'SA'):\n print(\"Port \" + str(port) + \" is open\")\n elif (ans[0][1].getlayer(TCP).flags == 'RA'):\n print(\"Port \" + str(port) + \" is closed\")\n","sub_path":"scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"31634228","text":"#!/usr/bin/env python\n\n# from distutils.core import setup\n\nimport os\n# To use a consistent encoding\nfrom codecs import open\n\nimport setuptools\n\n# Get the long description from the README file\nwith open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'README.md')) as file:\n long_description = file.read()\n\nsetuptools.setup(\n\n name='trapy',\n\n version='0.6.3',\n\n description='Structural Fire Engineering - Probabilistic Reliability Assessment',\n\n author='Yan Fu',\n\n author_email='fuyans@gmail.com',\n\n url='https://github.com/fsepy/sfeprapy',\n\n download_url=\"https://github.com/fsepy/sfeprapy/archive/master.zip\",\n\n keywords=[\"fire safety\", \"structural fire engineering\"],\n\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Development Status :: 4 - Beta\",\n \"Environment :: Other Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering\",\n ],\n\n long_description='Structural Fire Engineering - Probabilistic Reliability Assessment',\n\n packages=[\n 'trapy',\n 'trapy.func',\n ],\n\n install_requires=[\n 'matplotlib>=2.2.2',\n 'numpy>=1.15.0',\n 'pandas>=0.23.3',\n 'scipy>=1.1.0',\n 'seaborn>=0.9.0',\n 'tqdm',\n ],\n\n 
include_package_data=True,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"134949251","text":"import argparse\nimport os\nimport json\nimport shutil\nimport random\nfrom itertools import islice\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\n\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import ModelCheckpoint, Timer\nfrom ignite.metrics import RunningAverage, Loss\n\nfrom sklearn.model_selection import train_test_split\nfrom data import HistoDataNorm\nfrom model_old import Glow\n\nimport argparse\nimport numpy as np\nimport os\nimport random\nimport torch\nimport torch.optim as optim\n\nimport torch.utils.data as data_utils\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom PIL import Image\n\n\n\ndef check_manual_seed(seed):\n #seed = seed or random.randint(1, 10000)\n random.seed(seed)\n torch.manual_seed(seed)\n\n np.random.seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n print(\"Using seed: {seed}\".format(seed=seed))\n\n\ndef check_dataset(dataset, augmentation, missing, seed):\n if dataset == \"malaria\":\n print(\"malaria\")\n\n domain_list_train = os.listdir('dataset_sorted_by_domain/')\n\n dataset = HistoDataNorm('dataset_sorted_by_domain/', domain_list=domain_list_train, augmentation=augmentation)\n\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])\n # train_dataset = HistoDataNorm('train/', domain_list=domain_list_train, augmentation=False)\n # test_dataset = HistoDataNorm('test/', domain_list=domain_list_train, augmentation=False)\n\n elif dataset == \"malaria_10_domains\":\n #print(\"malaria 2 domains\")\n\n #domain_list_train = ['C184P145ThinF', 'C59P20thinF']\n domain_list_train = [\"C116P77ThinF\", \"C132P93ThinF\", \"C137P98ThinF\", \"C180P141NThinF\", \"C182P143NThinF\", \\\n \"C184P145ThinF\", \"C39P4thinF\", 'C59P20thinF', \"C68P29N\", \"C99P60ThinF\"]\n\n # for domain in domain_list_train:\n # print(\"domain length\", domain, len(os.listdir('dataset_sorted_by_domain/'+domain + '/Uninfected')+os.listdir('dataset_sorted_by_domain/'+domain + '/Parasitized')))\n\n # train_dataset = HistoDataNorm('train/', domain_list=domain_list_train, augmentation=False)\n # test_dataset = HistoDataNorm('test/', domain_list=domain_list_train, augmentation=False)\n\n dataset = HistoDataNorm('dataset_sorted_by_domain/', domain_list=domain_list_train, augmentation=augmentation)\n\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])\n\n elif dataset == \"missing_Uninfected\":\n print(\"missing_uninfected\")\n\n domain_list_train_1 = [\"C116P77ThinF\", \"C132P93ThinF\", \"C137P98ThinF\", \"C180P141NThinF\", \"C182P143NThinF\", \\\n \"C184P145ThinF\", \"C39P4thinF\", 'C59P20thinF', \"C68P29N\", \"C99P60ThinF\"] \n domain_list_train_1.remove(args.missing)\n domain_list_train_1.append(args.missing +'_empty')\n dataset_1 = HistoDataNorm(dataset +'/', domain_list=domain_list_train_1, augmentation=augmentation)\n 
#train_size_1 = int(0.8 * len(dataset_1))\n #test_size_1 = len(dataset_1) - train_size_1\n #train_dataset_1, test_dataset_1 = torch.utils.data.random_split(dataset_1, [train_size_1, test_size_1])\n \n targets = dataset_1.train_labels\n train_idx, valid_idx= train_test_split(np.arange(len(targets)), test_size=0.2, random_state=seed, shuffle=True, stratify=targets)\n\n train_dataset_1 = torch.utils.data.Subset(dataset_1, train_idx)\n test_dataset_1 = torch.utils.data.Subset(dataset_1, valid_idx)\n \n domain_list_train_2 = [\"C116P77ThinF_empty\", \"C132P93ThinF_empty\", \"C137P98ThinF_empty\", \"C180P141NThinF_empty\", \"C182P143NThinF_empty\", \\\n \"C184P145ThinF_empty\", \"C39P4thinF_empty\", 'C59P20thinF_empty', \"C68P29N_empty\", \"C99P60ThinF_empty\"] \n domain_list_train_2.remove(args.missing+'_empty')\n domain_list_train_2.append(args.missing +'_m')\n dataset_2 = HistoDataNorm(dataset+'/', domain_list=domain_list_train_2, augmentation=augmentation)\n train_size_2 = int(0.8 * len(dataset_2))\n test_size_2 = len(dataset_2) - train_size_2\n train_dataset_2, test_dataset_2 = torch.utils.data.random_split(dataset_2, [train_size_2, test_size_2])\n \n train_dataset = torch.utils.data.ConcatDataset([train_dataset_1,train_dataset_2])\n test_dataset = torch.utils.data.ConcatDataset([test_dataset_1,test_dataset_2])\n \n \n elif dataset == \"missing_Parasitized\":\n print(\"missing_parasitized\")\n\n domain_list_train_1 = [\"C116P77ThinF\", \"C132P93ThinF\", \"C137P98ThinF\", \"C180P141NThinF\", \"C182P143NThinF\", \\\n \"C184P145ThinF\", \"C39P4thinF\", 'C59P20thinF', \"C68P29N\", \"C99P60ThinF\"] \n domain_list_train_1.remove(args.missing)\n domain_list_train_1.append(args.missing +'_empty')\n domain_list_train_1.sort()\n dataset_1 = HistoDataNorm(dataset+'/', domain_list=domain_list_train_1, augmentation=augmentation)\n #train_size_1 = int(0.8 * len(dataset_1))\n #test_size_1 = len(dataset_1) - train_size_1\n #train_dataset_1, test_dataset_1 = torch.utils.data.random_split(dataset_1, [train_size_1, test_size_1])\n \n targets = dataset_1.train_labels\n train_idx, valid_idx= train_test_split(np.arange(len(targets)), test_size=0.2, random_state=seed, shuffle=True, stratify=targets)\n\n train_dataset_1 = torch.utils.data.Subset(dataset_1, train_idx)\n test_dataset_1 = torch.utils.data.Subset(dataset_1, valid_idx)\n \n \n domain_list_train_2 = [\"C116P77ThinF_empty\", \"C132P93ThinF_empty\", \"C137P98ThinF_empty\", \"C180P141NThinF_empty\", \"C182P143NThinF_empty\", \\\n \"C184P145ThinF_empty\", \"C39P4thinF_empty\", 'C59P20thinF_empty', \"C68P29N_empty\", \"C99P60ThinF_empty\"] \n domain_list_train_2.remove(args.missing+'_empty')\n domain_list_train_2.append(args.missing +'_m')\n domain_list_train_2.sort()\n dataset_2 = HistoDataNorm(dataset+'/', domain_list=domain_list_train_2, augmentation=augmentation)\n train_size_2 = int(0.8 * len(dataset_2))\n test_size_2 = len(dataset_2) - train_size_2\n train_dataset_2, test_dataset_2 = torch.utils.data.random_split(dataset_2, [train_size_2, test_size_2])\n print(len(train_dataset_1))\n print(len(train_dataset_2))\n print(len(test_dataset_1))\n print(len(test_dataset_2))\n \n train_dataset = torch.utils.data.ConcatDataset([train_dataset_1,train_dataset_2])\n test_dataset = torch.utils.data.ConcatDataset([test_dataset_1,test_dataset_2])\n\n return train_dataset, test_dataset\n\n\n\ndef compute_loss(nll, reduction=\"mean\"):\n if reduction == \"mean\":\n losses = {\"nll\": torch.mean(nll)}\n elif reduction == \"none\":\n losses = {\"nll\": nll}\n\n 
losses[\"total_loss\"] = losses[\"nll\"]\n\n return losses\n\n\ndef compute_loss_y(nll, y_logits, y_weight, y, multi_class, reduction=\"mean\"):\n if reduction == \"mean\":\n losses = {\"nll\": torch.mean(nll)}\n elif reduction == \"none\":\n losses = {\"nll\": nll}\n\n if multi_class:\n #print(\"multi class?\")\n # y_logits = torch.sigmoid(y_logits)\n loss_classes = F.binary_cross_entropy_with_logits(\n y_logits, y, reduction=\"mean\"\n )\n else:\n # print(y_logits)\n loss_classes = F.cross_entropy(\n y_logits, torch.argmax(y, dim=1), reduction=reduction\n )\n\n losses[\"loss_classes\"] = loss_classes\n losses[\"total_loss\"] = losses[\"nll\"] + y_weight * loss_classes\n\n return losses\n\n\ndef main(\n dataset,\n augment,\n batch_size,\n eval_batch_size,\n epochs,\n saved_model,\n seed,\n hidden_channels,\n K,\n L,\n actnorm_scale,\n flow_permutation,\n flow_coupling,\n LU_decomposed,\n learn_top,\n y_condition,\n extra_condition,\n sp_condition,\n d_condition,\n yd_condition,\n y_weight,\n max_grad_clip,\n max_grad_norm,\n lr,\n n_workers,\n cuda,\n n_init_batches,\n output_dir,\n missing,\n saved_optimizer,\n warmup,\n):\n\n print(output_dir)\n device = \"cpu\" if (not torch.cuda.is_available() or not cuda) else \"cuda:0\"\n print(device)\n check_manual_seed(seed)\n print(\"augmenting?\", augment)\n train_dataset, test_dataset = check_dataset(dataset, augment, missing, seed)\n image_shape = (32, 32, 3)\n\n\n multi_class = False\n\n if yd_condition:\n #num_classes = 10*2\n num_classes = 10+2\n multi_class=True\n elif d_condition:\n num_classes=10\n else:\n num_classes=2\n print(\"num classes\", num_classes)\n\n train_loader = data.DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=n_workers,\n drop_last=True,\n )\n test_loader = data.DataLoader(\n test_dataset,\n batch_size=eval_batch_size,\n shuffle=False,\n num_workers=n_workers,\n drop_last=False,\n )\n\n model = Glow(\n image_shape,\n hidden_channels,\n K,\n L,\n actnorm_scale,\n flow_permutation,\n flow_coupling,\n LU_decomposed,\n num_classes,\n learn_top,\n y_condition,\n extra_condition,\n sp_condition,\n d_condition,\n yd_condition\n )\n\n\n\n model = model.to(device)\n optimizer = optim.Adamax(model.parameters(), lr=lr, weight_decay=5e-5)\n\n lr_lambda = lambda epoch: min(1.0, (epoch + 1) / warmup) # noqa\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)\n\n def step(engine, batch):\n model.train()\n optimizer.zero_grad()\n\n x, y, d, yd = batch\n x = x.to(device)\n\n if y_condition:\n y = y.to(device)\n z, nll, y_logits = model(x, y)\n losses = compute_loss_y(nll, y_logits, y_weight, y, multi_class)\n elif d_condition:\n d = d.to(device)\n z, nll, d_logits = model(x, d)\n d_weight=y_weight\n # multi_class false as only using 2 domains at the moment\n losses = compute_loss_y(nll, d_logits, d_weight, d, multi_class)\n elif yd_condition:\n yd = yd.to(device)\n z, nll, yd_logits = model(x, yd)\n yd_weight = y_weight\n losses = compute_loss_y(nll, yd_logits, yd_weight, yd, multi_class)\n else:\n print(\"none\")\n z, nll, y_logits = model(x, None)\n losses = compute_loss(nll)\n\n losses[\"total_loss\"].backward()\n\n if max_grad_clip > 0:\n torch.nn.utils.clip_grad_value_(model.parameters(), max_grad_clip)\n if max_grad_norm > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n\n optimizer.step()\n\n return losses\n\n def eval_step(engine, batch):\n model.eval()\n\n x, y, d, yd = batch\n x = x.to(device)\n\n with torch.no_grad():\n if y_condition:\n 
y = y.to(device)\n z, nll, y_logits = model(x, y)\n losses = compute_loss_y(\n nll, y_logits, y_weight, y, multi_class, reduction=\"none\"\n )\n elif d_condition:\n d = d.to(device)\n z, nll, d_logits = model(x, d)\n d_weight=y_weight\n losses = compute_loss_y(\n nll, d_logits, d_weight, d, multi_class, reduction=\"none\"\n )\n elif yd_condition:\n yd = yd.to(device)\n z, nll, yd_logits = model(x, yd)\n yd_weight = y_weight\n losses = compute_loss_y(\n nll, yd_logits, yd_weight, yd, multi_class, reduction=\"none\"\n )\n else:\n\n z, nll, y_logits = model(x, None)\n losses = compute_loss(nll, reduction=\"none\")\n\n return losses\n\n trainer = Engine(step)\n checkpoint_handler = ModelCheckpoint(\n output_dir, \"glow\", save_interval=1, n_saved=2, require_empty=False\n )\n\n trainer.add_event_handler(\n Events.EPOCH_COMPLETED,\n checkpoint_handler,\n {\"model\": model, \"optimizer\": optimizer},\n )\n\n monitoring_metrics = [\"total_loss\"]\n RunningAverage(output_transform=lambda x: x[\"total_loss\"]).attach(\n trainer, \"total_loss\"\n )\n\n evaluator = Engine(eval_step)\n\n # Note: replace by https://github.com/pytorch/ignite/pull/524 when released\n Loss(\n lambda x, y: torch.mean(x),\n output_transform=lambda x: (\n x[\"total_loss\"],\n torch.empty(x[\"total_loss\"].shape[0]),\n ),\n ).attach(evaluator, \"total_loss\")\n\n if y_condition or d_condition or yd_condition:\n monitoring_metrics.extend([\"nll\"])\n RunningAverage(output_transform=lambda x: x[\"nll\"]).attach(trainer, \"nll\")\n\n # Note: replace by https://github.com/pytorch/ignite/pull/524 when released\n Loss(\n lambda x, y: torch.mean(x),\n output_transform=lambda x: (x[\"nll\"], torch.empty(x[\"nll\"].shape[0])),\n ).attach(evaluator, \"nll\")\n\n\n pbar = ProgressBar()\n pbar.attach(trainer, metric_names=monitoring_metrics)\n\n # load pre-trained model if given\n if saved_model:\n model.load_state_dict(torch.load(saved_model))\n model.set_actnorm_init()\n\n if saved_optimizer:\n optimizer.load_state_dict(torch.load(saved_optimizer))\n\n file_name, ext = os.path.splitext(saved_model)\n resume_epoch = int(file_name.split(\"_\")[-1])\n\n @trainer.on(Events.STARTED)\n def resume_training(engine):\n engine.state.epoch = resume_epoch\n engine.state.iteration = resume_epoch * len(engine.state.dataloader)\n\n @trainer.on(Events.STARTED)\n def init(engine):\n model.train()\n\n init_batches = []\n init_targets = []\n init_domains = []\n init_yds = []\n\n with torch.no_grad():\n for batch, target, domain, yd in islice(train_loader, None, n_init_batches):\n init_batches.append(batch)\n init_targets.append(target)\n init_domains.append(domain)\n init_yds.append(yd)\n\n\n init_batches = torch.cat(init_batches).to(device)\n\n assert init_batches.shape[0] == n_init_batches * batch_size\n\n if y_condition :\n init_targets = torch.cat(init_targets).to(device)\n model(init_batches, init_targets)\n elif d_condition:\n init_domains = torch.cat(init_domains).to(device)\n model(init_batches, init_domains)\n elif yd_condition:\n init_yds = torch.cat(init_yds).to(device)\n model(init_batches, init_yds)\n else:\n init_targets = None\n model(init_batches, init_targets)\n\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def evaluate(engine):\n evaluator.run(test_loader)\n\n scheduler.step()\n metrics = evaluator.state.metrics\n\n losses = \", \".join([f\"{key}: {value:.2f}\" for key, value in metrics.items()])\n\n print(f\"Validation Results - Epoch: {engine.state.epoch} {losses}\")\n \n def score_function(engine):\n val_loss = 
engine.state.metrics['total_loss']\n\n return -val_loss\n \n \n name = \"best_\" \n\n val_handler = ModelCheckpoint(\n output_dir, name, score_function=score_function, score_name=\"val_loss\", n_saved=1, require_empty=False\n )\n\n evaluator.add_event_handler(\n Events.EPOCH_COMPLETED,\n val_handler,\n {\"model\": model},\n )\n\n timer = Timer(average=True)\n timer.attach(\n trainer,\n start=Events.EPOCH_STARTED,\n resume=Events.ITERATION_STARTED,\n pause=Events.ITERATION_COMPLETED,\n step=Events.ITERATION_COMPLETED,\n )\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def print_times(engine):\n pbar.log_message(\n f\"Epoch {engine.state.epoch} done. Time per batch: {timer.value():.3f}[s]\"\n )\n timer.reset()\n\n trainer.run(train_loader, epochs)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--dataset\",\n type=str,\n default=\"malaria\",\n help=\"Type of the dataset to be used.\",\n )\n\n parser.add_argument(\n \"--no_augment\",\n action=\"store_false\",\n dest=\"augment\",\n help=\"Augment training data\",\n )\n\n parser.add_argument(\n \"--hidden_channels\", type=int, default=512, help=\"Number of hidden channels\"\n )\n\n parser.add_argument(\"--K\", type=int, default=32, help=\"Number of layers per block\")\n\n parser.add_argument(\"--L\", type=int, default=3, help=\"Number of blocks\")\n\n parser.add_argument(\n \"--actnorm_scale\", type=float, default=1.0, help=\"Act norm scale\"\n )\n\n parser.add_argument(\n \"--flow_permutation\",\n type=str,\n default=\"invconv\",\n choices=[\"invconv\", \"shuffle\", \"reverse\"],\n help=\"Type of flow permutation\",\n )\n\n parser.add_argument(\n \"--flow_coupling\",\n type=str,\n default=\"affine\",\n choices=[\"additive\", \"affine\"],\n help=\"Type of flow coupling\",\n )\n\n parser.add_argument(\n \"--no_LU_decomposed\",\n action=\"store_false\",\n dest=\"LU_decomposed\",\n help=\"Train with LU decomposed 1x1 convs\",\n )\n\n parser.add_argument(\n \"--no_learn_top\",\n action=\"store_false\",\n help=\"Do not train top layer (prior)\",\n dest=\"learn_top\",\n )\n\n parser.add_argument(\n \"--y_condition\", action=\"store_true\", help=\"Train using class condition\"\n )\n\n parser.add_argument(\n \"--extra_condition\", action=\"store_true\", help=\"Extra conditioning\"\n )\n\n parser.add_argument(\n \"--sp_condition\", action=\"store_true\", help=\"split prior conditioning\"\n )\n\n parser.add_argument(\n \"--d_condition\", action=\"store_true\", help=\"Train using domain conditioning\"\n )\n\n parser.add_argument(\n \"--yd_condition\", action=\"store_true\", help=\"Train using label and domain conditioning\"\n )\n\n parser.add_argument(\n \"--y_weight\", type=float, default=0.01, help=\"Weight for class condition loss\"\n )\n\n parser.add_argument(\n \"--max_grad_clip\",\n type=float,\n default=0,\n help=\"Max gradient value (clip above - for off)\",\n )\n\n parser.add_argument(\n \"--max_grad_norm\",\n type=float,\n default=0,\n help=\"Max norm of gradient (clip above - 0 for off)\",\n )\n\n parser.add_argument(\n \"--n_workers\", type=int, default=6, help=\"number of data loading workers\"\n )\n\n parser.add_argument(\n \"--batch_size\", type=int, default=64, help=\"batch size used during training\"\n )\n\n parser.add_argument(\n \"--eval_batch_size\",\n type=int,\n default=512,\n help=\"batch size used during evaluation\",\n )\n\n parser.add_argument(\n \"--epochs\", type=int, default=250, help=\"number of epochs to train for\"\n )\n\n parser.add_argument(\"--lr\", type=float, default=5e-4, 
help=\"Learning rate\")\n\n parser.add_argument(\n \"--warmup\",\n type=float,\n default=5,\n help=\"Use this number of epochs to warmup learning rate linearly from zero to learning rate\", # noqa\n )\n\n parser.add_argument(\n \"--n_init_batches\",\n type=int,\n default=8,\n help=\"Number of batches to use for Act Norm initialisation\",\n )\n\n parser.add_argument(\n \"--no_cuda\", action=\"store_false\", dest=\"cuda\", help=\"Disables cuda\"\n )\n\n parser.add_argument(\n \"--output_dir\",\n default=\"output/\",\n help=\"Directory to output logs and model checkpoints\",\n )\n\n parser.add_argument(\n \"--fresh\", action=\"store_true\", help=\"Remove output directory before starting\"\n )\n\n parser.add_argument(\n \"--saved_model\",\n default=\"\",\n help=\"Path to model to load for continuing training\",\n )\n \n parser.add_argument(\n \"--missing\",\n type=str,\n default=\"\",\n help=\"missing domain\",\n )\n\n parser.add_argument(\n \"--saved_optimizer\",\n default=\"\",\n help=\"Path to optimizer to load for continuing training\",\n )\n\n parser.add_argument(\"--seed\", type=int, default=0, help=\"manual seed\")\n\n args = parser.parse_args()\n args.output_dir = os.path.join(args.output_dir,str(args.seed))\n \n os.makedirs(args.output_dir, exist_ok=True)\n\n\n kwargs = vars(args)\n del kwargs[\"fresh\"]\n\n with open(os.path.join(args.output_dir, \"hparams.json\"), \"w\") as fp:\n json.dump(kwargs, fp, sort_keys=True, indent=4)\n\n main(**kwargs)\n","sub_path":"train_old.py","file_name":"train_old.py","file_ext":"py","file_size_in_byte":21297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"273914192","text":"IP = \"10.130.58.13\"\n\nimport math #importerer matematikbibliotek\nimport cv2 #importerer cv2 bibliotek, som skal bruges for at tjekke at det er installeret korrekt\nfrom robotprogrammer import Robot_programmer #importerer særens \"bibliotek\" fra andet dokument\nfrom Robotcam import RobotCam #importerer særens \"bibliotek\" fra andet dokument\nfrom sympy.solvers import solve #importerer sympy bibliotek (dog skal man lige have installeret den nyeste version af sympy)\nfrom sympy import Symbol #heter Symbol fra sympy, som bruges til at definere ubekendte\nfrom rTData import RTData\n\n#kommandoer:\nkommando_quit = \"q\"\nkommando_help = \"help\"\nkommando_move = \"move\"\nkommando_home = \"home\"\nkommando_moveA = \"angle\"\nkommando_open = \"open\"\nkommando_close = \"close\"\nkommando_calibrate = \"calib\"\nkommando_camFind = \"cam\"\nkommando_transform = \"transform\"\nkommando_camConnect = \"connect\"\nkommando_find = \"find\"\nkommando_stat = \"status\"\nkommando_pos = \"position\"\nkommando_conf = \"conf\"\nkommando_fmove = \"relative\"\nkommando_fmoveA = \"rangle\"\nkommando_antal = \"antal\"\nkommando_automation = \"automation\"\n\n#Dictionary:\ncmd = {}\ncmd[\"Quits the program\"] = kommando_quit\ncmd[\"List of commands\"] = kommando_help\ncmd[\"Moves the robot to a home position\"] = kommando_home\ncmd[\"Moves the robot to given position\"] = kommando_move\ncmd[\"Move robot and give an angle rotating the grapper\"] = kommando_moveA\ncmd[\"Moves the robot relative to it's realtime position\"] = kommando_fmove\ncmd[\"Moves the angle of the robot relative to it's realtime position\"] = kommando_fmoveA\ncmd[\"Finds the block and moves the robot to that position\"] = kommando_find\ncmd[\"Automates the robot process\"] = kommando_automation\ncmd[\"Shows the amount of blocks the robot has moved\"] = 
kommando_antal\ncmd[\"Opens gripper of robot\"] = kommando_open\ncmd[\"Closes gripper of robot\"] = kommando_close\ncmd[\"Connects to the camera\"] = kommando_camConnect\ncmd[\"Finds coordinates for block (cam coordinates)\"] = kommando_camFind\ncmd[\"Calibrates Camera\"] = kommando_calibrate\ncmd[\"Transforms information from camera to imput position for the robot (x,y)\"] = kommando_transform\ncmd[\"1.0: the robot is still - 2.0: the robot is moving\"] = kommando_stat\ncmd[\"Gets the position of the robot\"] = kommando_pos\ncmd[\"Gets the configuration of the robot\"] = kommando_conf\n\n#Her oprettes et robot−objekt 4 \nrobot = Robot_programmer() \n#Her forbindes til robotten. \nrobot.connect(IP , False)\n\nrtd = RTData()\nrtd.connect(IP, False)\n\n#variabler\ncal = False\ncamOnline = False\namount = 0\nfailsafe = 0\nax = None\nbx = None\nay = None\nby = None\n\n\n#funktioner:\n\ndef wait(s = 2):\n while rtd.program_state == s:\n pass\n\n#Funktionen tager en liste og tjekker om længden er ens med det 3. parameter, hvis ikke; spørg igen...\ndef lenChecker(first, list1, l):\n list1.remove(first)\n while len(list1) != l:\n print(\"Der er ikke {} informationer!\".format(l))\n command = input(\"Skriv informationerne: \")\n list1 = command.split()\n return list1\n\n#Tjekker status for robotten og retunerer værdien\ndef status(x = False):\n stat = rtd.program_state\n if x:\n print(stat)\n return stat\n\n#Tjekker positionen for robotten og returnerer den\ndef position(x = False):\n pos = rtd.tool_frame \n if x:\n print(pos)\n return pos\n\n#Tjekker configurationen af robotten og retunerer det\ndef configuration(x = False):\n conf = rtd.qactual\n if x:\n print(conf)\n return conf\n\n#Sender informationer (parameter 1,2,3) til robotten om at flytte til\ndef move(x, y, z):\n x = float(x)/1000 \n y = float(y)/1000 \n z = float(z)/1000\n stat = status()\n if stat == 1:\n robot.move_xyz(x , y, z)\n elif stat == 0:\n print(\"Robotten er ikke forbundet!\")\n else:\n print(\"Robotten er i bevægelse\")\n\n#Gør det samme som move(), men tilføjer en vinkel\ndef moveA(x, y, z, rz):\n x = float(x)/1000 #0.3\n y = float(y)/1000 #0.5\n z = float(z)/1000 #0.02\n rz = float(rz) #3\n rz = rz*2*math.pi/360\n\n stat = status()\n if stat == 1:\n robot.move_xyza(x , y, z, rz) #sender informationerne til robotten, så det bliver udført\n elif stat == 0:\n print(\"Robotten er ikke forbundet!\")\n else:\n print(\"Robotten er i bevægelse\")\n\n#Roterer robotten et givent antal grader i forhold til vinklen robtten har\ndef relativ_vinkel(rz):\n rz = float(rz)\n rz = rz*2*math.pi/360\n stat = status()\n if stat == 1:\n pos = rtd.tool_frame\n robot.move_xyza(pos[0], pos[1], pos[2], pos[5] + rz)\n elif stat == 0:\n print(\"Robotten er ikke forbundet!\")\n else:\n print(\"Robotten er i bevægelse\")\n\n#Åbner gribearmen på robotten\ndef open_gripper():\n stat = status()\n if stat == 1:\n robot.open_gripper()\n elif stat == 0:\n print(\"Robotten er ikke forbundet!\")\n else:\n print(\"Robotten er i bevægelse\")\n\n#Lukker gribearmen på robotten\ndef close_gripper():\n stat = status()\n if stat == 1:\n robot.close_gripper()\n elif stat == 0:\n print(\"Robotten er ikke forbundet!\")\n else:\n print(\"Robotten er i bevægelse\")\n\n#Kalibrerer robottens koordinater med kameraets koordinater, så den robotten efterfølgende selv kan finde ud hvordan den skal positionenere sig for at den står rigtigt\ndef calibrate(x1, y1, x1Camera, y1Camera, x2, y2, x2Camera, y2Camera, ar1, ac1, ar2, ac2):\n global axS, bxS, ayS, byS, aa, ab\n ar1 = 
float(ar1)\n ac1 = float(ac1)\n ar2 = float(ar2)\n ac2 = float(ac2)\n '''\n 300 80 50 30 70 150 110 90\n #list1[0] = 300 x1\n list1[1] = 80 y1\n #list1[2] = 50 x1 camera\n list1[3] = 30 y1 camera\n #list1[4] = 70 x2\n list1[5] = 150 y2\n #list1[6] = 110 x2 camera\n list1[7] = 90 y2 camera\n '''\n #tager informationerne fra listen og sepererer dem, så de kan regnes med\n x1 = float(x1)\n y1 = float(y1)\n x1Camera = float(x1Camera)\n y1Camera = float(y1Camera)\n x2 = float(x2)\n y2 = float(y2)\n x2Camera = float(x2Camera)\n y2Camera = float(y2Camera)\n \n #ligning1\n #definerer ax og bx i sympy systemet\n ax = Symbol('ax')\n bx = Symbol('bx')\n \n #solver ved hjælp af sympy biblioteket (ligningen er lig 0, så man skal tage højresiden og trække fra på begge sider for at løse den)\n axL = solve(ax*x1Camera + bx - x1, ax)\n axS = axL[0]\n bxL = solve(axS*x2Camera + bx - x2, bx)\n bxS = float(bxL[0])\n axL = solve(ax*x1Camera + bxS - x1, ax)\n axS = float(axL[0])\n\n #ligning2\n ay = Symbol('ay')\n by = Symbol('by')\n \n ayL = solve(ay*y1Camera + by - y1, ay)\n ayS = ayL[0]\n byL = solve(ayS*y2Camera + by - y2, by)\n byS = float(byL[0])\n ayL = solve(ay*y1Camera + byS - y1, ay)\n ayS = float(ayL[0])\n \n\n aa = (ar1 - ar2) / (ac1 - ac2)\n ab = ar1 - (aa * ac1)\n\n #for begge ligninger er der gæmt værdier ax, ay, bx, by som skal bruges i transform\n print(\"\\nax er:\", axS, \"\\nbx er:\", bxS, \"\\nay er:\", ayS, \"\\nby er:\", byS)\n\n print(\"Kalibrering ok\")\n\n return axS, bxS, ayS, byS, aa, ab\n\n#Transformerer, dvs. tjekker om kalibreringen er lavet rigtigt\ndef transform(cal, xCamera, yCamera, aCam):\n if cal:\n aCam = float(aCam) \n xCamera = float(xCamera)\n yCamera = float(yCamera)\n \n #sætter dem ind i ligningen for at transformere og få værdien, som skal ind i robotten\n ligning1 = float(ax*xCamera + bx)\n ligning2 = float(ay*yCamera + by)\n\n test = aCam * aa + ab\n \n\n print(\"Dette er en test, hvor angle er {}\".format(test))\n #printer værdierne som robotten skal have, for at placere sig ved klodsen i x- og y-koordinatsystemet\n print(\"(\", ligning1, \",\", ligning2, \")\")\n else:\n print(\"Calibrate first pls\")\n\n#Forbinder til cameraret\ndef cam_connect():\n global cam\n #Her oprettes et cam−objekt\n try:\n cam = RobotCam()\n except:\n print(\"Kunne ikke forbinde til kameraret!\")\n\n#Bruger cameraret til at finde en klods\ndef cam_find(camOnline):\n if camOnline:\n cam.analyze()\n else:\n print(\"Kameraret er ikke forbundet!\")\n\n#Finder en klods med kameraret og flytter robotten derhen\ndef find(cal, camOnline, ax, bx, ay, by):\n if cal and camOnline:\n array = cam.analyze()\n print(array)\n xc = float(array[0])\n yc = float(array[1])\n\n xr = xc * ax + bx\n yr = yc * ay+ by\n print(xr)\n print(yr)\n stat = status()\n if stat == 1:\n robot.move_xyz(xr/1000, yr/1000, float(245)/1000)\n elif stat == 0:\n print(\"Robotten er ikke forbundet!\")\n else:\n print(\"Robotten er i bevægelse\")\n else:\n print(\"Calibrate and/or connect camera first pls\")\n\n#Flytter robotten relativt til dens nuværende position\ndef relative(x, y, z):\n x = float(x)/1000\n y = float(y)/1000\n z = float(z)/1000\n stat = status()\n if stat == 1:\n pos = rtd.tool_frame\n robot.move_xyz(float(pos[0])+x, float(pos[1])+y, float(pos[2])+z)\n elif stat == 0:\n print(\"Robotten er ikke forbundet!\")\n else:\n print(\"Robotten er i bevægelse\")\n\n#Viser antallet af klodser flyttet, eller sætter en ny værdi for det\ndef antal(x, amount):\n if len(x) == 1:\n print(amount)\n else:\n m = lenChecker(x[0], 
x, 1)\n amount = m[0]\n print(amount)\n return amount\n\n#komandosystem:\n#Kalder nødvendige funktioner i forhold til kommandoen der skal udføres\nwhile True:\n try:\n msg = input(\"\\nSkriv her: \")\n \n if not msg:\n break\n \n elif msg.startswith(kommando_quit):\n break\n \n elif msg.startswith(kommando_help):\n print(\"\")\n counter = 1\n for navn in cmd:\n print(counter, \"-\", \"kommando: \" + cmd[navn], \" - \", navn)\n counter += 1\n \n elif msg.startswith(kommando_move):\n m = msg.split()\n m = lenChecker(m[0], m, 3)\n move(m[0], m[1], m[2])\n \n elif msg.startswith(kommando_home):\n robot.move_home() #flytter sig til hjem-positionen defineret i det andet dokument\n \n elif msg.startswith(kommando_moveA):\n m = msg.split()\n m = lenChecker(m[0], m, 4)\n moveA(m[0], m[1], m[2], m[3])\n \n elif msg.startswith(kommando_fmoveA):\n m = msg.split()\n m = lenChecker(m[0], m, 1)\n relativ_vinkel(m[0])\n \n elif msg.startswith(kommando_open):\n open_gripper()\n \n elif msg.startswith(kommando_close):\n close_gripper()\n \n elif msg.startswith(kommando_calibrate):\n m = msg.split()\n m = lenChecker(m[0], m, 12)\n elementer = calibrate(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], m[9], m[10], m[11])\n ax = elementer[0]\n bx = elementer[1]\n ay = elementer[2]\n by = elementer[3]\n aa = elementer[4]\n ab = elementer[5]\n cal = True\n \n elif msg.startswith(kommando_transform):\n m = msg.split()\n m = lenChecker(m[0], m, 3)\n transform(cal, m[0], m[1], m[2])\n \n elif msg.startswith(kommando_camConnect):\n cam_connect()\n camOnline = True\n \n elif msg == (kommando_camFind):\n cam_find(camOnline)\n \n elif msg == (kommando_find):\n find(cal, camOnline, ax, bx, ay, by)\n \n elif msg == (kommando_stat):\n status(True)\n \n elif msg == (kommando_pos):\n position(True)\n \n elif msg == (kommando_conf):\n configuration(True)\n \n elif msg.startswith(kommando_fmove):\n m = msg.split()\n m = lenChecker(m[0], m, 3)\n relative(m[0], m[1], m[2])\n\n elif msg.startswith(kommando_antal):\n m = msg.split()\n amount = antal(m, amount)\n \n \n elif msg == (kommando_automation):\n if cal and camOnline:\n robot.move_home()\n for i in range(2):\n array = cam.analyze()\n #while len(array) > 2:\n # array = cam.analyze()\n xc = float(array[0])\n yc = float(array[1])\n a = float(array[2])\n \n \n xr = xc * axS + bxS\n yr = yc * ayS + byS\n ar = (aa * a + ab)\n ar = ar*2*math.pi/360\n #*360/(2*math.pi)\n print(xr)\n print(yr)\n if yr < -400:\n print(\"Den rykker sig for meget på y-aksen!\")\n failsafe = 1\n stat = status()\n if stat == 0:\n print(\"Robotten er ikke forbundet!\")\n elif failsafe == 0:\n robot.move_xyza(xr/1000, yr/1000, float(245)/1000, ar)\n wait(1)\n wait()\n #open_gripper()\n robot.open_gripper()\n wait(1)\n wait()\n pos = rtd.tool_frame\n #move(pos[0], pos[1], pos[2]-float(167)/1000)\n robot.move_xyz(pos[0], pos[1], pos[2]-float(167)/1000)\n wait(1)\n wait()\n #close_gripper()\n robot.close_gripper()\n amount += 1\n wait(1)\n wait()\n robot.move_home()\n wait(1)\n wait()\n pos = rtd.tool_frame\n #move(pos[0], pos[1], pos[2]-float(175)/1000)\n robot.move_xyz(pos[0], pos[1], pos[2]-180/1000)\n wait(1)\n wait()\n pos = rtd.tool_frame\n #move(pos[0], pos[1]-float(25)/1000*amount, pos[2])\n robot.move_xyz(pos[0], pos[1]+float(25)/1000*amount, pos[2])\n wait(1)\n wait()\n #open_gripper()\n robot.open_gripper()\n wait(1)\n wait()\n robot.move_home()\n \n else:\n print(\"Calibrate and/or connect camera first pls\")\n else:\n print(\"\\nDu skrev \" + str(msg))\n except Exception as e:\n print(e)\n 
break\nrtd.disconnect()\nprint(\"Tak for nu\")","sub_path":"Automationsprojekt_FrederikT_og_AndreasP/Forbindelse_til_robotten.py","file_name":"Forbindelse_til_robotten.py","file_ext":"py","file_size_in_byte":14893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"530412344","text":"f = open('food-data.txt', 'r')\r\n\r\nfoodData = str.split(f.read(), \"\\n\")\r\n\r\ncurrentIndex = \"-1\"\r\nfor i in range(0, len(foodData)):\r\n\tdata = str.split(foodData[i], \"\\t\")\r\n\tif data[0] != currentIndex:\r\n\t\tif currentIndex != \"-1\":\r\n\t\t\toutFile = open(\"food-data/\" + currentIndex + \".txt\", 'w')\r\n\t\t\toutFile.write(out)\r\n\t\t\toutFile.close()\r\n\t\tcurrentIndex = data[0]\r\n\t\tout = \"\"\r\n\tout += data[1] + \"\\t\" + data[2] + \"\\n\"\r\noutFile = open(\"food-data/\" + currentIndex + \".txt\", 'w')\r\noutFile.write(out)\r\noutFile.close()\r\n","sub_path":"database/offline/reorganize-food-data.py","file_name":"reorganize-food-data.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"131951881","text":"\"\"\"\nName: Kevin Pham\nEID: kmp2996\n\"\"\"\n\nfrom random import randrange, seed\n\ndef isAction(s) :\n assert s == \"hop\" or s == \"left\" or s == \"right\" or s == \"infect\" or s.split()[0] == \"go\" or s.split()[0] == \"if_empty\" or s.split()[0] == \"if_wall\" or s.split()[0] == \"if_random\" or s.split()[0] == \"if_enemy\"\n if (s == \"hop\" or s == \"left\" or s == \"right\" or s == \"infect\") :\n return True\n else :\n return False\n\n# darwin board\nclass Darwin:\n def __init__(self, r, c) :\n self.r = r\n self.c = c\n self.t = 0\n # board: row-major - i * c + j\n self.b = []\n for i in range(r * c): \n self.b.append(\".\")\n \n # print board\n def printBoard(self) :\n print(\" \", end = \"\")\n for i in range (self.c) :\n print(str(i % 10), end = \"\")\n print()\n\n for i in range (self.r) :\n print(str(i % 10) + \" \", end = \"\")\n for j in range (self.c) :\n if (self.b[i * self.c + j] == \".\") :\n print(\".\", end = \"\")\n else : \n print(self.b[i * self.c + j].s.s, end = \"\")\n print()\n print()\n\n # go turn of game\n def turn(self) :\n for i in range(self.r) :\n for j in range(self.c) :\n # Go to next iteration if not creature\n if(self.b[i * self.c + j] == \".\" or self.b[i * self.c + j].t == False) :\n continue\n else :\n # creature to at point [i][j]\n cr = self.b[i * self.c + j]\n # Do control commands\n while(not isAction(cr.s.ins[int(cr.c)])) :\n contr = cr.s.ins[int(cr.c)].split()\n if (contr[0] == \"if_empty\") :\n if (cr.d == 0 and j > 0) :\n if (self.b[i * self.c + (j - 1)] == \".\") :\n cr.c = int(contr[1])\n else :\n cr.c += 1\n elif (cr.d == 1 and i > 0) :\n if (self.b[(i - 1) * self.c + j] == \".\") :\n cr.c = int(contr[1])\n else :\n cr.c += 1\n elif (cr.d == 2 and j < (self.c - 1)) :\n if (self.b[i * self.c + (j + 1)] == \".\") :\n cr.c = int(contr[1])\n else :\n cr.c += 1\n elif (cr.d == 3 and i < (self.r - 1)) :\n if (self.b[(i + 1) * self.c + j] == \".\") :\n cr.c = int(contr[1])\n else :\n cr.c = int(cr.c) + 1\n else :\n cr.c += 1\n elif (contr[0] == \"if_wall\") :\n if (cr.d == 0 and j == 0) :\n cr.c = int(contr[1])\n elif (cr.d == 1 and i == 0) :\n cr.c = int(contr[1])\n elif (cr.d == 2 and j == (self.c - 1)) :\n cr.c = int(contr[1])\n elif (cr.d == 3 and i == (self.r - 1)) :\n cr.c = int(contr[1])\n else :\n cr.c += 1\n elif (contr[0] == \"if_random\") :\n if (randrange(0, 2) == 1) 
:\n cr.c = int(contr[1])\n else :\n cr.c = int(cr.c) + 1\n elif (contr[0] == \"if_enemy\") : \n if (cr.d == 0 and j > 0) :\n if (self.b[i * self.c + (j - 1)] != \".\" and self.b[i * self.c + (j - 1)].s != cr.s) :\n cr.c = int(contr[1])\n else :\n cr.c += 1\n elif (cr.d == 1 and i > 0) :\n if (self.b[(i - 1) * self.c + j] != \".\" and self.b[(i - 1) * self.c + j].s != cr.s) :\n cr.c = int(contr[1])\n else :\n cr.c += 1\n elif (cr.d == 2 and j < (self.c - 1)) :\n if (self.b[i * self.c + (j + 1)] != \".\" and self.b[i * self.c + (j + 1)].s != cr.s) :\n cr.c = int(contr[1])\n else :\n cr.c += 1\n elif (cr.d == 3 and i < (self.r - 1)) :\n if (self.b[(i + 1) * self.c + j] != \".\" and self.b[(i + 1) * self.c + j].s != cr.s) :\n cr.c = int(contr[1])\n else :\n cr.c = int(cr.c) + 1\n else :\n cr.c = int(cr.c) + 1\n elif (contr[0] == \"go\") :\n cr.c = int(contr[1]) \n\n # Action Command\n if (cr.s.ins[int(cr.c)] == \"hop\") :\n if (cr.d == 0 and j > 0) :\n if (self.b[i * self.c + (j - 1)] == \".\") :\n self.b[i * self.c + (j - 1)] = cr\n self.b[i * self.c + j] = \".\"\n elif (cr.d == 1 and i > 0) :\n if (self.b[(i - 1) * self.c + j] == \".\") :\n self.b[(i - 1) * self.c + j] = cr \n self.b[i * self.c + j] = \".\" \n elif (cr.d == 2 and j < (self.c - 1)) :\n if (self.b[i * self.c + (j + 1)] == \".\") :\n self.b[i * self.c + (j + 1)] = cr\n self.b[i * self.c + j] = \".\"\n elif (cr.d == 3 and i < (self.r - 1)) :\n if (self.b[(i + 1) * self.c + j] == \".\") :\n self.b[(i + 1) * self.c + j] = cr\n self.b[i * self.c + j] = \".\"\n elif (cr.s.ins[int(cr.c)] == \"left\") :\n cr.d = (cr.d - 1) % 4\n elif (cr.s.ins[int(cr.c)] == \"right\") :\n cr.d = (cr.d + 1) % 4\n elif (cr.s.ins[int(cr.c)] == \"infect\") :\n if (cr.d == 0 and j > 0) :\n if (self.b[i * self.c + (j - 1)] != \".\" and self.b[i * self.c + (j - 1)].s != cr.s) :\n self.b[i * self.c + (j - 1)].s = cr.s\n self.b[i * self.c + (j - 1)].c = int(0)\n elif (cr.d == 1 and i > 0) :\n if (self.b[(i - 1) * self.c + j] != \".\" and self.b[(i - 1) * self.c + j].s != cr.s) :\n self.b[(i - 1) * self.c + j].s = cr.s \n self.b[(i - 1) * self.c + j].c = int(0) \n elif (cr.d == 2 and j < (self.c - 1)) :\n if (self.b[i * self.c + (j + 1)] != \".\" and self.b[i * self.c + (j + 1)].s != cr.s) :\n self.b[i * self.c + (j + 1)].s = cr.s\n self.b[i * self.c + (j + 1)].c = int(0)\n elif (cr.d == 3 and i < (self.r - 1)) :\n if (self.b[(i + 1) * self.c + j] != \".\" and self.b[(i + 1) * self.c + j].s != cr.s) :\n self.b[(i + 1) * self.c + j].s = cr.s\n self.b[(i + 1) * self.c + j].c = int(0) \n cr.t = False\n cr.c = int(cr.c) + 1 \n\n for i in range(self.r) :\n for j in range(self.c) :\n if(self.b[i * self.c + j] != \".\") :\n self.b[i * self.c + j].t = True \n \n def addCreature (self, cr, r, c):\n if (self.b[r * self.c + c] == \".\") :\n self.b[r * self.c + c] = cr\n\n# Class species makes all the species\nclass Species:\n # symbol, instructions, number of instructions\n def __init__(self, s) :\n self.s = s\n self.ins = []\n\n # add instructions for species\n def addInstruction(self, s) :\n assert s == \"hop\" or s == \"left\" or s == \"right\" or s == \"infect\" or s.split()[0] == \"go\" or s.split()[0] == \"if_empty\" or s.split()[0] == \"if_wall\" or s.split()[0] == \"if_random\" or s.split()[0] == \"if_enemy\"\n self.ins.append(s)\n\nclass Creature:\n # direction: 0: west, 1: north, 2: east, 3: south\n # species, counter, direction, bool for turn\n def __init__(self, s, d) :\n assert d > -1 and d < 4\n self.s = s\n self.c = 0\n self.d = d\n self.t = 
True\n\n\n","sub_path":"Darwin.py","file_name":"Darwin.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"318076624","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport praw\nimport pandas as pd\nimport datetime as dt\nfrom pprint import pprint\nimport requests\nimport json\n\n\n# In[2]:\n\n\ntopics_dict = { \"subreddit_id\":[], \"subreddit\":[],\n \"id\":[],\n \"author\":[], \"name\":[],\n \"title\":[], \"ups\":[], \"downs\":[],\n \"score\":[], \n \"url\":[], \n \"comms_num\": [],\n \"domain\":[],\n \"created\": [], \n \"body\":[]}\n\nreddit_dict = { \"subreddit_id\":[], \"subreddit\":[],\n \"id\":[],\n \"title\":[],\n \"score\":[], \n \"url\":[], \"reddit_url\":[],\n \"comms_num\": [],\n \"created\": [], \n \"body\":[]}\n\ncomments_dict = { \n \"subreddit_id\":[], \"subreddit\":[],\"link_id\":[],\n \"id\":[],\n \"author\":[], \"name\":[],\n \"title\":[], \"ups\":[], \"downs\":[],\n \"score\":[], \n \"url\":[], \"reddit_url\":[],\n \"comms_num\": [],\n \"domain\":[],\n \"created\": [],\n \"text\":[],\n \"body\":[]}\n\ndef get_date(created):\n return dt.datetime.fromtimestamp(created)\n\ndef catch_ex(data=None):\n if (data):\n return data\n else:\n return \"\"\n# In[3]:\n\ndef tmp_fun(comment):\n try:\n link_id = catch_ex(comment['data']['link_id'])\n except:\n link_id = None\n comments_dict[\"link_id\"].append(link_id)\n\n try:\n subreddit_id = catch_ex(comment['data']['subreddit_id'])\n except:\n subreddit_id = None\n comments_dict[\"subreddit_id\"].append(subreddit_id)\n\n try:\n subreddit = catch_ex(comment['data']['subreddit'])\n except:\n subreddit = None\n comments_dict[\"subreddit\"].append(subreddit)\n\n try:\n id = catch_ex(comment['data']['id'])\n except:\n id = None\n comments_dict[\"id\"].append(id)\n\n try:\n author = catch_ex(comment['data']['author'])\n except:\n author = None\n comments_dict[\"author\"].append(author)\n\n try:\n name = catch_ex(comment['data']['name'])\n except:\n name = None\n comments_dict[\"name\"].append(name)\n\n try:\n title = catch_ex(comment['data']['title'])\n except:\n title = None\n comments_dict[\"title\"].append(title)\n\n try:\n ups = catch_ex(comment['data']['ups'])\n except:\n ups = None\n comments_dict[\"ups\"].append(ups)\n\n try:\n downs = catch_ex(comment['data']['downs'])\n except:\n downs = None\n comments_dict[\"downs\"].append(downs)\n\n try:\n score = catch_ex(comment['data']['score'])\n except:\n score = None\n comments_dict[\"score\"].append(score)\n\n try:\n reddit_url = catch_ex(comment['data']['permalink'])\n except:\n reddit_url = \"\"\n comments_dict[\"reddit_url\"].append(str('https://www.reddit.com') + reddit_url)\n\n try:\n url = catch_ex(comment['data']['url'])\n except:\n url = None\n comments_dict[\"url\"].append(url)\n\n try:\n num_comments = catch_ex(comment['data']['num_comments'])\n except:\n num_comments = None\n comments_dict[\"comms_num\"].append(num_comments)\n\n try:\n created = catch_ex(comment['data']['created'])\n except:\n created = \"\"\n comments_dict[\"created\"].append(created)\n\n try:\n domain = catch_ex(comment['data']['domain'])\n except:\n domain = None\n comments_dict[\"domain\"].append(domain)\n\n try:\n selftext = catch_ex(comment['data']['selftext'])\n except:\n selftext = None\n comments_dict[\"text\"].append(selftext)\n\n try:\n body = catch_ex(comment['data']['body'])\n except:\n body = None\n comments_dict[\"body\"].append(body)\n\n comments_dict[\"reddit_url\"]\n\ndef 
reddit_posts(url, header):\n post_comments = requests.get(str('https://www.reddit.com') + url + str('.json'),headers=header)\n #print(post_comments.text)\n post_comments = json.loads(post_comments.text)\n #print(comments)\n url_list = []\n for comments in post_comments: \n for comment in comments['data']['children']:\n url_list.append(tmp_fun(comment))\n return url_list\n\n\n# In[ ]:\n\n\n\"\"\"\nreddit = praw.Reddit(client_id='JMgbGNgkSVeOLw',\n client_secret='EPQ_T-A78lM91iV5ksG4VBTWeso',\n user_agent='d_reviewers',\n username='nidhis-enixta',\n password='nidhi@2615')\n\n\n\nsubreddit = reddit.subreddit('detergent')\ntop_subreddit = subreddit.top(limit=500)\n\nfor submission in top_subreddit:\n topics_dict[\"title\"].append(submission.title)\n topics_dict[\"score\"].append(submission.score)\n topics_dict[\"id\"].append(submission.id)\n topics_dict[\"url\"].append(submission.url)\n topics_dict[\"comms_num\"].append(submission.num_comments)\n topics_dict[\"created\"].append(submission.created)\n topics_dict[\"body\"].append(submission.selftext)\n \ntopics_data = pd.DataFrame(topics_dict)\n_timestamp = topics_data[\"created\"].apply(get_date)\ntopics_data = topics_data.assign(timestamp = _timestamp)\n\ntopics_data\n\"\"\"\n\n\n# In[4]:\n\nif __name__ == '__main__':\n #url = \"https://www.reddit.com/r/detergent/.json?count=20\"\n url = \"https://www.reddit.com/search.json?q=title%3A%22laundry%20detergent%22%20AND%20self%3A1&limit=100\"\n agent = {\"User-Agent\": \"Chrome/67.0.3396.87\"}\n posts = requests.get(url,headers=agent)\n data = json.loads(posts.text)\n tmp = []\n for child in data['data']['children']:\n url = child['data']['permalink']\n tmp.extend(reddit_posts(url,agent))\n\n #flattened_list = [y for x in tmp for y in x]\n #res = []\n #[res.extend(x) for x in flattened_list if x not in res]\n #print(len(res))\n #res = res[:100]\n #for i in res:\n # print(i)\n comments_data = pd.DataFrame(comments_dict)\n _timestamp = comments_data[\"created\"].apply(get_date)\n comments_data = comments_data.assign(timestamp = _timestamp)\n\n comments_data.to_csv('comments_data2.csv', index=False)\n print(\"done....\")\n\n\n# In[ ]:\n\n\"\"\"\nif __name__ == '__main__':\n #url = \"https://www.reddit.com/r/detergent/.json?count=20\"\n #url = \"https://www.reddit.com/search.json?q=title%3A%22laundry%20detergent%22&limit=100\"\n url = \"https://www.reddit.com/search.json?q=title%3A%22laundry%20detergent%22%20AND%20self%3A1&limit=100\"\n agent = {\"User-Agent\": \"Chrome/67.0.3396.87\"}\n posts = requests.get(url,headers=agent)\n data = json.loads(posts.text)\n \n for child in data['data']['children']:\n reddit_dict[\"title\"].append(child['data']['title'])\n reddit_dict[\"score\"].append(child['data']['score'])\n reddit_dict[\"id\"].append(child['data']['id'])\n reddit_dict[\"url\"].append(child['data']['url'])\n try:\n if (child['data']['num_comments']): \n num_comments=child['data']['num_comments'] \n else: \n num_comments=\"\"\n except:\n num_comments=\"\"\n reddit_dict[\"comms_num\"].append(num_comments)\n reddit_dict[\"created\"].append(child['data']['created'])\n try:\n if (child['data']['body']): \n body=child['data']['body'] \n else: \n body=\"\"\n except:\n body=\"\"\n reddit_dict[\"body\"].append(body)\n try:\n if (child['data']['subreddit_id']): \n subreddit_id=child['data']['subreddit_id'] \n else: \n subreddit_id=None\n except:\n subreddit_id=None\n reddit_dict[\"subreddit_id\"].append(subreddit_id)\n \n try:\n if (child['data']['subreddit']): \n subreddit=child['data']['subreddit'] \n 
else: \n subreddit=None\n except:\n subreddit=None\n reddit_dict[\"subreddit\"].append(subreddit)\n \n try:\n if (child['data']['permalink']): \n reddit_url=child['data']['permalink'] \n else: \n reddit_url=\"\"\n except:\n reddit_url=\"\"\n reddit_dict[\"reddit_url\"].append(str('https://www.reddit.com') + reddit_url)\n \n comments_data = pd.DataFrame(reddit_dict)\n #_timestamp = comments_data[\"created\"].apply(get_date)\n #comments_data = comments_data.assign(timestamp = _timestamp)\n \n comments_data.to_csv('comments_data4.csv', index=False)\n print(\"done....\")\n \"\"\"\n\n# In[ ]:\n\n\n\n\n","sub_path":"reddit_scraper.py","file_name":"reddit_scraper.py","file_ext":"py","file_size_in_byte":8513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"648030786","text":"#!/usr/bin/env python\n# coding=utf-8\n# Stan 2016-06-19\n\nfrom __future__ import (division, absolute_import,\n print_function, unicode_literals)\n\nfrom wtforms import (Form, SubmitField, StringField, TextAreaField,\n HiddenField, validators)\n\nfrom ..models.group import Group\n\n\nclass AddGroupForm(Form):\n name = StringField('Name:', [\n validators.DataRequired(),\n validators.Length(min=3, max=40),\n ],\n render_kw={\n \"placeholder\": \"Group name (required)\",\n }\n )\n description = TextAreaField('Description:',\n render_kw={\n \"placeholder\": \"Group description (optional)\",\n }\n )\n format = StringField()\n submit = SubmitField('Add group')\n\n def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n if not self.name.data.isalnum():\n self.name.errors.append('All characters in the string must be alphanumeric')\n return False\n\n group = Group.query.filter_by(name=self.name.data).first()\n if group:\n self.name.errors.append('Group already registered')\n return False\n\n return True\n","sub_path":"flask_tutorial_03/forms/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"219928378","text":"q = int(input())\n\nans = [0] * q\n\nfor i in range(q) :\n x , d , n = map(int,input().split())\n ans[i] = x\n for j in range(1,n) :\n ans[i] *= (x + j*d)\n ans[i] = ans[i] % 1000003\n\nfor a in ans :\n print(a)\n","sub_path":"AtCoder/other/M-SOLUTIONS/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"163073024","text":"import logging as log\n\n\nclass Mail(object):\n def __init__(self, page_html = ''):\n from email.mime.text import MIMEText\n from email.mime.multipart import MIMEMultipart\n self.message = MIMEMultipart()\n self.message.attach(MIMEText(page_html.encode('utf-8'), 'html','utf-8'))\n self.email_sender = 'xnat@fpmaragall.org'\n\n def send_mail(self, receiver_mail, subject='XNAT update', attached_files=[],\n config_file = '/home/grg/.xnat_bsc.cfg'):\n '''Function to send a mail'''\n import os.path as osp\n import smtplib\n\n for f in attached_files:\n from email.MIMEBase import MIMEBase\n from email import Encoders\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload( open(f, \"rb\").read() )\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment', filename=\"%s\" % osp.basename(f))\n self.message.attach(part)\n\n self.message['Subject'] = subject\n self.message['From'] = self.email_sender\n self.message['To'] = \", \".join(receiver_mail[0])\n 
self.message['Cc'] = \", \".join(receiver_mail[1])\n\n        s = smtplib.SMTP(host='smtp.gmail.com', port=587)\n        s.ehlo()\n        s.starttls()\n        s.login('xnat@fpmaragall.org', eval(open(config_file).read())['gmail_password'])\n        recipients = self.message[\"To\"].split(\",\") + self.message[\"Cc\"].split(\",\")\n        log.info(recipients)\n        s.sendmail(self.email_sender, recipients, self.message.as_string())\n        s.quit()\n\n\ndef check_session(xnatId, project=None, destination=[['goperto@fpmaragall.org'], ['goperto@fpmaragall.org']],\n        send_mail=False):\n    import os.path as osp\n    from pyxnat import Interface\n    import pandas as pd\n    from datetime import datetime\n    import collect\n    import validate as vx\n    from xml.etree import ElementTree\n\n    def md5(fname):\n        import hashlib\n        hash_md5 = hashlib.md5()\n        with open(fname, \"rb\") as f:\n            for chunk in iter(lambda: f.read(4096), b\"\"):\n                hash_md5.update(chunk)\n        return hash_md5.hexdigest()\n\n    startTime = datetime.now()\n\n    config_file = '/home/grg/.xnat_bsc.cfg'\n    central = Interface(config=config_file)\n\n    # Fetch project\n    project = collect.fetch_session_project(xnatId, central) if project is None else project\n    log.info('project %s detected'%project)\n\n    projects = central.select.projects().get()\n    exp = central.select.project(project).experiment(xnatId)\n\n    # Fetch subject\n    xml = central._exec(central.select.project(project).experiment(str(xnatId))._uri +\n                        '?format=xml')\n    NS=\"{http://nrg.wustl.edu/xnat}\"\n    root = ElementTree.XML(xml)\n    subject = root.find(NS+'dcmPatientId').text\n\n    # Collects session information\n    keys = ['ID', 'type', 'file_count', 'frames']\n    d = collect.collect_sequences_list(central, project, xnatId)\n    data = []\n    for each in d:\n        row = [project, xnatId]\n        for k in keys:\n            row.append(each[k])\n        data.append(row)\n    df = pd.DataFrame(data, columns=['project', 'xnatId', 'ID', 'type', 'file_count', 'frames'])\n\n    # Validates session\n    res, logs = vx.validate_session_from_df(df, project, xnatId)\n\n    # Compiles email body\n    html = '''

\"\"

\n

 

\n

Dear XNAT user,

\n

THIS IS A TEST.

\n

The following session was archived in BBRC:

\n
    \n
  • Project: !!PROJECT!!
  • \n
  • Subject: !!SUBJECT!!
  • \n
  • Session: !!SESSION!!
  • \n
\n
The result of the validation is: !!RESULT!!
\n

\n
The following observations were returned:
\n

\n
!!REPORT!!
\n

\n
Additional information here.
\n\n
BBRC Team
\n
--
generated in !!ELAPSEDTIME!! (!!FILE!! v.!!VERSION!!)
\n

 

\n '''\n\n html = html.replace('!!PROJECT!!', project)\n html = html.replace('!!SUBJECT!!', subject)\n html = html.replace('!!SESSION!!', str(xnatId))\n result = ['SUCCESS', 'WARNING', 'ERRORS'][res]\n html = html.replace('!!RESULT!!', result)\n html = html.replace('!!COLOR!!', ['#229c06', '#e1be2e', '#a11111'][res])\n html = html.replace('!!REPORT!!', '
'.join(logs))\n e = central.select.experiment(xnatId)\n url = 'https://barcelonabrainimaging.org/data/experiments/%s?format=html'%e.id()\n html = html.replace('!!URL!!', url)\n\n # Estimates elapsed time\n seconds = datetime.now() - startTime\n m, s = divmod(seconds.total_seconds(), 60)\n h, m = divmod(m, 60)\n elapsedtime = \"%d:%02d:%02d\" % (h, m, s)\n version = md5(__file__)[:8]\n html = html.replace('!!ELAPSEDTIME!!', elapsedtime)\n html = html.replace('!!FILE!!', osp.basename(__file__))\n html = html.replace('!!VERSION!!', version)\n\n # Sends email\n m = Mail(html)\n subject = 'XNAT update: %s (%s) validation results (%s)'%(xnatId, project, result)\n if send_mail:\n m.send_mail(destination, subject=subject)\n","sub_path":"bbrc/xnat/mailing.py","file_name":"mailing.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"325474815","text":"import sys\r\n\r\n#################################################################\r\n## globals\r\n\r\nMAGIC = \"$9$\"\r\n\r\n###################################\r\n## letter families\r\n\r\nFAMILY = [\"QzF3n6/9CAtpu0O\", \"B1IREhcSyrleKvMW8LXx\", \"7N-dVbwsY2g4oaJZGUDj\", \"iHkq.mPf5T\"]\r\nEXTRA = dict()\r\nfor x, item in enumerate(FAMILY):\r\n for c in item:\r\n EXTRA[c] = 3 - x\r\n\r\n###################################\r\n## forward and reverse dictionaries\r\n\r\nNUM_ALPHA = [x for x in \"\".join(FAMILY)]\r\nALPHA_NUM = {NUM_ALPHA[x]: x for x in range(0, len(NUM_ALPHA))}\r\n\r\n###################################\r\n## encoding moduli by position\r\n\r\nENCODING = [[1, 4, 32], [1, 16, 32], [1, 8, 32], [1, 64], [1, 32], [1, 4, 16, 128], [1, 32, 64]]\r\n\r\n\r\ndef _nibble(cref, length):\r\n nib = cref[0:length]\r\n rest = cref[length:]\r\n if len(nib) != length:\r\n print (\"Ran out of characters: hit '%s', expecting %s chars\" % (nib, length))\r\n sys.exit(1)\r\n return nib, rest\r\n\r\n\r\ndef _gap(c1, c2):\r\n return (ALPHA_NUM[str(c2)] - ALPHA_NUM[str(c1)]) % (len(NUM_ALPHA)) - 1\r\n\r\n\r\ndef _gap_decode(gaps, dec):\r\n num = 0\r\n if len(gaps) != len(dec):\r\n print (\"Nibble and decode size not the same!\")\r\n sys.exit(1)\r\n for x in range(0, len(gaps)):\r\n num += gaps[x] * dec[x]\r\n return chr(num % 256)\r\n\r\n\r\ndef juniper_decrypt(crypt):\r\n chars = crypt.split(\"$9$\", 1)[1]\r\n first, chars = _nibble(chars, 1)\r\n toss, chars = _nibble(chars, EXTRA[first])\r\n prev = first\r\n decrypt = \"\"\r\n while chars:\r\n decode = ENCODING[len(decrypt) % len(ENCODING)]\r\n nibble, chars = _nibble(chars, len(decode))\r\n gaps = []\r\n for i in nibble:\r\n g = _gap(prev, i)\r\n prev = i\r\n gaps += [g]\r\n decrypt += _gap_decode(gaps, decode)\r\n return decrypt\r\n\r\nprint (juniper_decrypt(\"$9$.fFnO1RyevB17-Vs4o36/C0B\"))\r\n","sub_path":"JunosDecode.py","file_name":"JunosDecode.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"225846041","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('offers', '0011_auto_20170112_1652'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='offers',\n name='conditions_loads_tovars',\n field=models.CharField(verbose_name='Условия загрузки товаров', blank=True, max_length=200),\n ),\n 
]\n","sub_path":"offers/migrations/0012_offers_conditions_loads_tovars.py","file_name":"0012_offers_conditions_loads_tovars.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433319866","text":"import os\nimport django\nimport math\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"travelItenerary.settings\")\n\ndjango.setup()\nfrom iteneraryApplication.models import *\n\nDistTime = DistanceTime.objects.all()\n\nfor disttimObject in DistTime:\n distance = disttimObject.distance\n if distance < 0:\n [lat1, lon1] = [math.radians(disttimObject.source.latitude), math.radians(disttimObject.source.longitude)]\n [lat2, lon2] = [math.radians(disttimObject.dest.latitude), math.radians(disttimObject.dest.longitude)]\n R = 6373.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n distance = R * c * 1000\n #print lat1, lon1, '>', lat2, lon2, '=', distance\n disttimObject.distance = distance\n disttimObject.time = distance / 666\n disttimObject.save()\n\n time = disttimObject.time\n newtime = time\n if time < 5: \n newtime = 5\n else:\n newtime = 5*(time/5)\n disttimObject.time = newtime\n disttimObject.save()\n\n\n","sub_path":"travelItenerary/negdist.py","file_name":"negdist.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"113502276","text":"import os\nfrom kazoo.client import KazooClient, KazooState\nfrom kazoo.exceptions import NoNodeError\nfrom kazoo.retry import KazooRetry\nfrom kazoo.handlers.threading import KazooTimeoutError\nfrom barbados.exceptions import FatalException\nfrom barbados.services.logging import Log\n\n\nclass ZookeeperConnector:\n def __init__(self, hosts=os.getenv('AMARI_ZOOKEEPER_HOSTS', '127.0.0.1:2181'), read_only=False):\n self.hosts = hosts\n self.read_only = read_only\n\n Log.info(\"Using Zookeeper hosts: \\\"%s\\\"\" % hosts)\n\n def set(self, path, value):\n self._connect()\n self.zk.ensure_path(path)\n\n if not self.zk.exists:\n self.zk.create(path, str.encode(value))\n else:\n self.zk.set(path, str.encode(value))\n\n def get(self, path):\n self._connect()\n try:\n data, stat = self.zk.get(path)\n return data.decode(\"utf-8\")\n except NoNodeError:\n raise KeyError(\"%s does not exist.\" % path)\n except Exception as e:\n Log.error(e.__class__)\n Log.error(e)\n\n def _connect(self):\n if not hasattr(self, 'zk'):\n self.zk = KazooClient(hosts=self.hosts, read_only=self.read_only, timeout=5, connection_retry=self._get_retry())\n elif self.zk.state != KazooState.CONNECTED:\n Log.warning(\"ZooKeeper state is %s\" % self.zk.state)\n pass\n elif self.zk.state == KazooState.CONNECTED:\n return\n else:\n raise Exception(\"We in a weird state. 
%s\" % self.zk.state)\n\n try:\n return self.zk.start()\n except KazooTimeoutError as e:\n raise FatalException(\"Timeout connecting to ZooKeeper (%s)\" % e)\n\n @staticmethod\n def _get_retry():\n return KazooRetry(max_tries=5, backoff=2, max_delay=30)\n","sub_path":"barbados/connectors/zookeeper.py","file_name":"zookeeper.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67032887","text":"import os\r\nimport math\r\nimport numpy as np\r\nimport csvWrite as csvWriter\r\nimport Liu as yao\r\nfrom argparse import ArgumentParser\r\n\r\n#parser = ArgumentParser(description='Parsing the results')\r\n\r\n#parser.add_argument(\r\n # '--path',\r\n # '-p',\r\n # dest='dir_path',\r\n # action='store',\r\n # help='Directory where traces are',\r\n # required=False,\r\n # )\r\n\r\n\r\n#args = parser.parse_args()\r\n\r\ndef summary_results(listOfNumbers, traceName,filename):\r\n \r\n videoBitrate = []\r\n rate = []\r\n stallList = []\r\n representationIndex = []\r\n maxRate = 0\r\n lineCounter = 0\r\n initialDelay = 0\r\n numOfStalls = 0\r\n totalStallDuration = 0\r\n numSeg = 0\r\n avgRate = 0\r\n prevRate = -1\r\n numOfSwitches = 0\r\n \r\n for line in listOfNumbers:\r\n lineCounter += 1\r\n resultsPerLine = list(map(float, line.split(',')[0:17]))\r\n\r\n numSeg = len(listOfNumbers)\r\n segmentDuration = resultsPerLine[3]\r\n representationIndex.append(resultsPerLine[0])\r\n videoBitrate.append(resultsPerLine[2])\r\n\r\n if lineCounter == 1:\r\n initialDelay = resultsPerLine[1]\r\n if lineCounter > 1:\r\n #if resultsPerLine[1] > 0:\r\n stallList.append(resultsPerLine[1])\r\n if resultsPerLine[1] > 0:\r\n numOfStalls += 1\r\n totalStallDuration += resultsPerLine[1]\r\n \r\n for i in range(0,len(representationIndex)):\r\n if(prevRate == -1):\r\n prevRate = representationIndex[i]\r\n rateDiff = representationIndex[i] - prevRate\r\n prevRate = representationIndex[i]\r\n if(rateDiff != 0):\r\n numOfSwitches += 1\r\n maxRate = max(videoBitrate)\r\n avgRate = np.mean(videoBitrate)\r\n\r\n if sum(stallList) == 0.0:\r\n yao.Yao_QoE_Estimation(listOfNumbers,traceName,filename)\r\n\r\ndef open_trace(traceName,filename):\r\n f = open(traceName, 'r')\r\n allLines = f.readlines()\r\n return summary_results(allLines[1:] , traceName,filename)\r\n\r\nif __name__ == '__main__':\r\n\r\n for (dirname, dirnames, filenames) in os.walk(r'C:\\Users\\varun\\OneDrive\\Thesis\\temp\\temp_ds'):\r\n for filename in filenames:\r\n nameOfFile = os.path.join(dirname, filename)\r\n newBatch = open_trace(nameOfFile,filename)\r\n ","sub_path":"TraceAnalysis.py","file_name":"TraceAnalysis.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"461365264","text":"import json\nimport os\n\nimport click\nfrom epic_kitchens.dataset.epic_dataset import EpicVideoDataset\nimport matplotlib.pyplot as plt\n\n\n@click.command()\n@click.option('--gulp-dir',\n type=str,\n default=None,\n help='EPIC-Kitchens gulp dir.')\n@click.option('--interim-dir',\n type=str,\n default=None,\n help='/path/to/starter-kit-action-recognition/data/interim')\n@click.option('--out', type=str, default=None, help='Output root folder.')\n@click.option('--split-path',\n type=str,\n default=None,\n help='trainval.json path.')\ndef gen_label(gulp_dir, interim_dir, out, split_path):\n with open(split_path, 'r') as f:\n trainval = json.load(f)\n idxsplit = (len(trainval['train']) + 
len(trainval['val']))*[None]\n for i in trainval['train']:\n idxsplit[i] = 'train'\n for i in trainval['val']:\n idxsplit[i] = 'val'\n assert None not in idxsplit\n\n action_classes = {}\n class_counts = {}\n next_action_class = 0\n rgbviddata = EpicVideoDataset(f'{gulp_dir}/rgb_train', 'verb+noun')\n outputs = {'train': [], 'val': []}\n categories = []\n for i, seg in enumerate(rgbviddata.video_segments):\n parid = seg['participant_id']\n vidid = seg['video_id']\n nar = seg['narration'].replace(' ', '-')\n uid = seg['uid']\n reldir = f'{parid}/{vidid}/{vidid}_{uid}_{nar}'\n assert os.path.exists(f'{interim_dir}/{reldir}'), f'{interim_dir}/{reldir}'\n\n verb = seg['verb_class']\n noun = seg['noun_class']\n action = f'{verb},{noun}'\n if action in action_classes:\n classidx = action_classes[action]\n class_counts[action] += 1\n else:\n categories.append(f'{seg[\"verb\"]} {seg[\"noun\"]}')\n classidx = next_action_class\n action_classes[action] = classidx\n class_counts[action] = 1\n next_action_class += 1\n\n nframes = seg['num_frames']\n outputs[idxsplit[i]].append(f'{reldir} {nframes} {classidx}')\n\n assert len(set(categories)) == len(categories)\n\n with open(f'{out}/category.txt', 'w') as f:\n f.write('\\n'.join(categories))\n\n with open(f'{out}/train_videofolder.txt', 'w') as f:\n f.write('\\n'.join(outputs['train']))\n\n with open(f'{out}/val_videofolder.txt', 'w') as f:\n f.write('\\n'.join(outputs['val']))\n\n class_counts = list(class_counts.values())\n class_counts.sort()\n plt.bar(range(0, len(class_counts)), class_counts)\n plt.savefig('action_class_histogram.png')\n\n\nif __name__ == '__main__':\n gen_label() # pylint:disable=no-value-for-parameter\n","sub_path":"tools/gen_label_epic_kitchens.py","file_name":"gen_label_epic_kitchens.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"383164291","text":"# kepler_grids\nfrom pyburst.mcmc import mcmc\nfrom pyburst.mcmc import mcmc_tools\n\nimport numpy as np\nimport sys\nimport os\nimport time\n\n\n# =============================================================================\n# Usage:\n# python run_concord.py [version] [source] [n_walkers] [n_steps] [n_threads] [dumpstep]\n# =============================================================================\nprint('=' * 30)\nGRIDS_PATH = os.environ['KEPLER_GRIDS']\nnparams = 6\nnargs = len(sys.argv)\n\nif (nargs != nparams + 1) and (nargs != nparams + 2):\n print(f\"\"\"Must provide {nparams} parameters:\n 1. version : mcmc version ID\n 2. source : source object (e.g., gs1826)\n 3. n_walkers : number of mcmc walkers\n 4. n_steps : number of mcmc steps to take\n 5. n_threads : number of threads/cores to use\n 6. dumpstep : steps to do between savedumps\n 7. (step0 : step to restart from. 
Optional)\"\"\")\n sys.exit()\n\nversion = int(sys.argv[1])\nsource = sys.argv[2]\nn_walkers = int(sys.argv[3])\nn_steps = int(sys.argv[4])\nn_threads = int(sys.argv[5])\ndumpstep = int(sys.argv[6])\nmcmc_path = mcmc_tools.get_mcmc_path(source)\n\n# ===== if restart =====\nif nargs == (nparams + 2):\n restart = True\n start = int(sys.argv[7])\n chain0 = mcmc_tools.load_chain(source=source, version=version, n_walkers=n_walkers,\n n_steps=start)\n pos = chain0[:, -1, :]\nelse:\n restart = False\n start = 0\n pos = mcmc.setup_positions(source=source, version=version, n_walkers=n_walkers)\n\nsampler = mcmc.setup_sampler(source=source, version=version,\n pos=pos, n_threads=n_threads)\niterations = round(n_steps / dumpstep)\nt0 = time.time()\n\n# ===== do 'dumpstep' steps at a time =====\nfor i in range(iterations):\n step0 = start + (i * dumpstep)\n step1 = start + ((i + 1) * dumpstep)\n\n print('-' * 30)\n print(f'Doing steps: {step0} - {step1}')\n pos, lnprob, rstate = mcmc.run_sampler(sampler, pos=pos, n_steps=dumpstep)\n\n # ===== concatenate loaded chain to current chain =====\n if restart:\n save_chain = np.concatenate([chain0, sampler.chain], 1)\n else:\n save_chain = sampler.chain\n\n # === save chain state ===\n filename = mcmc_tools.get_mcmc_string(source=source, version=version, prefix='chain',\n n_steps=step1, n_walkers=n_walkers, extension='.npy')\n filepath = os.path.join(mcmc_path, filename)\n print(f'Saving: {filepath}')\n np.save(filepath, save_chain)\n\n # ===== save sampler state =====\n mcmc_tools.save_sampler_state(sampler, source=source, version=version,\n n_steps=step1, n_walkers=n_walkers)\n\nprint('=' * 30)\nprint('Done!')\n\nt1 = time.time()\ndt = t1 - t0\ntime_per_step = dt / n_steps\ntime_per_sample = dt / (n_walkers * n_steps)\n\nprint(f'Total compute time: {dt:.0f} s ({dt/3600:.2f} hr)')\nprint(f'Average time per step: {time_per_step:.1f} s')\nprint(f'Average time per sample: {time_per_sample:.4f} s')\nprint('=' * 30)\n","sub_path":"scripts/run_mcmc.py","file_name":"run_mcmc.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"298085772","text":"vacation_cost = float(input())\nmoney_on_hand = float(input())\ndays_saving = 0\nspending_days = 5\n\nsaving = True\nwhile saving:\n transaction = input()\n amount = float(input())\n days_saving += 1\n if transaction == 'save':\n money_on_hand += amount\n spending_days = 5\n elif transaction == 'spend':\n spending_days -= 1\n if money_on_hand >= amount:\n money_on_hand -= amount\n else:\n money_on_hand = 0\n\n if money_on_hand >= vacation_cost:\n print(f\"You saved the money for {days_saving} days.\")\n break\n\n if spending_days == 0:\n print(f\"You can't save the money.\")\n print(f\"{days_saving}\")\n break\n","sub_path":"ProgrammingBasics/05.Whileloop/03vacation.py","file_name":"03vacation.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"244148615","text":"#! 
/usr/bin/python\n\ndef all_equal(ar):\n \"\"\"\n Checks that all terms in the array are equal\n Input:\n ar - Numerical array\n \"\"\"\n\n for i in ar:\n if i!=ar[0]:\n return False\n return True\n\ndef main():\n\n import numpy\n\n mass, xmom, ymom, enr, tracer = \\\n numpy.loadtxt('res.txt',unpack=True);\n\n f = open('gradesheet.txt','w')\n f.write('mass '+str(all_equal(mass))+'\\n')\n f.write('xmom '+str(all_equal(xmom))+'\\n')\n f.write('ymom '+str(all_equal(ymom))+'\\n')\n f.write('enr '+str(all_equal(enr))+'\\n')\n f.write('tracer '+str(all_equal(tracer))+'\\n')\n f.close()\n\n return all_equal(mass) and \\\n all_equal(xmom) and \\\n all_equal(ymom) and \\\n all_equal(enr) and \\\n all_equal(tracer)\n\nif __name__=='__main__':\n import os\n\n if main():\n os.system('touch test_passed.res')\n else:\n os.system('touch test_failed.res')\n","sub_path":"tests/newtonian/two_dimensional/conservation_lagrangian/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"14435530","text":"from __future__ import division \nfrom sys import argv\nfrom nltk.corpus import stopwords\nimport math\nimport nltk\n\nscript, number = argv\ni=1\nstop = open('stopwords.txt','r')\n#pos = open('pos.txt','w')\n#neg = open('neg.txt','w')\n#voca = open('vocabluary.txt','w')\n#j = number\n# from bigramvocabulary import get_bigram_list\ndef get_bigram_list(fname):\n\tf = open(fname)\n\traw = f.read()\n\t#print raw\n\ttokens = nltk.word_tokenize(raw)\n\tbgs = nltk.bigrams(tokens)\n\tfdist = nltk.FreqDist(bgs)\n#print fdist.str()\n\t#for k,v in fdist.items():\n\t\t#print k,v\n\treturn fdist\ndef create(number):\n# print number\n\tj = int(number)\n\tj = (j+1) % 10\n\tpos = []\n\tneg = []\n\tvoab = {}\n\ttrain_pos = open('train_pos.txt','w')\n\ttrain_neg = open('train_neg.txt','w')\n\ttrain_file = open('train_file.txt','w')\n\twhile(j != int(number)):\n\t\t#print \"learning file\",j\n\t\tfil = open(str(j),'r')\n\t\tfor line in fil:\n\t\t\tlis = line.split()\n\t\t\tif (lis[-1] == '1'):\n\t\t\t\ttrain_file.write(\" \".join(lis[:-1]))\n\t\t\t\ttrain_pos.write(\" \".join(lis[:-1]))\n\t\t\t\ttrain_pos.write(\"\\n\")\n\t\t\t\tpos = pos + lis[:-1]\n\t\t\telse:\n\t\t\t\tneg = neg + lis[:-1]\n\t\t\t\ttrain_file.write(\" \".join(lis[:-1]))\n\t\t\t\ttrain_neg.write(\" \".join(lis[:-1]))\n\t\t\t\ttrain_neg.write(\"\\n\")\n\t\t\tfor word in lis:\n\t\t\t#\tprint word\n\t\t\t\tif (word not in voab.keys() and (word != \"1\" or word != \"0\")):\n\t\t\t\t\tvoab[word] = 1;\n\t\t\t\telse:\n\t\t\t\t\tvoab[word] = voab[word] + 1;\n\t\tj = (j+1)%10\n\t\tfil.close()\n\tfor w in voab.keys():\n\t\tif(voab[w] < 2):\n\t\t\tdel voab[w]\n\ttrain_pos.close()\n\ttrain_neg.close()\n\n\tpbfl = get_bigram_list('train_pos.txt')\n\tnbfl = get_bigram_list('train_neg.txt')\n\ttbfl = get_bigram_list('train_file.txt')\n\n\t#for k,l in \n\t#for k,v in tbfl.items():\n\t\t#print k,v\n\t#voc_size= ([w for w in tbfl if tbfl[w] >= 2])\n\t#print voc_size\n\n\n\n\treturn (pos,neg,voab,pbfl,nbfl,tbfl)\n\t\t\t\n\t\ndef populate(filed):\n\tvoc = []\n\tfor i in filed:\n\t\ti = i.split()\n\t\tvoc = voc + i\n\treturn voc\n\ndef prob(bigram,wl,wl1,bfl,tbfl,n):\n\n\t#print wl\n\tif(tbfl[bigram]>=3):\n\n\t\t#print bigram,bfl[bigram]\n\t\tval=bfl[bigram]\n\t\tk = (val+1)/(wl.count(bigram[0])+n-bfl[bigram])\n\t\t#k1=(wl.count(bigram[1])+1)/(len(wl)+len(wl1))\n\t\t#print \"k\" , k , \"k1\" , k1\n\t\t#print \"bigram\"\n\t\t#print bigram[0]\n\telif(bigram[1] in 
wl1.keys() and wl1[bigram[1]] >= 2):\n\t\tk = (wl.count(bigram[1]) + 1)/(len(wl) + len(wl1) )\n\telse:\n\t\tk = 1/(len(wl) + len(wl1))\n\treturn math.log10(k)\n\ndef str_to_bigrams(doc):\n\tdoc = doc.split()\n\tbigrams = []\n\tbigrams.append(('*',doc[0]))\n\tbigrams.append((doc[-1],'**'))\n\tfor i in range(len(doc)-1):\n\t\tabigram = (doc[i],doc[i+1])\n\t\t#print abigram\n\t\tbigrams.append(abigram)\n\treturn bigrams\n\ndef classify(stri,posi,negi,pbfl,nbfl,tbfl,n,v):\n\tnprob,pprob=0,0\n\tbigrams = str_to_bigrams(stri)\n\tfor i in bigrams:\n\t\tnprob = nprob + prob(i,negi,v,nbfl,tbfl,n)\n\t\tpprob = pprob + prob(i,posi,v,pbfl,tbfl,n)\n\tif(nprob > pprob):\n\t\treturn '0'\n\telse:\n\t\treturn '1'\n\ndef validation(number): \n\tpositive,negative,vocab,pbfl,nbfl,tbfl = create(number)\n\t#print pbfl\n # for k,v in pbfl:\n\t# print k,v\t\n\t#print positive\n#vocab = populate(voca)\n#positive = populate(pos)\n#negative = populate(neg)\n\tn = len(vocab)\n\tTP = 0\n\tcount = 0;\n\ttest = open(number,'r')\n\tfor line in test:\n\t\t\n\t\tli = line.split()\n\t # print li\n\t\tli = li[:-1]\n\t\t#print li\n\t\tlinee = \" \".join(li)\n\t\tres = classify(linee,positive,negative,pbfl,nbfl,tbfl,n,vocab) \n\t\tlis = line.split()\n\t# print \"checking\",lis[-1],res\n\t\tif (lis[-1] == res):\n\t\t\tTP = TP + 1\n\t\tcount = count + 1\n\tprint (TP/count)*100\n\nvalidation(number)\n","sub_path":"bigramnaive.py","file_name":"bigramnaive.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"87356101","text":"import graphene\nfrom app.api.client.helpers.client_helpers import get_default_client_status\nfrom django.db.models import Q\nfrom graphene.types.generic import GenericScalar\nfrom graphene_django.types import ObjectType\nfrom graphql_extensions.auth.decorators import login_required\n\nfrom ..helpers.pagination_helper import pagination_helper\nfrom ..helpers.permission_required import role_required, token_required\nfrom ..helpers.validate_object_id import validate_object_id\nfrom ..helpers.validation_errors import error_dict\nfrom .models import CorporateClient, IndividualClient\nfrom .object_types import (CorporateClientPaginatedType, CorporateClientType,\n IndividualClientPaginatedType, IndividualClientType)\n\n\nclass Query(ObjectType):\n client_status_options = GenericScalar()\n individual_client = graphene.Field(IndividualClientType, id=graphene.String())\n corporate_client = graphene.Field(CorporateClientType, id=graphene.String())\n individual_clients = graphene.Field(IndividualClientPaginatedType,\n page=graphene.Int(),\n search=graphene.String(),\n limit=graphene.Int())\n corporate_clients = graphene.Field(CorporateClientPaginatedType,\n page=graphene.Int(),\n search=graphene.String(),\n limit=graphene.Int())\n\n @token_required\n @login_required\n def resolve_client_status_options(self, info, **kwargs):\n return get_default_client_status()\n\n @token_required\n @login_required\n def resolve_individual_client(self, info, **kwargs):\n error_msg = error_dict['permission_denied'].format(\"view\", 'client')\n role_required(info.context.user, ['admin', 'manager'], error_msg)\n id = kwargs.get('id', None)\n return validate_object_id(id, IndividualClient,\n \"Individual Client\", info.context.user.agency)\n\n @token_required\n @login_required\n def resolve_corporate_client(self, info, **kwargs):\n error_msg = error_dict['permission_denied'].format(\"view\", 'client')\n role_required(info.context.user, ['admin', 
'manager'], error_msg)\n id = kwargs.get('id', None)\n return validate_object_id(id, CorporateClient,\n \"Corporate Client\", info.context.user.agency)\n\n @token_required\n @login_required\n def resolve_corporate_clients(self, info, search=None, **kwargs):\n page = kwargs.get('page', 1)\n limit = kwargs.get('limit', 10)\n error_msg = error_dict['admin_only'].format('list clients')\n role_required(info.context.user, ['admin', 'manager'], error_msg)\n if search:\n filter = (\n Q(name__icontains=search) |\n Q(about__icontains=search) |\n Q(postal_address__icontains=search) |\n Q(kra_pin__icontains=search) |\n Q(town__icontains=search) |\n Q(email__icontains=search) |\n Q(phone_number__icontains=search) |\n Q(status__icontains=search)\n )\n clients = CorporateClient.objects.filter(\n filter, agency=info.context.user.agency).all().order_by(\n 'name')\n else:\n clients = CorporateClient.objects.filter(\n agency=info.context.user.agency).all().order_by(\n 'name')\n return pagination_helper(clients, page, limit, CorporateClientPaginatedType)\n\n @token_required\n @login_required\n def resolve_individual_clients(self, info, search=None, **kwargs):\n page = kwargs.get('page', 1)\n limit = kwargs.get('limit', 10)\n error_msg = error_dict['admin_only'].format('list clients')\n role_required(info.context.user, ['admin', 'manager'], error_msg)\n if search:\n filter = (\n Q(first_name__icontains=search) |\n Q(last_name__icontains=search) |\n Q(postal_address__icontains=search) |\n Q(surname__icontains=search) |\n Q(kra_pin__icontains=search) |\n Q(town__icontains=search) |\n Q(email__icontains=search) |\n Q(phone_number__icontains=search) |\n Q(status__icontains=search)\n )\n clients = IndividualClient.objects.filter(\n filter, agency=info.context.user.agency).all().order_by(\n 'first_name')\n else:\n clients = IndividualClient.objects.filter(\n agency=info.context.user.agency).all().order_by(\n 'first_name')\n return pagination_helper(clients, page, limit, IndividualClientPaginatedType)\n","sub_path":"app/api/videos/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"15038892","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# E-mail : wu.wu@hisilicon.com\n# Data : 2015-06-02 17:22:02\n# Desc :\nfrom django.conf.urls import url\nfrom django.conf import settings\n\nfrom . 
import views\n\nurlpatterns = [\n # ex: /polls/\n url(r'^$', views.index, name='index'),\n # for Performance\n url(r'algorithm/$', views.algorithm, name='algorithm'),\n url(r'cpu_sincore/$', views.cpu_sincore, name='cpu_sincore'),\n url(r'cpu_multicore/$', views.cpu_multicore, name='cpu_multicore'),\n url(r'storage/$', views.storage, name='storage'),\n url(r'latency/$', views.latency, name='latency'),\n url(r'memory/$', views.memory, name='memory'),\n url(r'network/$', views.network, name='network'),\n #url(r'io/$', views.io, name='io'),\n url(r'application/$', views.application, name='application'),\n # for Functional\n url(r'kernel/$', views.kernel, name='kernel'),\n url(r'debug/$', views.debug, name='debug'),\n url(r'peripheral/$', views.peripheral, name='peripheral'),\n ##\n url(r'static/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.STATIC_ROOT}, name='static'),\n]\n","sub_path":"frontend/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"386297942","text":"\n\n#calss header\nclass _MENACE():\n\tdef __init__(self,): \n\t\tself.name = \"MENACE\"\n\t\tself.definitions = [u'something that is likely to cause harm: ', u'a dangerous quality that makes you think someone is going to do something bad: ', u'a person, especially a child, who is very annoying', u'to demand money using threats: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_menace.py","file_name":"_menace.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"41465059","text":"# 양방향 bfs\ndef bfs(v):\n global ans\n Q = list()\n Q.append(v)\n while Q: # 큐가 빌때 까지\n v= Q.pop(0)\n for w in graph[v]:\n if not visit[w]:\n Q.append(w)\n visit[w]=visit[v]+1\n ans+=1\n\n\n\nnode = int(input())\nn = int(input())\ngraph = [[] for _ in range(node+1)]\nvisit = [0]*(node+1)\nans = 0\nfor _ in range(n):\n s,e = map(int,input().split())\n graph[s].append(e)\n graph[e].append(s)\n\nbfs(1) # 1노드 부터 시작\nprint(ans-1)","sub_path":"BOJ/B2606_바이러스.py","file_name":"B2606_바이러스.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"134071162","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'polls'\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('product//',views.DetailView.as_view(), name='detail'),\n path('add-to-cart//',views.add_to_cart, name='add-to-cart'),\n path('order/',views.getOrderItems, name='order'),\n path('order/delete//',views.deleteOrder, name='deleteOrder')\n]","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"202893978","text":"\nfrom sources.general import Cmd\n\n\ncogName = \"Misc Cog\"\ncog = {\n \"name\": cogName,\n \"description\": \"This part of the bot has random miscellaneous things that don't fit anywhere else.\"\n}\n\nemojiGreen = \"🟢\"\nemojiRed = \"🔴\"\n\nprotectDemocracy = \"laprOS will not allow Hermit to silence democracy.\"\n\nping = Cmd(\n \"ping\",\n \"A test command.\",\n pong = \"Pong\"\n)\ncat = Cmd(\n \"cat\",\n \"Sends a cute cat picture to the chat. (Bugged for certain users.)\",\n cat = lambda message: message\n)\nvote = Cmd(\n \"vote\",\n \"Starts a vote on this command's message.\",\n emojiNotFound = f\"Couldn't find one of the emojis you're trying to use for the vote. Are you sure it's available on this server?\"\n)\ncoinflip = Cmd(\n \"coinflip\", \"flipcoin\",\n \"Performs a coinflip, heads or tails.\",\n heads = \"Heads!\",\n tails = \"Tails!\"\n)\nsup = Cmd(\n \"sup\",\n \"Yo.\",\n sup = \"sup\"\n)\nstab = Cmd(\n \"stab\",\n \"Idfk, blame DragonD\",\n usage=[\n \"slink\"\n ]\n)\nhug = Cmd(\n \"hug\",\n \"Idfk, blame Bonehead\",\n usage=[\n \"\",\n \"lapras 🙂\"\n ]\n)\npunch = Cmd(\n \"punch\",\n \"Idfk, blame Sudmensch\",\n usage=[\n \"robbie\"\n ]\n)\ndab = Cmd(\n \"dab\",\n \"Idfk, blame Domingize\",\n usage=[\n \"\",\n \"on laprOS\"\n ]\n)","sub_path":"sources/text/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"23693175","text":"metadata = \"\"\"\r\nsummary @ XML stylesheet transformation library\r\nhomepage @ http://xmlsoft.org/XSLT/\r\nlicense @ MIT\r\nsrc_url @ftp://xmlsoft.org/libxslt/$fullname.tar.gz\r\narch @ ~x86_64\n\"\"\"\r\n\r\ndepends = \"\"\"\r\nruntime @ sys-libs/glibc dev-libs/libxml2 dev-libs/libgcrypt\r\n\"\"\"\r\n\r\ndef prepare():\r\n patch(\"fix-sandbox-problems.patch\")\r\n patch(\"libxslt-1.1.25-fix-python-linking.patch\", level=1)\r\n patch(\"libxslt.m4-libxslt-1.1.8.patch\")\r\n autoreconf(\"-fi\")\r\n\r\ndef configure():\r\n conf(\"--with-python\")\r\n\r\ndef build():\r\n export(\"PYTHONDONTWRITEBYTECODE\", \"1\")\r\n make()\r\n\r\ndef install():\r\n raw_install(\"DESTDIR=%s\" % install_dir)\r\n\r\n insdoc(\"COPYING\")\r\n","sub_path":"dev-libs/libxslt/libxslt-1.1.26.py","file_name":"libxslt-1.1.26.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"48945156","text":"from multiprocessing import Process, Lock\n\ndef readfile(rank):\n fw=open(str(rank),'w')\n for s in open('../token/group/48','r'):\n fw.write(s)\n fw.close()\n\n\n\nfor rank in range(10):\n Process(target=readfile, args=(rank,)).start()\n\n\"\"\"\ndef f( i):\n #l.acquire()\n print 'hello world', i\n #l.release()\n\nif __name__ == '__main__':\n #lock = Lock()\n\n for num in range(10):\n Process(target=f, args=(num, 
)).start()\n\"\"\"\n","sub_path":"src/setjoin/bf/others/bf2/t/multiprocess_readfile.py","file_name":"multiprocess_readfile.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"278982012","text":"import theano\n#theano.config.device = 'gpu'\n#theano.config.floatX = 'float32'\nimport numpy as np\n\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.optimizers import SGD\n#import dload\n#import random\nmodel = Sequential()\n\ndef spaceout():\n    train=np.load(\"train.npy\")\n    train_labels=np.load(\"train_labels.npy\")\n    test=np.load(\"test.npy\")\n    test_labels=np.load(\"test_labels.npy\")\n    return train,train_labels,test,test_labels\n\n\n#theano.gof.cc.get_module_cache().clear()\n\n# input: 100x100 images with 3 channels -> (3, 100, 100) tensors.\n# this applies 32 convolution filters of size 3x3 each.\nmodel.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(3, 640, 640)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Convolution2D(32, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\n\nmodel.add(Flatten())\n# Note: Keras does automatic shape inference.\nmodel.add(Dense(32))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(5))\nmodel.add(Activation('softmax'))\n\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='hinge', optimizer=sgd)\n\ntrain,train_labels,test,test_labels=spaceout()\ntest=np.swapaxes(np.swapaxes(test,3,1),3,2)\ntest_labels=to_categorical(test_labels)\nX_train=np.swapaxes(np.swapaxes(train,3,1),3,2)\nY_train=to_categorical(train_labels)\nprint (X_train.shape)\n\nprint (model.summary())\nmodel.fit(X_train, Y_train, batch_size=2, nb_epoch=5,verbose=1)\nscore = model.evaluate(test, test_labels, batch_size=16)\npreds=model.predict(test,batch_size=1)\npred=np.argmax(preds,axis=1)\nprint(pred)\nprint(preds)\nprint(preds.shape)\nprint(test_labels)\nprint(pred==test_labels)\nprint(np.sum(pred==test_labels))\n\n","sub_path":"2layertest.py","file_name":"2layertest.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"630180011","text":"from functools import wraps\nfrom flask_restful import Resource\n\n\ndef sanitize_response(response):\n    data = None\n    status = 200\n    headers = {}\n    if isinstance(response, tuple) and len(response) == 3:\n        (data, status, headers) = response\n    elif isinstance(response, tuple) and len(response) == 5:\n        (status, data, code, message, header) = response\n        headers.update(header)\n        return status, data, code, message, headers\n    else:\n        data = response\n    return data, status, headers\n\n\ndef patch_response_data(func):\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        resp = func(*args, **kwargs)\n        response = sanitize_response(resp)\n        if isinstance(response, tuple) and len(response) == 5:\n            status, data, code, message, headers = response\n            data = {\"responseData\": data,\n                    \"status\": status,\n                    \"message\": message}\n\n            return data, code, headers\n        else:\n            data, status, headers = response\n\n        if type(data) not in [list, dict]:\n            return resp\n        patched = isinstance(data, dict) and (\n            \"errorCode\" in data or \"responseData\" in data\n        )\n\n        if not 
patched:\n data = {\n \"responseData\": data\n }\n\n if 'errorCode' in data.keys():\n status = data['errorCode']\n\n return data, status, headers\n\n return wrapper\n\n\nclass BaseResource(Resource):\n method_decorators = [\n patch_response_data]\n","sub_path":"application/src/common/base_resource.py","file_name":"base_resource.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"428287044","text":"import ROOT\n\ndef CFD_with_ToA_correction( fileName, toa, cfd ):\n\n root_file = ROOT.TFile.Open( fileName )\n root_tree = root_file.Get( \"wfm\" )\n\n gaus = ROOT.TF1(\"gaus\", \"gaus\")\n\n cfd_projection_corr = ROOT.TH1D( \"cfd_projection_corr\", \"{} cfd2[{}]-cfd3[20] corr\".format(fileName, cfd), 100, 1, 1)\n\n cfd_projection = ROOT.TH1D( \"cfd_projection\", \"{} cfd2[{}]-cfd3[20]\".format(fileName, cfd), 100, 1, 1)\n\n for ientry, entry in enumerate(root_tree):\n if( entry.pmax2[0] > toa ):\n cfd_projection_corr.Fill( entry.cfd2[cfd]-entry.thTime2[toa] )\n\n cfd_projection_corr.Fit(gaus,\"Q\")\n\n cfd_sigma_corr = gaus.GetParameter(2)\n\n return cfd_sigma_corr\n\nROOT.gROOT.SetBatch(ROOT.kTRUE)\npreRad_file_list_s = [\"preRad/Coin_stats_120V_trig395V_parse.root\",\n \"preRad/Coin_stats_130V_trig395V_parse.root\" ]\n\n_4e14_file_list_s = [\"_4E14/Coin_stats_430V_trig395V_parse.root\",\n \"_4E14/Coin_stats_400V_trig395V_parse.root\" ]\n\n_1e15_file_list_s = [\"_1E15/Coin_stats_475V_trig400V_parse.root\",\n \"_1E15/Coin_stats_460V_trig400V_parse.root\" ]\n\n_3e15_file_list_s = [\"_3E15/Coin_stats_495V_trig395V_parse.root\",\n \"_3E15/Coin_stats_470V_trig395V_parse.root\" ]\n\n_6e15_file_list_s = [\"_6E15/Coin_stats_495V_trig395V_parse.root\",\n \"_6E15/Coin_stats_470V_trig395V_parse.root\" ]\n\nfile_list = list()\n\nfile_list = preRad_file_list_s + _4e14_file_list_s + _1e15_file_list_s + _3e15_file_list_s + _6e15_file_list_s\n\nfor item in file_list[:]:\n out_name = item.split(\"/\")\n cfd_out_txt = open( \"_cfd-ToA_{}_{}_.txt\".format(out_name[0],out_name[1]),\"w\")\n cfd_out_txt.write(\"cfd sigma\")\n cfd_out_txt.write(\"\\n\")\n\n for cfd in range(100):\n sigma = CFD_with_ToA_correction( item, 5, cfd )\n cfd_out_txt.write(\"{} {}\".format(cfd, sigma))\n cfd_out_txt.write(\"\\n\")\n\n cfd_out_txt.close()\n","sub_path":"CFD_with_ToA_correction.py","file_name":"CFD_with_ToA_correction.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"572397761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport streamlit as st\n# To make things easier later, we're also importing numpy and pandas for\n# working with sample data.\nimport numpy as np\nimport pandas as pd\nimport altair as alt\n\nst.title('Bet Goals')\nst.write('Bet goals is an app designed to help you make informed bets on soccer matches')\n\nst.subheader('Suggested Betting Strategy:') \nst.write('We suggest betting on games predicted by our model to end in a draw')\n\nst.subheader('Rationale:') \nst.write('In the past 4 years, the bookmakers odds have always favoured either the home team or the away team. Not a single game has been backed by the bookmaker to end in a draw in this time frame. This systematic underestimation of the chances of a game ending in a draw lets the bookmaker overestimate the chances of a home win. 
To exploit this inefficiency, we built a model that can identify draws with 36% precision. Even though this means we will be wrong 2 out of 3 times, the odds on draws have historically been high enough to give us around 20% return on investment.')\n\nst.subheader('Matchday 28:')\nst.subheader('Bookmaker Odds:')\ndf=pd.read_csv('Betdata27.csv')\nst.write(df[['Fixture:','Home win odds','Draw odds','Away win odds','Predicted Result']])\n\n#option = st.selectbox(\n#    'Which match would you like to bet on?',\n#    df['Fixture:'])\n\n\n#df2\n\noptions_multi = st.multiselect('What fixtures would you like to bet on? (We suggest betting on games predicted to end in draws)', df['Fixture:'])\n\n#st.write('You selected:', options_multi)\n\noption_team=pd.DataFrame(columns=['Teamselected'])\noption_amount=pd.DataFrame(columns=['Moneybet'])\noption_poss_win=pd.DataFrame(columns=['Moneywon'])\noption_prob_win=pd.DataFrame(columns=['Probwin'])\n\nfor i in range(len(options_multi)): \n    df2=df[['Home Team','Away Team','Draw option']].loc[df['Fixture:']==options_multi[i]]\n    option_temp = st.selectbox(\n        'Which team would you like to bet on in '+options_multi[i]+'?',\n        (df2.iloc[0,0],df2.iloc[0,1],df2.iloc[0,2]))\n    option_team=option_team.append({'Teamselected':option_temp}, ignore_index=True) \n#    option_team[['Teamselected']].iloc[i]=option_temp\n    \n    d = {'Money': [10, 20, 50, 100]}\n    Betopt= pd.DataFrame(data=d)    \n    widkey='slider'+str(i)\n    option_mtemp = st.slider('How much would you like to bet?', 0, 200, 0, key=widkey)\n    option_amount=option_amount.append({'Moneybet':option_mtemp}, ignore_index=True) \n    \n    \n    if df2.iloc[0,0]==option_temp:\n        a1=df[['Home win odds']].loc[df['Fixture:']==options_multi[i]]\n        t1=a1.iloc[0,0]\n        \n        b1=df[['Home win']].loc[df['Fixture:']==options_multi[i]]*100\n        t2=b1.iloc[0,0]\n        money=(t1-1)*option_mtemp\n        \n        option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)\n        option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)\n        if option_mtemp != 0:\n            'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'\n        \n        \n    elif df2.iloc[0,1]==option_temp:\n        a1=df[['Away win odds']].loc[df['Fixture:']==options_multi[i]]\n        t1=a1.iloc[0,0]\n        \n        b1=df[['Away Win']].loc[df['Fixture:']==options_multi[i]]*100\n        t2=b1.iloc[0,0]\n        money=(t1-1)*option_mtemp\n        \n        option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)\n        option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)\n        \n        if option_mtemp != 0:\n            'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on '+option_temp+' in '+options_multi[i]+'.'\n        \n        \n    else:\n        a1=df[['Draw odds']].loc[df['Fixture:']==options_multi[i]]\n        t1=a1.iloc[0,0]\n        \n        b1=df[['Draw']].loc[df['Fixture:']==options_multi[i]]*100\n        t2=b1.iloc[0,0]\n        money=(t1-1)*option_mtemp\n        \n        option_poss_win=option_poss_win.append({'Moneywon': round(money,2)}, ignore_index=True)\n        option_prob_win=option_prob_win.append({'Probwin': round(t2,2)/100}, ignore_index=True)\n        \n        if option_mtemp != 0:\n            'You have a '+str(round(t2,2))+'% chance of winning '+str(round(money,2))+' dollars by betting on a draw in '+options_multi[i]+'.'\n        \n\n\ncombinations=np.zeros((2**len(options_multi),len(options_multi))) \nfor i in range(2**len(options_multi)):\n    temp=i\n    for j in range(len(options_multi)):\n        q=temp//2\n        mod=temp%2\n        combinations[i,j]=mod\n        temp=q\n
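\n# Added note (not in the original script): row i of combinations holds the\n# binary expansion of i, so with two selected bets the rows come out as\n# [0, 0], [1, 0], [0, 1], [1, 1] -- one row per win/lose outcome, which the\n# block below turns into a payoff distribution.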
\nprob_dist=pd.DataFrame(columns=['Winning','Probability']) \nfor i in range(2**len(options_multi)):\n    probability=1\n    winning=0\n    for j in range(len(options_multi)):\n        if combinations[i,j]==1:\n            probability=probability*option_prob_win['Probwin'].iloc[j]\n            winning=winning+option_poss_win['Moneywon'].iloc[j]\n        else:\n            probability=probability*(1-option_prob_win['Probwin'].iloc[j])\n            winning=winning-option_amount['Moneybet'].iloc[j]\n    \n    prob_dist=prob_dist.append({'Winning':winning,'Probability':probability}, ignore_index=True)\n    \nprob_dist=prob_dist.sort_values(by='Winning',ascending=True)\n#prob_dist\n\n\nif prob_dist.shape[0]>1: \n    d=alt.Chart(prob_dist).mark_bar().encode(\n        x='Winning',\n        y='Probability'\n    )\n    \n    st.altair_chart(d)\n    \n    expecval=0\n    \n    for i in range(prob_dist.shape[0]):\n        expecval=expecval+prob_dist.iloc[i,0]*prob_dist.iloc[i,1]\n    \n    'The expected value of your bets is '+str(round(expecval,2))+' dollars.'\n\n    \n#option_poss_win\n#option_prob_win","sub_path":"Bet_Goals.py","file_name":"Bet_Goals.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"514491651","text":"from discord.ext import commands\nimport random\n\n\nclass PingCog(commands.Cog, name=\"Ping\"):\n\t\"\"\"A command which simply acknowledges the user's ping\"\"\"\n\n\tdef __init__(self, bot: commands.Bot):\n\t\tself.bot = bot\n\n\t@commands.command(name=\"ping\")\n\tasync def ping(self, ctx: commands.Context):\n\t\t\"\"\"A command which simply acknowledges the user's ping.\n\n\t\tUsage:\n\t\t```\n\t\t++ping\n\t\t```\n\t\t\"\"\"\n\n\t\t# log in console that a ping was received\n\t\tprint(\"Received ping\")\n\n\t\twith open(\"modules/ping/responses.txt\") as responses:\n\t\t\tawait ctx.send(random.choice(responses.readlines()))\n\n\n# This function will be called when this extension is loaded. It is necessary to add these functions to the bot.\ndef setup(bot: commands.Bot):\n\tbot.add_cog(PingCog(bot))\n","sub_path":"modules/ping/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"532870143","text":"# An A is a black and white sketch and a B is the colored version\n# We'll start with ~30 black and white sketches (A's) and are trying to create\n# 400-1000 AB pairs to use for a TensorFlow model\n# Class SketchComponents stores a black and white sketch A and colors it by\n# geometric rules (which work on 'rule conforming' sketches) to get B.\n# Class Augmentations has the functions used to augment (to get from 30 to 400-1000).\n# Most are used on both A and B with the same parameters to get A' and B' e.g. mirrorFlipPair.\n# I put some test code at the bottom that I was using for testing the classes and functions\n# You'll want to grab Flower1.png and Dragon4.png to use for testing.\n
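\n# Added note (not in the original): the intended pipeline described above is\n# roughly: color each sketch A with SketchComponents to get its B, then expand\n# the ~30 AB pairs with the Augmentations transforms defined below.\n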
\nimport cv2\nimport numpy as np\nfrom skimage import measure\nfrom skimage.measure import regionprops\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy.ndimage.interpolation import map_coordinates\nfrom sklearn.preprocessing import normalize\n\n\nclass Augmentations():\n    # Part of project DragonPaint\n    # Augmentations is a collection of augmentation transformations for AB pairs:\n    # randxyScaledSkewedPair, randTranslationPair, randRotationPair,\n    # mirrorFlipPair, radiusCubedPair (r->r**3),\n    # elasticTransformColorSetPair (Gaussian filters)\n\n    # still need elasticDeformationGray (done for one img but need set version?\n    # incorporate read BW, transform, color into the function?)\n    # still need erasures, crops\n    # still need (here or elsewhere) the sequences of transformations\n\n    # AFFINE TRANSFORMATIONS:\n    def randxyScaledSkewedPair(self, imgA, imgB):\n        # small x and y scale and skew\n        # fills in background white, might crop image\n\n        xscale = np.random.uniform(0.8, 1)\n        yscale = np.random.uniform(0.8, 1)\n        skewFactor = np.random.uniform(0, 0.1)\n        M = np.float32([[xscale, 0, 0], [skewFactor, yscale, 0]])\n        return self.warpAffinePair(imgA, imgB, M)\n\n    def randTranslationPair(self, imgA, imgB):\n        # small translation in x and y\n        # fills in background white, might crop image\n\n        transMax = 20\n        transX = np.random.randint(transMax)\n        transY = np.random.randint(transMax)\n        M = np.float32([[1, 0, transX], [0, 1, transY]])\n        return self.warpAffinePair(imgA, imgB, M)\n\n    def randRotationPair(self, imgA, imgB, characterType='flower'):\n        # chooses random rotation between 0 and maxRotation\n        # fills in background white, might crop image\n\n        maxRotation = {'flower': 360, 'dragon': 10}\n        rotation = np.random.randint(maxRotation[characterType])\n        rows, cols = imgA.shape[:2]\n        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotation, 1)\n        return self.warpAffinePair(imgA, imgB, M)\n\n    def warpAffinePair(self, imgA, imgB, M):\n        # warps both images with the same affine matrix, padding with white;\n        # images are assumed square\n        size = len(imgA)\n        return [cv2.warpAffine(img,\n                               M,\n                               (size, size),\n                               borderMode=cv2.BORDER_CONSTANT,\n                               borderValue=(255, 255, 255))\n                for img in (imgA, imgB)]\n    \n    def mirrorFlipPair(self, imgA, imgB):\n        # flips across y axis\n\n        flipA = cv2.flip(imgA, 1)\n        flipB = cv2.flip(imgB, 1)\n        return flipA, flipB\n
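\n    # Added usage sketch (assumption, not in the original): the pair transforms\n    # compose by feeding one AB pair into the next, e.g.\n    #   aug = Augmentations()\n    #   a1, b1 = aug.mirrorFlipPair(imgA, imgB)\n    #   a2, b2 = aug.randTranslationPair(a1, b1)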
\n    # ELASTIC DEFORMATIONS/GAUSSIAN BLUR:\n    def elasticTransformColorSetPair(self, imgA, imgB):\n        # Assumes both imgA and imgB are 3D arrays (i.e. color not grayscale)\n        # Creates a list of four transformed AB pairs with different parameters\n        # for each pair\n\n        alphas = [50, 200, 800, 1500]\n        sigmas = [3, 4, 7, 8]\n        Alist = []\n        Blist = []\n        for alpha, sigma in zip(alphas, sigmas):\n            transA, transB = self.elasticTransformColorPair(imgA,\n                                                            imgB,\n                                                            alpha,\n                                                            sigma)\n            Alist.append(transA)\n            Blist.append(transB)\n        return Alist, Blist\n\n    def elasticTransformColorPair(self,\n                                  imgA,\n                                  imgB,\n                                  alpha,\n                                  sigma,\n                                  random_state=None):\n        # function for elasticTransformColorSetPair\n        # elastic deformations/random displacement fields\n        # modified from Github https://gist.github.com/erniejunior/601cdf56d2b424757de5;\n        # based on Simard, et al\n        # Assumes both are 3D arrays (i.e. color not grayscale)\n        # Returns a single transformed AB pair\n\n        if random_state is None:\n            random_state = np.random.RandomState(None)\n        shape = imgA.shape\n        dx, dy = [self._random_displacement_field(shape, alpha, sigma, random_state)\n                  for _ in range(2)]\n        dz = np.zeros_like(dx)\n\n        x, y, z = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]))\n        indices = [np.reshape(array, (-1, 1)) for array in (y+dy, x+dx, z)]\n\n        return [map_coordinates(img,\n                                indices,\n                                order=1,\n                                mode='reflect').reshape(img.shape)\n                for img in (imgA, imgB)]\n\n\n    def _random_displacement_field(self, shape, alpha, sigma, random_state):\n        # per-axis random displacement field for elasticTransformColorPair:\n        # uniform noise in [-1, 1), Gaussian-smoothed with sigma, scaled by alpha\n\n        return gaussian_filter((random_state.rand(*shape) * 2 - 1),\n                               sigma,\n                               mode=\"constant\",\n                               cval=0) * alpha\n    \n    def elasticDeformationGray(self, img, alpha, blurSize):\n        # requires grayscale image\n        # so unlike the AB pair transformations, read in as grayscale,\n        # transform and change to BGR, then color A' to get B'\n        # function for elasticDeformationGrayPairSet\n        # similar to elasticTransformColor but different parameters and results\n        # turning out enough different worth using good parameters:\n        # alphas=[100,150,150,150,125]\n        # blurSizes=[99,99,125,155,155]\n\n        shape = img.shape\n\n        dx = self.calculateRandomDeformation(shape, alpha, blurSize)\n        dy = self.calculateRandomDeformation(shape, alpha, blurSize)\n        x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]))\n        indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))\n\n        distorted_image = map_coordinates(img,\n                                          indices,\n                                          order=1,\n                                          mode='constant',\n                                          cval=255)\n\n        return distorted_image.reshape(img.shape)\n\n\n    def calculateRandomDeformation(self, shape, alpha, blurSize):\n        # builds a random displacement field: uniform noise, Gaussian-blurred,\n        # normalized, then scaled by alpha\n        field = np.zeros((shape[0], shape[0]))\n        for i in range(shape[0]):\n            for j in range(shape[0]):\n                field[i][j] = np.random.uniform(-1, 1)\n        blur = cv2.GaussianBlur(field, (blurSize, blurSize), 0)\n        normBlurField = normalize(blur)\n        field = alpha * normBlurField\n        return field\n\n    \n    # RADIUS CUBED/DISC HOMEOMORPHISM\n    def radiusCubedPair(self, imgA, imgB):\n        # for flower, not for dragon\n        # homeomorphism of unit disk shrinks center disproportionally;\n        # based on r->r**3\n\n        scale = np.random.uniform(0.4, 0.6)\n        return self.radiusCubed(imgA, scale), self.radiusCubed(imgB, scale)\n\n    def cubeRoot(self, x):\n        # function for radiusCubed\n\n        if x >= 0:\n            return x ** (1 / 3)\n        else:\n            return -(abs(x) ** (1 / 3))\n\n    def radiusCubed(self, imgA, scale=.5):\n        # function for radiusCubedPair\n        # translate and scale to fit in unit disk; r, theta ->r**3, theta;\n        # rescale, translate to original square\n\n        rows, cols, ch = imgA.shape\n        newA = np.zeros((rows, cols, ch))\n        mid = rows // 2\n        size = rows\n        scaleFactor = size * scale\n        for row in range(size):\n            for col in range(size):\n                rowOld = row\n                
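# (added comment) the steps below run the radial map in reverse to find\n                # the source pixel for this (row, col):\n                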
colOld = col\n\n # move origin to middle\n rowOld = row - mid\n colOld = col - mid\n\n # scale to fit square in unit disc\n rowOld = rowOld / scaleFactor\n colOld = colOld / scaleFactor\n\n # transform unit disc in a way that shrinks center more than edge\n # r->r**3\n rSquared = self.cubeRoot(rowOld ** 2 + colOld ** 2)\n if rSquared:\n rowOld = rowOld / rSquared\n colOld = colOld / rSquared\n\n # scale back out of unit disc to full size disc/square\n rowOld = rowOld * scaleFactor\n colOld = colOld * scaleFactor\n # move origin from middle to upper left\n rowOld = rowOld + mid\n colOld = colOld + mid\n\n # modify so rowOld and colOld are valid indices\n rowOld = int(rowOld)\n colOld = int(colOld)\n rowOld = max(0, rowOld)\n rowOld = min(size - 1, rowOld)\n colOld = max(0, colOld)\n colOld = min(size-1, colOld)\n newA[row][col] = imgA[rowOld][colOld]\n return newA\n\n\nclass SketchComponents():\n # Part of project DragonPaint\n # SketchComponents stores sketch image and uses component information\n # (area, color, bounding box) to label sketch parts of image so they can\n # be colored according to a part coloring map, e.g. color dragon body\n # green, spikes yellow and leave eyes white.\n # Image = black line cartoon sketch on white background from \"Paint\"\n # program with well connected lines and certain geometric relationships\n # (e.g. background bigger than body and body bigger than any other dragon\n # part).\n\n def __init__(self, image, cartoonCharType='flower'):\n # For the two cartoonCharTypes ('flower' and 'dragon') finds white\n # components, background, body and spikes at init\n # For cartoonCharType=='dragon', finds distance between each component\n # and background, finds line width and finds 'eye'\n # Depending on incoming data, might want to change to grayscale or\n # scale to standard size\n\n # store sketch image, threshold to black and white, calculate labels\n # and regionprops for connected components\n self.image = image\n ret, self.blackWhiteSketch = cv2.threshold(self.image,\n 200,\n 255,\n cv2.THRESH_BINARY)\n\n # for region in regions we can access e.g. region.label, region.area, region.coords\n self.regions = self.labelComponents()\n\n # label background and body (largest and second largest white\n # components) and spikes+ (other white components)\n self.backgroundLabel, self.bodyLabel, self.spikeLabels = self.setBackgroundBodySpikes()\n\n if cartoonCharType == 'dragon':\n self.eyeLabels, self.spikeLabels = self.setEyeSpikes()\n\n # BACKGROUND V. BODY V. SPIKES+ for dragons\n # (OR BACKGROUND V. CENTERS V. PETALS for flowers)\n\n # Use components' size and color (white) to distinguish between background,\n # body and spikes+.\n # The largest white component is background. The second largest is body.\n # The rest of the white components are spikes (or spikes+other for dragon)\n\n # LABEL CONNECTED COMPONENTS\n def labelComponents(self):\n return regionprops(measure.label(self.blackWhiteSketch))\n\n # LABEL BACKGROUND, BODY, SPIKES\n def setBackgroundBodySpikes(self):\n # background is largest white region, body is next largest; spikeLabels\n # is the remaining white components\n # By default, measure.label assigns all black pixels label 0.\n\n spikeLabels = sorted([region.label for region in self.regions\n if region.label != 0],\n key=lambda label: self.regions[label - 1].area)\n backgroundLabel = spikeLabels.pop()\n bodyLabel = spikeLabels.pop()\n return backgroundLabel, bodyLabel, spikeLabels\n\n # SPIKES V. 
EYE\n def setEyeSpikes(self):\n # For dragon only; not needed for flowers.\n # The eye is in the interior of the body and the spikes are on the edge.\n # If \"spike\"'s puffed up bounding box intersects the background it's a\n # spike; if not, it's an eye (or part of an eye)\n\n linePlus = 5\n # linePlus gives a puffed up bounding box to account for drawing line\n # thickness; it may need to be adjusted if pictures have been scaled\n # down from original (400x400 image with 5 pixel line.)\n\n eyeLabels = []\n backgrd = self.regions[self.backgroundLabel-1].coords\n for spikeLabel in self.spikeLabels:\n intersection = False\n rowmin, colmin, color0, rowmax, colmax, color2 = self.regions[spikeLabel - 1].bbox\n for point in backgrd:\n if (not intersection) and point[0] > rowmin - linePlus and point[0] < rowmax + linePlus:\n if point[1] > colmin - linePlus and point[1] < colmax + linePlus:\n intersection = True\n if not intersection:\n eyeLabels.append(spikeLabel)\n newSpikeLabels = [label for label in self.spikeLabels\n if label not in eyeLabels]\n return eyeLabels, newSpikeLabels\n\n\n# TEST CODE FOR SKETCHCOMPONENTS\n# run line with \"Flower1.png\" to see flower or the line with \"Dragon4.png\"\n# to see dragon\nflower = SketchComponents(cv2.imread(\"Flower1.png\"))\n\n# flower=SketchComponents(cv2.imread(\"Dragon4.png\"), 'dragon')\nimgColored = flower.image\n\n# color spikes yellow\nfor spikeLabel in flower.spikeLabels:\n spike = flower.regions[spikeLabel - 1].coords\n for point in spike:\n imgColored[point[0], point[1], 0] = 0\n imgColored[point[0], point[1], 1] = 241\n imgColored[point[0], point[1], 2] = 253\n\n# color center orange\nbody = flower.regions[flower.bodyLabel - 1].coords\nfor point in body:\n imgColored[point[0], point[1], 0] = 0\n imgColored[point[0], point[1], 1] = 159\n imgColored[point[0], point[1], 2] = 255\ncv2.imshow('image colored', imgColored)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# TEST CODE FOR AUGMENTATIONS (one function)\nflowerGray = cv2.imread('Flower1.png', 0)\nprint(flowerGray.shape)\naugment = Augmentations()\nelasticGray = augment.elasticDeformationGray(flowerGray, 100, 99)\ncv2.imshow('elasticGray', elasticGray)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"dragon-paint-public.py","file_name":"dragon-paint-public.py","file_ext":"py","file_size_in_byte":15403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"410469449","text":"from setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='AWS-Mage2Connector',\n version='0.0.2',\n url='https://github.com/ideabosque/AWS-Mage2Connector',\n license='MIT',\n author='Idea Bosque',\n author_email='ideabosque@gmail.com',\n description='Use to connect Magento 2.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=find_packages(),\n zip_safe=False,\n include_package_data=True,\n platforms='any',\n install_requires=['requests', 'pymysql',],\n download_url = 'https://github.com/ideabosque/AWS-Mage2Connector/tarball/0.0.2',\n keywords = ['Magento 2'], # arbitrary keywords\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: 
Python Modules'\n ],\n python_requires='>=3.7'\n)\n","sub_path":"ext/aws_mage2connector/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"152275745","text":"import player\nimport time\nimport threading\n\nclass Queue(object):\n queue = []\n position = -1\n\n def get_queue(self):\n dict_queue = [media.dictify() for media in self.queue]\n obj = {'queue': dict_queue}\n if self.is_valid_position(self.position) and \\\n self.queue[self.position] == player.now_playing:\n obj['position'] = self.position\n return obj\n\n def add(self, media):\n self.queue.append(media)\n return self.get_queue()\n\n def remove(self, pos):\n if self.is_valid_position(pos):\n del self.queue[pos]\n if self.position > pos:\n self.position -= 1\n elif self.position == pos:\n player.stop()\n return self.get_queue()\n\n def clear(self):\n self.queue = []\n self.position = -1\n player.stop()\n return self.get_queue()\n\n def now_playing(self):\n obj = {'player_status': player.get_status()}\n if player.now_playing:\n obj['media'] = player.now_playing.dictify()\n return obj\n\n def set_position(self, pos):\n if self.is_valid_position(pos):\n self.position = pos\n player.play_media(self.queue[self.position])\n return self.queue[self.position].dictify()\n\n def play_next(self, force=False):\n if player.vlc_play_youtube():\n return self.queue[self.position].dictify()\n if self.has_next():\n return self.set_position(self.position + 1)\n if force and self.is_valid_position(0):\n return self.set_position(0)\n\n def is_valid_position(self, pos):\n return 0 <= pos < len(self.queue)\n\n def has_next(self):\n return self.is_valid_position(self.position + 1)\n\n def autoplay_thread(self):\n while True:\n if player.has_ended() and \\\n (self.has_next() or player.is_youtube_video()):\n self.play_next()\n time.sleep(0.25)\n\n def start_autoplay(self):\n thread = threading.Thread(target=self.autoplay_thread)\n thread.daemon = True\n thread.start()\n","sub_path":"acoustics/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"158254407","text":"from sklearn.metrics import accuracy_score, confusion_matrix\nfrom pyspark.ml.classification import LinearSVC\nfrom pyspark.sql import SQLContext\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.feature import StandardScaler\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql.functions import *\n\nfrom src import config as cfg\n\ncfg.dir\n\nfeatures = [\"Retweets\", \"Favorites\", \"New_Feature\"] # Class is label\n\n\nclass SVMClassifier(object):\n\n def __init__(self, file_name, spark_context, maxIter=100, regParam=0.0, tol=1e-6, threshold=0.0,\n aggregationDepth=2):\n self.sqlContext = SQLContext(spark_context)\n\n self.spark_context = spark_context\n\n self.data = self.sqlContext.read.options(header='true', inferschema='true', delimiter=',').csv(file_name)\n\n self.data.cache()\n\n self.lr_data = self.data.select(col(\"Class\").alias(\"label\"), *features)\n\n vectorAssembler = VectorAssembler(inputCols=features, outputCol=\"unscaled_features\")\n\n standardScaler = StandardScaler(inputCol=\"unscaled_features\", outputCol=\"features\")\n\n self.settings = [('maxIter',maxIter), ('regParam',regParam), ('tol',tol), ('threshold',threshold),('aggregationDepth',aggregationDepth)]\n\n self.SVM = LinearSVC(maxIter=maxIter, 
regParam=regParam, tol=tol, threshold=threshold,\n aggregationDepth=aggregationDepth)\n\n stages = [vectorAssembler, standardScaler, self.SVM]\n\n pipeline = Pipeline(stages=stages)\n\n self.model = pipeline.fit(self.lr_data)\n\n def classify_testdata(self, filename):\n \"\"\"\n Function that classifies the testing dataset\n :param filename: The filename that contains the testing dataset\n :return: The predicted labels for the testing dataset\n \"\"\"\n self.test_file = self.sqlContext.read.format('csv').options(header='true', inferschema='true').load(filename)\n\n lr_data = self.test_file.select(col(\"Class\").alias(\"label\"), *features)\n\n prediction = self.model.transform(lr_data)\n\n return prediction\n\n def classify(self, x):\n \"\"\"\n Function that classifies an entry\n :param x: Entry to be predicted\n :return: The predicted label\n \"\"\"\n\n x[\"Retweets\"] = int(x[\"Retweets\"])\n x[\"Favorites\"] = float(x[\"Favorites\"])\n\n data_frame = self.sqlContext.createDataFrame([x])\n output = self.model.transform(data_frame)\n return output.select(col(\"prediction\")).collect()[0].prediction\n\n def confusion_matrix(self, predict):\n \"\"\"\n Function that computes confusion matrix to evaluate the accuracy of the classification\n :param predict: The predicted labels that is used to compute the confusion matrix\n :return: The confusion matrix\n \"\"\"\n predict_list = [i.prediction for i in predict.select(\"prediction\").collect()]\n test_class = [i.Class for i in self.test_file.select(\"Class\").collect()] # self.test_file['Class']\n accuracy = accuracy_score(test_class, predict_list)\n accuracy = accuracy * 100\n print(\"###########################SVM############################\")\n print(\"Accuracy for SVM \" + str(accuracy))\n print(\"Confusion Matrix for SVM with settings\" + str(self.settings))\n print(confusion_matrix(test_class, predict_list))\n print(\"##########################################################\")\n return accuracy\n\n # def plot(self, predict):\n # \"\"\"\n # Function that builds the 3D plot\n # :return:\n # \"\"\"\n # columns_header = ['Retweets', 'Favorites', 'New_Feature', 'Class']\n #\n # testing_file_location = 'Test_feature_extracted.csv'\n # training_file_location = 'Training_feature_extracted.csv'\n #\n # train_file = pd.read_csv(training_file_location, sep=\",\", usecols=columns_header, index_col=None)\n # test_file = pd.read_csv(testing_file_location, sep=',', usecols=columns_header, index_col=None)\n #\n # train = [i.label for i in self.lr_data.select(\"label\").collect()]\n # test_class = [i.prediction for i in predict.select(\"prediction\").collect()]\n #\n # color = ['red' if label == 1 else 'green' for label in train]\n # color_test = ['black' if label == 1 else 'blue' for label in test_class]\n #\n # coef_values = self.model.stages[2].coefficients.values.tolist()\n #\n # z = lambda x, y: (-self.model.stages[2].intercept - coef_values[0] * x - coef_values[1]) / coef_values[2]\n # tmp = np.linspace(1, 140, 14)\n # x, y = np.meshgrid(tmp, tmp)\n # fig = plt.figure()\n # ax = Axes3D(fig)\n # ax.plot_surface(x, y, z(x, y))\n # ax.scatter(train_file['Retweets'], train_file['Favorites'],train_file['New_Feature'], zdir='z', s=20, depthshade=True, color=color, marker='*')\n # ax.scatter(test_file['Retweets'], test_file['Favorites'], test_file['New_Feature'], zdir='z',s=20, depthshade=True, color=color_test, marker='*')\n # plt.title(\"SVM Classifier\")\n # ax.set_xlabel('Retweets axis')\n # ax.set_ylabel('Favorites axis')\n # ax.set_zlabel('Z 
axis')\n    #     ax.view_init(azim=-70,elev=10)\n    #     ax.legend(loc=2)\n    #     plt.show()\n\n\nif __name__ == \"__main__\":\n    print(\"You are in main\")\n","sub_path":"pyspark/local/classification/src/algorithms/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"643944148","text":"from collections import deque\n# deque is thread-safe; list is not thread-safe\nuser_list = deque(['laoli','laowang'])  # the constructor takes any iterable\nuser_list2 = deque(['laowang11','laozhao12'])\n# user_list.appendleft('laolao')  # a deque supports appending at both the head and the tail\nuser_list.extend(user_list2)  # extends user_list in place rather than creating a new deque\nuser_list.insert(0, 'hahaha')  # insert an element into user_list\nuser_list.reverse()  # reverses user_list in place rather than creating a new deque\nprint(user_list)\n\n# queue.Queue uses a deque internally","sub_path":"Learn-python/python编程/01-python-collections模块/deque双端队列.py","file_name":"deque双端队列.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"310916189","text":"from PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\nimport random\nimport json\nimport cv2\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nget_video_name = lambda x:x.split('/')[-2]\n\n\nclass spatial_dataset(Dataset): \n    def __init__(self, data_list_dict, rows = 320,\n                 cols = 640,resizerow = 224,\n                 resizecol = 224,transform=None,num_classes = 63):\n        self.data = data_list_dict\n        self.transform = transform\n        self.img_rows = rows\n        self.img_cols = cols\n        self.resizerow = resizerow\n        self.resizecol = resizecol\n        self.num_classes = num_classes\n\n\n    def __len__(self):\n        return len(self.data)\n\n    def __get_randNx(self, label, N):\n        filteridx = [i for i,j in enumerate(self.data) if j['class']==label]\n        return random.sample(filteridx, N)\n\n\n    def __getitem__(self,idx):\n        label = self.data[idx]['class']\n        idxlist = self.__get_randNx(label,3)\n        data = {}\n        for i,idx_ in enumerate(idxlist):\n            impath = self.data[idx_]['x']\n            data['img'+str(i)] = self.transform(Image.fromarray(cv2.imread(impath)))\n        return data, label\n\n\n\nclass spatial_dataloader():\n    def __init__(self, BATCH_SIZE, num_workers, path, imagelist_path):\n        self.BATCH_SIZE=BATCH_SIZE\n        self.num_workers=num_workers\n        self.data_list=self.read_list(imagelist_path)\n        self.train_list = self.data_list[:404]\n        self.test_list = self.data_list[454:]\n        self.val_list = self.data_list[404:454]\n\n    def __call__(self, *args, **kwargs):\n        train_loader = self.get_data(self.train_list,\"Training\")\n        val_loader = self.get_data(self.val_list,\"Validation\")\n        test_loader = self.get_data(self.test_list,\"Test\")\n        return train_loader, val_loader, test_loader\n\n    def read_list(self, flowlist_path):\n        with open(flowlist_path, 'r') as f:\n            x = json.loads(f.read())\n        return x\n\n    def get_data(self,data_list,mode = ''):\n        data_set = spatial_dataset(data_list_dict=data_list,\n                                   transform=transforms.Compose([\n                                       transforms.RandomCrop(224),\n                                       transforms.RandomHorizontalFlip(),\n                                       transforms.ToTensor(),\n                                       transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n                                   ]))\n        print('==> '+mode+' data :', len(data_set), 'frames')\n        print(data_set[1][0]['img1'].size())\n\n        data_loader = DataLoader(\n            dataset=data_set,\n            batch_size=self.BATCH_SIZE,\n            shuffle=True,\n            num_workers=self.num_workers,\n            pin_memory=True)\n\n        return data_loader\n\n\n\n\nif __name__ == '__main__':\n    \n    dataloader = 
spatial_dataloader(BATCH_SIZE=1,\n num_workers=1,\n path='.data/images/',\n imagelist_path='./Egok_list/imagelist.txt'\n )\n train_loader,val_loader,test_video = dataloader()\n","sub_path":"dataloader/oldpyfiles/spatial_dataloader_new.py","file_name":"spatial_dataloader_new.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"471105676","text":"import unittest\n\nfrom database.models import Offer, Flat\nfrom app import db\n\nfrom app.flats.utils import *\n\nfrom tests.app import AppTestCase\n\n\nclass SimilarityMeasuresTest(unittest.TestCase):\n\n def test_calc_euclidean_distance_between_two_vectors(self):\n v1 = [0.5, 1.0, 2.0]\n v2 = [1.0, 2.0, 0.5]\n\n dist = euclidean_distance(v1, v2)\n\n self.assertAlmostEqual(dist, 1.87, 2)\n\n def test_calc_euclidean_distance_between_vectors_of_different_size(self):\n v1 = [0.5, 1.0, 2.0]\n v2 = [1.0, 2.0]\n\n dist = euclidean_distance(v1, v2)\n\n self.assertAlmostEqual(dist, 1.12, 2)\n\n def test_calc_cosine_similarity_between_two_vectors(self):\n v1 = [0.5, 1.0, 2.0]\n v2 = [1.0, 2.0, 0.5]\n\n sim = cosine_similarity(v1, v2)\n\n self.assertAlmostEqual(sim, 0.67, 2)\n\n def test_calc_cosine_similarity_between_vectors_of_different_size(self):\n v1 = [0.5, 2.0]\n v2 = [1.0, 0.5]\n\n sim = cosine_similarity(v1, v2)\n\n self.assertAlmostEqual(sim, 0.65, 2)\n\n\nclass SerializerTest(unittest.TestCase):\n\n def test_extract_flats_attribute(self):\n flat = Flat(area=50.30, rooms=2, floor=1)\n\n extractor = Serializer(attrs=[\"area\", \"rooms\"])\n\n flat_attrs = extractor(flat)\n\n self.assertEqual(len(flat_attrs), 2)\n self.assertIn(\"area\", flat_attrs)\n self.assertIn(\"rooms\", flat_attrs)\n self.assertEqual(flat_attrs[\"area\"], 50.30)\n self.assertEqual(flat_attrs[\"rooms\"], 2)\n\n def test_raise_error_when_attribute_does_not_exist(self):\n flat = Flat(area=50.30, rooms=2, floor=1)\n\n extractor = Serializer(attrs=[\"area\", \"rooms\", \"abc\"])\n\n with self.assertRaises(AttributeError):\n flat_attrs = extractor(flat)\n\n def test_serializer_all_attributes_when_attrs_not_specified(self):\n cls = type(\"TestCls\", (object,), dict(counter=1))\n obj = cls()\n obj.name = \"Test\"\n\n extractor = Serializer()\n attrs = extractor(obj)\n\n self.assertEqual(attrs[\"counter\"], 1)\n self.assertEqual(attrs[\"name\"], \"Test\")\n \n\n def test_keep_order_of_attributes(self):\n attr_names = \"abcdefghijkl\"\n\n cls = type(\"TestCls\", (object,), dict(zip(attr_names, range(1, 100))))\n obj = cls()\n\n extractor = Serializer(attrs=attr_names)\n \n attrs = extractor(obj)\n\n self.assertEqual(attr_names, \"\".join(attrs.keys()))\n\n\nclass MinMaxScalerTest(unittest.TestCase):\n\n def test_normalize_values_of_attributes(self):\n flat = dict(area=50, rooms=3, floor=2)\n\n scaler = MinMaxScaler(\n area=[1, 75], rooms=[1, 10], floor=[1, 5]\n )\n\n flat_norm = scaler(flat)\n\n self.assertAlmostEqual(flat_norm[\"area\"], 0.66, 2)\n self.assertAlmostEqual(flat_norm[\"rooms\"], 0.22, 2)\n self.assertAlmostEqual(flat_norm[\"floor\"], 0.25, 2)\n\n def test_raise_error_when_ranges_not_defined(self):\n flat = dict(area=50, rooms=3, floor=2)\n\n scaler = MinMaxScaler(area=[1, 75])\n\n with self.assertRaises(Exception):\n flat_norm = scaler(flat)\n\n\nclass StandardScalerTest(unittest.TestCase):\n\n def test_scale_attributes_with_standard_scaler(self):\n flat = dict(area=50, rooms=3, floor=2)\n\n scaler = StandardScaler(\n area=dict(mean=75, sd=25),\n rooms=dict(mean=2, sd=2),\n 
floor=dict(mean=3, sd=3)\n        )\n\n        flat_sd = scaler(flat)\n\n        self.assertAlmostEqual(flat_sd[\"area\"], -1.00, 2)\n        self.assertAlmostEqual(flat_sd[\"rooms\"], 0.50, 2)\n        self.assertAlmostEqual(flat_sd[\"floor\"], -0.33, 2)\n\n    def test_raise_error_when_params_not_defined(self):\n        flat = dict(area=50, rooms=3, floor=2)\n\n        scaler = StandardScaler(area=dict(mean=75, sd=25))\n\n        with self.assertRaises(Exception):\n            flat_sd = scaler(flat)\n\n\nclass FlatSimCalculatorTest(unittest.TestCase):\n\n    def setUp(self):\n        super().setUp()\n        self.sim_calc = FlatSimCalculator(\n            serializer=Serializer(attrs=[\"area\", \"rooms\", \"floor\"]),\n            scaler=StandardScaler(\n                area=dict(mean=75, sd=25),\n                rooms=dict(mean=2, sd=2),\n                floor=dict(mean=3, sd=3)\n            ),\n            sim_measure=cosine_similarity\n        )\n\n    def test_calc_similarity_of_two_flats(self):\n        flat1 = Flat(area=50.30, rooms=2, floor=1)\n        flat2 = Flat(area=63.50, rooms=3, floor=1)\n\n        sim = self.sim_calc(flat1, flat2)\n\n        self.assertAlmostEqual(sim, 0.79, 2)\n","sub_path":"tests/app/flats/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"489403475","text":"from mylib import RelationData\nfrom pathlib import Path\nfrom argparse import ArgumentParser\n\n\nparser = ArgumentParser()\nparser.add_argument(\"input\", type=Path)\nparser.add_argument(\"--tex\", action=\"store_true\")\nparser.add_argument(\"--all\", action=\"store_true\")\nargs = parser.parse_args()\n\n\ndef get_stat(data):\n    stat_ent = {}\n    stat_rel = {}\n    for dat in data.values():\n        for ent in dat[\"entity\"].values():\n            label = ent[\"label\"]\n            if not label in stat_ent:\n                stat_ent[label] = 0\n            stat_ent[label] += 1\n\n        for rel in dat[\"relation\"].values():\n            label = rel[\"label\"]\n            if not label in stat_rel:\n                stat_rel[label] = 0\n            stat_rel[label] += 1\n\n    std_ent = dict(sorted(stat_ent.items(), key=lambda x: x[1], reverse=True))\n    std_rel = dict(sorted(stat_rel.items(), key=lambda x: x[1], reverse=True))\n    return std_ent, std_rel\n\n\n\n\nif args.all:\n    edics = {}\n    rdics = {}\n    modes = ['train', 'devel', 'test']\n    for m in modes:\n        d = args.input/m\n        if d.is_dir():\n            data = RelationData(d, pattern=\"*.ann\")\n            e, r = get_stat(data)\n            e['all'] = sum(e.values())\n            edics[m] = e\n            r['all'] = sum(r.values())\n            rdics[m] = r\n    std_ent = []\n    std_rel = []\n    for k in edics['train'].keys():\n        lst = [k]\n        lst.extend([edics[m][k] if k in edics[m] else 0 for m in modes])\n        std_ent.append(lst)\n    for k in rdics['train'].keys():\n        lst = [k]\n        lst.extend([rdics[m][k] if k in rdics[m] else 0 for m in modes])\n        std_rel.append(lst)\n\nelse:\n    data = RelationData(args.input, pattern=\"*.ann\")\n    std_ent, std_rel = get_stat(data)\nif args.tex:\n    txt_ent = \" \\\\\\\\ \\n\".join(map(lambda x: \" & \".join(map(str, x)), std_ent))\n    txt_rel = \"\\\\\\\\ \\n\".join(map(lambda x: \" & \".join(map(str, x)), std_rel))\nelse:\n    txt_ent = \"\\n\".join(map(lambda x: \"\\t\".join(map(str, x)), std_ent))\n    txt_rel = \"\\n\".join(map(lambda x: \"\\t\".join(map(str, x)), std_rel))\n\nprint(\"Entity:\")\nprint(txt_ent)\nprint()\nprint(\"Relation:\")\nprint(txt_rel)","sub_path":"src/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"218816498","text":"#--------------------------------------------------------------------\n# Argument syntax:\n#\n# Yen.py filePath source destination k\n#\n# Output: [ [path], path_length]\n#\n#--------------------------------------------------------------------\n\n
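# Added note (not in the original): this implements Yen's k-shortest-paths\n# scheme -- for each prefix (\"root path\") of the previous path, edges used by\n# already-found paths sharing that prefix are removed, a \"spur path\" is\n# computed from the spur node with Dijkstra, and the root+spur candidates are\n# collected in B before the best one is promoted into A.\n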
\nfrom networkx import read_edgelist\nfrom math import inf\nfrom sys import argv\nimport Dijkstra\n\n\ndef open_file(filename):\n\n    try:\n        fh = open(filename, 'rb')\n        return fh\n    except IOError:\n        print(\"No such file.\")\n        exit(0)\n\n\ndef get_candidate(root_path, spur_path):\n\n    return root_path[:-1] + spur_path\n\n\ndef pop_shortest_candidate(candidates):\n\n    shortest_candidate = {'length': inf, 'path': []}\n\n    for length, path in candidates.items():\n        if length < shortest_candidate['length']:\n            shortest_candidate['path'] = path\n            shortest_candidate['length'] = length\n\n    del candidates[shortest_candidate['length']]\n\n    return shortest_candidate['path']\n\n\ndef get_nodes(path, begin, end):\n\n    return path[begin:end]\n\n\ndef get_path_length(graph, path):\n\n    path_length = 0\n\n    for index, node in enumerate(path):\n        if index < len(path)-1:\n            path_length += graph.get_edge_data(node, path[index+1])['weight']\n        else:\n            break\n\n    return path_length\n\n\ndef k_shortest_paths(graph, src, dest, K):\n\n    A = []\n    B = {}\n    root_path = []\n    removed_edges = []\n    root_path_length = 0\n    output = []\n\n    A.append(Dijkstra.shortest_path(graph, src, dest))\n\n    #import pdb; pdb.set_trace()\n    for k in range(1,int(K)):\n        for index, spur_node in enumerate(A[k-1]):\n\n            if index == len(A[k-1])-1:\n                break\n\n            root_path.append(spur_node)\n            try:\n                weight = graph.get_edge_data(A[k - 1][index], A[k - 1][index + 1])['weight']\n            except:\n                continue\n\n            for path in A:\n                if root_path == get_nodes(path, 0, index+1):\n                    edge_info = []\n                    removed_weight = graph.get_edge_data(path[index], path[index + 1])\n                    try:\n                        graph.remove_edge(path[index], path[index + 1])\n                        edge_info.append(path[index])\n                        edge_info.append(path[index + 1])\n                        edge_info.append(removed_weight)\n                        removed_edges.append(edge_info)\n                    except:\n                        continue\n\n            for root_path_node in root_path:\n                if root_path_node != spur_node:\n                    for neighbor in graph.neighbors(root_path_node):\n                        edge_info = []\n                        removed_weight = graph.get_edge_data(root_path_node, neighbor)\n                        try:\n                            graph.remove_edge(root_path_node, neighbor)\n                            edge_info.append(root_path_node)\n                            edge_info.append(neighbor)\n                            edge_info.append(removed_weight)\n                            removed_edges.append(edge_info)\n                        except:\n                            continue\n                else:\n                    break\n            try:\n                spur_path = Dijkstra.shortest_path(graph, spur_node, dest)\n                spur_path_length = Dijkstra.shortest_path_length(graph, spur_node, dest)\n            except:\n                continue\n\n            candidate = get_candidate(root_path, spur_path)\n\n            B[spur_path_length + root_path_length] = candidate\n\n            for edge in removed_edges:\n                graph.add_edge(edge[0], edge[1], edge[2])\n\n            #####\n            # TODO: instead of restoring the edges, rebuild the graph\n            ####\n\n            removed_edges = []\n            root_path_length += weight\n\n        for edge in removed_edges:\n            graph.add_edge(edge[0], edge[1], edge[2])\n\n        removed_edges = []\n        root_path_length += weight\n\n        try:\n            shortest_candidate = pop_shortest_candidate(B)\n            A.append(shortest_candidate)\n        except:\n            print(\"No more paths.\")\n            return A\n\n        root_path = []\n        root_path_length = 0\n\n    path = A[int(K)-1]\n    output.append(path)\n    output.append(get_path_length(graph, path))\n\n    return output\n\n\nif __name__ == \"__main__\":\n\n    if len(argv) > 5:\n        exit(\"Too many arguments. Exiting..\")\n\n    elif len(argv) < 5:\n        #exit(\"Missing arguments. 
Exiting..\")\n\n fh = open_file(argv[1])\n graph = read_edgelist(fh)\n\n n = 1\n while n < 15:\n for i in range(n + 1, 15):\n # print(i)\n print(k_shortest_paths(graph, str(n), str(i), '3')[1])\n n += 1\n\n else:\n fh = open_file(argv[1])\n graph = read_edgelist(fh)\n output = k_shortest_paths(graph, argv[2], argv[3], argv[4])\n print(output)\n","sub_path":"Yen.py","file_name":"Yen.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"415212956","text":"import ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckanext.datapackager.logic.action.create\nimport ckanext.datapackager.logic.action.get\n\n\nclass DataPackagerPluginBase(plugins.SingletonPlugin):\n pass\n\n\nif toolkit.check_ckan_version(u'2.9'):\n from ckanext.datapackager.plugin.flask_plugin import MixinPlugin\n ckan_29_or_higher = True\nelse:\n from ckanext.datapackager.plugin.pylons_plugin import MixinPlugin\n ckan_29_or_higher = False\n\nclass DataPackagerPlugin(DataPackagerPluginBase, MixinPlugin):\n '''Plugin that adds importing/exporting datasets as Data Packages.\n '''\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IConfigurer)\n\n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n\n def get_actions(self):\n return {\n 'package_create_from_datapackage':\n ckanext.datapackager.logic.action.create.package_create_from_datapackage,\n 'package_show_as_datapackage':\n ckanext.datapackager.logic.action.get.package_show_as_datapackage,\n }\n","sub_path":"ckanext/datapackager/plugin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"206265120","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nplt.style.use('ggplot')\n#np.random.seed(1337)\n\n\n\nclass VMC:\n def __init__(self, dim, n_particles, mc_cycles=100000, mc_step_size=1.0,\n n_variations=10, variation_start=0.9, variation_end=1.15):\n self.dim = dim\n self.n = n_particles\n self.cycles = mc_cycles\n self.step_size = mc_step_size\n self.n_variations = n_variations\n self.variation_start = variation_start\n self.variation_end = variation_end\n self.positions = np.zeros((self.n, self.dim))\n\n\n\n def trial_function(self, positions):\n\n trial_function = np.exp(-self.alpha \\\n * np.sum(np.linalg.norm(positions, axis=1)**2))\n #print(np.sum(np.linalg.norm(self.positions, axis=1)**2), \" kalle \", trial_function, self.alpha)\n return trial_function\n\n\n def local_energy(self):\n local_energy = self.dim*self.n*self.alpha + (0.5 - 2*self.alpha**2)\\\n * np.sum(np.linalg.norm(self.positions, axis=1)**2)\n return local_energy\n\n\n\n def metropolis_brute(self, wave_function_new, wave_function_old):\n if np.random.random() < wave_function_new**2 / wave_function_old**2:\n return True\n else:\n return False\n\n\n\n def greens_function(self):\n return\n\n\n\n def drift_force(self):\n return\n\n\n\n def run_vmc(self):\n positions_old = np.zeros_like(self.positions)\n alphas = np.linspace(\n self.variation_start,\n self.variation_end,\n self.n_variations+1\n )\n energies = np.zeros_like(alphas)\n\n for i, self.alpha in enumerate(alphas):\n print(f'iteration: {i}, alpha: {self.alpha}')\n positions_old = self.step_size \\\n * (np.random.random((self.n, self.dim)) - 0.5)\n wave_function_old = self.trial_function(positions_old)\n\n energy = 0.0\n energy_squared = 
0.0\n\n            for cycle in range(self.cycles):\n                self.positions = positions_old + self.step_size \\\n                    * (np.random.random((self.n, self.dim)) - 0.5)\n                wave_function_new = self.trial_function(self.positions)\n                delta_energy = 0.0\n                \n                if self.metropolis_brute(wave_function_new, wave_function_old):\n                    delta_energy = self.local_energy()\n                    wave_function_old = wave_function_new\n                    positions_old = self.positions\n                else:\n                    self.positions = positions_old\n\n                energy += delta_energy\n                energy_squared += delta_energy**2\n\n            energy = energy / self.cycles\n            energy_squared = energy_squared / self.cycles\n            energy_variance = energy_squared - energy**2\n            energies[i] = energy\n\n\n        return energies, alphas, energy_variance\n\n\n\n\n\n\nif __name__ == '__main__':\n    start_time = time.time()\n    \n    dim = 1\n    n_particles = 500    #results for n=1,10,100 with step_size=1.0\n    vmc = VMC(dim, n_particles, mc_step_size=0.4) #first result for n=500 at step_size=0.4\n    energies, alphas, energy_variance = vmc.run_vmc()\n    print(energies)\n    \n    end_time = time.time()\n    print(\"CPU time: \", end_time-start_time)\n    \n    plt.plot(alphas, energies)\n    plt.xlabel(r'$\\\alpha$')\n    plt.ylabel(r'$\\\langle E\\\rangle$')\n    plt.show()\n    \n","sub_path":"Project 1/vmc.py","file_name":"vmc.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"396462785","text":"up = b'Z\x1a{\x15\x00\x00\x01\x01'\ndown = b'X:{\x15\xff\x7f\x02\x05'\nbuttons = {'A': (1, 1), 'b': (1, 2), 'x': (1, 0), 'y': (1, 3), 'LB': (1, 4), 'RB': (1, 5),\n           'LT': (1, 6), 'RT': (1, 7), 'back': (1, 8), 'start': (1, 9),\n           'UP': (128, 2, 5), 'Down': (127, 2, 5), 'Left': (128, 2, 4), 'Right': (127, 2, 4),\n           'L_rocker_l': (128, 2, 0), 'L_rocker_r': (127, 2, 0), 'L_rocker_u': (128, 2, 1),\n           'L_rocker_d': (127, 2, 1), 'R_rocker_l': (128, 2, 2), 'R_rocker_r': (127, 2, 2),\n           'R_rocker_u': (128, 2, 3), 'R_rocker_d': (127, 2, 3)}\nnew_buttons = {value: key for key, value in buttons.items()}\nprint(new_buttons)\n\n\ndef search_button(one_bytes, one_dict=buttons):\n    a = one_bytes[-1]\n    b = one_bytes[-2]\n    c = one_bytes[4]\n    d = one_bytes[-3]\n    # the dict values are tuples, so look them up with tuples, not lists\n    if c == 1 and d == 0:\n        button = list(one_dict.keys())[list(one_dict.values()).index((b, a))]\n        print(button + ' pressed')\n    elif c == 0:\n        button = list(one_dict.keys())[list(one_dict.values()).index((b, a))]\n        print(button + ' released')\n    elif c == 255:\n        button1 = list(one_dict.keys())[list(one_dict.values()).index((d, b, a))]\n        print(button1 + ' pressed')\n    elif c == 1:\n        button1 = list(one_dict.keys())[list(one_dict.values()).index((d, b, a))]\n        print(button1 + ' pressed')\n\n\nsearch_button(down, buttons)\n\n\n","sub_path":"qt_demo1/practice/1st.py","file_name":"1st.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"51678921","text":"\"\"\"\nClass that contains attributes and methods for barriers in the angry birds simulation\n\"\"\"\n\nclass Barrier(object):\n    def __init__(self,n,s,xc,yc,r):\n        \"\"\"\n        Initialization method for barrier class -- takes name, strength (hitpoints),\n        x position for center, y position for center, and radius\n        \"\"\"\n        self.name = n\n        self.strength = float(s)\n        self.x = float(xc)\n        self.y = float(yc)\n        self.radius = float(r)\n    \n    def calculate_damage(self, bird):\n        \"\"\"\n        Given a bird, this method updates the barrier's strength based on\n        how much damage the given bird inflicts.\n        \"\"\"\n        dmg = bird.mass * bird.speed()**2\n        self.strength-=dmg\n        if self.strength<0:\n            self.strength=0\n
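\n# Added example (not in the original): a bird with mass 2.0 and speed 3.0\n# inflicts 2.0 * 3.0**2 = 18.0 damage, and strength is clamped at zero.\n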
","sub_path":"RPI-CSCI-1100 Computer Science I/hw/HW8/Barrier.py","file_name":"Barrier.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"71965654","text":"class ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n    def __str__(self):\n        a = self\n        b = []\n        while a != None:\n            b.append(a.val)\n            a = a.next\n        return str(b)\n\ndef addTwoNumbers(l1, l2):\n    ans = ListNode(0)\n    curr = ans\n    carry, sum = 0, 0\n    new1, new2 = l1, l2\n    while new1 != None or new2 != None:\n        q = 0 if new1 == None else new1.val\n        w = 0 if new2 == None else new2.val\n        sum = carry + q + w\n        carry = sum // 10  # integer division, so this also works on Python 3\n        sum = sum - (carry * 10)\n        curr.next = ListNode(sum)\n        curr = curr.next\n        new1 = new1.next if new1 != None else None\n        new2 = new2.next if new2 != None else None\n    if carry != 0:\n        curr.next = ListNode(carry)\n    return ans.next\n\na = ListNode(5)\n#a.next = ListNode(8)\nb = ListNode(5)\nc = addTwoNumbers(a, b)\nprint(c)","sub_path":"Algorithms/NO002AddTwoNumbers.py","file_name":"NO002AddTwoNumbers.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"33268049","text":"\"\"\" Example of modifying mutable variables inside the function\r\n\r\n\"\"\"\r\ndef do_something(txt: str, lst: list, dt: dict):\r\n    txt = txt.upper()\r\n    lst.append('new element')\r\n    lst[1] = int(lst[1])\r\n    dt['new key'] = True\r\n\r\n\r\narr = [1, '2', 3]     # list is mutable\r\nmsg = 'some message'  # string is immutable\r\ndata = {}             # dictionary is mutable\r\n\r\ndo_something(msg, arr, data)\r\n\r\nprint(msg)\r\nprint(arr)\r\nprint(data)\r\n","sub_path":"python_training/argument_by_val_or_by_ref.py","file_name":"argument_by_val_or_by_ref.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"121911626","text":"\n\nimport numpy as np\n\n\ndef getNumberofPulses():\n    \n    ############ USER INPUT: ##############\n    stepMin = 0.01e3 # V\n    stepMax = 0.3e3 # V\n    Ustart = 47.0e3 # V\n    Umax = 57.1e3 # V\n    multiplier = 531.2 # V\n    exponent = 6.28e-5\n    Npulse_per_step = 2\n    RmpPulseMax = 0\n    #######################################\n    numberOfPulses=0\n    PFNvoltage = Ustart\n    while(PFNvoltage < Umax):\n        numberOfPulses = numberOfPulses+Npulse_per_step\n        DeltaPFNvoltage = multiplier * np.exp(-exponent * PFNvoltage)\n        if DeltaPFNvoltage > stepMax:\n            DeltaPFNvoltage = stepMax\n        if DeltaPFNvoltage < stepMin:\n            DeltaPFNvoltage = stepMin\n        PFNvoltage = PFNvoltage + DeltaPFNvoltage\n    numberOfPulses = numberOfPulses + RmpPulseMax\n    \n    return numberOfPulses\n\n\ndef NACOND_code(Umax,multiplier,exponent,Npulse_per_step): \n    P_RmpVMax = Umax\n    P_LogRampMultip = multiplier\n    P_LogRampExp = exponent\n    P_RmpPulseOriginalValue = Npulse_per_step\n    P_RmpPulseMax = 0\n    compt = 0\n    Utest = 0\n    while Utest <= P_RmpVMax:\n        compt = compt + 1\n        Utest = Utest + ((P_LogRampMultip*np.exp((-1)*P_LogRampExp*Utest*1000))/1000)\n#        print(Utest)\n    \n    retour = (compt)*P_RmpPulseOriginalValue+P_RmpPulseMax\n    \n    return retour\n\n\n\ndef main():\n    \n    \n    numberOfPulses = getNumberofPulses()\n    print('Number of pulses during ramp: ' + str(numberOfPulses))\n\n\nif __name__ == \"__main__\":\n    main()\n
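\n# Added note (an assumption from the unit conversions above): NACOND_code works\n# in kV rather than V -- e.g. NACOND_code(57.1, 531.2, 6.28e-5, 2) counts the\n# ramp steps from 0 kV up to 57.1 kV.\n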
","sub_path":"OBSOLETE_NACOND_ramp_pulse_count.py","file_name":"OBSOLETE_NACOND_ramp_pulse_count.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"323073510","text":"#An app that adds a GUI to the Python DB backed interface\n\nfrom tkinter import *\nimport searchDB\n#Defining the search function for Database lookup\ndef lookup():\n lookupParam = search.get()\n searchDB.searchFor(lookupParam)\n\n#Initialize the root method\nroot = Tk()\n# Add a title to the application\nroot.title(\"Database Lookup\")\n# Set the window width\nroot.geometry(\"560x400\")\nroot.minsize(360, 200)\n#Show all labels nessesary for Search\nLabel(root, text=\"Search:\").grid(row=0)\n#Show search box\nsearch = Entry(root)\n\nsearch.grid(row=0, column=1)\nButton(root, text='Search...', command=lookup).grid(row=3, column=1, sticky=W, pady=4)\n\n#Initialize the main loop and start the loop\nroot.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"76070513","text":"#!/user/bin/env python3\n#-*- coding:utf-8 -*-\n# 'a test module'\n# __author__=\"Interfaceyang\"\n# import sys\n# def test():\n# \targs=sys.argv\n# \tif len(args)==1:\n# \t\tprint(\"Hello,World!!!\")\n# \telif len(args)==2:\n# \t\tprint(\"Hello,%s!\" %args[1])\n# \telse:\n# \t\tprint(\"Too many arguments!\")\n# if __name__=='__main__':\n# \ttest()\n\n# 'a test module'\n# __author____=\"Interfaceyang\"\n\nimport sys\n#dfgs\ndef test():\n\targs=sys.argv\n\tif len(args)==1:\n\t\tprint(\"Hello,World\")\n\telif len(args)==2:\n\t\tprint(\"Hello\",args[1])\n\telse:\n\t\tprint(\"Too many\")\nif __name__=='__main__':\n\ttest()\nprint(test.__doc__)\n\n# def _private_1(name):\n# \tprint(\"private1 say greeting for you \")\n# def _private_2(name):\n# \tprint(\"private2 say greeting for you \")\n\n# def greeting(name):\n# \tif len(name)>3:\n# \t\t_private_1\n# \telse:\n# \t\t_private_2\n\n# greeting()","sub_path":"sample/10_module.py","file_name":"10_module.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67730135","text":"# 3. Есть два списка:\n#\n# tutors = [\n# 'Иван', 'Анастасия', 'Петр', 'Сергей',\n# 'Дмитрий', 'Борис', 'Елена'\n# ]\n# klasses = [\n# '9А', '7В', '9Б', '9В', '8Б', '10А', '10Б', '9А'\n# ]\n# Необходимо реализовать генератор, возвращающий кортежи вида (, ), например:\n#\n# ('Иван', '9А')\n# ('Анастасия', '7В')\n# ...\n# Количество генерируемых кортежей не должно быть больше длины списка tutors. 
\ntutors = ['Иван', 'Анастасия', 'Петр', 'Сергей', 'Дмитрий', 'Борис', 'Елена']\nklasses = ['9А', '7В', '9Б', '9В']\n\n\ndef return_class(list1, list2):\n    i = 0\n    while i < len(list1):\n        if i >= len(list2):\n            yield (list1[i], None)\n            i += 1\n        else:\n            yield (list1[i], list2[i])\n            i += 1\n\n\nfor gen in return_class(tutors, klasses):\n    print(gen)","sub_path":"Inga_Balode_DZ_5/DZ_5_3.py","file_name":"DZ_5_3.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"262243900","text":"import freenect\nimport cv2\nimport numpy as np\n\n#function to get RGB image from kinect\ndef get_video():\n    array,_ = freenect.sync_get_video()\n    array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)\n    return array\n\n#function to get depth image from kinect\ndef get_depth():\n    array,_ = freenect.sync_get_depth()\n    array = array.astype(np.uint8)\n    return array\n\nif __name__ == \"__main__\":\n    i = 0\n    while 1:\n        #get a frame from RGB camera\n        frame = get_video()\n        #get a frame from depth sensor\n        depth = get_depth()\n        #display RGB image\n        cv2.imshow('RGB image',frame)\n        #display depth image\n        cv2.imshow('Depth image',depth)\n        k = cv2.waitKey(5) & 0xFF\n        if k == 27: # wait for ESC key to exit\n            cv2.destroyAllWindows()\n            break\n        elif k == ord('s'): # wait for 's' key to save and exit\n            cv2.imwrite('frame'+str(i)+'.png',frame)\n            cv2.imwrite('depth'+str(i)+'.png',depth)\n            i = i+1\n    cv2.destroyAllWindows()\n","sub_path":"pickplace_files/get_frames_from_kinect.py","file_name":"get_frames_from_kinect.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"52230177","text":"# -*- coding: utf-8 -*-\r\nimport requests\r\nimport json\r\n\r\nfrom io import BytesIO\r\n\r\ndef img2tag(image_url):\r\n    \"\"\"\r\n    :param image_url: Set image_url to the URL of an image that you want to analyze.\r\n    :return analysis: The 'analysis' object contains various fields that describe the image. \r\n                      The most relevant caption for the image is obtained from the 'description' property.\r\n    \"\"\"\r\n\r\n    subscription_key = \"d6b5c62ea5d140eda8ad3dd2b52be86e\"\r\n    assert subscription_key\r\n\r\n    # You must use the same region in your REST call as you used to get your\r\n    # subscription keys. For example, if you got your subscription keys from\r\n    # westus, replace \"westcentralus\" in the URI below with \"westus\".\r\n    #\r\n    # Free trial subscription keys are generated in the westcentralus region.\r\n    # If you use a free trial subscription key, you shouldn't need to change\r\n    # this region.\r\n    vision_base_url = \"https://westcentralus.api.cognitive.microsoft.com/vision/v2.0/\"\r\n\r\n    analyze_url = vision_base_url + \"analyze\"\r\n\r\n\r\n    headers = {'Ocp-Apim-Subscription-Key': subscription_key }\r\n    params = {'visualFeatures': 'Categories,Description,Color',\r\n              'language': 'zh'}\r\n    data = {'url': image_url}\r\n    response = requests.post(analyze_url, headers=headers, params=params, json=data)\r\n    response.raise_for_status()\r\n\r\n    # The 'analysis' object contains various fields that describe the image. The most\r\n    # 
The most\r\n # relevant caption for the image is obtained from the 'description' property.\r\n analysis = response.json()\r\n\r\n tags_str = \"\"\r\n for tag in response.json()['description']['tags'][:4]:\r\n if tags_str == \"\":\r\n tags_str = tag\r\n else:\r\n tags_str += \",\" + tag\r\n\r\n return tags_str\r\n\r\nif __name__ == \"__main__\":\r\n image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/1/12/\" + \\\r\n \"Broadway_and_Times_Square_by_night.jpg/450px-Broadway_and_Times_Square_by_night.jpg\"\r\n print(img2tag(image_url))","sub_path":"predict_couplet/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"334710917","text":"from PyQt5.Qt import *\nimport sys\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n self.setWindowTitle('第一个主窗口应用')\n self.resize(400, 400)\n self.status = self.statusBar()\n self.status.showMessage('只存在5秒的消息', 5000)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Window()\n window.show()\n # app.setWindowIcon(QIcon('...ico'))\n sys.exit(app.exec_())\n","sub_path":"004-GUI编程/00-demo/003-类的形式创建一个窗口.py","file_name":"003-类的形式创建一个窗口.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"440575901","text":"from typing import Union\nfrom symtable import Symbol\nfrom typing import Sequence\nfrom pythautomata.base_types.alphabet import Alphabet\nimport time\n\nfrom pymodelextractor.learners.observation_table_learners.observation_table import TableInconsistency\n\n\nclass MMObservationTable:\n red: set[Sequence]\n blue: set[Sequence]\n observations: dict[Sequence, list[Symbol]]\n exp: list[Sequence]\n\n def __init__(self):\n self.red = set()\n self.blue = set()\n self.observations = {}\n self.exp = []\n\n self.redValues = set()\n\n def __getitem__(self, sequence: Sequence) -> list[Symbol]:\n return self.observations[sequence]\n\n def __setitem__(self, sequence: Sequence, observationsRow: list[Symbol]):\n self.observations[sequence] = observationsRow\n\n def is_closed(self) -> Union[Sequence, None]:\n for sequence in self.blue:\n blue_symbol = self.observations[sequence]\n if not(tuple(blue_symbol) in self.redValues):\n return sequence\n return None\n\n def update_red_values(self) -> set[list[Symbol]]:\n self.redValues = set()\n for sequence in self.red: \n redSymbol = tuple(self.observations[sequence])\n if not (self.redValues in redSymbol):\n self.redValues.add(redSymbol)\n return self.redValues\n\n def find_inconsistency(self, alphabet: Alphabet) -> Union[TableInconsistency, None]:\n redValues: dict[tuple, Sequence]\n redValues = {}\n for row in self.red:\n rowSymbols = self.observations[row]\n redValue = redValues.get(tuple(rowSymbols))\n if not (redValue is None):\n inconsistency = self._are_inconsistent(redValue, row, alphabet)\n if not (inconsistency is None):\n return inconsistency\n else:\n redValues[tuple(rowSymbols)] = row\n return None\n\n def _are_inconsistent(self, sequence1, sequence2, alphabet: Alphabet)-> \\\n Union[TableInconsistency, None]:\n for symbol in alphabet.symbols:\n suffixedSequence1 = sequence1 + symbol\n suffixedSequence2 = sequence2 + symbol\n if self.observations[suffixedSequence1] != self.observations[suffixedSequence2]:\n differenceSequence = self._observation_difference_between(\n suffixedSequence1, suffixedSequence2)\n return 
TableInconsistency(sequence1, sequence2, symbol, differenceSequence)\n return None\n\n def _observation_difference_between(self, sequence1: Sequence, sequence2: Sequence) -> Union[Sequence, None]:\n observations1 = self.observations[sequence1]\n observations2 = self.observations[sequence2]\n assert len(observations1) == len(observations2)\n for i in range(0, len(observations1)):\n if observations1[i] != observations2[i]:\n return self.exp[i]\n return None\n\n def move_from_blue_to_red(self, sequence: Sequence):\n self.blue.remove(sequence)\n self.add_to_red(sequence, self.observations[sequence])\n\n def add_to_red(self, sequence: Sequence, values: list[Symbol]):\n self.red.add(sequence)\n self.redValues.add(tuple(values))\n \n def add_to_blue(self, sequence: Sequence):\n self.blue.add(sequence)\n\n def __str__(self):\n lines = [\"\\nObservation Table:\",\n \"\\n=================\",\n \"\\nRED: \" + repr(self.red),\n \"\\nBLUE: \" + repr(self.blue),\n \"\\nEXP: \" + repr(self.exp),\n \"\\nOBSERVATIONS: \" + repr(self.observations) + \"\\n\"]\n return ''.join(lines)","sub_path":"pymodelextractor/learners/observation_table_learners/mm_observation_table.py","file_name":"mm_observation_table.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"359406965","text":"import numpy as np\nimport io\nimport sys\ntry: \n from pyne.data import atomic_mass\nexcept:\n print('some weird import bug in pyne that makes the first import fail')\nfrom pyne.data import atomic_mass\nlines = sys.stdin.readlines()\nw = open('out.txt', 'w')\ninventory = {}\nfor line in lines:\n line = line.split(' ')\n if line[0] == 'time':\n del(line[0])\n time = np.asarray(line)\n time.astype(np.float)\n elif line[0] == 'Inv':\n del(line[0])\n iso = line[0]+line[1]\n iso = int(float(iso))\n del(line[0:3])\n del(line[-1])\n values = np.asarray(line)\n values = values.astype(np.float) \n inventory[iso] = values\n else:\n continue\ntotal = 0.0\nfor iso in inventory:\n total += inventory[iso][0] * atomic_mass(iso)\nfor iso in inventory:\n inventory[iso] *= atomic_mass(iso) * total\nprint(inventory[92238])\n","sub_path":"class data/classex.py","file_name":"classex.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"591628592","text":"#!/usr/bin/env python2.7\nfrom __future__ import division\n# Import the Client from ambf_client package\nfrom ambf_client import Client\nimport time\nimport math\nimport rospy\nfrom std_msgs.msg import Float64\nimport tf\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom matplotlib import pyplot as plt\nfrom scipy import signal\nimport numpy as np\nfrom scipy.signal import butter,filtfilt\nimport adaptfilt\n\n# Create a instance of the client\n_client = Client()\n\n# Connect the client which in turn creates callable objects from ROS topics\n# and initiates a shared pool of threads for bi-directional communication\n_client.connect()\n\nprint('\\n\\n----')\nraw_input(\"We can see what objects the client has found. Press Enter to continue...\")\n# You can print the names of objects found. 
We should see all the links found\nprint(_client.get_obj_names())\n\n# Lets get a handle to PSM and ECM, as we can see in the printed\n# object names, 'ecm/baselink' and 'psm/baselink' should exist\npsm_handle_base = _client.get_obj_handle('psm/baselink')\npsm_handle_yaw = _client.get_obj_handle('psm/yawlink')\npsm_handle_mi = _client.get_obj_handle('psm/maininsertionlink')\npsm_handle_pfl = _client.get_obj_handle('psm/pitchfrontlink')\npsm_handle_pbl = _client.get_obj_handle('psm/pitchbottomlink')\npsm_handle_pel = _client.get_obj_handle('psm/pitchendlink')\npsm_handle_tgl1 = _client.get_obj_handle('psm/toolgripper1link')\npsm_handle_tpl = _client.get_obj_handle('psm/toolpitchlink')\npsm_handle_tyl = _client.get_obj_handle('psm/toolyawlink')\npsm_handle_trl = _client.get_obj_handle('psm/toolrolllink')\n\n# fino a qua tutto uguale poi vedi funzione sotto\nclass Cartesian_control:\n\n\tgraph_f2 = []\n\tgraph_fd2 = []\n\terror_force2 = []\n\twindow = []\n\txd_plot = []\n\tyd_plot = []\n\tzd_plot = []\n\txr_plot = []\n\tyr_plot = []\n\tzr_plot = []\n\ttime_plot = []\n\tabs_err = []\n\n\tforce_raw = []\n\tgraph_f = []\n\tforce1 = []\n\tforce_vect = []\n\terror_force = []\n\terror_pos = []\n\tfr_x = []\n\tfr_y = []\n\tfr_z = []\n\ter_x = []\n\ter_y = []\n\ter_z = []\n\tgraph_px = []\n\tgraph_py = []\n\n\tdegree = 0\n\tdelta = 0.6 \n\tdelta_m = 0.00005\n\tdelta_m_start = 0.00008\n\tband = 0.01\n\tband2 = 0.5\n\tlimit_mi = 0.30\n\tupdate_pos = False\n\n\tcount_mi_loop = 0\n\tP_value = 0\n\tI_value = 0\n\tD_value = 0\n\tgraph_frn = []\n\tgraph_fd = []\n\n\tposX = 0\n\tposY = 0\n\tposZ = 0\n\tpi = math.pi\n\tl_RCC = 0.4318\n\tl_tool_original = 0.4162\n\tl_tool = 0.05 + l_tool_original\n\n\tamplitude = 0.5\n\n\tforce_const = 2.5-amplitude\n\n\tdeltat_a = 0\n\ttime = []\n\tdeltat_a_ef = 0\n\ttime_ef = []\n\n\tIntegrator = 0\n\tDerivator = 0\n\ttime_now = 0\n\n\tflag_first_pos = True\n\n\tKp = 0.08 #rqt_plot\n\tKi = 0.008\n\n\tIntegrator = 0\n\tIntegratorx = 0\n\tIntegratory = 0\n\tIntegratorz = 0\n\tDerivator = 0\n\ttime_now = 0\n\n\tflag_first_pos = True\n\t\n\tdef __init__(self):\n\t\tpass\n\n\tdef init_ROS(self):\n\n\t\tself.pub_t = rospy.Publisher('time', Float64, queue_size=10)\n\t\tself.pub_f = rospy.Publisher('force_read', Float64, queue_size=10)\n\t\tself.pub_fd = rospy.Publisher('force_desired', Float64, queue_size=10)\n\n\t\trospy.init_node('ambf_client')\n\n\n\n\tdef publish_to_plot(self):\n\n\t\tself.pub_f.publish(self.force)\n\t\t#self.pub_t.publish(self.deltat_a)\n\t\tself.pub_fd.publish(self.force_const)\n\t\n\t\n\tdef inverse_kinematics(self, X_des, Y_des, Z_des):\n\n\t\tr = math.sqrt(math.pow(X_des,2)+math.pow(Y_des,2)+math.pow(Z_des,2))\n\t\t\n\t\t#self.q1 = 0.5*(math.acos((math.pow(Z_des,2)-math.pow(X_des,2))/(math.pow(X_des,2)+math.pow(Z_des,2)))) #original\n\n\t\tq1 = math.asin((X_des)/(math.sqrt(math.pow(X_des,2)+math.pow(Z_des,2))))\n\t\tq2 = math.asin(-Y_des/r)\n\t\tq3 = self.l_RCC - self.l_tool + r\n\n\t\treturn q1, q2, q3\n\n\n\n\tdef set_position_robot(self, q1_set, q2_set, q3_set):\n\t\t\n\t\t#set position of the robot in simulation through joint values\t\t\n\t\tpsm_handle_base.set_joint_pos(0, q1_set)\n\t\tpsm_handle_pfl.set_joint_pos(0, q2_set)\n\t\tpsm_handle_pel.set_joint_pos(0, q3_set)\n\n\n\tdef get_position_joints_PSM(self):\n\t\t\n\t\t#get the joint values of the robot from the simulation\n\t\tq1_read = psm_handle_base.get_joint_pos(0)\n\t\tq2_read = psm_handle_base.get_joint_pos(3)\n\t\tq3_read = psm_handle_base.get_joint_pos(4)\n\n\t\treturn q1_read, q2_read, 
q3_read\n\n\n\tdef forward_kinematics(self, q1, q2, q3):\n\n\t\tx_fk = math.cos(q2)*math.sin(q1)*(self.l_tool-self.l_RCC+q3)\n\t\ty_fk = -math.sin(q2)*(self.l_tool-self.l_RCC+q3)\n\t\tz_fk = -math.cos(q1)*math.cos(q2)*(self.l_tool-self.l_RCC+q3)\n\n\t\treturn x_fk, y_fk, z_fk\n\n\n\tdef count_time(self):\n\t\ttime_end_a = time.time()\n\t\tself.deltat_a = (time_end_a-self.time_start_a) + self.deltat_a \n\t\tself.time = np.append(self.time, self.deltat_a)\n\n\tdef count_time_ef(self):\n\t\ttime_end_a_ef = time.time()\n\t\tself.deltat_a_ef = (time_end_a_ef-self.time_start_a) + self.deltat_a_ef \n\t\tself.time_ef = np.append(self.time_ef, self.deltat_a_ef)\n\n\n\tdef approach_goal_Z(self, m_start):\n\t\tself.m = m_start\n\t\tforce_old2 = 0\n\t\tforce_old1 = 0\n\n\t\tcount = 0\n\t\twindow = []\n\t\twindow_size = 10\n\t\tsum = 0\n\t\tcount1 = 0\n\t\twhile self.m < self.limit_mi:\n\t\t\t\n\t\t\tself.time_start_a = time.time()\n\t\t\t#_,_,force_raw_now = psm_handle_mi.get_force()\n\t\t\tforce_raw_now = psm_handle_mi.get_force()\n\t\t\tself.force = force_raw_now\n\t\t\tcount = count + 1\n\t\t\tif count < window_size + 1:\n\t\t\t\tself.window = np.append(self.window, force_raw_now)\n\t\t\t\tself.force = force_raw_now\n\t\t\telse:\n\t\t\t\tfor i in range(1, window_size):\n\t\t\t\t\tself.window[i-1] = self.window[i]\n\t\t\t\t\tif i == (window_size - 1):\n\t\t\t\t\t\tself.window[i] = force_raw_now\n\t\t\t\t\tsum = sum + self.window[i-1]\n\t\t\t\tself.force = sum / window_size\n\t\t\t\tsum = 0\n\n\t\t\tprint(self.force)\n\t\t\tif self.force > (self.force_const + self.band):\n\t\t\t\tself.m = self.m - self.delta_m_start/2\n\t\t\t\tpsm_handle_pel.set_joint_pos(0, self.m)\n\t\t\tif self.force < (self.force_const - self.band):\n\t\t\t\tself.m = self.m + self.delta_m_start/2\n\t\t\t\tpsm_handle_pel.set_joint_pos(0, self.m)\n\n\t\t\tif (self.force >= self.force_const):\n\t\t\t\tbreak\n\t\t\t\n\t\t\tpsm_handle_pel.set_joint_pos(0, self.m)\n\t\t\tforce_old2 = force_old1\n\t\t\tforce_old1 = self.force\n\n\t\t\tself.error_force = np.append(self.error_force, 0)\n\t\t\t\t\t\n\t\t\tself.graph_px = np.append(self.graph_px, 0)\n\t\t\tself.graph_py = np.append(self.graph_py, 0)\n\t\t\tPID = 1\n\n\t\t\tself.graph_frn = np.append(self.graph_frn, force_raw_now)\n\n\t\t\tex = 0\n\t\t\tself.er_x = np.append(self.er_x, ex)\n\t\t\tey = 0\n\t\t\tself.er_y = np.append(self.er_y, ey)\n\t\t\tez = 0\n\t\t\tself.er_z = np.append(self.er_z, ez)\n\n\n\tdef plot_sin(self):\n\n\t\tprint(\"plot...\")\n\t\ttime = []\n\t\ttime = self.time\n\t\ttime_ef = []\n\t\ttime_ef = self.time_ef\n\n\t\t#np.savetxt('ambf/ambf_ros_modules/ambf_comm/scripts/tests_ambf/01NewCode/test_plots/test_sin/04_cl_sin_time.csv', time, delimiter=\",\")\n\t\t#np.savetxt('ambf/ambf_ros_modules/ambf_comm/scripts/tests_ambf/01NewCode/test_plots/test_sin/04_cl_sin_force.csv', self.graph_f, delimiter=\",\") \n\t\t#np.savetxt('ambf/ambf_ros_modules/ambf_comm/scripts/tests_ambf/01NewCode/test_plots/test_sin/04_cl_sin_forced.csv', self.graph_fd, delimiter=\",\") \n\t\t#np.savetxt('ambf/ambf_ros_modules/ambf_comm/scripts/tests_ambf/01NewCode/test_plots/test_sin/04_cl_sin_error.csv', self.abs_err, delimiter=\",\")\n\n\t\tfig, axs = plt.subplots(nrows = 2)\n\t\taxs[0].plot(time, self.graph_f, color = 'r', label = \"actual force\")\n\t\taxs[0].plot(time, self.graph_fd, color = 'b', label = \"target force\")\n\t\t#axs[0].set(xlabel = 'Time [s]', ylabel = 'Force [N]')\t\n\t\taxs[0].set(ylabel = 'Force 
[N]')\t\n\t\taxs[0].legend(loc='best')\n\t\taxs[0].grid()\n\t\n\t\taxs[1].plot(time, self.abs_err, color = 'r', label = \"error\")\n\t\taxs[1].set(xlabel = 'Time [s]', ylabel = 'Force_error_norm')\n\t\taxs[1].legend(loc='best')\n\t\taxs[1].grid()\n\n\t\tplt.show()\n\n\n\tdef exert_sin_force2(self, m_start):\n\t\t\n\t\tforce_base = self.force_const\n\t\tcount = 0\n\t\twindow = []\n\t\twindow_size = 10\n\t\tsum = 0\n\t\tcount1 = 0\n\t\t\n\t\tstep = 3.46\n\t\ttimes = 0\n\t\tangle = -3.46\n\n\t\tself.f_cycle = 60\n\t\tself.exp_time = 1.734*20\n\t\tdim = self.f_cycle*self.exp_time\n\t\tdim = int(dim)\n\n\t\tt_cycle = 1/self.f_cycle\n\t\tprint(t_cycle)\n\n\t\tself.graph_f_cycle = np.zeros(dim)\n\t\tself.graph_fd_cycle = np.zeros(dim)\n\t\tself.error_force_cycle = np.zeros(dim)\n\t\t\n\t\txfk = np.zeros(dim)\n\t\tyfk = np.zeros(dim)\n\t\te_a = np.zeros(dim)\n\n\t\tforce_target = np.zeros(dim)\n\n\n\t\tfor i in range(0,dim):\n\n\t\t\tif angle >= 360:\n\t\t\t\tangle = 0\n\t\t\tangle = angle + step\n\t\t\tforce_target[i] = force_base + self.amplitude*np.sin(math.radians(angle))\n\n\t\t\t\n\t\t\n\t\tset_angle = 20\n\t\tprint(\"SET INCLINATION\")\n\t\tpsm_handle_pfl.set_joint_pos(0,math.radians(-set_angle))\n\t\ttime.sleep(0.5)\n\t\tself.approach_goal_Z(m_start)\n\t\tq1_r,q2_r,q3_r = self.get_position_joints_PSM()\n\t\tx_fk,y_fk,z_fk = self.forward_kinematics(q1_r,q2_r,q3_r)\n\n\t\tKps = 0.007 #good for step = 5\n\t\tKis = 0.0006\n\t\t\n\t\tj=0\n\n\t\t# assumed reconstruction of the per-cycle loop body (mirrors approach_goal_Z):\n\t\t# read the measured force, apply a PI correction towards force_target[j] on\n\t\t# the insertion joint, log the cycle arrays, then pace the loop to f_cycle\n\t\twhile(j < dim):\n\t\t\tstarttime = time.time()\n\t\t\tforce_now = psm_handle_mi.get_force()\n\t\t\terr = force_target[j] - force_now\n\t\t\tself.Integrator = self.Integrator + err\n\t\t\tself.m = self.m + Kps*err + Kis*self.Integrator\n\t\t\tpsm_handle_pel.set_joint_pos(0, self.m)\n\t\t\tself.graph_f_cycle[j] = force_now\n\t\t\tself.graph_fd_cycle[j] = force_target[j]\n\t\t\tself.error_force_cycle[j] = err\n\t\t\te_a[j] = abs(err)\n\t\t\tj = j + 1\n\t\t\tif j > dim-1:\n\t\t\t\tbreak\n\n\t\t\twait = 1/self.f_cycle - (time.time() - starttime) \n\t\t\tif wait>0:\n\t\t\t\ttime.sleep(wait)\n\n\t\t\tprint(time.time() - starttime, j)\n\t\t\t#print(time.time()-starttime)\n\n\n\t\tfor i in range (0,dim):\n\n\t\t\tself.graph_f = np.append(self.graph_f, self.graph_f_cycle[i])\n\t\t\tself.graph_fd = np.append(self.graph_fd, self.graph_fd_cycle[i])\n\t\t\tself.error_force = np.append(self.error_force, self.error_force_cycle[i])\n\t\t\tself.abs_err = np.append(self.abs_err, e_a[i])\n\n\n\n\t\n\t\ndef main():\n\n\t# Let's sleep for a very brief moment to give the internal callbacks\n\t# to sync up new data from the running simulator\n\ttime.sleep(0.2)\n\n\tprint('\\n\\n----')\n\n\traw_input(\"Number of joints of pitchfrontLink\")\n\tnum_joints_pfl = psm_handle_pfl.get_num_joints()\n\tprint(num_joints_pfl)\n\n\traw_input(\"Name of joints of pitchfrontLink\")\n\tname_joints_pfl = psm_handle_pfl.get_joint_names()\n\tprint(name_joints_pfl)\n\n\traw_input(\"Number of joints of mainInsertionLink\")\n\tnum_joints_mi = psm_handle_mi.get_num_joints()\n\tprint(num_joints_mi)\n\n\traw_input(\"Name of joints of mainInsertionLink\")\n\tname_joints_mi = psm_handle_mi.get_joint_names()\n\tprint(name_joints_mi)\n\n\traw_input(\"Number of joints of pitchEndLink\")\n\tnum_joints_pel = psm_handle_pel.get_num_joints()\n\tprint(num_joints_pel)\n\n\traw_input(\"Name of joints of pitchEndLink\")\n\tname_joints_pel = psm_handle_pel.get_joint_names()\n\tprint(name_joints_pel)\n\n\traw_input(\"Name of joints of base\")\n\tname_joints_base = psm_handle_base.get_joint_names()\n\tprint(name_joints_base)\n\n\traw_input(\"Display movement...\")\n\n\tpsm_handle_pel.set_joint_pos(0, 0)\n\tpsm_handle_pfl.set_joint_pos(0, 0)\n\tpsm_handle_base.set_joint_pos(0, math.radians(0))\n\ttime.sleep(2)\n\tpsm_handle_pel.set_joint_pos(0, 0)\n\ttime.sleep(1)\n\tpsm_handle_pel.set_joint_pos(0, 0)\n\tm_start = 0.16\n\tpsm_handle_pel.set_joint_pos(0, m_start)\n\ttime.sleep(2)\n\t\n\n\tcart_c = 
Cartesian_control()\n\tcart_c.init_ROS()\n\t\n\tcart_c.exert_sin_force2(m_start)\n\tcart_c.plot_sin()\n\n\traw_input(\"Let's clean up. Press Enter to continue...\")\n\t# Lastly to cleanup\n\t_client.clean_up()\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"ambf_ros_modules/ambf_comm/scripts/tests_ambf/01NewCode/CODES_CONTROL/sin/sin_cloth.py","file_name":"sin_cloth.py","file_ext":"py","file_size_in_byte":11742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"208684912","text":"# Exponential moving average model - makes the model more robust on test data\n\nimport tensorflow as tf\n\n# Define a variable whose moving average will be computed; initial value is 0\n# Every variable that gets a moving average must be real-valued, so the dtype is set to float32 explicitly\n\nv1 = tf.Variable(0, dtype=tf.float32)\n\n# step stands in for the number of training iterations of the nn and dynamically controls the decay rate\nstep = tf.Variable(0, trainable=False)\n\n# Create the moving-average object, passing the decay rate 0.99 and the decay-controlling variable step\nema = tf.train.ExponentialMovingAverage(0.99, step)\n\n# Define the update op; each run of this op updates the moving average of every variable in the list\nmaintain_averages_op = ema.apply([v1])\n\nwith tf.Session() as sess:\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n\n # Read the variable and its moving average; right after initialization both v1 and its average are 0\n print(sess.run([v1, ema.average(v1)]))\n\n # Update v1 to 5\n sess.run(tf.assign(v1, 5))\n # Update the moving average of v1; the decay is min{0.99, (1+step)/(10+step) = 0.1} = 0.1\n # so the moving average of v1 becomes 0.1x0 + 0.9x5 = 4.5\n sess.run(maintain_averages_op)\n print(sess.run([v1, ema.average(v1)]))\n\n # Set step to 10000\n sess.run(tf.assign(step, 10000))\n # Set v1 to 10\n sess.run(tf.assign(v1, 10))\n # Update the moving average of v1; the decay is min{0.99, (1+step)/(10+step) ≈ 0.999} = 0.99\n # so the moving average of v1 becomes 0.99x4.5 + 0.01x10 = 4.555\n sess.run(maintain_averages_op)\n print(sess.run([v1, ema.average(v1)]))\n\n # One more update gives 0.99x4.555 + 0.01x10 = 4.60945\n sess.run(maintain_averages_op)\n print(sess.run([v1, ema.average(v1)]))","sub_path":"tf_gdl_in_action/4.4.3.py","file_name":"4.4.3.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"177048613","text":"#!/usr/bin/python \n# -*- coding: utf-8 -*- \n\nimport Queue\nimport time\nimport threading\nimport sys\nimport socket\nimport select\nimport serial\nfrom xml.dom.minidom import parse, parseString\n\nclass SerialDataPump(object):\n\tser = None\n\tt = None\n\tfifo = None\n\trunning = 1\n\tdef receiving(self, those):\n\t\tglobal last_received\n\t\tbuffer = ''\n\n\t\twhile self.ser and self.running == 1:\n\t\t\tlast_received = self.ser.readline()\n\t\t\tif last_received:\n\t\t\t\tlast_received = last_received.rstrip()\n\t\t\t\tself.fifo.put( last_received )\n\n\tdef __init__(self, port, fifo):\n\t\tself.fifo = fifo\n\t\ttry:\n\t\t\tself.ser = ser = serial.Serial(\n\t\t\t\tport=port,\n\t\t\t\tbaudrate=9600,\n\t\t\t\tbytesize=serial.EIGHTBITS,\n\t\t\t\tparity=serial.PARITY_NONE,\n\t\t\t\tstopbits=serial.STOPBITS_ONE,\n\t\t\t\ttimeout=0.1,\n\t\t\t\txonxoff=0,\n\t\t\t\trtscts=0,\n\t\t\t\tinterCharTimeout=None\n\t\t\t)\n\t\texcept serial.serialutil.SerialException:\n\t\t\t#no serial connection\n\t\t\tself.ser = None\n\t\telse:\n\t\t\tself.t = threading.Thread(target=self.receiving, args=(self, ))\n\t\t\tself.t.start()\n\n\n\nclass FifoPump:\n\trunning = 1\n\ti = 0\n\tt = None\n\tfifo = None\n\t\n\tdef loop( self, those ):\n\t\tprint(\"starting loop\")\n\t\twhile ( self.running == 1 ) :\n\t\t\tself.fifo.put( str( self.i ) )\n\t\t\tself.i += 1\n\t\t\ttime.sleep( 1 )\n\t\t\n\tdef __init__( self, fifo ):\n\t\tself.fifo = fifo\n\t\tself.fifo.put (\"starting FifoPump\")\n\t\tself.t = threading.Thread( target = self.loop, args=(self,) 
)\n\t\tself.t.start()\n\n\nclass TCPPump2:\n\trunning = 1\n\tport = None\n\tfifo = None\n\tdef loop(self, those):\n\t\tconnexion_principale = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tconnexion_principale.bind(('', self.port))\n\t\tconnexion_principale.listen(5)\n\t\tprint(\"Le serveur écoute à présent sur le port {}\".format( self.port ))\n\n\t\tclients_connectes = []\n\t\twhile self.running == 1:\n\t\t\t# Check whether any new clients are asking to connect:\n\t\t\t# to do that, watch connexion_principale for reads,\n\t\t\t# waiting at most 50 ms\n\t\t\tconnexions_demandees, wlist, xlist = select.select([connexion_principale],[], [], 0.05)\n\n\t\t\tfor connexion in connexions_demandees:\n\t\t\t\tconnexion_avec_client, infos_connexion = connexion.accept()\n\t\t\t\t# Add the connected socket to the client list\n\t\t\t\tclients_connectes.append(connexion_avec_client)\n\n\t\t\t# Now poll the list of connected clients.\n\t\t\t# The clients returned by select are the ones that must be read (recv);\n\t\t\t# again we wait at most 50 ms.\n\t\t\t# The call to select.select is wrapped in a try block\n\t\t\t# because if the list of connected clients is empty, an exception\n\t\t\t# may be raised\n\t\t\tclients_a_lire = []\n\t\t\ttry:\n\t\t\t\tclients_a_lire, wlist, xlist = select.select(clients_connectes, [], [], 0.05)\n\t\t\texcept select.error:\n\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# Iterate over the clients that have data to read\n\t\t\t\tfor client in clients_a_lire:\n\t\t\t\t\t# client is a socket object\n\t\t\t\t\tmsg_recu = client.recv(1024)\n\t\t\t\t\t# May crash if the message contains special characters\n\t\t\t\t\tmsg_recu = msg_recu.decode()\n\t\t\t\t\tprint(\"Reçu {}\".format(msg_recu))\n\t\t\t\t\tself.fifo.put( msg_recu )\n\t\t\t\t\tclient.send(b\"5 / 5\")\n\t\t\t\t\tclient.close()\n\t\t\t\t\t#if msg_recu == \"fin\":\n\t\t\t\t\t#\tself.running = 0\n\n\t\t#print(\"Fermeture des connexions\")\n\t\t#for client in clients_connectes:\n\t\t#\tclient.close()\n\n\t\tconnexion_principale.close()\n\n\tdef __init__(self, port, fifo):\n\t\tself.fifo = fifo\n\t\tself.port = port\n\t\tself.fifo.put (\"starting TCPPump2\")\n\t\tself.t = threading.Thread( target = self.loop, args=(self,) )\n\t\tself.t.start()\t\n\nclass TCPPump:\n\trunning = 1\n\ti = 0\n\tt = None\n\tport = None\n\tfifo = None\n\t\n\tdef loop( self, those ):\n\t\tprint(\"starting loop network\")\n\t\tconnexion_principale = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tconnexion_principale.bind(('', self.port))\n\t\tconnexion_principale.listen(5)\n\t\tconnexion_avec_client = None\n\t\twhile ( self.running == 1 ) :\n\t\t\ttry:\n\t\t\t\t# TODO: make this non-blocking so the thread can be stopped without a kill\n\t\t\t\t# A select-based solution, maybe?\n\t\t\t\tconnexion_avec_client, infos_connexion = connexion_principale.accept()\n\t\t\t\tconnexion_avec_client.send(\"FIFO waiting!\\n\\r\")\n\t\t\t\tmsg_recu = connexion_avec_client.recv(1024)\n\t\t\t\tmsg_recu = msg_recu.rstrip();\n\t\t\t\tself.fifo.put( msg_recu )\n\t\t\t\tconnexion_avec_client.close()\n\t\t\texcept:\n\t\t\t\tif connexion_avec_client :\n\t\t\t\t\tconnexion_avec_client.close()\n\t\tconnexion_principale.close()\n\t\t\t\n\tdef __init__( self, port, fifo ):\n\t\tself.fifo = fifo\n\t\tself.port = port\n\t\tself.fifo.put (\"starting FifoPumpTCP\")\n\t\tself.t = threading.Thread( target = self.loop, args=(self,) )\n\t\tself.t.start()\t\n\t\t\nclass FifoReader:\n\trunning = 1\n\tt = None\n\tdef read( self, those 
):\n\t\twhile ( self.running == 1 ) :\n\t\t\ttry:\n\t\t\t\t#chaine = fifo.get(False, 5)\n\t\t\t\tchaine = self.fifo.get()\n\t\t\t\tprint ( \"FifoReader :\" + chaine )\n\t\t\texcept Queue.Empty:\n\t\t\t\tpass\n\t\t\n\tdef __init__( self, fifo ):\n\t\tself.fifo = fifo\n\t\tself.fifo.put(\"starting FifoReader\")\n\t\tself.t = threading.Thread(target = self.read, args=(self,) )\n\t\tself.t.start()\n\n\t\t\nclass TelecommandeRF :\n\t# List of the available RF remote controls\n\t# For each remote-control id, define a list of actions keyed on the button values\n\tdef __init__( self ):\n\t\t# read the config file\n\t\tpass\n\t\n\tdef convertir( self ):\n\t\ta = Action()\n\n\n\t\t\nclass ActionSocket :\n\thost = None\n\tport = None\n\texecute = None\n\t\n\tdef __init__(self):\n\t\tpass\n\t\n\tdef start(self):\n\t\ts = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n\t\t#s.connect((\"www.mcmillan-inc.com\", 80))\n\n\nclass ActionLocal:\n\texecute = None\n\tdef __init__(self, xmlFile):\n\t\tpass\n\t\n\tdef start(self):\n\t\tpass\n\n\t\t\nif __name__ == \"__main__\":\n\tfifo = Queue.Queue()\n\n\t#fp = FifoPump( fifo )\n\tfr = FifoReader( fifo )\n\ttcp = TCPPump( 9091, fifo )\n\tsdp = SerialDataPump( \"/dev/ttyACM0\", fifo)\n\t#tcp2 = TCPPump2( 9091, fifo)\n\t\n\ttry :\n\t\twhile True:\n\t\t\ttime.sleep(1)\n\texcept:\n\t\tsdp.running = 0\n\t\ttcp.running = 0\n\t\t#fp.running = 0\n\t\tfr.running = 0\n\t\t#tcp2.running = 0\n\t\tsys.exit()\n\t\n","sub_path":"mimo/fifo.py","file_name":"fifo.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"434168268","text":"import os\nimport numpy as np\nimport torch as T\nimport random as R\nimport torch.nn.functional as F\nfrom torch.optim.adam import Adam\nfrom collections import namedtuple\nfrom agent.utils.networks import Critic\nfrom agent.utils.replay_buffer import GridWorldHindsightReplayBuffer\nfrom agent.utils.exploration_strategy import ExpDecayGreedy\nt = namedtuple(\"transition\",\n ('state', 'inventory', 'desired_goal', 'action',\n 'next_state', 'next_inventory', 'next_goal', 'achieved_goal', 'reward', 'done'))\n\n\nclass HindsightTD3(object):\n def __init__(self, env_params, path=None, seed=0, hindsight=True,\n lr=1e-4, mem_capacity=int(1e6), batch_size=512, tau=0.5,\n optimization_steps=2, gamma=0.99, eps_start=1, eps_end=0.05, eps_decay=5000):\n T.manual_seed(seed)\n R.seed(seed)\n if path is None:\n self.ckpt_path = \"ckpts\"\n else:\n self.ckpt_path = path+\"/ckpts\"\n if not os.path.isdir(self.ckpt_path):\n os.mkdir(self.ckpt_path)\n use_cuda = T.cuda.is_available()\n self.device = T.device(\"cuda\" if use_cuda else \"cpu\")\n \n self.input_dim = env_params['input_dim']\n self.output_dim = env_params['output_dim']\n self.max = env_params['max']\n self.input_max = env_params['input_max']\n self.input_max_no_inv = np.delete(self.input_max, [n for n in range(4, len(self.input_max))], axis=0)\n self.input_min = env_params['input_min']\n self.input_min_no_inv = np.delete(self.input_min, [n for n in range(4, len(self.input_min))], axis=0)\n\n self.exploration = ExpDecayGreedy(start=eps_start, end=eps_end, decay=eps_decay)\n self.agent_1 = Critic(self.input_dim, self.output_dim).to(self.device)\n self.agent_2 = Critic(self.input_dim, self.output_dim).to(self.device)\n self.optimizer_1 = Adam(self.agent_1.parameters(), lr=lr)\n self.optimizer_2 = Adam(self.agent_2.parameters(), lr=lr)\n self.target = Critic(self.input_dim, 
self.output_dim).to(self.device)\n self.hindsight = hindsight\n self.memory = GridWorldHindsightReplayBuffer(mem_capacity, t, seed=seed)\n self.batch_size = batch_size\n\n self.gamma = gamma\n self.soft_update(tau=1)\n self.tau = tau\n self.optimization_steps = optimization_steps\n \n def select_action(self, obs, ep=None):\n inputs = np.concatenate((obs['state'], obs['desired_goal_loc'], obs['inventory_vector']), axis=0)\n inputs = self.scale(inputs)\n inputs = T.tensor(inputs, dtype=T.float).to(self.device)\n self.target.eval()\n values = self.target(inputs)\n if ep is None:\n action = T.argmax(values).item()\n return action\n else:\n _ = R.uniform(0, 1)\n if _ < self.exploration(ep):\n action = R.randint(0, self.max)\n else:\n action = T.argmax(values).item()\n return action\n \n def learn(self, steps=None, batch_size=None):\n if self.hindsight:\n self.memory.modify_episodes()\n self.memory.store_episodes()\n if steps is None:\n steps = self.optimization_steps\n if batch_size is None:\n batch_size = self.batch_size\n if len(self.memory) < batch_size:\n return\n\n for s in range(steps):\n batch = self.memory.sample(batch_size)\n inputs = np.concatenate((batch.state, batch.desired_goal, batch.inventory), axis=1)\n inputs_ = np.concatenate((batch.next_state, batch.next_goal, batch.next_inventory), axis=1)\n inputs = self.scale(inputs)\n inputs_ = self.scale(inputs_)\n inputs = T.tensor(inputs, dtype=T.float).to(self.device)\n inputs_ = T.tensor(inputs_, dtype=T.float).to(self.device)\n\n actions = T.tensor(batch.action, dtype=T.long).unsqueeze(1).to(self.device)\n rewards = T.tensor(batch.reward, dtype=T.float).unsqueeze(1).to(self.device)\n episode_done = T.tensor(batch.done, dtype=T.float).unsqueeze(1).to(self.device)\n\n self.agent_1.train()\n maximal_next_values_1 = self.agent_1(inputs_).max(1)[0].view(batch_size, 1)\n self.agent_2.train()\n maximal_next_values_2 = self.agent_2(inputs_).max(1)[0].view(batch_size, 1)\n\n maximal_next_values = T.min(maximal_next_values_1, maximal_next_values_2).detach()\n target_values = rewards + episode_done*self.gamma*maximal_next_values\n\n estimated_values_1 = self.agent_1(inputs).gather(1, actions)\n loss_1 = F.smooth_l1_loss(estimated_values_1, target_values)\n self.optimizer_1.zero_grad()\n loss_1.backward()\n self.optimizer_1.step()\n self.agent_1.eval()\n\n estimated_values_2 = self.agent_2(inputs).gather(1, actions)\n loss_2 = F.smooth_l1_loss(estimated_values_2, target_values)\n self.optimizer_2.zero_grad()\n loss_2.backward()\n self.optimizer_2.step()\n self.agent_2.eval()\n\n self.soft_update()\n\n def remember(self, new, *args):\n self.memory.store_experience(new, *args)\n \n def soft_update(self, tau=None):\n if tau is None:\n tau = self.tau\n for target_param, param in zip(self.target.parameters(), self.agent_1.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )\n\n def save_network(self, epo):\n T.save(self.agent_1.state_dict(), self.ckpt_path+\"/ckpt_agent_1_epo\"+str(epo)+\".pt\")\n T.save(self.agent_2.state_dict(), self.ckpt_path+\"/ckpt_agent_2_epo\"+str(epo)+\".pt\")\n T.save(self.target.state_dict(), self.ckpt_path+\"/ckpt_target_epo\"+str(epo)+\".pt\")\n\n def load_network(self, epo):\n self.agent_1.load_state_dict(T.load(self.ckpt_path+'/ckpt_agent_1_epo' + str(epo)+'.pt'))\n self.agent_2.load_state_dict(T.load(self.ckpt_path+'/ckpt_agent_2_epo' + str(epo)+'.pt'))\n self.target.load_state_dict(T.load(self.ckpt_path+'/ckpt_target_epo'+str(epo)+'.pt'))\n\n def scale(self, inputs):\n 
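 # min-max scaling: squash each observation feature into [0, 1] using the input_min/input_max bounds supplied in env_params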
ins = (inputs - self.input_min) / (self.input_max - self.input_min)\n return ins\n","sub_path":"agent/td3_her_discrete.py","file_name":"td3_her_discrete.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"335882814","text":"def _task(data):\n import re\n from utils.db import TwitterDBUtil\n from tasks.harvester import FSHarvester\n \n counter = 0; found = False\n db_util = TwitterDBUtil()\n conn = db_util.get_connection()\n harvester = FSHarvester()\n \n try:\n for d_row in data:\n urls = re.findall(r'(https?://\\S+)', d_row[1])\n for url in urls:\n metadata = harvester.get_checkin_metadata(url)\n if metadata:\n print (db_util.save_metadata(conn, tweet_id=d_row[0], metadata=metadata))\n counter += 1; found = True; break\n if found == False: db_util.save_metadata(conn, tweet_id=d_row[0], metadata={})\n \n finally: db_util.close_connection(conn)\n return counter, len(data)\n\n\ndef _main():\n import os\n import sys\n import math\n import concurrent.futures\n from datetime import datetime\n from datetime import timedelta\n from utils.db import TwitterDBUtil\n from utils.foursquare import RateLimitExceeded\n \n db_util = TwitterDBUtil()\n conn = db_util.get_connection()\n \n try:\n tweets = db_util.list_tweets_without_metadata(conn)\n is_ = 0; return_code = -1; counter1 = 0; counter2 = 0; total = len(tweets); nsets = 100; split_factor = 1 / nsets; \n block_size = math.ceil(total * split_factor); block_size = block_size if block_size > 100 else 100; blocks = math.ceil(total / block_size)\n partition = [tweets[i * block_size: (i + 1) * block_size] for i in range(0, blocks)]\n tweets = None\n \n for subset in partition:\n \n is_ += 1\n subtotal = len(subset)\n workers = 8 * os.cpu_count()\n split_factor = 1 / workers; block_size = math.ceil(subtotal * split_factor); blocks = math.ceil(subtotal / block_size)\n partition2 = [subset[i * block_size: (i + 1) * block_size] for i in range(0, blocks)]\n \n start = datetime.now()\n with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor: \n \n tasks = []\n print ('Start harvesting.')\n for subset2 in partition2: tasks.append(executor.submit(_task, data=[(t['tweet_id'], t['text']) for t in subset2]))\n \n print ('Start waiting subprocesses.') \n for t in concurrent.futures.as_completed(tasks):\n counter2_, counter1_ = t.result()\n counter2 += counter2_; counter1 += counter1_; \n if return_code < 0 and counter2 > 0: return_code = 0\n print (str(counter2) + '/' + str(counter1) + '/' + str(total) + ' done. 
') \n \n current = datetime.now() \n elapsed = (current - start).total_seconds() / 3600\n timeleft = ((total - counter1) * elapsed) / subtotal\n finish = current + timedelta(seconds=timeleft * 3600)\n print ('Subset {}/{} done.'.format(is_, nsets)) \n print ('Elapsed time: {} hours'.format(round(elapsed, 1)))\n print ('Remaining time: {} hours'.format(round(timeleft, 1))) \n print ('Estimated finish time: {}\\n'.format(finish.strftime('%H:%M'))) \n \n except RateLimitExceeded: return return_code\n finally: db_util.close_connection(conn)\n sys.exit(0)\n \n\n\nif __name__ == '__main__':\n from utils import run\n run(_main)\n\n","sub_path":"MyTwitter/src/tasks/harvester/1_harvest_metadata.py","file_name":"1_harvest_metadata.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"419664953","text":"import boto3\n\ndef lambda_handler(message, context):\n \n emr = boto3.client(\"emr\", region_name=\"us-east-1\")\n \n # we retrieve the cluster_id that was passed to the lambda function\n cluster_id = message[\"cluster_id\"]\n \n step1 = {'Name': 'price_etl',\n 'ActionOnFailure': 'CONTINUE',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': [\"spark-submit\", \"--deploy-mode\", \"cluster\", \"s3://web-app-project/spark-jobs/price_data_etl.py\"]\n }\n }\n \n step2 = {'Name': 'twitter_etl',\n 'ActionOnFailure': 'CONTINUE',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': [\"spark-submit\", \"--deploy-mode\", \"cluster\", \"s3://web-app-project/spark-jobs/twitter_data_etl.py\"]\n }\n }\n \n \n \n action = emr.add_job_flow_steps(JobFlowId=cluster_id, Steps=[step1, step2])\n response = {}\n response[\"step_id\"] = action[\"StepIds\"][0]\n response[\"cluster_id\"] = cluster_id\n return response\n","sub_path":"lambda/ETL.py","file_name":"ETL.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"536478946","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ConnectToTargetSqlDbTaskOutput(Model):\n \"\"\"Output for the task that validates connection to SQL DB and target server\n requirements.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar id: Result identifier\n :vartype id: str\n :ivar databases: Source databases as a map from database name to database\n id\n :vartype databases: dict[str, str]\n :ivar target_server_version: Version of the target server\n :vartype target_server_version: str\n :ivar target_server_brand_version: Target server brand version\n :vartype target_server_brand_version: str\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'databases': {'readonly': True},\n 'target_server_version': {'readonly': True},\n 'target_server_brand_version': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'databases': {'key': 'databases', 'type': '{str}'},\n 'target_server_version': {'key': 'targetServerVersion', 'type': 'str'},\n 'target_server_brand_version': {'key': 'targetServerBrandVersion', 'type': 'str'},\n }\n\n def __init__(self, **kwargs) -> None:\n super(ConnectToTargetSqlDbTaskOutput, self).__init__(**kwargs)\n self.id = None\n self.databases = None\n self.target_server_version = None\n self.target_server_brand_version = None\n","sub_path":"src/dms-preview/azext_dms/vendored_sdks/datamigration/models/connect_to_target_sql_db_task_output_py3.py","file_name":"connect_to_target_sql_db_task_output_py3.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"210691851","text":"from keras.models import load_model\nclassifier = load_model('my_model.h5')\nimport numpy as np\nfrom keras.preprocessing import image\ntest_image = image.load_img('/Users/saranshmittal/Development/Data-Science/Cat classification/train/cat/cat.0.jpg', target_size = (64, 64))\ntest_image = image.img_to_array(test_image)\ntest_image = np.expand_dims(test_image, axis = 0)\nresult = classifier.predict(test_image)\nif result[0][0] == 1:\n prediction = 'cat'\n print(prediction)\nelse:\n prediction = 'dog'\n print(prediction)","sub_path":"Cat classification/Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"63613090","text":"__author__ = \"David Camhy, Markus Pichler\"\n__copyright__ = \"Copyright 2017, University of Technology Graz\"\n__credits__ = [\"David Camhy\", \"Markus Pichler\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"David Camhy, Markus Pichler\"\n\n\nimport codecs\nimport requests\nfrom io import BytesIO, TextIOWrapper, IOBase, StringIO\nfrom zipfile import ZipFile\nfrom os import path\nimport pandas as pd\nfrom pandas.errors import ParserError\n\n\ndef csv_args(unix=False):\n if unix:\n return dict(sep=',', decimal='.')\n else:\n return dict(sep=';', decimal=',')\n\n\ndef check_path(pth=None):\n if pth == '':\n return pth\n elif pth is None:\n return ''\n elif path.isdir(pth):\n return pth\n else:\n raise UserWarning('Path is not available')\n\n\ndef 
export_series(series, filename, export_path=None, save_as='csv', unix=False):\n \"\"\"\n export the series to a given format\n may be extended\n\n :param export_path: path where file will be stored\n :type export_path: str\n\n :param filename: name of the file\n :type filename: str\n\n :type series: pd.Series\n\n :param save_as: export format\n :type save_as: str\n\n :param unix: whether to use \",\" or \";\" for the csv\n :type unix: bool\n \"\"\"\n fn = path.join(check_path(export_path), '{}.{}'.format(filename, save_as))\n\n if save_as == 'csv':\n series.to_csv(fn, **csv_args(unix))\n\n elif save_as == 'ixx':\n series.to_csv(fn, date_format='%d.%m.%Y %H:%M:%S', sep='\\t')\n\n elif save_as == 'parquet':\n series.to_frame().to_parquet(fn)\n\n else:\n raise NotImplementedError('Sorry, but only csv files are implemented. Maybe there will be more options soon.')\n\n return fn\n\n\ndef import_series(filename, series_label='precipitation', index_label='datetime', unix=False):\n \"\"\"\n\n :param filename:\n :param series_label:\n :param index_label:\n :param unix: whether to use \",\" or \";\" for the csv\n :type unix: bool\n :return:\n \"\"\"\n if filename.endswith('csv'):\n try:\n ts = pd.read_csv(filename, index_col=0, header=None, squeeze=True, names=[series_label], **csv_args(unix))\n ts.index = pd.to_datetime(ts.index)\n ts.index.name = index_label\n return ts\n except ParserError:\n return _parse(filename)\n elif filename.endswith('parquet'):\n return pd.read_parquet(filename).iloc[:, 0].asfreq('T').copy()\n else:\n raise NotImplementedError('Sorry, but only csv files are implemented. Maybe there will be more options soon.')\n\n\nehyd_stations = {100180: 'Tschagguns',\n 100370: 'Thüringen',\n 100446: 'Lustenau',\n 100479: 'Dornbirn',\n 100776: 'Bregenz',\n 101303: 'Leutasch-Kirchplatzl',\n 101816: 'Ladis-Neuegg',\n 102772: 'Kelchsau',\n 103143: 'St. 
Johann in Tirol-Almdorf',\n 103895: 'Eugendorf',\n 104604: 'Schlägl',\n 104877: 'Linz-Urfahr',\n 105445: 'Vöcklabruck',\n 105528: 'Wels',\n 105908: 'Flachau',\n 106112: 'Liezen',\n 106252: 'Wildalpen',\n 106435: 'Klaus an der Pyhrnbahn',\n 106559: 'Steyr',\n 106856: 'Weitersfelden-Ritzenedt',\n 107029: 'Lunz am See',\n 107284: 'Melk',\n 107854: 'Hollabrunn',\n 108118: 'Wien (Botanischer Garten)',\n 108456: 'Gutenstein',\n 108563: 'Naglern',\n 109280: 'Waidhofen an der Thaya',\n 109918: 'Neunkirchen',\n 110064: 'Gattendorf',\n 110312: 'Karl',\n 110734: 'Eisenstadt',\n 111112: 'Oberwart',\n 111435: 'Alpl',\n 111716: 'Judenburg',\n 112086: 'Graz-Andritz',\n 112391: 'St.Peter am Ottersbach',\n 112995: 'Ried im Innkreis',\n 113001: 'Sillian',\n 113050: 'Matrei in Osttirol',\n 113548: 'Afritz',\n 113670: 'Waidegg',\n 114561: 'Klagenfurt',\n 114702: 'Wolfsberg',\n 115055: 'Kendlbruck',\n 115642: 'St.Pölten',\n 120022: 'Hall in Tirol'}\n\n\ndef _get_file(id_):\n \"\"\"\n\n :param id_:\n :return:\n \"\"\"\n url = 'https://ehyd.gv.at/eHYD/MessstellenExtraData/nlv?id={id}&file=2'.format(id=id_)\n r = requests.get(url, allow_redirects=True)\n c = r.content\n if c != b'':\n z = ZipFile(BytesIO(c))\n filename = z.namelist()[0]\n csv_file = TextIOWrapper(z.open(filename), encoding='iso8859')\n return csv_file\n\n\ndef _parse(filepath_or_buffer, series_label='precipitation', index_label='datetime', with_meta=False):\n \"\"\"\n\n :param filepath_or_buffer:\n :param series_label:\n :param index_label:\n :return:\n \"\"\"\n if isinstance(filepath_or_buffer, str):\n csv_file = codecs.open(filepath_or_buffer, 'r', encoding='iso8859')\n elif isinstance(filepath_or_buffer, IOBase):\n csv_file = filepath_or_buffer\n else:\n raise NotImplementedError()\n\n if with_meta:\n meta = []\n for line in csv_file:\n if line.startswith('Werte:'):\n break\n elif with_meta:\n meta.append(line)\n if with_meta:\n meta = pd.Series(meta).str.replace('\\n', '').str.split(';', expand=True).fillna('').apply(lambda x: x.str.strip())\n # meta = ''.join(meta)\n\n f = csv_file.read().replace(' ', '').replace(',', '.')\n csv_file.close()\n ts = pd.read_csv(StringIO(f), sep=';', header=None, index_col=0, squeeze=True, names=[series_label],\n na_values=['Lücke'], date_parser=lambda s: pd.to_datetime(s, format='%d.%m.%Y%H:%M:%S'))\n ts = ts.rename_axis(index_label, axis='index')\n ts = ts.resample('1T').ffill()\n\n if with_meta:\n return ts, meta\n else:\n return ts\n\n\ndef get_station(id_):\n \"\"\"\n\n :param id_:\n :return:\n \"\"\"\n return ehyd_stations[id_]\n\n\ndef get_all_stations():\n \"\"\"\n\n :return:\n \"\"\"\n for id, location in ehyd_stations:\n print(id, ':', location)\n\n\ndef get_series(id_, with_meta=False):\n \"\"\"\n\n :param id_:\n :type id_: int\n\n :param with_meta: whether to return meta data or not\n :type with_meta: bool\n\n :return: data or data with the meta-data\n :rtype: pd.Series | list[pd.Series, str]\n \"\"\"\n if id_ in ehyd_stations:\n print('You choose the station: \"{}\" with the id: \"{}\".'.format(get_station(id_), id_))\n return _parse(_get_file(id_), with_meta=with_meta)\n\n\ndef get_station_meta(id_):\n \"\"\"\n\n :param id_:\n :return:\n \"\"\"\n url = 'https://ehyd.gv.at/eHYD/MessstellenExtraData/nlv?id={id}&file=1'.format(id=id_)\n r = requests.get(url, allow_redirects=True)\n c = r.content\n if c != b'':\n file = TextIOWrapper(BytesIO(c), encoding='iso8859')\n return file.read()\n\n# if __name__ == '__main__':\n# print(pd.Series(ehyd_stations).to_string())\n# NOW = time.time()\n# 
print('{:0.0f}'.format(time.time() - NOW))\n# get_series(105445)\n# print('{:0.0f}'.format(time.time() - NOW))\n","sub_path":"ehyd_tools/in_out.py","file_name":"in_out.py","file_ext":"py","file_size_in_byte":7324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"376847589","text":"# Copyright (c) 2014 Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nTests for solver scheduler Non-Trivial-Solution linearconstraint.\n\"\"\"\n\nfrom nova.tests.scheduler import fakes\nfrom nova.tests.scheduler.solvers import test_linearconstraints as lctest\n\n\nclass NonTrivialSolutionConstraintTestCase(lctest.LinearConstraintsTestBase):\n \"\"\"Test case for NonTrivialSolutionConstraint.\"\"\"\n\n def setUp(self):\n super(NonTrivialSolutionConstraintTestCase, self).setUp()\n\n def test_get_coefficient_vectors(self):\n variables = [[1, 2],\n [3, 4]]\n host1 = fakes.FakeHostState('host1', 'node1',\n {'service': {'disabled': False}})\n host2 = fakes.FakeHostState('host2', 'node2',\n {'service': {'disabled': True}})\n hosts = [host1, host2]\n fake_instance1_uuid = 'fake-instance1-id'\n fake_instance2_uuid = 'fake-instance2-id'\n instance_uuids = [fake_instance1_uuid, fake_instance2_uuid]\n request_spec = {'instance_type': 'fake_type',\n 'instance_uuids': instance_uuids,\n 'num_instances': 2}\n filter_properties = {'context': self.context.elevated()}\n constraint_cls = self.class_map['NonTrivialSolutionConstraint'](\n variables, hosts, instance_uuids, request_spec, filter_properties)\n coeff_vectors = constraint_cls.get_coefficient_vectors(variables,\n hosts, instance_uuids, request_spec, filter_properties)\n ref_coeff_vectors = [[1, 1, -1],\n [1, 1, -1]]\n self.assertEqual(coeff_vectors, ref_coeff_vectors)\n\n def test_get_variable_vectors(self):\n variables = [[1, 2],\n [3, 4]]\n host1 = fakes.FakeHostState('host1', 'node1',\n {'service': {'disabled': False}})\n host2 = fakes.FakeHostState('host2', 'node2',\n {'service': {'disabled': True}})\n hosts = [host1, host2]\n fake_instance1_uuid = 'fake-instance1-id'\n fake_instance2_uuid = 'fake-instance2-id'\n instance_uuids = [fake_instance1_uuid, fake_instance2_uuid]\n request_spec = {'instance_type': 'fake_type',\n 'instance_uuids': instance_uuids,\n 'num_instances': 2}\n filter_properties = {'context': self.context.elevated()}\n constraint_cls = self.class_map['NonTrivialSolutionConstraint'](\n variables, hosts, instance_uuids, request_spec, filter_properties)\n\n variable_vectors = constraint_cls.get_variable_vectors(variables,\n hosts, instance_uuids, request_spec, filter_properties)\n\n ref_variable_vectors = [[1, 3, 1],\n [2, 4, 1]]\n self.assertEqual(variable_vectors, ref_variable_vectors)\n\n def test_get_operations(self):\n variables = [[1, 2],\n [3, 4]]\n host1 = fakes.FakeHostState('host1', 'node1',\n {'service': {'disabled': False}})\n host2 = fakes.FakeHostState('host2', 'node2',\n {'service': {'disabled': True}})\n hosts = [host1, host2]\n 
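 # two fake instances are queued so the constraint is built over both hosts and both instances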
fake_instance1_uuid = 'fake-instance1-id'\n fake_instance2_uuid = 'fake-instance2-id'\n instance_uuids = [fake_instance1_uuid, fake_instance2_uuid]\n request_spec = {'instance_type': 'fake_type',\n 'instance_uuids': instance_uuids,\n 'num_instances': 2}\n filter_properties = {'context': self.context.elevated()}\n constraint_cls = self.class_map['NonTrivialSolutionConstraint'](\n variables, hosts, instance_uuids, request_spec, filter_properties)\n\n operations = constraint_cls.get_operations(variables,\n hosts, instance_uuids, request_spec, filter_properties)\n\n ref_operations = [(lambda x: x == 0), (lambda x: x == 0)]\n self.assertEqual(len(operations), len(ref_operations))\n for idx in range(len(operations)):\n for n in range(4):\n self.assertEqual(operations[idx](n), ref_operations[idx](n))\n","sub_path":"nova/tests/scheduler/solvers/test_non_trivial_solution_constraint.py","file_name":"test_non_trivial_solution_constraint.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"609059007","text":"import turtle\r\nob = turtle.Turtle()\r\nob.speed(10)\r\nd=200\r\ncolors = ['red','purple','blue','yellow']\r\nfor i in range(1,200):\r\n\r\n ob.forward(d)\r\n ob.pencolor(colors[i % 4])\r\n ob.lt(90)\r\n\r\n d = d - 1\r\n","sub_path":"squre_shape.py","file_name":"squre_shape.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"108379716","text":"import threading\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass FetchThread(threading.Thread):\n\tdef __init__(self, item_list):\n\t\tthreading.Thread.__init__(self)\n\t\tself.list = item_list\n\t\tself.results = \"\"\n\t\n\tdef run(self):\n\t\tfor (name, url, exp) in self.list:\n\t\t\tpage = requests.get(url)\n\t\t\tsource = page.content\n\t\t\tself.results += self.gatherDetails(name, exp, BeautifulSoup(source, \"html.parser\"))\n\t\t\tself.results += \"},\\n\"\n\n\tdef gatherDetails(self, name, exp, soup):\n\t\tc = name.split(' x')\n\t\tn = c[0].split('(')\n\t\tresults = '{{\\n\"name\":\"{}\",\\n\"expansion\":\"{}\",\\n'.format(cleanUp(n[0]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t cleanUp(exp.split(':')[0]))\n\t\tresults += '\"count\":{},\\n'.format(cleanUp(c[1]) if len(c) > 1 else '1')\n\t\t\t\n\t\tstart = soup.find('div', id='bodyContent')\n\t\ttry:\n\t\t\timage = start.find_next(imageLink)\n\t\t\tresults += '\"image\":\"{}\",\\n'.format(image[\"href\"])\n\t\texcept:\n\t\t\tprint('Could not find image for ' + name)\n\t\t\n\t\tstart = soup.find('span', string='Card Information')\n\t\ttry:\n\t\t\tdetail = start.find_next('p')\n\t\texcept:\n\t\t\treturn results + '}'\n\t\t\n\t\twhile detail:\n\t\t\ttry:\n\t\t\t\tcontents = [i for i in detail.stripped_strings]\n\t\t\t\tif len(contents) > 0:\n\t\t\t\t\tresults += '\"{}\":\"{}\",\\n'.format(cleanTitle(contents[0]), cleanUp('\\\\n'.join(contents[1:])))\n\t\t\texcept Exception as e:\n\t\t\t\tprint('Could not find detail for ' + name)\n\t\t\tfinally:\n\t\t\t\tdetail = findRunOn(detail)\n\t\t\n\t\tmonster = start.find_next('table')\n\t\ttry:\n\t\t\tfinal = []\n\t\t\tmovement = monster.find_all('tr')\n\t\t\tfor row in movement:\n\t\t\t\tcell = row.find('td')\n\t\t\t\tif 'style' in cell.attrs:\n\t\t\t\t\ttemp = '{\"color\":\"Black\",\"shapes\":['\n\t\t\t\telse:\n\t\t\t\t\ttemp = '{\"color\":\"White\",\"shapes\":['\n\t\t\t\tshapes = []\n\t\t\t\tfor a in 
cell.find_all('a'):\n\t\t\t\t\tshapes.append(cleanUp(a['title'].split(' ')[0]))\n\t\t\t\ttemp += '\"{}\"]}}'.format('\",\"'.join(shapes))\n\t\t\t\tfinal.append(temp)\n\t\t\tresults += '\"movement\":[{}]'.format(','.join(final))\n\t\texcept Exception as e:\n\t\t\tprint('Could not get movement')\n\t\t\tprint(name.split(' ')[:2])\n\t\n\t\treturn results\n\n\ndef nextNotWhitespace(tag):\n\tfor t in tag.next_siblings:\n\t\ttry:\n\t\t\tif t.isspace():\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\treturn t\n\t\texcept:\n\t\t\tif t is None:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\treturn t\n\ndef imageLink(tag):\n\treturn tag.name == \"a\" and ('.png' in tag['href'] or '.jpg' in tag['href'])\n\t\t\t\t\ndef findRunOn(tag):\n\tsibling = nextNotWhitespace(tag)\n\tif sibling.name == \"p\" or sibling.name == \"a\" or sibling.name == 'ul':\n\t\treturn sibling\n\telse:\n\t\treturn False\n\t\t\ndef getItemList(soup, base_url):\n\tstart = soup.find(id=\"List_of_Mythos_Cards\").parent\n\ttable = nextNotWhitespace(start)\n\trows = table.find_all(\"tr\")\n\tdetails = [row.find_all(\"td\") for row in rows]\n\treturn_list = []\n\tfor row in details[1:]:\n\t\titem = row[0].find(\"a\")\n\t\texp_img = row[2].find(\"a\")\n\t\treturn_list.append((item.text + (item.next_sibling if item.next_sibling else ''),\n\t\t\t\t\t\t\tbase_url+item[\"href\"],\n\t\t\t\t\t\t\texp_img[\"title\"] if exp_img else \"\"))\n\treturn return_list\n\ndef cleanUp(text):\n\tstep1 = text.strip()\n\tstep1 = step1.replace('\\n', '')\n\tstep1 = step1.replace('\"', '\\\\\"')\n\treturn step1\n\ndef cleanTitle(text):\n\tstep1 = text.strip()\n\tstep1 = step1.lower()\n\tstep1 = step1.replace(' ', '_')\n\tstep1 = step1.replace(':', '')\n\treturn step1\n\t\nbase_url = \"http://www.arkhamhorrorwiki.com\"\npage = requests.get(base_url + \"/Mythos\")\npage.encoding = \"ISO-8859-1\"\nsource_soup = BeautifulSoup(page.text, \"html.parser\")\nunique_items = getItemList(source_soup, base_url)\n\nthreads = []\nnumThreads = 3\nthreadLength = len(unique_items)//numThreads\nstart = 0\nfor i in range(0,numThreads):\n\tthread = FetchThread(unique_items[start:start+threadLength])\n\tthread.start()\n\tthreads.append(thread)\n\tstart += threadLength\nthread = FetchThread(unique_items[start:])\nthread.start()\nthreads.append(thread)\n\nfor t in threads:\n\tt.join()\n\ninv_json = \"[\"\nfor t in threads:\n\tinv_json += t.results\ninv_json += \"]\"\n\nwith open(\"mythos.json\", \"w\", encoding=\"utf8\") as outfile:\n\toutfile.write(inv_json)","sub_path":"pythonHelpers/gatherMythos.py","file_name":"gatherMythos.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"378785067","text":"#coding:utf-8\n#Author: wang.huaiyu.shui@gmail.com\nSEPARATOR = \"#SEPARATOR#\"\nfrom htmlelement import DIV, LABEL, INPUT, FORM, XML, UL, LI, LEGEND, FIELDSET\n\nclass FormBuilder(object):\n def __init__(self, formname, fields, layout=None, hidden=[], render_to_form=True, **attributes):\n \"\"\"\n layout controls the arrangment of fields in the form, there is an example:\n [\n {\n \"name\":\"userinfo\", \"legend\":\"用户信息\",\n \"fields\":[\n {\"name\":\"username\", \"width\":700}\n ,{\"name\":\"birth\", \"width\":700}\n ,{\"sex\":\"200\"}\n ]\n },\n {\n \"name\":\"#SEPARATOR#\"\n },\n {\n \"name\":\"course\", \"legend\":\"用户课程\",\n \"fields\":[\n {\"name\":\"coursename\", \"width\":43}\n ,{\"name\":\"startdate\", \"width\":90}\n ,{\"name\":\"howlong\", \"width\":96}\n ]\n }\n\n ],\n user also can layout it like 
this:\n [\n \"#SEPARATOR#\", \"coursename\", \"startdate\"\n ]\n \"\"\"\n self.tablename = formname\n if not isinstance(fields, (list, tuple)):\n fields = [fields]\n self.fields = {}\n self._fnames = []\n self.layout = layout\n self.attributes = attributes\n for f in fields:\n f.tablename = formname\n self.fields[f.name] = f\n self._fnames.append(f.name)\n self.hidden = hidden\n self.reset_value, self.submit_value = attributes.get(\"reset\", None), attributes.get(\"submit\", None)\n self.render_to_form = render_to_form\n\n def render(self, _vars=None):\n \"\"\"\n render to be a form\n \"\"\"\n gap = self.attributes.get(\"gap_percentage\" , 1)\n vars = _vars or {}\n layout = self.layout or self._fnames\n captions = []\n HEADERS = []\n DIVS = []\n for f in layout:\n if isinstance(f, dict):\n fsetname = f.get(\"legend\", \"\")\n if f.get(\"name\", \"\") == SEPARATOR:\n DIVS.append(DIV(_class=\"separator\"))\n else:\n HEADERS.append(LI(f.get(\"name\", fsetname), _id=\"h_%s_%s\"%(self.tablename, fsetname)))\n tmpdivs = []\n totalwidth = sum([itm.get(\"width\", 0) for itm in f[\"fields\"] if itm[\"name\"] in self._fnames])\n for field in f[\"fields\"]:\n if totalwidth > 0:\n rati = field.get(\"width\", 0)*100.0/totalwidth- gap\n else:\n rati = None\n if not field[\"name\"] in self._fnames:\n continue\n objfield = self.fields[field[\"name\"]]\n divlabel = DIV(LABEL(objfield.label), _class=\"rowlbl\")\n divctnt = DIV(objfield(vars.get(field[\"name\"], None), vars), _class=\"rowctnt\")\n divcmnt = DIV(objfield.comment or \"\", _class=\"rowcmnt\")\n tmpdiv = DIV(divlabel, divctnt, divcmnt, _id=\"%s_%s\"%(self.tablename, field[\"name\"]))\n if rati:\n tmpdiv[\"_style\"] = \"width:%0.2f\"%rati + \"%;\"\n tmpdivs.append(tmpdiv)\n DIVS.append(FIELDSET(LEGEND(f.get(\"name\", fsetname)), *tmpdivs, _class=\"recrow\", _id=\"wrp_%s_%s\"%(self.tablename, fsetname)))\n else:\n if f == SEPARATOR:\n DIVS.append(DIV(_class=\"separator\"))\n else:\n objfield = self.fields[f]\n divlabel = DIV(LABEL(objfield.label), _class=\"rowlbl\", _for=\"%s_%s\"%(self.tablename, f))\n divctnt = DIV(XML(objfield(vars.get(f, None), vars)), _class=\"rowctnt\")\n divcmnt = DIV(objfield.comment or \"\", _class=\"rowcmnt\")\n tmpdiv = DIV(divlabel, divctnt, divcmnt,_class=\"recrow\", _id=\"wrp_%s_%s\"%(self.tablename, f))\n DIVS.append(tmpdiv)\n #header\n HEADER_DIV = DIV(UL(*HEADERS), _class=\"legends\")\n #body\n BODY_DIV = DIV(*DIVS, _class=\"bodyrows\")\n #hidden\n hiddens = []\n for itm in self.hidden:\n if not isinstance(itm, (list, tuple)):\n itm = [itm, None]\n hiddens.append(INPUT(_type=\"hidden\", _name=itm[0], _value=itm[1]))\n HIDDEN_DIV = DIV(*hiddens, _class=\"hiddenrows\")\n\n #footer\n FOOTER_DIV = DIV(INPUT(_type='reset', _value=self.reset_value or '取 消'),INPUT(_type='submit', _value=self.submit_value or \"提 交\"), _class=\"footerrows\")\n if self.render_to_form:\n wrappertag = FORM\n else:\n wrappertag = DIV\n return wrappertag(HEADER_DIV, BODY_DIV, HIDDEN_DIV, FOOTER_DIV,_name=self.tablename, _id=\"hyform_%s\"%self.tablename, _method=\"post\",_enctype=\"multipart/form-data\", _action=self.attributes.get(\"url\", \".\"))\n\n def validate(self):\n #check user value\n pass\n\nif __name__ == \"__main__\":\n from field import Field\n from widgets import *\n hidden = [(\"session\", \"67asda\"), (\"_xsrf\", \"sdfsf4rwfwe\")]\n\n fs = [\n Field(\"name\", label=\"姓名\", comment=\"您贵姓\", widget = SelectBox),\n Field(\"age\", \"int\"),\n Field(\"desc\", \"text\")\n ]\n\n vars = {\"age\":76, \"name\":['怀玉', 56], \"desc\":\"
df
\"}\n layout = [\n {\"name\":\"基本信息\", \"legend\":\"basic\", \"fields\":[{\"name\":\"name\", \"width\":50},{\"name\":\"age\", \"width\":50}]}\n ,{\"name\":SEPARATOR}\n ,{\"name\":\"描述\", \"legend\":\"desc\", \"fields\":[{\"name\":\"desc\", \"width\":50}]}\n ]\n form = FormBuilder(\"person\", fs, layout=layout, hidden=hidden, url=\"/post\")\n ret = form.render(vars)\n open(\"Noname1.html\", \"w\").write(str(ret))\n\n","sub_path":"modules/hyform/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"205752570","text":"import json\nfrom mock import patch\n\nfrom django.test import override_settings\n\nfrom djangocms_moderation.helpers import (\n get_active_moderation_request,\n get_form_submission_for_step,\n get_page_or_404,\n get_workflow_or_none,\n is_moderation_enabled,\n)\nfrom djangocms_moderation.models import (\n ConfirmationFormSubmission,\n ConfirmationPage,\n PageModeration,\n Workflow,\n)\n\nfrom .utils import BaseTestCase\n\n\nclass GetWorkflowOrNoneTest(BaseTestCase):\n\n def test_existing_workflow(self):\n workflow = Workflow.objects.get(pk=1)\n self.assertEqual(get_workflow_or_none(1), workflow)\n workflow = Workflow.objects.get(pk=2)\n self.assertEqual(get_workflow_or_none(2), workflow)\n\n def test_non_existing_workflow(self):\n self.assertIsNone(get_workflow_or_none(10))\n\n\nclass GetCurrentModerationRequestTest(BaseTestCase):\n\n def test_existing_moderation_request(self):\n active_request = get_active_moderation_request(self.pg1, 'en')\n self.assertEqual(active_request, self.moderation_request1)\n\n def test_no_moderation_request(self):\n active_request = get_active_moderation_request(self.pg2, 'en')\n self.assertIsNone(active_request)\n\n\nclass GetPageOr404Test(BaseTestCase):\n\n def test_returns_page(self):\n self.assertEqual(get_page_or_404(self.pg1.pk, 'en'), self.pg1)\n\n\nclass IsModerationEnabledTest(BaseTestCase):\n\n @override_settings(CMS_MODERATION_ENABLE_WORKFLOW_OVERRIDE=True)\n def test_returns_true_with_override_no_moderation_object(self):\n self.assertTrue(is_moderation_enabled(self.pg1))\n\n @override_settings(CMS_MODERATION_ENABLE_WORKFLOW_OVERRIDE=True)\n def test_returns_true_with_override_moderation_object_enabled(self):\n PageModeration.objects.create(extended_object=self.pg1, enabled=True, workflow=self.wf1,)\n self.assertTrue(is_moderation_enabled(self.pg1))\n\n @override_settings(CMS_MODERATION_ENABLE_WORKFLOW_OVERRIDE=True)\n def test_returns_false_with_override_moderation_object_disabled(self):\n PageModeration.objects.create(extended_object=self.pg1, enabled=False, workflow=self.wf1,)\n self.assertFalse(is_moderation_enabled(self.pg1))\n\n @override_settings(CMS_MODERATION_ENABLE_WORKFLOW_OVERRIDE=True)\n def test_returns_false_with_override_no_workflows(self):\n Workflow.objects.all().delete()\n self.assertFalse(is_moderation_enabled(self.pg1))\n\n def test_returns_true_default_settings_has_default_workflow(self):\n self.assertTrue(is_moderation_enabled(self.pg1))\n\n def test_returns_true_default_settings_moderation_object_enabled(self):\n PageModeration.objects.create(extended_object=self.pg1, enabled=True, workflow=self.wf1,)\n self.assertTrue(is_moderation_enabled(self.pg1))\n\n def test_returns_false_default_settings_moderation_object_disabled(self):\n PageModeration.objects.create(extended_object=self.pg1, enabled=False, workflow=self.wf1,)\n self.assertFalse(is_moderation_enabled(self.pg1))\n\n 
@patch('djangocms_moderation.helpers.get_page_moderation_workflow', return_value=None)\n def test_returns_false_default_settings_no_workflow(self, mock_gpmw):\n self.assertFalse(is_moderation_enabled(self.pg1))\n\n\nclass GetFormSubmissions(BaseTestCase):\n\n def test_returns_form_submission_for_step(self):\n cp = ConfirmationPage.objects.create(\n name='Checklist Form',\n )\n self.role1.confirmation_page = cp\n self.role1.save()\n\n cfs1 = ConfirmationFormSubmission.objects.create(\n request=self.moderation_request1,\n for_step=self.wf1st1,\n by_user=self.user,\n data=json.dumps([{'label': 'Question 1', 'answer': 'Yes'}]),\n confirmation_page=cp,\n )\n ConfirmationFormSubmission.objects.create(\n request=self.moderation_request1,\n for_step=self.wf1st2,\n by_user=self.user,\n data=json.dumps([{'label': 'Question 1', 'answer': 'Yes'}]),\n confirmation_page=cp,\n )\n result = get_form_submission_for_step(active_request=self.moderation_request1, current_step=self.wf1st1,)\n self.assertEqual(result, cfs1)\n","sub_path":"tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"360988897","text":"import currnent_state as cs \nimport gameplay_turns as t4\nimport numpy as np\nimport pandas as pd\n\n# Deck size [monster, spell, trap].\ndA=[20,10,10]\ndB=[20,10,10]\n\n# nothing on field to start\nfA = [0,0,0]\nfB = [0,0]\n\n#cs.Game(n, dA, dB, r, lpA, lpB, hA, hsB, t, fA, fB, stratA, stratB, games)\n\n# data for adjustment function\ngames = pd.DataFrame()\n# cannot have 100% not attack in infinite deck or else never end\nfor x in [x/10 for x in range(0, 7)]:\n for y in [x/10 for x in range(0, 7)]:\n for z in [x/10 for x in range(0, 7)]:\n games = games.append(t4.Game(100,dA,dB,False,1,1,5,5,0,fA,fB,\n [0,1],[0,1],[0,1],\n [x,1-x], [y,1-y], [z,1-z]))\n\n# change non-ones to zeros if not zero\n# sometimes loss can have negatives\n# a negative can show that you are lossing worse though\ngames.loc[games.LP_A <= 0, \"LP_A\"] = 0\n# make winning binomial\ngames.loc[games.LP_A >= 1, \"LP_A\"] = 1\n \n# train-test split\nfrom sklearn.model_selection import train_test_split\n\ntrain_dataa, test_dataa = train_test_split(games,test_size = 0.20,random_state = 42)\n\n# GLM binomial family\n\nfrom patsy import dmatrices\n\nformula = ('LP_A ~ FirstA + HAMon + HASpell + HATrap + PrAwMST + PrAwM + PrAwST')\n#Carve out the training matrices from the training data frame using the regression formula\ny_traina, X_traina = dmatrices(formula, train_dataa, return_type='dataframe')\n#Carve out the testing matrices from the testing data frame using the regression formula\ny_testa, X_testa = dmatrices(formula, test_dataa, return_type='dataframe')\n \nimport statsmodels.api as sm\nbinom_model = sm.GLM(y_traina, X_traina, family=sm.families.Binomial())\nbm_results = binom_model.fit()\n#bm_results.summary()\n\n# make a nice table\nfrom statsmodels.iolib.summary2 import summary_col\n# summary_col([bm_results],stars=True,float_format='%0.3f')\n\n# export table as tex file\nwith open('../Output/bm_summary_table.tex', 'w') as fh:\n fh.write(summary_col([bm_results],stars=True,float_format='%0.3f',model_names=['Dependent Variable: \\n win (1) or lose (0)'],\n info_dict={'N':lambda x: \"{0:d}\".format(int(x.nobs))}).as_latex().replace('\\caption{}', '\\caption{Logistic Model: Starting State}', 1))\n\n##################################################################################\n\n# data 
for comparing strategies\ngametest = cs.Game(200, dA, dB, False, 1, 1, 5, 5, 0, \n fA, fB, 'always', 'always', bm_results)\ngametest = gametest.append(cs.Game(200, dA, dB, False, 1, 1, 5, 5, 0, \n fA, fB, 'half', 'always', bm_results))\ngametest = gametest.append(cs.Game(200, dA, dB, False, 1, 1, 5, 5, 0, \n fA, fB, 'always', 'half', bm_results))\ngametest = gametest.append(cs.Game(200, dA, dB, False, 1, 1, 5, 5, 0, \n fA, fB, 'half', 'half', bm_results))\n# ADP doesnt work yet\ngametest = gametest.append(cs.Game(200, dA, dB, False, 1, 1, 5, 5, 0, \n fA, fB, 'ADP', 'always', bm_results))\ngametest = gametest.append(cs.Game(200, dA, dB, False, 1, 1, 5, 5, 0, \n fA, fB, 'ADP', 'half', bm_results))\n\n# test\n#gamet = cs.Game(100, dA, dB, False, 1, 1, 5, 5, 0,\n# fA, fB, 'ADP', 'always', games, bm_results)\n\n# gametest['LP_A'].value_counts()\n# gametest['LP_B'].value_counts()\n# gametest.loc[gametest.LP_A == 1, \"AsADP\"].value_counts()\n\n# instead of redoing the games for max weight func have a reserve to pick from.\n# filter by current state\ngametest.loc[gametest.LP_A <= 0, \"LP_A\"] = 0\n# make winning binomial\ngametest.loc[gametest.LP_A >= 1, \"LP_A\"] = 1\n\n\n# train-test split\nfrom sklearn.model_selection import train_test_split\n\ntrain_data, test_data = train_test_split(gametest,\n test_size = 0.20,\n random_state = 42)\n\n\n# GLM binomial family\nformula = ('LP_A ~ FirstA + AsHalf + AsAlways + AsADP')\n\n#Carve out the training matrices from the training data frame using the regression formula\ny_train2, X_train2 = dmatrices(formula, train_data, return_type='dataframe')\n#Carve out the testing matrices from the testing data frame using the regression formula\ny_test2, X_test2 = dmatrices(formula, test_data, return_type='dataframe')\n\n\nbinom_model2 = sm.GLM(y_train2, X_train2, family=sm.families.Binomial())\nbm_results2 = binom_model2.fit()\n#bm_results2.summary()\n\n\n# export results.summary as .txt\n#with open('../Output/bm_summary.txt', 'w') as fh:\n# fh.write(bm_results2.summary().as_text())\n\n# make a nice table\n# summary_col([bm_results2],stars=True,float_format='%0.3f')\n\n# export table as tex file\nwith open('../Output/bm_summary_table2.tex', 'w') as fh:\n fh.write(summary_col([bm_results2],stars=True,float_format='%0.3f',model_names=['Dependent Variable: \\n win (1) or lose (0)'],\n info_dict={'N':lambda x: \"{0:d}\".format(int(x.nobs))}).as_latex().replace('\\caption{}', '\\caption{Logistic Model: Evaluate Strategies}', 1))\n\n# prob of winning\n# v = sum(bm_results.params * [1,0.5,0.5,0.5])\n# odds = np.exp(bm_results.params)\n# prob = odds / (1+odds)\n\n# prob A wins\n# v = b_0 + b_1 * x_1 + ... 
+ b_n * x_n\n# A_w = np.exp(v)/(1+np.exp(v))\n\npA_w = pd.DataFrame()\nfor a in [x for x in range(0, 2)]:\n for b in [x for x in range(0, 2)]:\n for c in [x for x in range(0, 2)]:\n if a + b + c == 1: # must pick only one\n v = sum(bm_results2.params * [1,1,a,b,c])\n pA_w = pA_w.append({'ProbAwins':np.exp(v)/(1+np.exp(v)),\n 'Weights':[a,b,c]},ignore_index=True)\n else:\n pass\n\n \nmax_w = pA_w.loc[pA_w['ProbAwins'].idxmax()]\n\n\n\n# confusion matrix\n# Compute prediction\npredicted = bm_results2.predict(X_test2)\n\n# Define the cutoff\ncutoff = 0.5\n\n# Compute class predictions: y_prediction\ny_prediction = np.where(predicted > cutoff, 1, 0)\n\n# Assign actual class labels from the test sample to y_actual\ny_actual = test_data[\"LP_A\"]\n\n\n# Compute and print confusion matrix using crosstab function\nconf_matrix = pd.crosstab(y_actual, y_prediction,\n rownames = [\"Actual\"], \n colnames = [\"Predicted\"], \n margins = True)\n \n# Print the confusion matrix\n#print(conf_matrix)\n\n# Accuracy\nfrom sklearn.metrics import accuracy_score\n\naccuracy = accuracy_score(y_actual, y_prediction)\n\n#print('Accuracy: %.2f' % accuracy + \"%\")\n\n\n\n# investigating the weights -- did not end up useful\n\n#formula = ('LP_A ~ FirstA + PrAwMST + PrAwM + PrAwST')\n\n#Carve out the training matrices from the training data frame using the regression formula\n#y_train3, X_train3 = dmatrices(formula, train_data, return_type='dataframe')\n#Carve out the testing matrices from the testing data frame using the regression formula\n#y_test3, X_test3 = dmatrices(formula, test_data, return_type='dataframe')\n\n\n#binom_model3 = sm.GLM(y_train3, X_train3, family=sm.families.Binomial())\n#bm_results3 = binom_model3.fit()\n#bm_results3.summary()\n\n\n# make a nice table\n#summary_col([bm_results3],stars=True,float_format='%0.3f')\n\n# export table as tex file\n#with open('../Output/bm_summary_table3.tex', 'w') as fh:\n# fh.write(summary_col([bm_results3],stars=True,float_format='%0.3f',model_names=['Dependent Variable: \\n win (1) or lose (0)'],\n# info_dict={'N':lambda x: \"{0:d}\".format(int(x.nobs))}).as_latex().replace('\\caption{}', '\\caption{Logistic Model: Evaluate Weights}', 1))\n\n\n#pA_w = pd.DataFrame()\n#for x in [x/10 for x in range(0, 11)]:\n# for y in [x/10 for x in range(0, 11)]:\n# for z in [x/10 for x in range(0, 11)]:\n# if x + y + z > 0:\n# v = sum(bm_results3.params * [1,1,x,y,z])\n# pA_w = pA_w.append({'ProbAwins':np.exp(v)/(1+np.exp(v)),\n# 'Weights':[x,y,z]},ignore_index=True)\n\n \n#max_w = pA_w.loc[pA_w['ProbAwins'].idxmax()]\n\n\n\n# saving dataframe to csv\ngames.round(4).iloc[:,1:9].head(10).to_latex('../Output/game1.tex',index=False)\ngametest.round(4).iloc[:,1:12].head(10).to_latex('../Output/game2.tex',index=False)\n\n","sub_path":"Game/data_analytics.py","file_name":"data_analytics.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"201666980","text":"import os\nimport re\nimport unittest\n\n\nclass TestUpdateScript(unittest.TestCase):\n def test_dependencies_file(self):\n dependencies_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"../../octoprint_mrbeam/dependencies.txt\",\n )\n dependencies_pattern = r\"([a-z]+(?:[_-][a-z]+)*)==(([1-9][0-9]*!)?(0|[1-9][0-9]*)(\\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))?(\\.post(0|[1-9][0-9]*))?(\\.dev(0|[1-9][0-9]*))?$)\" # $ ad the end needed so we see if there is a leftover at the end\n with open(dependencies_path, 
\"r\") as f:\n lines = f.readlines()\n for line in lines:\n self.assertRegexpMatches(line, dependencies_pattern)\n","sub_path":"tests/softwareupdate/test_dependencies.py","file_name":"test_dependencies.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"99750710","text":"from glob import glob\nimport os\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nimport numpy as np\n\ntry:\n from Cython.Build import cythonize\n\n USE_CYTHON = True\nexcept ImportError:\n print('Cython is not available; using pre-generated C files')\n USE_CYTHON = False\n\next = '.pyx' if USE_CYTHON else '.c'\nextensions = []\nfor source_file in glob('probfit/*' + ext):\n fname, _ = os.path.splitext(os.path.basename(source_file))\n extensions.append(\n Extension('probfit.{0}'.format(fname),\n sources=['probfit/{0}{1}'.format(fname, ext)],\n include_dirs=[np.get_include()])\n )\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\ndef get_version():\n version = {}\n with open('probfit/version.py') as fp:\n exec(fp.read(), version)\n return version['__version__']\n\n\n__version__ = get_version()\n\nsetup(\n name='probfit',\n version=__version__,\n description='Distribution Fitting/Regression Library',\n long_description=''.join(open('README.rst').readlines()[4:]),\n author='Piti Ongmongkolkul',\n author_email='piti118@gmail.com',\n url='https://github.com/scikit-hep/probfit',\n package_dir={'probfit': 'probfit'},\n packages=['probfit'],\n ext_modules=extensions,\n install_requires=[\n 'setuptools',\n 'numpy',\n 'iminuit'\n ],\n classifiers=[\n \"Programming Language :: Python\",\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Intended Audience :: Science/Research',\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License'\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"649885996","text":"import glob\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy as sc\nfrom sklearn import metrics\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.metrics import roc_curve, confusion_matrix\nimport torch\nimport torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions\nimport torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.\nimport torch.nn.functional as F # All functions that don't have any parameters\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import matthews_corrcoef\n\n# Set device\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Using device (CPU/GPU):\", device)\n#device = torch.device(\"cpu\")\n\n# ML architecture\n###############################\n### Define network ###\n###############################\n\nprint(\"Initializing network\")\n\n# Hyperparameters\ninput_size = 528\nnum_classes = 1\nlearning_rate = 0.01\n\nclass Net(nn.Module):\n def __init__(self, num_classes):\n super(Net, self).__init__()\n self.layers = nn.Sequential( \n 
nn.Conv1d(in_channels=7, out_channels=100, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.BatchNorm1d(100),\n \n nn.Conv1d(in_channels=100, out_channels=100, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.BatchNorm1d(100),\n\n nn.Flatten(),\n nn.Linear(6600, 128),\n nn.ReLU(),\n nn.Linear(128, 16),\n nn.ReLU(),\n nn.Linear(16,num_classes),\n nn.Sigmoid()\n )\n\n def forward(self, x): \n x = self.layers(x)\n return x\n \n# Initialize network\nnet = Net(num_classes=num_classes).to(device)\n\ndef reset_weights(m):\n '''\n Try resetting model weights to avoid\n weight leakage.\n '''\n for layer in m.children():\n if hasattr(layer, 'reset_parameters'):\n #print(f'Reset trainable parameters of layer = {layer}')\n layer.reset_parameters()\n #print('Reset trainable parameters for model')\n\n# Loss and optimizer\ncriterion = nn.BCELoss()\noptimizer = optim.SGD(net.parameters(), lr=learning_rate)\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"183247719","text":"import pprint\nimport json\nimport csv\nimport pandas as pd\n\ndef loadCountryCode(filename):\n\twith open(filename) as file:\n\t\treader = csv.reader(file)\n\t\tcountryCode = {}\n\t\tfor line in reader:\n\t\t\t#print(line)\n\t\t\tcountryCode[line[0]]=int(line[1])\n\treturn countryCode\n\ndef load(filename):\n\t#Load the database\n\twith open(filename,\"r\",encoding=\"utf-8\") as f:\n\t\tdb=json.load(f)\n\tprint(\"Total number of items:\", len(db))\n\treturn db\n\ndef jsonDump(db,filename):\n\twith open(filename,\"w\",encoding=\"utf-8\") as f:\n\t\tjson.dump(db,f)\n\ndef statistify(db):\n\tdb2=[]\n\tfor item in db:\n\t\ttmp={}\n\t\ttmp[\"_counter\"]=item[\"_counter\"]\n\t\t#print(tmp[\"_counter\"])\n\t\ttmp[\"budget\"]=item[\"budget\"]\n\t\ttmp[\"revenue\"]=item[\"revenue\"]\n\t\ttmp[\"runtime\"]=item[\"runtime\"]\n\t\ttmp[\"crew\"]=len(item[\"crew\"])\n\t\ttmp[\"cast\"]=len(item[\"cast\"])\n\t\tif len(item[\"genres\"])>=1:\n\t\t\ttmp[\"genre\"]=item[\"genres\"][0][\"id\"]\n\t\telse:\n\t\t\ttmp[\"genre\"]=-1\n\t\tif len(item[\"production_countries\"])>=1:\n\t\t\ttmp[\"country\"]=countryCode[item[\"production_countries\"][0][\"iso_3166_1\"]]\n\t\telse:\n\t\t\ttmp[\"country\"]=-1\n\t\tif len(item[\"production_companies\"])>=1:\n\t\t\ttmp[\"company\"]=item[\"production_companies\"][0][\"id\"]\n\t\telse:\n\t\t\ttmp[\"company\"]=-1\n\t\t#tmp[\"trend\"]\n\t\t#tmp[\"#view\"]\n\t\ttmp[\"_info\"]={\"title\":item[\"title\"],\"id\":item[\"id\"]}\n\t\tdb2.append(tmp)\n\t\t#print(tmp)\n\tjsonDump(db2,\"movieDbStat.json\")\n\tdata=[[d['_counter'], d['budget'], d['revenue'], d['runtime'], d['crew'], d['cast'], d['genre'], d['country'], d['company']] for d in db2]\n\tdf=pd.DataFrame(data, columns=['_counter','budget','revenue','runtime','crew','cast','genre','country','company'])\n\tprint(df)\n\treturn [db, df]\n\ndef analysis(df):\n\tme=df.mean()\n\ncountryCode=loadCountryCode(\"iso_3166_1.csv\")\ndb=load(\"movieDbClean.json\")\n[db, df]=statistify(db)\n\nprint(\"\\nOne example of statistical purpose database:\")\nprint(db[0])\n\nanalysis(df)","sub_path":"2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"551526466","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QTextBrowser, QPushButton\nimport 
random\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 550, 50)\n self.setWindowTitle('Cлучайная строка')\n self.button = QPushButton(\"Получить\", self)\n self.button.move(10, 10)\n self.button.resize(100, 30)\n self.str = QTextBrowser(self)\n self.str.move(120,10)\n self.str.resize(430,30)\n self.button.clicked.connect(self.load_random_string)\n\n\n\n def load_random_string(self):\n try:\n with open('lines.txt', encoding='utf8') as f:\n text = f.read().split('\\n')\n\n self.str.setText(random.choice(text) if text else \"Файл пустой\")\n except FileNotFoundError:\n self.str.setText(\"Файл не найден\")\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n ex.show()\n sys.exit(app.exec())\n\n\n\n\n","sub_path":"2nd_year/7/ClassWork/Случайная строка из файла 2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"403940695","text":"def di(str1):\n dict0 = {}\n for i in range(len(str1)):\n if str1[i] not in dict0:\n dict0[str1[i]] = 1\n else:\n dict0[str1[i]] = dict0[str1[i]] + 1\n return dict0\n\n\ndef difference(N,a):\n res=0\n while(a!=[]):\n a=[i for i in a if i!=a[0]]\n res=res+1\n print(str(res)+'')\n \n\n\na=[]\nN=int(input())\nfor j in range(N):\n str1=input()\n a.append(di(str1))\n\ndifference(N,a)","sub_path":"Code/CodeRecords/2908/59018/295476.py","file_name":"295476.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"637949887","text":"import json\nfrom typing import List\n\n\nclass JsonParser:\n def parse(self, event: bytes) -> List[dict]:\n try:\n data = json.parse(event.decode())\n except Exception as e:\n raise ValueError from e\n\n if isinstance(data, dict):\n return [data]\n if isinstance(data, list):\n return data\n\n raise ValueError\n\n\nparsers = {\n 'application/json': JsonParser,\n}\n","sub_path":"src/apps/main/event_parsers.py","file_name":"event_parsers.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"447304536","text":"from django.urls import path \nfrom . 
import views\n\nurlpatterns = [\n path('root', views.main),\n path('register', views.register),\n path('success', views.success, name='success'),\n path('clear', views.delete),\n path('login', views.login, name='login'),\n path('logout', views.logout),\n]","sub_path":"login_proj/login_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"594177739","text":"import os\nfrom dealer import Dealer\nfrom player import Player\n\nclass Game:\n def __init__(self):\n self.dealer = Dealer()\n self.players = [] \n self.players_playing = [] \n self.dealer.set_game()\n self.create_players()\n self.game()\n \n def clear(self):\n if os.name == \"nt\":\n os.system(\"cls\")\n elif os.name == \"posix\":\n os.system(\"clear\")\n \n def create_players(self):\n self.clear()\n n_players = int(input(\"How many players: \"))\n while True:\n try:\n for _ in range(n_players):\n name = input(\"Tell me you name: \")\n money = int(input(\"How much money: \"))\n self.players.append(Player(name, money)) \n break\n except ValueError:\n pass\n self.players_playing = self.players[:]\n\n def player_off_play(self, player):\n #delete player from the play and give him the money\n p_idx = self.players_playing.index(player)\n del self.players_playing[p_idx]\n \n def round1(self):\n #fist bet\n for player in self.players:\n player.betting1()\n\n #first cards\n ##players\n for player in self.players:\n hand = [self.dealer.draw_card() for _ in range(2)]\n player.cards.append(hand)\n\n ##dealer\n self.dealer.hidden_card.append(self.dealer.draw_card())\n self.dealer.up_cards.append(self.dealer.draw_card())\n \n\n #check for blackjack\n for player in self.players_playing:\n for hand in player.cards: \n if self.dealer.blackjack(hand):\n self.player_off_play(player)\n player.win()\n player.reset()\n print(\"Player {}\".format(player))\n print(\"BLACKJACK\")\n input(\"\")\n \n #check for spliting\n #just need to check the slipt for the first hand, splitting is not possible on second hands\n for player in self.players_playing:\n for hand in player.cards:\n if hand[0][0:-1] == hand[1][0:-1]:\n player.split = True\n \n def round2(self):\n for player in self.players_playing:\n for hand in player.cards:\n while True:\n self.clear()\n self.display_board()\n if self.dealer.points(hand) >= 21: #just in case to not process this one\n break\n print(\"PLAYER: {}\".format(player.name))\n print(\"HAND PLAYING: {}\".format(hand))\n\n if len(player.cards) == 1:\n if player.split == True:\n print(\"SPLIT\")\n print(\"HIT\")\n print(\"STAY\")\n \n ans = input(\">> \")\n\n if ans.lower() == \"split\":\n player.splitting()\n player.betting2()\n player.split = False\n \n elif ans.lower() == \"hit\": \n hand.append(self.dealer.draw_card()) \n \n elif ans.lower() == \"stay\":\n #jumps to next hand or player\n break\n \n def round3(self):\n #shows dealer's cards\n self.dealer.show_card()\n self.dealer.dealer_draw()\n\n #checking for winners and ties\n for player in self.players_playing:\n for idx, hand in enumerate(player.cards):\n self.clear()\n self.display_board()\n print(\"PLAYER: {}\".format(player.name))\n print(\"ON HAND: {}\".format(hand))\n dealer_pts = self.dealer.points()\n player_pts = self.dealer.points(hand) \n winner = self.dealer.winner(hand)\n print(\"Your points {} and dealer's points {}\".format(player_pts, dealer_pts))\n input()\n # returns true or false\n if winner == \"dealer\":\n pass\n elif winner == \"player\":\n 
if idx == 0:\n player.win() #first hand\n else:\n player.win(2)\n elif winner == \"tie\":\n if idx == 0:\n player.tie(0) #first hand\n else:\n player.tie(2)\n player.reset()\n \n return False\n\n def display_board(self):\n print(\"-\"*33)\n\n #dealer cards\n print(\"|\", end=\"\")\n print(\"{:<10}\".format(\"Dealer\"), end=\"\")\n for card in self.dealer.up_cards:\n print(\"{:<4}\".format(card), end=\"\")\n\n if len(self.dealer.hidden_card) >= 1:\n print(\"{:<4}\".format(\"**\"), end=\"\")\n\n if self.dealer.points() != 0:\n print(\"pts:{:<3}\".format(self.dealer.points()), end=\"\")\n print(\"{:>8}\".format(\"|\"), end=\"\")\n print(\"\")\n\n #players\n for player in self.players:\n print(\"|\", end=\"\")\n print(\"{:<10}\".format(player.name), end=\"\")\n\n for hand in player.cards:\n for card in hand:\n print(\"{:<4}\".format(card), end=\"\")\n print(\"pts:{:<3}\".format(self.dealer.points(hand)), end=\"\")\n\n print(\"{:>8}\".format(\"|\"), end=\"\")\n print(\"\")\n \n print(\"-\"*33)\n\n\n #### DEBUG #####\n def cards_pts(self):\n self.clear()\n for player in self.players_playing:\n for hand in player.cards:\n print(player.cards, end=\"\")\n print(self.dealer.points(hand))\n input()\n\n\n\n def game(self):\n game_on = True\n counter = 1\n\n while True:\n self.clear()\n self.display_board()\n\n if counter == 1:\n self.round1()\n counter += 1\n elif counter == 2:\n self.round2()\n counter += 1\n elif counter == 3:\n game_on = self.round3()\n\n if game_on == False:\n self.dealer.set_game()\n self.players_playing = self.players[:]\n self.clear()\n print(\"READY FOR ANOTHER\")\n input()\n counter = 1\n game_on = True\n continue\n\n\n\nif __name__ == \"__main__\":\n Game()\n \n\n \n","sub_path":"BlackJack/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"202847690","text":"import csv,json\nmyfile='C:/Users/gulsh/Desktop/Prodapt/Assessment/day14/petrol.csv'\njsonfilepath= \"petrol.json\"\nli = []\nwith open(myfile,\"r\",encoding='utf-8') as f:\n datatreader=csv.DictReader(f)\n for i in datatreader:\n li.append(i)\n\npetrol_li=json.dumps(li)\nwith open(jsonfilepath,'w',encoding='utf-8') as f:\n f.write(petrol_li)","sub_path":"day14/petrol.py","file_name":"petrol.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"228257715","text":"import pytest\nfrom freezegun import freeze_time\n\nfrom tests.test_helpers import mce_helpers\nfrom tests.test_helpers.click_helpers import run_datahub_cmd\nfrom tests.test_helpers.docker_helpers import is_responsive, wait_for_port\n\nFROZEN_TIME = \"2020-04-14 07:00:00\"\n\npytestmark = pytest.mark.skip(\n reason=\"Vertica tests are disabled due to a dependency conflict with SQLAlchemy 1.3.24\"\n)\n\n\n@pytest.fixture(scope=\"module\")\ndef test_resources_dir(pytestconfig):\n return pytestconfig.rootpath / \"tests/integration/vertica\"\n\n\n@pytest.fixture(scope=\"module\")\ndef vertica_runner(docker_compose_runner, pytestconfig, test_resources_dir):\n with docker_compose_runner(\n test_resources_dir / \"docker-compose.yml\", \"vertica\"\n ) as docker_services:\n wait_for_port(\n docker_services,\n \"vertica-ce\",\n 5433,\n timeout=120,\n checker=lambda: is_responsive(\"vertica-ce\", 5433, hostname=\"vertica-ce\"),\n )\n yield docker_services\n\n\n# Test needs more work to be done , currently it is working 
fine.\n@freeze_time(FROZEN_TIME)\n@pytest.mark.integration\n@pytest.mark.skip(\"This does not work yet and needs to be fixed.\")\ndef test_vertica_ingest_with_db(vertica_runner, pytestconfig, tmp_path):\n test_resources_dir = pytestconfig.rootpath / \"tests/integration/vertica\"\n # Run the metadata ingestion pipeline.\n config_file = (test_resources_dir / \"vertica_to_file.yml\").resolve()\n run_datahub_cmd(\n [\"ingest\", \"--strict-warnings\", \"-c\", f\"{config_file}\"], tmp_path=tmp_path\n )\n\n # Verify the output.\n mce_helpers.check_golden_file(\n pytestconfig,\n output_path=tmp_path / \"vertica.json\",\n golden_path=test_resources_dir / \"vertica_mces_with_db_golden.json\",\n )\n","sub_path":"metadata-ingestion/tests/integration/vertica/test_vertica.py","file_name":"test_vertica.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"256650434","text":"# encoding = utf-8\n\nfrom sqlalchemy import Column, String, create_engine, table, column, select, update, insert\nfrom sqlalchemy import Integer, BLOB, CHAR, DECIMAL\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import update, insert\n\nimport mysql.connector.pooling\n\nfrom models.crawler import *\n\n\n# # 初始化数据库连接:\n# engine = create_engine('mysql+mysqlconnector://root:password@localhost:3306/collect')\n# # 创建DBSession类型:\n# DBSession = sessionmaker(bind=engine)\n# # 创建Session:\n# session = DBSession()\n# # 创建Query查询,filter是where条件,最后调用one()返回唯一行,如果调用all()则返回所有行:\n# # user = session.query(Movie).filter(Movie.id=='5').one()\n# user = list(session.query(Movie))\n# # 打印类型和对象的name属性:\n# # print('type:', type(user))\n# # print('name:', user.name)\n#\n# print(len(user))\n# # 关闭Session:\n# session.close()\n\n\nclass DbOperate:\n def __init__(self):\n self.dbconfig = {\n \"database\": \"collect\",\n \"user\": \"root\",\n \"password\": \"password\",\n }\n self.engine = create_engine('mysql+mysqlconnector://root:password@localhost:3306/collect', echo=True)\n self.DBSession = sessionmaker(bind=self.engine)\n\n def normal_conn(self):\n cnxpool = mysql.connector.pooling.MySQLConnectionPool(pool_name=\"mypool\",\n pool_size=3,\n **self.dbconfig)\n conn = cnxpool.get_connection()\n print(type(conn))\n cursor = conn.cursor()\n\n # cursor.execute('select * from mvoies', ('1',))\n cursor.execute('select * from movies')\n values = cursor.fetchall()\n # print('type: ' + str(type(values)))\n # print(values)\n cursor.close()\n\n def orm_query(self):\n session = self.DBSession()\n q = session.query(Movie)\n user = list(q)\n print(len(user))\n session.close()\n\n def orm_insert_one(self, movieToInsert):\n session = self.DBSession()\n session.add(movieToInsert)\n session.commit()\n session.close()\n\n def orm_insert_multi(self, moviesToInsert):\n session = self.DBSession()\n session.add_all(moviesToInsert)\n session.commit()\n session.close()\n\n def orm_insert_table(self, toinsert):\n session = self.DBSession()\n session.add_all(toinsert)\n session.commit()\n session.close()\n\nif __name__ == '__main__':\n\n # c = MovieCrawler()\n # infolist = c.get_from_cathay()\n # print('Count got: ' + str(len(infolist)))\n dbop = DbOperate()\n # m = Movie(['name_', '', '', '', ''])\n # dbop.orm_insert_one(m)\n\n dbop.orm_query()\n\n\n # dbop.orm_insert_multi(infolist)\n # print('Insert success')\n\n # l = [TestTableItem(teststring = x) for x in ['abc','bcd']]\n # l = [Movie(list(x)) for x in 
['abc','bcd']]\n # dbop.orm_insert_table(l)\n","sub_path":"db/db_operator.py","file_name":"db_operator.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"197519732","text":"# Copyright 2017 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Rules to load all dependencies of rules_databricks.\"\"\"\n\nload(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\", \"http_file\")\nload(\"@bazel_tools//tools/build_defs/repo:git.bzl\", \"git_repository\")\nload(\"//toolchain/databricks:configure.bzl\", databricks_toolchain_configure = \"toolchain_configure\")\n\ndef repositories():\n \"\"\"Download dependencies of container rules.\"\"\"\n excludes = native.existing_rules().keys()\n\n if \"bazel_skylib\" not in excludes:\n\n http_archive(\n name = \"bazel_skylib\",\n sha256 = \"e5d90f0ec952883d56747b7604e2a15ee36e288bb556c3d0ed33e818a4d971f2\",\n strip_prefix = \"bazel-skylib-1.0.2\",\n urls = [\n \"https://github.com/bazelbuild/bazel-skylib/archive/1.0.2.tar.gz\"\n ],\n )\n\n if \"rules_python\" not in excludes:\n\n http_archive(\n name = \"rules_python\",\n sha256 = \"aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161\",\n urls = [\n \"https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz\"\n ],\n )\n\n if \"subpar\" not in excludes:\n\n git_repository(\n name = \"subpar\",\n remote = \"https://github.com/google/subpar\",\n commit = \"9fae6b63cfeace2e0fb93c9c1ebdc28d3991b16f\",\n shallow_since = \"1565833028 -0400\"\n )\n\n if \"jq\" not in excludes:\n\n http_file(\n name = \"jq\",\n executable = True,\n sha256 = \"af986793a515d500ab2d35f8d2aecd656e764504b789b66d7e1a0b727a124c44\",\n urls = [\n \"https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64\"\n ],\n )\n\n if \"databricks_src\" not in excludes:\n http_archive(\n name = \"databricks_src\",\n build_file_content = \"\"\"\npackage(default_visibility = [\"//visibility:public\"])\nfilegroup(\n name = \"src\",\n srcs = glob(\n [\"databricks_cli/**/*.py\"],\n ),\n visibility = [\"//visibility:public\"],\n)\n\"\"\",\n sha256 = \"6b7748da9595b818618ce3810647f900304219122114472e6653c4ffcd302537\",\n strip_prefix = \"databricks-cli-0.9.1\",\n urls = [\n \"https://github.com/databricks/databricks-cli/archive/0.9.1.tar.gz\"\n ],\n )\n\n native.register_toolchains(\n \"@rules_databricks//toolchain/databricks:default_linux_toolchain\"\n )\n\n if \"databricks_config\" not in excludes:\n databricks_toolchain_configure(name = \"databricks_config\")\n","sub_path":"databricks/repositories.bzl","file_name":"repositories.bzl","file_ext":"bzl","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"134699964","text":"import glob\nimport os\nimport random\nimport re\n\ndef build_string(codon):\n output_string = codon\n\n while codon[-1] != '\\0':\n next_letter = 
random.choice(codons[codon])\n output_string += next_letter\n codon = output_string[-codon_size:]\n\n print(output_string)\n\ndef build_markov():\n for file in glob.glob(os.path.join(path, '*.vtt')):\n with open(file, 'r+') as f:\n str = re.sub(trim_re, '', f.read())\n for i in range(len(str)):\n codon = str[i: i + codon_size]\n if i == 0:\n starting_codons.append(codon)\n\n try:\n next_letter = str[i + codon_size]\n except IndexError:\n next_letter = '\\0'\n\n if codon not in codons:\n codons[codon] = []\n\n codons[codon].append(next_letter)\n\n starting_codon = random.choice(starting_codons)\n build_string(starting_codon)\n\nif __name__ == '__main__':\n trim_re = r'(\\d+:\\d+:\\d+\\.\\d+ \\-\\-\\> \\d+:\\d+:\\d+\\.\\d+)|\\n{2}|WEBVTT\\n|Kind: captions\\n|Language: en\\n'\n\n path = 'subs_raw/'\n codon_size = 8\n codons = {}\n starting_codons = []\n\n build_markov()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"460388129","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 9 18:54:38 2020\n\n@author: 15106\n\n\"\"\"\n\nimport bt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n########### Getting S&P data ###########\nbeginning = '2016-01-04'\ndata_spgsg = bt.get('^GSPC', start=beginning)\n\n\ns_spgsg = bt.Strategy('S&P gsg only', \n [bt.algos.RunMonthly(),\n bt.algos.SelectAll(),\n bt.algos.WeighEqually(),\n bt.algos.Rebalance()])\n \nb_spgsg = bt.Backtest(s_spgsg, data_spgsg)\nresult = bt.run(b_spgsg)\nresult.plot()\n\n########### Getting Risk Free Rate ###########\nriskfree = bt.get('^IRX', start=beginning)\nriskfree_rate = float(riskfree.mean()) / 100\nprint(riskfree_rate)\n\n\n\n########### Selecting Algos ###########\n\n\n### Random 5 strategies ###\nequity_list = ['vaw', 'vis', 'vcr', 'vdc','vht', 'vfh', 'vgt', 'vox', 'vpu', 'vnq', 'vde']\ndata = bt.get(equity_list, start=beginning)\ndata.head()\n\ns_random = bt.Strategy('Random 5', \n [bt.algos.RunMonthly(),\n bt.algos.SelectRandomly(5),\n bt.algos.WeighEqually(),\n bt.algos.Rebalance()])\n\nb_random = bt.Backtest(s_random, data)\n\nresult = bt.run(b_random, b_spgsg)\n\nresult.set_riskfree_rate(riskfree_rate)\nresult.plot()\nresult.display()\n\n#### best 5 securities with a lookback period of 3 months\ns_best = bt.Strategy('Best 5', \n [bt.algos.RunMonthly(),\n bt.algos.SelectAll(),\n bt.algos.SelectMomentum(5, lookback=pd.DateOffset(months=3)),\n bt.algos.WeighEqually(),\n bt.algos.Rebalance()])\n\nb_best = bt.Backtest(s_best, data)\n\nresult = bt.run(b_random, b_best, b_spgsg)\n\nresult.set_riskfree_rate(riskfree_rate)\nresult.plot()\nresult.display()\n\n# comparing the statostics for the 3 strategies \ndf_results_key = result.stats.assign()\n\n\n#### using weights strategies #####\n\n### Inverse of Volatility | bt.algos.WeighInvVol()\ns_inv = bt.Strategy('Inverse of Volatility', \n [bt.algos.RunMonthly(),\n bt.algos.SelectAll(),\n bt.algos.WeighInvVol(),\n bt.algos.Rebalance()])\n\nb_inv = bt.Backtest(s_inv, data)\nresult = bt.run(b_inv, b_random, b_best, b_spgsg)\nresult.set_riskfree_rate(riskfree_rate)\nresult.plot()\nresult.display()\n\ndf_results_key_2 = result.stats.assign()\n\n### Using Markowitz\n##s_mark = bt.Strategy('Markowitz', \n #[bt.algos.RunEveryNPeriods(10, 3),\n #bt.algos.SelectAll(),\n #bt.algos.WeighMeanVar(),\n #bt.algos.Rebalance()])\n\n##b_mark = bt.Backtest(s_mark, data)\n\nresult = bt.run(b_mark, b_inv, b_random, b_best, 
b_spgsg)\nresult.set_riskfree_rate(riskfree_rate)\nresult.plot()\nplt.title(label = \"SMA_50_200 vs SMA_32_248 | Equity Progression\", fontsize=20)\n\n\n\n\n\n\n\n\n","sub_path":"Momentum.py","file_name":"Momentum.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"320406331","text":"#!/bin/python3\nimport unittest\n\n# Complete the diagonalDifference function below.\n\n\ndef diagonalDifference(arr):\n \"\"\"\n Given a square matrix, calculate the absolute difference between the sums of its diagonals. \n \"\"\"\n\n sum_diag1 = 0\n sum_diag2 = 0\n\n j = len(arr) - 1\n for i in range(len(arr)):\n sum_diag1 += arr[i][i]\n sum_diag2 += arr[i][j]\n j -= 1\n return abs(sum_diag1 - sum_diag2)\n\n\nif __name__ == '__main__':\n arr = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]\n result = 15\n\n tc = unittest.TestCase('__init__')\n tc.assertEqual(diagonalDifference(arr), result)\n","sub_path":"algorithms/diag_diff.py","file_name":"diag_diff.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"517156556","text":"from flask import Flask, render_template, redirect,url_for\nfrom flask_assets import Environment, Bundle\nfrom flask_mongoengine import MongoEngine\nfrom flask_breadcrumbs import Breadcrumbs\nfrom flask_mail import Mail\nfrom functools import wraps\n\nfrom flask_login import current_user\n\n# create the flask object\napp = Flask(__name__)\n\n# Initialize Flask-Breadcrumbs\nBreadcrumbs(app=app)\n\n# Configurations\napp.config.from_object('config')\napp.jinja_env.globals['APP_SETTINGS'] = app.config['APP_SETTINGS']\n\n# mail object\nmail = Mail(app)\n\n# Define the database object which is imported\n# by modules and controllers\ndb = MongoEngine(app)\n\n#assets configuration\nassets = Environment(app)\n\n# to minify ---> filters='cssmin', filters='jsmin'\ncss = Bundle('css/bootstrap.min.css', 'css/font-awesome.min.css', 'css/ionicons.min.css', 'css/AdminLTE.min.css', 'css/skin-black.min.css', 'css/custom.css', output='gen/main.css')\njs = Bundle('js/jquery.min.js', 'js/bootstrap.min.js', 'js/adminlte.min.js', 'js/custom.js', output='gen/main.js')\n\n# make available in template\nassets.register('main_js', js)\nassets.register('main_css', css)\n\n\n\n# ---------------------- ERRORS HANDLER -----------------\n\n@app.errorhandler(403)\ndef forbidden(e):\n app.config['APP_SETTINGS']['title'] = 'Forbidden'\n return render_template('error.html', code=403, txt='Forbidden Page'), 403\n\n\n@app.errorhandler(404)\ndef not_found(e):\n app.config['APP_SETTINGS']['title'] = 'Not Found'\n return render_template('error.html', code=404, txt='Page not Found'), 404\n\n\n@app.errorhandler(405)\ndef method_not_allowed(e):\n app.config['APP_SETTINGS']['title'] = 'Method not Allowed'\n return render_template('error.html', code=405, txt='Method not Allowed'), 405\n\n\n@app.errorhandler(500)\ndef server_error(e):\n app.config['APP_SETTINGS']['title'] = 'Server Error'\n return render_template('error.html', code=500, txt='Server Error'), 500\n\n\n\n# ------------------------ FUNCTIONS --------------------\ndef flash_errors(form):\n \"\"\" Transform FlaskForm errors into a single object to display in template with Flash Plugin\n *This function must be called after form.validate() failed\n\n Arguments:\n form {FlaskForm} -- Make sure that all fields have validators \n\n Returns:\n dist -- ['text error1', 'text error2', ...]\n \"\"\"\n 
messagens = []\n for field, errors in form.errors.items(): \n for error in errors:\n messagens.append(\"Error in the %s field - %s\" % (getattr(form, field).label.text, error))\n\n return messagens\n\n\n\ndef login_roles_required(role=app.config['ACCESS'][0]):\n \"\"\"This decorator function make sure if the user has been logged and if he has access to this call\n We have levels hierarchy, you can see the options and your priorities on config.py\n\n Keyword Arguments:\n role {str} -- Must be the minimum level for this call (default: minimum level set on config.py)\n\n ** If the user has been logged, we add USER as global var for every template has access \n \"\"\"\n def wrapper(f):\n @wraps(f)\n def wrapped(*args, **kwargs):\n if current_user.is_authenticated == False:\n return redirect(url_for('login'))\n else:\n if app.config['ACCESS'].index(role) > current_user.role:\n return not_found(404)\n else:\n app.jinja_env.globals['USER'] = current_user\n return f(*args, **kwargs)\n return wrapped\n return wrapper\n\n \n#import all controllers\nfrom app.controllers import *","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"495339510","text":"import os\nimport sys\nfrom re import search\nfrom datetime import datetime\nfrom uuid import uuid4\n\nfrom sensors import *\n\n\nclass dispatcher():\n \"\"\"class used to interpret network file system paths and to dispatch them to the correct sensor parser\"\"\"\n\n def __init__(self, verbose=False):\n self.verbose = verbose\n\n def get_subclasses(self):\n \"\"\"Detects direct subclasses of the sensor class and returns them\"\"\"\n return sensor.sensor.__subclasses__()\n\n def dispatch(self, basename, file, acq_id, dev_id, user, metrics_args):\n \"\"\"given an acquisition filename, assign it to the correct sensor\"\"\"\n if basename == \"description.xml\":\n return {}, {}\n user_context = \"[user: %s]\" % user\n can_receive = list(filter(lambda s: search(s.filter, file), self.get_subclasses()))\n if not len(can_receive):\n if self.verbose:\n print(user_context, \"No dispatcher found for '%s'\" % basename, file=sys.stderr)\n return {}, {}\n else:\n s = can_receive[0]\n if self.verbose:\n if len(can_receive) > 1:\n print(user_context, \"Multiple dispatchers found for '%s', using first: %s\" % (basename, s), file=sys.stderr)\n else:\n print(user_context, \"Dispatcher found for %s: %s\" % (basename, s))\n\n sensor_id = uuid4()\n sensor_parser = s(file, metrics_args)\n metrics, datapoints = sensor_parser.parse(acq_id, dev_id, sensor_id)\n return {\"_id\": sensor_id, \"sensorType\": s.name, \"metrics\": metrics}, datapoints # sensor\n","sub_path":"stuns/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"522459710","text":"# -*- coding: utf-8 -*-\nimport time\nfrom crawling import Crawling\nfrom multiprocessing import Process\n\nkeyword=None\n\ndef gs(k):\n global keyword\n keyword = k\n \ndef run():\n global keyword\n while True:\n if(keyword is None):\n time.sleep(2)\n else:\n var = keyword\n keyword=None\n print(var)\n p=Process(target=Crawling.MainCrawling,args=(var,))\n p.start()","sub_path":"crawling 
project/server/Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"578995127","text":"from models import *\nfrom forms import *\n\nfrom datetime import datetime\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.db.models import Avg, Count\nfrom django.template import RequestContext\n\ndef get_next_entry(request, jam):\n try:\n return Entry.objects.filter(gamejam=jam).exclude(user__pk=request.user.pk).exclude(vote__user__pk=request.user.pk).annotate(votes=Count('vote')).order_by('votes')[0]\n except IndexError:\n return None\n\ndef jam_detail(request, jam_id):\n jam = get_object_or_404(Jam, pk=jam_id)\n user_profile = None\n now = datetime.now()\n\n not_started = now < jam.date_start\n not_finished = jam.date_start < now < jam.date_end\n voting_period = jam.date_end < now < jam.date_vote_end\n can_vote = request.user.is_authenticated() and Entry.objects.filter(user=request.user, gamejam=jam).count() == 1\n cant_vote = not can_vote and voting_period\n entering = request.user.is_authenticated() and not_finished\n voting = can_vote and voting_period\n entries = None\n if now > jam.date_vote_end:\n entries = Entry.objects.all()\n next_entry = None\n if voting:\n next_entry = get_next_entry(request, jam)\n entry_form = None\n try:\n instance = Entry.objects.get(user=request.user, gamejam=jam)\n except Entry.DoesNotExist:\n instance = None\n if entering:\n if request.method == 'POST':\n entry_form = EntryForm(request.POST, instance=instance)\n if entry_form.is_valid():\n entry = entry_form.save(commit=False)\n entry.user = request.user\n entry.gamejam = jam\n entry.save()\n return HttpResponseRedirect(jam.get_absolute_url())\n else:\n entry_form = EntryForm(instance=instance)\n \n return render_to_response('jam/jam_detail.html', {\n 'jam': jam,\n 'not_started': not_started,\n 'not_finished': not_finished,\n 'cant_vote': cant_vote,\n 'entries': entries,\n 'voting': voting,\n 'next_entry': next_entry,\n 'entry_form': entry_form,\n }, context_instance=RequestContext(request))\n\ndef entry_detail(request, entry_id):\n entry = get_object_or_404(Entry, pk=entry_id)\n user_profile = None\n now = datetime.now()\n can_vote = request.user.is_authenticated() and Vote.objects.filter(user=request.user, entry=entry).count() == 0 and Entry.objects.filter(user=request.user, gamejam=entry.gamejam).count() == 1 and request.user != entry.user\n voting_period = entry.gamejam.date_end < now < entry.gamejam.date_vote_end\n voting = can_vote and voting_period\n if not voting and now < entry.gamejam.date_vote_end:\n raise Http404\n rating_form = None\n next_entry = None\n if voting:\n if request.method == 'POST':\n abstain = 'abstain' in request.POST\n rating_form = RatingForm(request.POST)\n if rating_form.is_valid() or abstain:\n vote = Vote()\n vote.user = request.user\n vote.entry = entry\n vote.abstain = abstain\n vote.save()\n if not abstain:\n rating = rating_form.save(commit=False)\n rating.vote = vote\n rating.save()\n next_entry = get_next_entry(request, entry.gamejam)\n if next_entry:\n return HttpResponseRedirect(next_entry.get_absolute_url())\n else:\n return HttpResponseRedirect('/jams/voting-finished/')\n else:\n rating_form = RatingForm()\n rating = {}\n if now > entry.gamejam.date_vote_end:\n rating['gameplay'] = entry.vote_set.aggregate(Avg('rating__gameplay')).values()[0]\n 
rating['graphics'] = entry.vote_set.aggregate(Avg('rating__graphics')).values()[0]\n rating['music'] = entry.vote_set.aggregate(Avg('rating__music')).values()[0]\n rating['fun'] = entry.vote_set.aggregate(Avg('rating__fun')).values()[0]\n rating['overall'] = entry.vote_set.aggregate(Avg('rating__overall')).values()[0]\n\n return render_to_response('jam/entry_detail.html', {\n 'entry': entry,\n 'rating_form': rating_form,\n 'rating': rating,\n }, context_instance=RequestContext(request))\n","sub_path":"django-site/project/jam/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"593852514","text":"# -*- coding: utf-8 -*-\n\nimport rospy\nfrom InitPose import SetInitPose\nfrom GoPoint import GoToPose\nfrom Getpos import Robot\nfrom CtrlLift import CtrlLift\n\nA = {\"name\":\"1st_elevator_front\",\"pose\":(-7.809, -9.057, 0.0),\"orientation\":(0.0, 0.0, 0.999, -0.05)}\nB = {\"name\":\"1st_elevator_in\",\"pose\":(-9.39, -9.05, 0.0),\"orientation\":(0.0, 0.0, 0.001, 0.999)}\nC = {\"name\":\"2nd_elevator_in\",\"pose\":(-9.04, 25.485, 0.0),\"orientation\":(0.0, 0.0, -0.139, 0.990)}\nD = {\"name\":\"2nd_elevator_front\",\"pose\":(-7.809, 25.3, 0.0),\"orientation\":(0.0, 0.0, 0.999, -0.05)}\nLocations = [A,B,C,D]\n\nRobot1_Init = {\"pose\":(-11.007, 6.646, 0.0),\"orientation\":(0.0, 0.0, 1, -0.023)}\nRobot2_Init = {\"pose\":(-1.43, 2.13, 0.0),\"orientation\":(0.0, 0.0, -0.694, -0.720)}\nRobot3_Init = {\"pose\":(-5.32, 31.4, 0.0),\"orientation\":(0.0, 0.0, -0.694, -0.720)}\n\ndef Init_pose():\n SetInitPose(Robot1_Init[\"pose\"],Robot1_Init[\"orientation\"],\"robot1\")\n SetInitPose(Robot2_Init[\"pose\"],Robot2_Init[\"orientation\"],\"robot2\")\n SetInitPose(Robot3_Init[\"pose\"],Robot3_Init[\"orientation\"],\"robot3\")\n\ndef UpStairs(classNav,classPose):\n Nav_state = classNav.Point_Navigation(Locations[0])\n if Nav_state:\n CtrlLift(0)\n rospy.sleep(2) #wait elevator open\n Nav_state = classNav.Point_Navigation(Locations[1])\n if Nav_state:\n tmp_trans = None\n while tmp_trans == None:\n tmp_trans,tmp_rot = classPose.get_pos()\n tmp_trans[1] = tmp_trans[1]+34.65\n SetInitPose(tmp_trans,tmp_rot,\"robot1\")\n rospy.sleep(2)\n CtrlLift(1)\n rospy.sleep(0.5)\n CtrlLift(1)\n rospy.sleep(2) #wait elevator open\n Nav_state = classNav.Point_Navigation(Locations[3])\n\ndef DownStairs(classNav,classPose):\n Nav_state = classNav.Point_Navigation(Locations[3])\n if Nav_state:\n CtrlLift(1)\n rospy.sleep(2) #wait elevator open\n Nav_state = classNav.Point_Navigation(Locations[2]) #进电梯 D\n if Nav_state:\n tmp_trans = None\n while tmp_trans == None:\n tmp_trans,tmp_rot = classPose.get_pos()\n tmp_trans[1] = tmp_trans[1]-34.65\n SetInitPose(tmp_trans,tmp_rot,\"robot1\") #重定位 C\n CtrlLift(0)\n rospy.sleep(0.5)\n CtrlLift(0)\n rospy.sleep(2)\n Nav_state = classNav.Point_Navigation(Locations[0])\n\nif __name__ == '__main__':\n A_1st = {\"name\":\"A\",\"pose\":(-11.007, 6.646, 0.0),\"orientation\":(0.0, 0.0, 1, -0.023),\"object\":2}\n B_1st = {\"name\":\"A\",\"pose\":(-6.68, 1.98, 0.0),\"orientation\":(0.0, 0.0, 1, -0.023),\"object\":2}\n A_2nd = {\"name\":\"F\",\"pose\":(-4.86, 26, 0.0),\"orientation\":(0.0, 0.0, 0.997, -0.08),\"object\":0}\n rospy.init_node('robot1_display', anonymous=False)\n Init_pose()\n\n navigation = GoToPose(\"robot1\")\n robotpose = Robot(\"robot1\")\n try:\n while True:\n Nav_state = navigation.Point_Navigation(B_1st)\n rospy.sleep(3)\n 
UpStairs(navigation,robotpose)\n Nav_state = navigation.Point_Navigation(A_2nd)\n DownStairs(navigation,robotpose)\n Nav_state = navigation.Point_Navigation(A_1st)\n except KeyboardInterrupt:\n navigation.shutdown()\n\n\n\n\n \n","sub_path":"pythons/robot1_run.py","file_name":"robot1_run.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"381834029","text":"from array import array\nfrom bisect import *\n\nif __name__ == \"__main__\":\n n = int(input())\n arr = []*n\n arr = list(map(int,input().split()))\n l=[None]*n\n max = 0\n\n for i in range(n):\n pos = bisect_left(l, arr[i],0,max)\n l[pos] = arr[i]\n if pos+1 > max:\n max = pos+1\n\n print(max)","sub_path":"pythonProject/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"210963356","text":"#!/usr/bin/env python3\n\n#\n# create a depth/stage shapefile for a specific day OR every day in the eden netCDF file\n\n##import osgeo\n##import osgeo.osr\n##import osgeo.ogr\n\n# if you use the sytax below, then you dont\n# have to prefix all of them with osgeo\n#\n\nfrom osgeo import osr\nfrom osgeo import ogr\nfrom osgeo import gdal\nimport sys\nimport string\nimport os\nimport subprocess\nimport numpy\nimport datetime\nimport time\nfrom argparse import ArgumentParser\nfrom scipy.io import netcdf\nimport subprocess\n\ndef main():\n\n args = parseCmdLine()\n\n if args.verbose: print( __name__ )\n if args.verbose: print( args )\n\n# this is probably not need as I am not sure exactly what it does\n#\n gdal.UseExceptions()\n\n# netCDF_fileName is the quarterly netCDF file from eden\n#\n surfaceDateInFile = False\n\n# Determine if we are running all days in the netCDF File\n# or just one day\n#\n if args.surfaceDateString:\n\n allDays = False\n date_field = args.surfaceDateString.split( '-' ) \n\n year = date_field[ 0 ]\n month = date_field[ 1 ]\n day = date_field[ 2 ]\n surfaceDate = datetime.date( int( year ), int( month ), int( day ) )\n\n else:\n surfaceDate = None\n allDays = True\n\n dataDir = \"/physical/agency_data/eden\"\n gisDir = \"/physical/gis/eden\"\n\n#\tlsel netcdf hardcoded b/c it rarely changes\n#\n netcdf_lsel_filename = dataDir + \"/dem/eden_dem_cm_oc11.nc\"\n\n#\topen netCDF files\n#\n if args.verbose: print ( \"netcdf stage file: %s\" % args.netCDF_fileName )\n stage_input = netcdf.netcdf_file( args.netCDF_fileName, 'r' )\n\n if args.verbose: print ( \"netcdf lsel file: %s\" % netcdf_lsel_filename )\n lsel_input = netcdf.netcdf_file( netcdf_lsel_filename, 'r' )\n\n depth = numpy.zeros( ( 405, 287 ), 'f' )\n\n stage = stage_input.variables[ 'stage' ][ : ]\n lsel = lsel_input.variables[ 'dem' ][ : ]\n ntime,nrowStage, ncolStage = stage.shape\n nrowLsel, ncolLsel = lsel.shape\n\n if nrowStage != nrowLsel or ncolStage != ncolLsel:\n print ( \"Warning lsel shape and stage shape are NOT the same\" )\n print ( \"stage nrow: %s ncol: %s\" % ( nrowStage, ncolStage ) )\n print ( \" lsel nrow: %s nclo: %s\" % ( nrowLsel, ncolLsel ) )\n\n#\n# Get a python date from the netCDF time attribute: time.units\n#\n netCdfStageDate = stage_input.variables[ 'time' ]\n\n dateUnitString = netCdfStageDate.units.decode() \n\n dateUnitStringField = dateUnitString.split()\n\n iso_datetime = dateUnitStringField[ 2 ]\n\n date_time_field = str( iso_datetime).split('T')\n\n date_field = date_time_field[ 0 ].split( '-' )\n\n year = date_field[ 0 
]\n month = date_field[ 1 ]\n day = date_field[ 2 ]\n\n netCdfStartDate = datetime.date( int( year ), int( month ), int( day ) )\n\n# for each day in the netCdf file...\n#\n netCdfDateList = []\n for day in range( 0, ntime ):\n\n day_offset = datetime.timedelta( days = day )\n netCdfSurfaceDate = netCdfStartDate + day_offset\n\n if args.verbose: netCdfDateList.append( netCdfSurfaceDate )\n\n# figure out if we are doing just one day or all days in the \n# netCDF file\n# \n if args.surfaceDateString: \n\n if netCdfSurfaceDate != surfaceDate:\n continue\n else:\n surfaceDateInFile = True\n\n dateStamp = \"%d%02d%02d\" % ( netCdfSurfaceDate.year, netCdfSurfaceDate.month, netCdfSurfaceDate.day )\n iso_date = \"%d-%02d-%02d\" % ( netCdfSurfaceDate.year, netCdfSurfaceDate.month, netCdfSurfaceDate.day )\n\n# For each column for each row\n# calc depth from stage and lsel\n#\n\n for i in range( 0, nrowStage ):\n for j in range( 0, ncolStage ):\n\n if numpy.isnan( lsel[ i, j ] ): \n continue\n\n depth[ i, j ] = ( stage[ day, i, j ] - lsel[ i, j ] )\n#\n# Create and Define the Spatial Reference\n\n if args.verbose: print ( \"Creating Spatial Reference\" ) \n\n spatialReference = osr.SpatialReference() \n spatialReference.ImportFromProj4('+proj=utm +zone=17 +ellps=WGS84 +datum=WGS84 +units=m')\n\n#\n# Establish Driver for Shapefile\n\n if args.verbose: print ( \"creating driver\" ) \n\n driverName = \"ESRI Shapefile\"\n\t\n driver = ogr.GetDriverByName( driverName )\n\n if driver is None:\n print ( \"Driver Not Available: %s\" % driverName ) \n sys.exit( 1 )\n\n if args.verbose: print ( \"creating dataSource\" ) \n\n dataSource = driver.CreateDataSource( gisDir )\n\n if dataSource is None:\n print ( \"Creation of DataSource output file FAILED\" ) \n sys.exit( 1 )\n\n if args.verbose: print ( \"creating Layer\" ) \n\n# GIS Section\n#\n# Create the Layer\n# This will be the name of the shapefile that is written to the DataSource Directory\n#\n\n layer_name = \"eden_epa\" + dateStamp\n\n# delete file if it exists\n#\n pathFileName = gisDir + \"/\" + layer_name + \".shp\"\n\n if os.path.isfile( pathFileName ):\n\n print ( \"File exists, must be deleted: %s\" % pathFileName ) \n\n## cmd = [ 'rm', pathFileName ]\n## if ( subprocess.call( cmd ) ) == 0:\n\n fileSpecification = gisDir + '/' + layer_name + '.*'\n systring = 'rm ' + fileSpecification \n if subprocess.call( systring, shell=True ) == 0:\n print ( \"file deleted\" ) \n else: print ( \"could not delete file\" ) \n\n if args.verbose: print ( \"Creating: %s\\n\" % pathFileName ) \n\n try:\n grid_layer = dataSource.CreateLayer( layer_name, spatialReference, ogr.wkbMultiPolygon )\n except NameError:\n print( 'Creations of grid_layer Failed Miserably' )\n raise\n\n print ( type( grid_layer ) )\n print ( \"grid_layer\" ) \n\n# Add fields to the layer\n# ogr.FieldDefn return instances of the class osgeo.ogr.FieldDefn\n#\n\n field_row = ogr.FieldDefn( \"row\", ogr.OFTInteger )\n field_col = ogr.FieldDefn( \"col\", ogr.OFTInteger )\n field_Stage = ogr.FieldDefn( \"Stage\", ogr.OFTReal )\n field_WaterDepth = ogr.FieldDefn( \"WaterDepth\", ogr.OFTReal )\n\n\n# Create the fields in the layer from the FieldDefns above\n#\n\n grid_layer.CreateField( field_row )\n grid_layer.CreateField( field_col )\n grid_layer.CreateField( field_WaterDepth )\n grid_layer.CreateField( field_Stage )\n\n# dont think I need this. 
The layer is a multipolygon already\n# multipolygon = ogr.Geometry( ogr.wkbMultiPolygon )\n\n#\n# get the layer def to create the features\n#\n grid_layer_defn = grid_layer.GetLayerDefn()\n\n if args.verbose: print ( \"creating Geometry\" ) \n\n##left_edge_x = 476800\n left_edge_x = 463200\n bottom_edge_y = 2790000\n\n start_y = bottom_edge_y\n\n for i in range( 0, nrowStage ):\n\n#\toffset start_x 400m to west which is cancelled out in next loop\n#\n start_x = left_edge_x - 400\n start_y = bottom_edge_y + ( i * 400 )\n\n for j in range( 0, ncolStage ):\n\n start_x += 400\n\n if numpy.isnan( lsel[ i, j ] ): \n continue\n\n feature = ogr.Feature( grid_layer_defn )\n#\n#\tThis code would not work until I casted the depth as float\n#\tBut the rest of them worked fine, probably b/c they are ints\n#\n\n feature.SetField( \"WaterDepth\", float( depth[ i, j ] ) ) \n feature.SetField( \"Stage\", float( stage[day, i, j ] ) ) \n feature.SetField( \"row\", i ) \n feature.SetField( \"col\", j ) \n# \n# Fields of the feature are set individually, then destroyed\n# the new one is created above with the same grid_layer_defn...\n# i think?\n\n\n ring = ogr.Geometry( ogr.wkbLinearRing )\n\n ring.AddPoint( start_x, start_y )\n ring.AddPoint( start_x + 400, start_y )\n ring.AddPoint( start_x + 400, start_y + 400 )\n ring.AddPoint( start_x, start_y + 400 )\n ring.AddPoint( start_x, start_y )\n cell = ogr.Geometry( ogr.wkbPolygon )\n cell.AddGeometry( ring )\n\t\n feature.SetGeometry( cell )\n\n## it seems that a shapefile provide this for free\n##\t\tfeatureIndex = ( ( i * j) + j ) \n##\t\tfeature.SetFID( featureIndex )\n\n grid_layer.CreateFeature( feature )\n# guidance from here: https://trac.osgeo.org/gdal/wiki/PythonGotchas indicates\n# that one should basically never use destroy. It says it is not needed at\n# all to mitigate for leaks b/c python takes care of it when it goes out of scope\n# anyway. 
it suggested setting it to none if you really wanted to do something useful\n# AND, serendipitously using the None made this error go away:\n#\n# file exists, must be deleted: /physical/gis/eden/eden_epa20000105.shp\n# file deleted\n# ERROR 4: Unable to open /physical/gis/eden/eden_epa20000105.shp or /physical/gis/eden/eden_epa20000105.SHP.\n# ERROR 4: Failed to open file /physical/gis/eden/eden_epa20000105.shp.\n# It may be corrupt or read-only file accessed in update mode.\n#\n# feature.Destroy()\n feature = None\n\n\n if allDays == False and surfaceDateInFile == False:\n sys.exit( \"\\nDate Entered ***** %s ***** is Not in the netCDF File\\n\" % args.surfaceDateString ) \n\n# dataSource.Destroy()\n dataSource = None\n\n if args.verbose: print( netCdfDateList )\n\n#-----------------------------------------------------------------------------------------------------------\n# Parse Command Line Args\n\ndef parseCmdLine():\n\n home_dir = os.getenv( 'HOME', default = os.getcwd() )\n\n parser = ArgumentParser( description = \"Eden netCDF to Shapefile\" )\n\n parser.add_argument( 'netCDF_fileName',\n type = str,\n action = 'store',\n help = 'netCDF File Name' )\n\n parser.add_argument( '-d', '--date',\n dest = 'surfaceDateString',\n type = str,\n action = 'store', \n help = 'date yyyy-mm-dd for shapefile output' )\n\n parser.add_argument( '-v', '--verbose',\n action = 'store_true',\n help = 'to provide additional output' )\n\n args = parser.parse_args()\n return args\n\n#----------------------------------------------------------------------------\n# Provide for cmd line invocation: not executed on import\n\nif __name__ == '__main__' :\n main()\n","sub_path":"edenEpaSurfaceToShapefile.py","file_name":"edenEpaSurfaceToShapefile.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"419557849","text":"import math\nimport numpy as np\nfrom IPython.display import Markdown\n\n\ndef setup_percentile_zscore_adults(percentiles_clean):\n \"\"\"\n Creates mean/sd values to merge to adult data for z-score calculations\n\n Parameters:\n percentiles_clean: (DataFrame) with adult percentiles\n\n Returns:\n Dataframe with mean/sd values\n \"\"\"\n dta_forz_long = percentiles_clean[[\"Mean\", \"Sex\", \"param\", \"age\", \"sd\"]]\n\n def label_param(row):\n if row[\"param\"] == \"WEIGHTKG\":\n return \"weight\"\n if row[\"param\"] == \"BMI\":\n return \"bmi\"\n if row[\"param\"] == \"HEIGHTCM\":\n return \"height\"\n\n param_col = dta_forz_long.apply(lambda row: label_param(row), axis=1)\n dta_forz_long = dta_forz_long.assign(param2=param_col.values)\n # preserving some capitalization to maintain compatibility with pediatric\n # percentiles data\n dta_forz = dta_forz_long.pivot_table(\n index=[\"Sex\", \"age\"], columns=\"param2\", values=[\"Mean\", \"sd\"], aggfunc=\"first\"\n )\n dta_forz = dta_forz.sort_index(axis=1, level=1)\n dta_forz.columns = [f\"{x}_{y}\" for x, y in dta_forz.columns]\n dta_forz = dta_forz.reset_index()\n dta_forz[\"rounded_age\"] = dta_forz[\"age\"]\n dta_forz.rename(columns={\"Sex\": \"sex\"}, inplace=True)\n return dta_forz\n\n\ndef add_mzscored_to_merged_df_adults(merged_df, pctls):\n \"\"\"\n Merges mean/sd values onto adult data for z-score calculations\n\n Parameters:\n merged_df: (DataFrame) with subjid, bmi, include_height, include_weight, rounded_age\n and sex columns\n pctls: (DataFrame) with mean/sd values for adults\n\n Returns:\n merged Dataframe\n \"\"\"\n pct_df = 
pctls.drop(columns={\"age\"})\n merged_df = merged_df.merge(pct_df, on=[\"sex\", \"rounded_age\"], how=\"left\")\n return merged_df\n\n\ndef add_mzscored_to_merged_df_pediatrics(\n merged_df, wt_percentiles, ht_percentiles, bmi_percentiles\n):\n \"\"\"\n Merges mean/sd values onto pediatrics data for z-score calculations\n\n Parameters:\n merged_df: (DataFrame) with subjid, bmi, include_height, include_weight, rounded_age\n and sex columns\n wt_percentiles: (DataFrame) with weight percentiles\n ht_percentiles: (DataFrame) with height percentiles\n bmi_percentiles: (DataFrame) with bmi percentiles\n\n Returns:\n merged Dataframe\n \"\"\"\n\n merged_df = calculate_modified_zscore_pediatrics(\n merged_df, wt_percentiles, \"weight\"\n )\n merged_df = calculate_modified_zscore_pediatrics(\n merged_df, ht_percentiles, \"height\"\n )\n merged_df = calculate_modified_zscore_pediatrics(merged_df, bmi_percentiles, \"bmi\")\n\n return merged_df\n\n\ndef add_smoothed_zscore_to_merged_df_pediatrics(df_merged, df_percentiles):\n \"\"\"\n Adds smoothed Z score calculations to pediatrics data\n\n Parameters:\n df_merged: (DataFrame) merged subject observations including height, weight,\n and bmi\n df_percentiles: (DataFrame) combined WHO and CDC percentiles\n \"\"\"\n df_merged = calculate_smoothed_zscore_pediatrics(df_merged, df_percentiles)\n return df_merged\n\n\ndef bmi_stats(\n merged_df,\n out=None,\n include_min=True,\n include_mean=True,\n include_max=True,\n include_std=True,\n include_mean_diff=True,\n include_count=True,\n age_range=[20, 65],\n include_missing=False,\n):\n \"\"\"\n Computes summary statistics for BMI. Clean values are for BMIs computed when both\n the height and weight values are categorized by growthcleanr as \"Include\". Raw\n values are computed for all observations. Information is provided by age and sex.\n\n Parameters:\n merged_df: (DataFrame) with bmi, rounded_age and sex columns\n out: (ipywidgets.Output) to display the results, if provided\n include_min: (bool) Whether to include the minimum value column\n include_mean: (bool) Whether to include the mean value column\n include_max: (bool) Whether to include the maximum value column\n include_std: (bool) Whether to include the standard deviation column\n include_mean_diff: (bool) Whether to include the difference between the raw and\n clean mean value column\n include_count: (bool) Whether to include the count column\n age_range: (list) Two elements containing the minimum and maximum ages that should\n be included in the statistics\n include_missing: (bool) Whether to include the missing (0) heights and weights that\n impact raw columns\n\n Returns:\n If out is None, it will return a DataFrame. 
If out is provided, results will be\n displayed in the notebook.\n \"\"\"\n # Incoming data is float, not int\n merged_df[\"rounded_age\"] = merged_df[\"rounded_age\"].astype(int)\n\n if include_missing:\n age_filtered = merged_df[\n (merged_df.rounded_age >= age_range[0])\n & (merged_df.rounded_age <= age_range[1])\n ]\n else:\n age_filtered = merged_df[\n (merged_df.rounded_age >= age_range[0])\n & (merged_df.rounded_age <= age_range[1])\n & (merged_df.weight > 0)\n & (merged_df.height > 0)\n ]\n age_filtered[\"sex\"] = age_filtered.sex.replace(0, \"M\").replace(1, \"F\")\n agg_functions = []\n formatters = {}\n\n if include_min:\n agg_functions.append(\"min\")\n formatters[\"min_clean\"] = \"{:.2f}\".format\n formatters[\"min_raw\"] = \"{:.2f}\".format\n if include_mean:\n agg_functions.append(\"mean\")\n formatters[\"mean_clean\"] = \"{:.2f}\".format\n formatters[\"mean_raw\"] = \"{:.2f}\".format\n if include_max:\n agg_functions.append(\"max\")\n formatters[\"max_clean\"] = \"{:.2f}\".format\n formatters[\"max_raw\"] = \"{:.2f}\".format\n if include_std:\n agg_functions.append(\"std\")\n formatters[\"sd_clean\"] = \"{:.2f}\".format\n formatters[\"sd_raw\"] = \"{:.2f}\".format\n if include_count:\n agg_functions.append(\"count\")\n clean_groups = (\n age_filtered[age_filtered.include_both]\n .groupby([\"sex\", \"rounded_age\"])[\"bmi\"]\n .agg(agg_functions)\n )\n raw_groups = age_filtered.groupby([\"sex\", \"rounded_age\"])[\"bmi\"].agg(agg_functions)\n merged_stats = clean_groups.merge(\n raw_groups, on=[\"sex\", \"rounded_age\"], suffixes=(\"_clean\", \"_raw\")\n )\n if include_mean & include_count & include_mean_diff:\n merged_stats[\"count_diff\"] = (\n merged_stats[\"count_raw\"] - merged_stats[\"count_clean\"]\n )\n if include_std:\n merged_stats = merged_stats.rename(\n columns={\"std_raw\": \"sd_raw\", \"std_clean\": \"sd_clean\"}\n )\n if out is None:\n return merged_stats\n else:\n # Clear output on first update and all subsequent updates, see\n # https://github.com/jupyter-widgets/ipywidgets/issues/3260#issuecomment-907715980\n # Without out.outputs = (), will append only on first update\n out.outputs = ()\n out.append_display_data(Markdown(\"## Female\"))\n out.append_display_data(merged_stats.loc[\"F\"].style.format(formatters))\n out.append_display_data(Markdown(\"## Male\"))\n out.append_display_data(merged_stats.loc[\"M\"].style.format(formatters))\n\n\ndef calculate_modified_zscore_pediatrics(merged_df, percentiles, category):\n \"\"\"\n Adds a column to the provided DataFrame with the modified Z score for the provided\n category\n\n Parameters:\n merged_df: (DataFrame) with subjid, sex, weight and age columns\n percentiles: (DataFrame) CDC growth chart DataFrame with L, M, S values for the\n desired category\n category: (str) name of category\n\n Returns\n The dataframe with a new zscore column mapped with the z_column_name list\n \"\"\"\n pct_cpy = percentiles.copy()\n pct_cpy[\"half_of_two_z_scores\"] = (\n pct_cpy[\"M\"]\n * np.power((1 + pct_cpy[\"L\"] * pct_cpy[\"S\"] * 2), (1 / pct_cpy[\"L\"]))\n ) - pct_cpy[\"M\"]\n # Calculate an age in months by rounding and then adding 0.5 to have values that\n # match the growth chart\n merged_df[\"agemos\"] = np.around(merged_df[\"ageyears\"] * 12) + 0.5\n mswpt = merged_df.merge(\n pct_cpy[[\"Agemos\", \"M\", \"Sex\", \"half_of_two_z_scores\"]],\n how=\"left\",\n left_on=[\"sex\", \"agemos\"],\n right_on=[\"Sex\", \"Agemos\"],\n )\n z_column_name = {\"weight\": \"wtz\", \"height\": \"htz\", \"bmi\": \"bmiz\"}\n 
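For intuition about the calculation above: `half_of_two_z_scores` is the distance from the median M to the measurement lying two z-scores away under the LMS transform, M * (1 + 2*L*S)**(1/L) - M, so dividing a raw deviation by it rescales that deviation into a modified z-score. A self-contained numeric check with made-up LMS values (not real CDC growth-chart entries):

import numpy as np

L, M, S = -1.6, 16.0, 0.08  # hypothetical LMS triple for one age/sex row
half_of_two_z = M * np.power(1 + L * S * 2, 1 / L) - M
measurement = 20.0
print((measurement - M) / half_of_two_z)  # modified z-score, roughly 1.23 here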
mswpt[z_column_name[category]] = (mswpt[category] - mswpt[\"M\"]) / mswpt[\n \"half_of_two_z_scores\"\n ]\n return mswpt.drop(columns=[\"Agemos\", \"Sex\", \"M\", \"half_of_two_z_scores\"])\n\n\ndef calculate_smoothed_zscore_pediatrics(df_merged, df_percentiles):\n \"\"\"\n Add column to provided DataFrame with smoothed Z scores\n\n Parameters:\n df_merged: (DataFrame) with subjid, sex, weight, and age columns\n df_percentiles: (DataFrame) growth chart w/WHO and CDC L, M, S values for\n each measurement type\n\n Returns:\n DataFrame with smoothed zscore column for each measurement type\n \"\"\"\n df_pct = df_percentiles.copy()\n\n # Merge z scores into observations\n df = df_merged.merge(\n df_pct,\n how=\"left\",\n left_on=[\"agedays\", \"ageyears\", \"sex\"],\n right_on=[\"agedays\", \"age\", \"Sex\"],\n )\n\n for p, param in ((\"ht\", \"height\"), (\"wt\", \"weight\"), (\"bmi\", \"bmi\")):\n cdc_l_var = f\"cdc_{p}_l\"\n cdc_m_var = f\"cdc_{p}_m\"\n cdc_s_var = f\"cdc_{p}_s\"\n cdc_csd_pos_var = f\"cdc_{p}_csd_pos\"\n cdc_csd_neg_var = f\"cdc_{p}_csd_neg\"\n cdc_z_var = f\"cdc_{p}_z\"\n who_z_var = f\"who_{p}_z\"\n s_z_var = f\"{p}z\"\n\n # Assign CDC z scores\n df[cdc_z_var] = np.where(\n df[cdc_l_var] != 0,\n (\n (((df[param] / df[cdc_m_var]) ** df[cdc_l_var]) - 1)\n / (df[cdc_l_var] * df[cdc_s_var])\n ),\n (np.log(df[param] / df[cdc_m_var]) / df[cdc_s_var]),\n )\n\n # Assign WHO z scores\n df.loc[df[param] == df[cdc_m_var], who_z_var] = 0\n df.loc[df[param] > df[cdc_m_var], who_z_var] = (df[param] - df[cdc_m_var]) / (\n df[cdc_csd_pos_var] / 2\n )\n df.loc[df[param] < df[cdc_m_var], who_z_var] = (df[param] - df[cdc_m_var]) / (\n df[cdc_csd_neg_var] / 2\n )\n\n # Assign z scores, smoothing between 2-4\n df.loc[df[\"ageyears\"] <= 2, s_z_var] = df[who_z_var]\n df.loc[df[\"ageyears\"].between(2, 4, inclusive=\"neither\"), s_z_var] = (\n (df[who_z_var] * df[\"whoweight\"]) + (df[cdc_z_var] * df[\"cdcweight\"])\n ) / 2\n df.loc[df[\"ageyears\"] >= 4, s_z_var] = df[cdc_z_var]\n\n return df\n","sub_path":"growthviz/sumstats.py","file_name":"sumstats.py","file_ext":"py","file_size_in_byte":10543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"17275685","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('register/', views.registerUser, name='register'),\n path('login/', views.hadleLogin, name='login'),\n path('profile/', views.AddProfile.as_view(), name=\"profile\"),\n path('members/', views.members, name='members'),\n path('delete_member//', views.delete_members, name=\"delete_member\"),\n path('update//', views.Update_member, name='update'),\n path('logout/', views.handleLogout, name='logout'),\n path('usermembers/', views.user_members_add, name='user_members'),\n path('agent/', views.agents, name=\"agents\"),\n path('search/', views.SearchView.as_view(), name=\"search\"),\n \n \n]","sub_path":"mysite/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"225408189","text":"import mariadb\nimport dbcreds\nimport traceback\n\n\ndef openConnection():\n try:\n return mariadb.connect(\n user=dbcreds.user,\n password=dbcreds.password,\n host=dbcreds.host,\n port=dbcreds.port,\n database=dbcreds.database,\n )\n\n except:\n print(\"Error opening connection to DB!\")\n traceback.print_exc()\n return None\n\n\ndef closeConnection(conn):\n if(conn == None):\n return True\n try:\n conn.close()\n return True\n\n except:\n print(\"Error closing connection to DB!\")\n traceback.print_exc()\n return False\n\n\ndef openCursor(conn):\n # ! Not sure I need this here since the except block will close it!?\n # if(conn == None):\n # print('No connection to database, closing your connection!')\n # return None\n try:\n return conn.cursor()\n except:\n print(\"Error opening cursor on DB, closing connection!\")\n traceback.print_exc()\n return None\n\n\ndef closeCursor(cursor):\n if(cursor == None):\n return True\n try:\n cursor.close()\n return True\n\n except:\n print(\"Error closing cursor on DB!\")\n traceback.print_exc()\n return False\n\n\ndef closeAll(conn, cursor):\n closeCursor(cursor)\n closeConnection(conn)\n print('Cursor and connection closed!')\n\n\ndef loopItems(cursor, rows):\n headers = [i[0] for i in cursor.description]\n result = []\n for row in rows:\n result.append(dict(zip(headers, row)))\n return result\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"288798565","text":"'''\n The string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)\n\nP A H N\nA P L S I I G\nY I R\n\nAnd then read line by line: \"PAHNAPLSIIGYIR\"\n\nWrite the code that will take a string and make this conversion given a number of rows:\n\nstring convert(string text, int nRows);\n\nconvert(\"PAYPALISHIRING\", 3) should return \"PAHNAPLSIIGYIR\". 
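An aside before the solution in this record: the most direct way to see the conversion is to simulate the zigzag walk row by row. This sketch is our illustration of that simpler approach, not part of the original file:

def convert_naive(s, nRows):
    # Walk down the rows, then bounce back up, appending each character
    if nRows == 1:
        return s
    rows, r, step = [''] * nRows, 0, 1
    for ch in s:
        rows[r] += ch
        if r == 0:
            step = 1
        elif r == nRows - 1:
            step = -1
        r += step
    return ''.join(rows)

print(convert_naive('PAYPALISHIRING', 3))  # PAHNAPLSIIGYIR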
\n'''\n\nfrom itertools import zip_longest\n\n\ndef alterJoin(a, b):\n return ''.join((x or '') + (y or '') for x, y in zip_longest(a, b))\n\nclass Solution:\n # @return a string\n def convert(self, s, nRows):\n if nRows == 1:\n return s\n length = 2 * nRows - 2\n array = [s[x::length] for x in range(length)]\n for i in range(1, length // 2):\n array[i] = alterJoin(array[i], array[length - i])\n return ''.join(array[:nRows])\n\nimport unittest\n\nclass TestZigZagConversion(unittest.TestCase):\n def testZigZag(self):\n sol = Solution()\n self.assertEqual(\"PAHNAPLSIIGYIR\", sol.convert(\"PAYPALISHIRING\", 3))\n self.assertEqual(\"ACBD\", sol.convert(\"ABCD\", 2))\n self.assertEqual(\"A\", sol.convert(\"A\", 1))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_zig_zag_conversion.py","file_name":"test_zig_zag_conversion.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"305316033","text":"def find_missing(list1, list2):\n \"\"\"\n This function returns the extra number in the second list that is not in the first list when the input is two \n lists and returns 0 if the lists are the same or if they are empty\n \"\"\"\n\n missing = list(set(list2).difference(set(list1)))\n if not missing:\n missing.append(0)\n return missing[0]\n","sub_path":"Day_4/FindMissing/find_missing.py","file_name":"find_missing.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"496191609","text":"import pygame\r\npygame.init()\r\n\r\nwindow = pygame.display.set_mode((360,740))\r\n\r\npygame.display.set_caption(\"movement\")\r\n\r\npath = pygame.image.load('path.png')\r\nwalking = [pygame.image.load('rightfoot.png'), pygame.image.load('leftfoot.png')]\r\nrightfoot = pygame.image.load('rightfoot.png')\r\nleftfoot = pygame.image.load('leftfoot.png')\r\n\r\n\r\nx = 140\r\ny = 480\r\ny2 = 0\r\nwidth = 64\r\nheight = 64\r\nwalking = False\r\ncolor = (0,255,0)\r\nmov = 10\r\ncount = 0\r\n\r\ndef gameDisplay():\r\n\t\tglobal count\r\n\t\twindow.blit(path, (0,y2))\r\n\t\twindow.blit(rightfoot, (x,y))\r\n\t\tpygame.display.update()\r\n\t\t\t\t\r\n\r\n\r\n#main\r\nrun = True\r\nwhile run:\r\n\t\tpygame.time.delay(100)\r\n\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\trun = False\r\n\t\t\r\n\t\tkeys = pygame.key.get_pressed()\r\n\t\tif keys[pygame.K_SPACE]:\r\n\t\t\ty2 -= mov\r\n\r\n\t\t\t\t\r\n\t\tgameDisplay()\r\n\t\t\t\t\r\npygame.quit()\r\n","sub_path":"movement/movement2.py","file_name":"movement2.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"277728906","text":"'''\n Problem description\n Programmers: Phone Number List\n Given a list of phone numbers, return False if any number is used as a prefix of another number, and True otherwise.\n Solution strategy\n 1. It can be solved with sorting, zip(), and startswith().\n Sorting the strings orders them by their leading characters.\n Zip two equally sized slices (index 0 through the second-to-last + index 1 through the last) to pair up neighbors.\n Because the list is sorted, checking only adjacent pairs with startswith() is enough to get the result.\n 2. 
It can be solved with a hash-based comparison.\n Build a dictionary keyed by the phone numbers.\n Then take each number again, growing a prefix one digit at a time, and return False if that prefix exists as a dictionary key and differs from the number itself.\n Otherwise, return True.\n'''\ndef solution(phone_book):\n phone_book.sort()\n for p1, p2 in zip(phone_book[:-1], phone_book[1:]):\n if p2.startswith(p1):\n return False\n return True\n\ndef solution(phone_book):\n answer = True\n hash = dict()\n\n for num in phone_book:\n hash[num] = 1\n \n for num in phone_book:\n tmp = \"\"\n for n in num:\n tmp += n\n if tmp in hash and tmp != num:\n answer = False\n return answer\n","sub_path":"week9/HongheeLee/PGM_전화번호목록_210226.py","file_name":"PGM_전화번호목록_210226.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"435140765","text":"from data.tasks import WebisCLS10_task_generator, WebisCLS10_crossdomain_crosslingual_task_generator\nfrom experiments.common import DCIclassify\nfrom model.dci import DCI\nfrom model.pivotselection import pivot_selection\nimport os\nfrom time import time\nfrom util.results import Result\n\ndcf='cosine'\nnpivots = 450\n\noptimize = True\ndataset_home='../datasets/Webis-CLS-10'\n\nrperf = Result(['dataset', 'task', 'method', 'acc', 'pivot_t', 'dci_t', 'svm_t', 'test_t'])\nfor source, target, oracle, taskname in WebisCLS10_crossdomain_crosslingual_task_generator(os.path.abspath(dataset_home)):\n\n # pivot selection\n tinit = time()\n s_pivots, t_pivots = pivot_selection(npivots, source.X, source.y, source.U, target.U,\n source.V, target.V,\n oracle=oracle, phi=30, show=min(10, npivots), cross=True)\n pivot_time = time() - tinit\n print('pivot selection took {:.3f} seconds'.format(pivot_time))\n\n dci = DCI(dcf=dcf, unify=True, post='normal')\n acc, dci_time, svm_time, test_time = DCIclassify(source, target, s_pivots, t_pivots, dci, optimize=optimize)\n\n\n rperf.add(dataset='Webis-CLS-10', task=taskname, method=str(dci),\n acc=acc,\n pivot_t=pivot_time, dci_t=dci_time, svm_t=svm_time, test_t=test_time)\n\n rperf.dump('./DCI.{}.m{}.opt{}.WebisCLS10.crossdom_crosslin.acc'.format(dcf, npivots, optimize))\n 
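A small aside on the experiment loop in this record: the repeated `tinit = time(); ...; elapsed = time() - tinit` pattern can be factored into a context-manager timer. A sketch under that assumption (the `stopwatch` helper is ours, not part of this codebase):

from contextlib import contextmanager
from time import time

@contextmanager
def stopwatch(label):
    # Print the wall-clock time taken by the enclosed block
    start = time()
    yield
    print('{} took {:.3f} seconds'.format(label, time() - start))

# usage: with stopwatch('pivot selection'): s_pivots, t_pivots = pivot_selection(...)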
rperf.pivot(grand_totals=True)\n\n\n\n\n\n","sub_path":"src/cross_domain_cross_lingual_sentiment.py","file_name":"cross_domain_cross_lingual_sentiment.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"81514099","text":"\"\"\"Packaging settings.\"\"\"\n\n\nfrom codecs import open\nfrom os.path import abspath, dirname, join\n\nfrom setuptools import setup, find_packages\n\nthis_dir = abspath(dirname(__file__))\nwith open(join(this_dir, 'README.rst'), encoding='utf-8') as file:\n long_description = file.read()\n\n\nsetup(\n name = 'mi',\n version = '1.0.0',\n description = 'skeleton cli in Python.',\n long_description = long_description,\n author = 'Sloan Liu',\n license = 'UNLICENSE',\n classifiers = [\n 'Intended Audience :: Anyone',\n 'Topic :: Python',\n 'License :: Public Domain',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.4',\n ],\n keywords = ['cli', 'skele', 'mi'],\n packages = find_packages(exclude=['docs', 'tests*']),\n install_requires = ['docopt'],\n entry_points = {\n 'console_scripts': [\n 'mi=mi.__main__:main'],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"146881814","text":"\"\"\"\n !/usr/bin/env python3.6\n -*- coding: utf-8 -*-\n --------------------------------\n Description :\n 1. Crawl the article\n 2. Download the images\n 3. Replace the image links\n 4. Export to pdf\n --------------------------------\n @Time : 2018/12/21 21:22\n @File : GZH.py\n @Software: PyCharm\n --------------------------------\n @Author : lixj\n @contact : lixj_zj@163.com\n\"\"\"\n\nimport requests\nfrom lxml import etree\nimport logging\nimport random\nimport re\nimport os\nimport configure.userAgent\n\n# logging.basicConfig sets the log output format and destination\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n\n# pick a random IP address from the pool\ndef getRandomIP():\n with open(\"ipPool.txt\", \"r\") as f: # build the IP pool\n content = f.read()\n contList = content.split(\"', '\")\n ipList = contList[1:len(contList) - 1]\n random_ip = random.choice(ipList)\n proxy_ip = \"http://\" + random_ip\n proxies = {\"http\": proxy_ip}\n return proxies\n\n\ndef downloadImg(imgLinkList, imgPath):\n if not os.path.exists(imgPath):\n os.makedirs(imgPath)\n os.chdir(imgPath) # switch to the image download directory\n for imgNum, imgLink in enumerate(imgLinkList):\n img = requests.get(imgLink, headers=headers, proxies=proxies)\n suffix = imgLink.split(\"=\")[-1]\n try:\n with open(str(imgNum) + \".\" + suffix, \"wb\") as f:\n f.write(img.content)\n logging.info(\"Downloaded img %s successfully!\" % str(imgNum))\n except Exception as e:\n logging.error(str(e))\n\n\ndef getImgLinkList(url):\n req = requests.get(url, headers=headers, proxies=proxies)\n struct = etree.HTML(req.text)\n # collect all image URLs\n xPath = \"//img/@data-src\" # match img tags at any depth that carry a data-src attribute and grab the links\n imgLinkList = struct.xpath(xPath)\n return imgLinkList\n\n\ndef downloadHtml(url, htmlPath):\n try:\n req = requests.get(url, headers=headers, proxies=proxies)\n struct = etree.HTML(req.text)\n xPath = \"//h2/text()\"\n title = struct.xpath(xPath)\n htmlName = title[0].replace(\"\\\\n\", \"\").strip()\n with open(htmlPath + htmlName + \".html\", \"w+\", encoding=\"utf-8\") as f:\n f.write(req.text)\n return htmlName\n except Exception as e:\n logging.error(str(e))\n\n\ndef replaceImg(htmlPath, htmlName, 
imgPath):\n pathList = os.listdir(imgPath)\n pathList.sort(key=lambda x: int(x.split(\".\")[0])) # read in numeric order\n\n with open(htmlPath + htmlName + \".html\", \"r+\", encoding=\"utf-8\") as f:\n html = f.read()\n pattern = r'<img.*? />'\n imgre = re.compile(pattern)\n imglist = re.findall(imgre, html)\n\n for img, path in zip(imglist, pathList):\n imgTagList = img.split(\" />\")\n fullImgPath = imgPath + \"\\\\\" + path\n newImgTag = imgTagList[0] + \"src=\" + \"\\\"\" + fullImgPath + \"\\\"\" + \" />\"\n if html.__contains__(img):\n newHtml = html.replace(img, newImgTag)\n html = newHtml\n return html\n\n\ndef writeImgToNewHTML(newHtmlPath, html, htmlName):\n with open(newHtmlPath + htmlName + \".html\", \"w+\", encoding=\"utf-8\") as f:\n f.write(html)\n\n\nglobal proxies, headers\nuserAgentMiddleware = configure.userAgent.randomUserAgentMiddleware\nheaders = userAgentMiddleware.getRandomHeaders()\n# random proxy IP\nproxies = getRandomIP()\n\nif __name__ == '__main__':\n # define constants\n imgPath = \"F:\\\\GZH\\\\img\"\n htmlPath = \"F:\\\\GZH\\\\\"\n newHtmlPath = \"F:\\\\GZH\\\\\"\n\n url = \"https://mp.weixin.qq.com/s?timestamp=1546254005&src=3&ver=1&signature=64KOvajKkM5b-oRNW0N-Foy2OKtwxDVyV58DiofRbumRAlLgMdKssCvwMw*htwxliMjBveSD3ATjXrL1IOV4DoMgoX261NC*lK0*5lztLB0P7k1DZRwsibekTLRXQPDtGwegLs-O0CCVnmCxyxbceEeqTKNwfcGMjlNZD*8pDlc=\"\n\n # download the HTML file\n htmlName = downloadHtml(url, htmlPath)\n\n # download the images\n imgList = getImgLinkList(url)\n downloadImg(imgList, imgPath)\n\n # replace the images and write the new HTML\n afterReplaceImgHtml = replaceImg(htmlPath, htmlName, imgPath)\n writeImgToNewHTML(newHtmlPath, afterReplaceImgHtml, htmlName)\n","sub_path":"Spider/GZH/GZH.py","file_name":"GZH.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"512306878","text":"from django.shortcuts import render\nfrom BoxApi.models.TokenModel import Token\nimport logging\nlogger = logging.getLogger('boxApiLog')\n\ndef boxconnectorTop(request):\n logger.info(\"Start searching for registered access tokens\")\n tokenDict ={\n 'tokens':Token.objects.all()\n }\n logger.info(tokenDict)\n return render(request, 'BoxConnectorTopTemplate.html', context=tokenDict)","sub_path":"BoxApi/View/BoxConnectorTopView.py","file_name":"BoxConnectorTopView.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"150987162","text":"# -*- coding: utf-8 -*-\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas\nfrom statsmodels.formula.api import ols\nfrom statsmodels.stats.anova import anova_lm\n\nx = np.linspace(-5, 5, 20)\n\nnp.random.seed(1)\n\ny = -5 + 3*x + 4 * np.random.normal(size=x.shape)\n\nplt.figure(figsize=(5, 4))\nplt.plot(x, y, 'o')\n\n\n\n\ndata = pandas.DataFrame({'x': x, 'y': y})\nmodel = ols(\"y ~ x\", data).fit()\n\nprint(model.summary())\n\nprint('\\nANOVA results')\nprint(anova_lm(model))\n\n\noffset, coef = model._results.params\nplt.plot(x, x*coef + offset)\nplt.xlabel('x')\nplt.ylabel('y')\n\nplt.show()","sub_path":"regression1.py","file_name":"regression1.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"153934632","text":"from jarbas_hive_mind import get_listener\nfrom jarbas_hive_mind.configuration import CONFIGURATION\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description='Start HiveMind as a server')\n parser.add_argument(\"--port\", 
help=\"HiveMind port number\", type=int)\n args = parser.parse_args()\n config = CONFIGURATION\n listener = get_listener()\n listener.load_config(config)\n # Replace defined values\n if args.port is not None:\n listener.port = args.port\n listener.listen()\n\nif __name__ == '__main__':\n main()\n","sub_path":"jarbas_hive_mind/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"562741336","text":"from pathlib import Path\nimport pandas as pd\nfrom rdkit.Chem import MolFromSmiles\nimport numpy as np\nimport torch\nfrom scipy.sparse import coo_matrix\nimport torch\nimport torch.sparse as sparse\n\ndef one_of_k_encoding(x, allowable_set):\n if x not in allowable_set:\n raise Exception(\"input {0} not in allowable set{1}:\".format(\n x, allowable_set))\n return list(map(lambda s: x == s, allowable_set))\n\n\ndef one_of_k_encoding_unk(x, allowable_set):\n \"\"\"Maps inputs not in the allowable set to the last element.\"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))\n\ndef atom_features(atom,\n bool_id_feat=False,\n explicit_H=False,\n use_chirality=False):\n \"\"\"Helper method used to compute per-atom feature vectors.\n\n Many different featurization methods compute per-atom features such as ConvMolFeaturizer, WeaveFeaturizer. This method computes such features.\n\n Parameters\n ----------\n bool_id_feat: bool, optional\n Return an array of unique identifiers corresponding to atom type.\n explicit_H: bool, optional\n If true, model hydrogens explicitly\n use_chirality: bool, optional\n If true, use chirality information.\n \"\"\"\n if bool_id_feat:\n return np.array([atom_to_id(atom)])\n else:\n from rdkit import Chem\n results = one_of_k_encoding_unk(\n atom.GetSymbol(),\n [\n 'C',\n 'N',\n 'O',\n 'S',\n 'F',\n 'Si',\n 'P',\n 'Cl',\n 'Br',\n 'Mg',\n 'Na',\n 'Ca',\n 'Fe',\n 'As',\n 'Al',\n 'I',\n 'B',\n 'V',\n 'K',\n 'Tl',\n 'Yb',\n 'Sb',\n 'Sn',\n 'Ag',\n 'Pd',\n 'Co',\n 'Se',\n 'Ti',\n 'Zn',\n 'H', # H?\n 'Li',\n 'Ge',\n 'Cu',\n 'Au',\n 'Ni',\n 'Cd',\n 'In',\n 'Mn',\n 'Zr',\n 'Cr',\n 'Pt',\n 'Hg',\n 'Pb',\n 'Unknown'\n ]) + one_of_k_encoding(atom.GetDegree(),\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + \\\n one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6]) + \\\n [atom.GetFormalCharge(), atom.GetNumRadicalElectrons()] + \\\n one_of_k_encoding_unk(atom.GetHybridization(), [\n Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.\n SP3D, Chem.rdchem.HybridizationType.SP3D2\n ]) + [atom.GetIsAromatic()]\n # In case of explicit hydrogen(QM8, QM9), avoid calling `GetTotalNumHs`\n if not explicit_H:\n results = results + one_of_k_encoding_unk(atom.GetTotalNumHs(),\n [0, 1, 2, 3, 4])\n if use_chirality:\n try:\n results = results + one_of_k_encoding_unk(\n atom.GetProp('_CIPCode'),\n ['R', 'S']) + [atom.HasProp('_ChiralityPossible')]\n except:\n results = results + [False, False\n ] + [atom.HasProp('_ChiralityPossible')]\n\n return np.array(results)\n\n\nclass molecule(object):\n def __init__(self, data, smile_col, target_cols, norm=True):\n self.mol = self._smile2mol(data[smile_col])\n self.targets = self._extract_targets(data,target_cols)\n self.target_cols = target_cols\n self.mol_props = self._properties_matrix() \n self.norm=norm\n if norm:\n self.adj_mat = self._normalise_adj()\n else:\n self.adj_mat = 
self._adj_mat()\n \n \n def _smile2mol(self, smile):\n return MolFromSmiles(smile)\n \n def _extract_targets(self, data, target_cols):\n targets ={}\n for t in target_cols:\n targets[t] = data[t]\n return targets\n \n def _properties_matrix(self):\n mol_props = [None]* self.mol.GetNumAtoms()\n for atom in self.mol.GetAtoms():\n mol_props[atom.GetIdx()] = atom_features(atom)\n return torch.tensor(mol_props, dtype=torch.float32, requires_grad=True)\n \n def _bond_index(self):\n mol = self.mol\n edges = self._self_bonds()\n for bond in iter(mol.GetBonds()):\n edges.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()])\n edges.append([bond.GetEndAtomIdx(), bond.GetBeginAtomIdx()])\n bond_index = torch.tensor(edges, dtype=torch.long).t().contiguous()\n return edges,bond_index\n \n def _adj_mat(self):\n _,b_i = self._bond_index()\n adj = torch.sparse_coo_tensor(b_i, torch.ones(b_i.shape[1]))\n return adj\n\n def _normalise_adj(self):\n adj_mat = self._adj_mat()\n degrees = sparse.sum(adj_mat,dim=1)\n d_ii = list(range(len(degrees)))\n D = torch.sparse_coo_tensor([d_ii,d_ii],degrees.values())\n D_inv_sqrt = D.pow(-0.5)\n norm_adj_mat = D_inv_sqrt.mm(adj_mat.to_dense()).mm(D_inv_sqrt.to_dense()) # need to get rid of dense conversions\n return norm_adj_mat \n \n def _self_bonds(self):\n self_edges = [[i,i] for i in range(self.mol.GetNumAtoms())]\n return self_edges\n \n def get_feats(self):\n return self.mol_props\n \n def get_AM(self):\n return self.adj_mat\n \n def get_targets(self):\n return self.targets\n \n def get_model_inputs(self):\n return self.get_feats(), self.get_AM(), self.get_targets(), self.target_cols\n def __repr__(self):\n return self.mol.__repr__() # improve this\n \n def __str__(self):\n pass\n \n","sub_path":"as_chem/feat/as_mol.py","file_name":"as_mol.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"435140765","text":"# alien.py\r\n# Robyn Murray 3.29.19\r\n# Version 1\r\n\r\nimport pygame\r\nfrom pygame.sprite import Sprite\r\n\r\n# creates an alien class for alien creation and handling\r\nclass Alien(Sprite):\r\n # function to create an alien\r\n def __init__(self, ai_settings, screen):\r\n #inherit from sprite\r\n super(Alien, self).__init__()\r\n self.screen = screen\r\n self.ai_settings = ai_settings\r\n \r\n # load the alien ship image and get its rect\r\n self.graphic = pygame.image.load('images/alien1.png')\r\n self.image = pygame.transform.scale(self.graphic, (55, 55))\r\n self.rect = self.image.get_rect()\r\n \r\n # start alien near top left\r\n self.rect.x = self.rect.width\r\n self.rect.y = self.rect.height\r\n \r\n # store alien's position\r\n self.x = float(self.rect.x)\r\n \r\n # draw alien to screen at current position \r\n def blitme(self):\r\n self.screen.blit(self.image, self.rect)\r\n \r\n # check if alien has hit an edge\r\n def check_edge(self):\r\n # get screens rect \r\n screen_rect = self.screen.get_rect()\r\n # check if hit right edge\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n # check if hit left edge\r\n elif self.rect.left <= 0:\r\n return True\r\n \r\n # move alien to right or left\r\n def update(self):\r\n # increment x (by velocity factor times the direction incrementer)\r\n self.x += (self.ai_settings.alien_velocity_factor * self.ai_settings.fleet_dir)\r\n # set new position\r\n self.rect.x = 
self.x","sub_path":"alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"546782001","text":"\"\"\"\nIan\n9/10/19\nthis program sums the sin^2 and cos^2 for an entered number\nit also calculates the distance given points on a\ncartesian coordinate plane\n1\nwhen computing force = g*mass1*mass2/radius*radius,\nthe computer does not see that both radii are under the\nvinculum, so it multiplies radius * 1/radius, getting 1.\nthe final answer will just be g*mass1*mass 2.\na change to the program would be:\nforce=(g*mass1*mass2)/(radius*radius)\nan alternative would be\nforce=g*mass1*mass2/radius/radius\nOMH\n\"\"\"\n#2\nimport math\nθ = float(input(\"sum of sine^2 and cosine^2\\n\"))\noutput = ((math.sin(θ))**2)+((math.cos(θ))**2)\nprint(\"Sum: \",output)\n'''\noutput is not always 1 since there is some rounding\nthat must be done when calculating the sine and cosine.\nwhen both round down, the sum will be just under 1\n'''\nprint(\"\\n\\n\\ndistance on cartesian coordinate plane\")\nx = float(input(\"\\nx\\n\"))\ny = float(input(\"y\\n\"))\ndistance = ((x**2)+(y**2))**(1/2)\nprint(\"distance: \",distance)","sub_path":"Fall/homework/basic_math_formulas.py","file_name":"basic_math_formulas.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"444739458","text":"import random\n\nmsg = [\"That's not a nice thing to say\",\"Someone once told me if you don't have anything nice to say, don't say anything at all\",\n \"Try to be nicer\",\"Ooh! That might hurt someone's feelings..\",\"Snap!Looks like you are angry...\"]\nch = 'y'\ncount = 0\nbadCount = 0;\nnewBad = []\ni = 0\n\nwith open('bad.txt','r') as f:\n for word in f:\n newBad.append(word[:len(word)-1])\n\nwhile (ch=='y'):\n fileName = input('Enter file name to Upload(if in different directory, entire absolute path)')\n count = 0\n flag = 0\n badCount = 0\n with open(fileName,'r') as f:\n for bad_sent in f:\n for i in bad_sent.split():\n count = count+1\n if i in newBad:\n badCount = badCount+1\n flag = 1\n j = random.randint(0,len(msg)-1)\n if flag==1:\n print('------------------------------------------------------------------------------')\n print('\\tBad word(s) detected')\n print('------------------------------------------------------------------------------')\n print (msg[j])\n print('------------------------------------------------------------------------------')\n print ('Number of words ', count)\n print('Number of Bad words ',badCount)\n print('Percentage bad words ',(badCount/count)*100,'%')\n print('------------------------------------------------------------------------------')\n print('------------------------------------------------------------------------------')\n else:\n print (\"\\nYour file was fine\")\n ch = input('Do you wish to continue?')\n","sub_path":"Profanity.py","file_name":"Profanity.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"595378411","text":"'''\nSUMMARY:\n\ninput > weight > hidden layer 1 (activation function) > weight >hidden l 2 (activation function) > weight > output layer\n\ncompare output to intended output > cost function (cross entropy)\noptimization function (optimizer) > minimize cost (AdamOptimizer ... 
SGD, AdaGrad)\n\nbackpropagation\n\nfeed forward + backprop = epoch\n\n'''\n\nimport tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data\", one_hot=True) # one_hot \n\n''' under one_hot = True, one element is hot, the rest are zero (cold)\n 10 classes 0..9\n\n0 = [1,0,0,0,0,0,0,0,0,0,0,0]\n1 = [0,1,0,0,0,0,0,0,0,0,0,0]\n2 = [0,0,1,0,0,0,0,0,0,0,0,0]\n\n'''\n\nn_nodes_hl1=500 # number of nodes in hidden layer 1\nn_nodes_hl2=500 # number of nodes in hidden layer 2\nn_nodes_hl3=500 # number of nodes in hidden layer 3\n\nn_classes = 10 # number of classes\nbatch_size = 100 # batches of 100 features are feed at a time and manipulate the weights\n\n# heigh x width 784\nx = tf.placeholder('float',[None, 784]) #input data \ny = tf.placeholder('float')\n\ndef neural_network_model(data):\n\n\thidden_1_layer = {'weights': tf.Variable(tf.random_normal([784,n_nodes_hl1])), \n\t\t\t\t\t 'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}\n\n\thidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2])), \n\t\t\t \t\t 'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}\n\n\thidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3])), \n\t\t\t\t \t 'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}\n\n\toutput_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3,n_classes])), \n\t\t\t\t 'biases': tf.Variable(tf.random_normal([n_classes]))}\n\n\t# (input_data * weihgts) + biases\n\tl1 = tf.add( hidden_1_layer['biases'] , tf.matmul(data, hidden_1_layer['weights']) )\n\tl1 = tf.nn.relu(l1) #rectified linear, activation function\n\n\tl2 = tf.add( hidden_2_layer['biases'] , tf.matmul(l1, hidden_2_layer['weights']) )\n\tl2 = tf.nn.relu(l2) #rectified linear, activation function\n\n\tl3 = tf.add( hidden_3_layer['biases'] , tf.matmul(l2, hidden_3_layer['weights']) )\n\tl3 = tf.nn.relu(l3) #rectified linear, activation function\n\n\toutput = output_layer['biases'] + tf.matmul(l3, output_layer['weights']) \n\treturn (output)\n\t\ndef\ttrain_neural_network(x):\n\tprediction=neural_network_model(x)\n\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction,y) )\n\n\t# learning_rate = 0.001\n\toptimizer = tf.train.AdamOptimizer().minimize(cost)\n\n\t# cycles feed forward + backprop\n\thm_epochs = 10\t# how many epochs\n\n\twith tf.Session() as sess:\n\t\tsess.run(tf.initialize_all_variables())\n\t\t\n'''\t\t# training the network\n\t\tfor epoch in range(hm_epochs):\n\t\t\tepoch_loss = 0\n\t\t\tfor _ in range(int(mnist.train.num_examples/batch_size)):\n\t\t\t\tepoch_x, epoch_y = mnist.train.next_batch(batch_size)\n\t\t\t\t_, c = sess.run([optimizer, cost], feed_dict={x:epoch_x, y:epoch_y})\n\t\t\t\tepoch_lost +=c\n\t\t\tprint('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)\n\n\t\tcorrect = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))\n\t\taccuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n\t\tprint('Accuracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))\n'''\n\ntrain_neural_network(x)\n","sub_path":"deep-net.py","file_name":"deep-net.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"105485232","text":"import os\nfrom unittest.mock import patch, MagicMock\n\nimport pytest\nimport requests_mock\n\nimport geetiles\nfrom geetiles.services.redis_service import RedisService\n\nLAYER = {\n \"data\":{\n 
\"id\":\"e8f9a96a-0a2c-4cf9-b904-36531f23a8b2\",\n \"type\":\"layer\",\n \"attributes\":{\n \"name\":\"TML_sld_layer\",\n \"slug\":\"TML_sld_layer\",\n \"dataset\":\"e9e91ed6-22b0-4280-8710-34589b0b1336\",\n \"description\":\"\",\n \"application\":[\n \"rw\"\n ],\n \"iso\":[\n \n ],\n \"provider\":\"gee\",\n \"userId\":\"58fde4354eecd9073107af0f\",\n \"default\":True,\n \"protected\":False,\n \"published\":True,\n \"env\":\"preproduction\",\n \"layerConfig\":{\n \"body\":{\n \"sldValue\":\"\",\n \"styleType\":\"sld\"\n },\n \"assetId\":\"projects/wri-datalab/TML\",\n \"isImageCollection\":True,\n \"provider\":\"gee\",\n \"position\":\"mosaic\"\n },\n \"legendConfig\":{\n \"type\":\"choropleth\",\n \"items\":[\n {\n \"name\":\"0-20\",\n \"color\":\"#edf8e9\",\n \"id\":0\n },\n {\n \"name\":\"20-40\",\n \"color\":\"#a0d796\",\n \"id\":1\n },\n {\n \"name\":\"40-60\",\n \"color\":\"#68b869\",\n \"id\":2\n },\n {\n \"name\":\"60-80\",\n \"color\":\"#2c8e49\",\n \"id\":3\n },\n {\n \"name\":\"80-100\",\n \"color\":\"#076f2f\",\n \"id\":4\n }\n ]\n },\n \"interactionConfig\":{\n \n },\n \"applicationConfig\":{\n \n },\n \"staticImageConfig\":{\n \n },\n \"createdAt\":\"2021-10-14T13:42:22.034Z\",\n \"updatedAt\":\"2021-10-14T13:42:22.034Z\"\n }\n }\n}\n\n\n@pytest.fixture\ndef client():\n app = geetiles.app\n app.config['TESTING'] = True\n client = app.test_client()\n\n yield client\n\n\n@patch(\"ee.data.getTileUrl\")\n@patch(\"ee.Image\")\n@patch(\"geetiles.services.storage_service.uuid\")\n@patch(\"geetiles.services.storage_service.storageClient\")\n@requests_mock.mock(kw='mocker')\ndef test_get_tile_mosaic_cold_cache(storageClient, uuid, Image, getTileUrl, client, mocker):\n # Populate a bunch of internal mocks to avoid calls to actual GEE servers\n uuid.return_value = os.getcwd()+'/tests/tile'\n Image.return_value.sldStyle.return_value.getMapId.return_value = {\n 'mapid': 'projects/earthengine-legacy/maps/abcd-efgh',\n 'token': '', 'tile_fetcher': MagicMock(), 'image': MagicMock()\n }\n\n getTileUrl.return_value = 'https://picsum.photos/200/300'\n bucket = storageClient().get_bucket\n blob = bucket().blob\n blob().public_url = 'https://my-tile.server/1234/4/7/6.png'\n\n # Mock request to layer MS\n get_layer = mocker.get(os.getenv('GATEWAY_URL') + '/v1/layer/1234', status_code=200, json=LAYER)\n\n # Clean Redis cache\n RedisService.expire_layer('1234')\n\n # Call to the actual endpoint\n response = client.get('/api/v1/layer/1234/tile/gee/4/7/6')\n\n uuid.assert_called()\n Image().sldStyle.assert_called()\n Image().sldStyle().getMapId.assert_called()\n getTileUrl.assert_called()\n\n bucket.assert_called_with('gee-tiles')\n\n blob.assert_called_with(\n '1234/4/7/6/tile_projects/earthengine-legacy/maps/abcd-efgh.png')\n blob().upload_from_file.assert_called()\n blob().make_public.assert_called()\n\n assert response.headers['Location'] == 'https://my-tile.server/1234/4/7/6.png'\n assert response.status_code == 302\n\n assert RedisService.get('/api/v1/layer/1234/tile/gee/4/7/6') == b'https://my-tile.server/1234/4/7/6.png'\n\n assert get_layer.called\n assert get_layer.call_count == 1\n\n\ndef test_get_tile_mosaic_warm_cache(client):\n # Populate Redis cache\n RedisService.set('/api/v1/layer/1234/tile/gee/4/7/6', b'https://my-tile.server/1234/4/7/6.png')\n\n # Call to the actual endpoint\n response = client.get('/api/v1/layer/1234/tile/gee/4/7/6')\n\n assert response.headers['Location'] == 'https://my-tile.server/1234/4/7/6.png'\n assert response.status_code == 302\n\n assert 
RedisService.get('/api/v1/layer/1234/tile/gee/4/7/6') == b'https://my-tile.server/1234/4/7/6.png'\n","sub_path":"tests/get_tiles_mosaic.py","file_name":"get_tiles_mosaic.py","file_ext":"py","file_size_in_byte":5360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"461904809","text":"from .base import *\n\nsecrets = json.load(open(os.path.join(SECRETS_DIR, 'dev.json')))\n\nDEBUG = True\n\nALLOWED_HOSTS = secrets['ALLOWED_HOSTS']\n\nWSGI_APPLICATION = 'config.wsgi.dev.application'\n\n# Fill in the DATABASES setting below\n# using the contents of .secrets/dev.json\nDATABASES = secrets['DATABASES']\n\n# django-storages\n# ~/.aws/credentials\n# DEFAULT_FILE_STORAGE = 'config.storages.MediaStorage'\n# STATICFILES_STORAGE = 'config.storages.StaticStorage'\n# When collectstatic runs, configure static files to be stored\n# under the 'static' folder of the bucket\n# Apply this by writing the config.storages.StaticStorage class\n\n# The setup above quickly exceeds the S3 free tier's default PUT limit,\n# so run collectstatic into STATIC_ROOT and serve the files from Nginx\nAWS_ACCESS_KEY_ID = secrets['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = secrets['AWS_SECRET_ACCESS_KEY']\nAWS_STORAGE_BUCKET_NAME = secrets['AWS_STORAGE_BUCKET_NAME']\n\nAWS_DEFAULT_ACL = None\n\nINSTALLED_APPS += [\n 'django_extensions',\n 'debug_toolbar',\n]\n\nINTERNAL_IPS = ('127.0.0.1',)\n\nMIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]\n\n# Celery settings\n\nCELERY_BROKER_URL = 'redis://localhost:6379//'\n","sub_path":"app/config/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"35505206","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib import messages\r\nfrom .models import User, Book, Review\r\nimport bcrypt\r\n\r\n\r\n\r\ndef index(request):\r\n\treturn render(request, 'first_app/login.html')\r\n\r\ndef create(request):\r\n\terrors = User.objects.validation(request.POST)\r\n\tif len(errors):\r\n\t\tfor keys, value in errors.items():\r\n\t\t\tmessages.error(request, value, extra_tags='reg')\r\n\r\n\t\treturn redirect('/')\r\n\telse:\r\n\t\thash = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\r\n\t\tprint(hash)\r\n\t\tnew_user = User.objects.create(first_name = request.POST['first_name'], last_name = request.POST['last_name'], email = request.POST['email'], password = hash)\r\n\t\trequest.session['user_id'] = new_user.id\r\n\t\treturn redirect('/home')\r\n\treturn redirect('/')\r\n\r\ndef read(request):\r\n\terror = User.objects.login_validation(request.POST)\r\n\tif len(error):\r\n\t\tfor keys, value in error.items():\r\n\t\t\tmessages.error(request, value, extra_tags='login')\r\n\r\n\t\treturn redirect('/')\r\n\telse:\r\n\t\trequest.session['user_id'] = User.objects.get(email = request.POST['username']).id\r\n\t\treturn redirect('/home')\r\n\r\n\r\n\r\ndef home(request):\r\n\tif 'user_id' not in request.session:\r\n\t\treturn redirect('/')\r\n\tcontext= {\r\n\t'user': User.objects.get(id = request.session['user_id']),\r\n\t'recent_book_review' :Book.objects.all().order_by('id')[:3],\r\n\t'all_books_with_reviews': Book.objects.all()\r\n\t}\r\n\tprint(context)\r\n\treturn render(request, 'first_app/home.html', context)\r\n\r\ndef displayAddReview(request):\r\n\tcontext = {\r\n\t'authors': Book.objects.all()\r\n\t}\r\n\tprint(context['authors'])\r\n\treturn render(request, 'first_app/addBook.html', context)\r\n\r\ndef logoff(request):\r\n\trequest.session.clear()\r\n\treturn redirect('/')\r\n\r\ndef 
processBnR(request):\r\n\terrorsB = Book.objects.bookValidator(request.POST)\r\n\terrorsR = Review.objects.reviewValidator(request.POST)\r\n\r\n\tif len(errorsB) or len(errorsR):\r\n\t\tfor keys, value in errorsR.items():\r\n\t\t\tmessages.error(request, value, extra_tags = 'msgs')\r\n\t\tfor keys, value in errorsB.items():\r\n\t\t\tmessages.error(request, value, extra_tags = 'msgs')\r\n\t\treturn redirect('/displayAdd')\r\n\r\n\telse:\r\n\t\tBook.objects.create(title = request.POST['new_book'], author = request.POST['new_author'], user_who_uploaded_id = request.session['user_id'], rating = request.POST['rating'])\r\n\t\tReview.objects.create(review = request.POST['new_review'], review_from_user_id = request.session['user_id'], review_on_book = Book.objects.last())\r\n\treturn redirect('/home')\r\n\r\ndef bookInfo(request, id):\r\n\tcontext = {\r\n\t'book_data' : Book.objects.get(id = id)\r\n\t}\r\n\treturn render(request, 'first_app/bookInfo.html', context )\r\n\r\ndef processR(request, id):\r\n\terrors = Review.objects.reviewValidator(request.POST)\r\n\tif len(errors):\r\n\t\tfor keys, value in errors.items():\r\n\t\t\tmessages.error(request, value, extra_tags='msgs')\r\n\r\n\t\treturn redirect('/home')\r\n\telse:\r\n\t\tReview.objects.create(review = request.POST['new_review'], review_from_user_id = request.session['user_id'], review_on_book = Book.objects.get(id = request.POST['redirect']))\r\n\t\treturn redirect('/home')\r\n\r\ndef userInfo(request, id):\r\n\tcontext = {\r\n\t'user_reviews' : User.objects.get(id = id)\r\n\t}\r\n\treturn render(request, 'first_app/userReviews.html', context)\r\n\r\n\r\n","sub_path":"BeltReviewer/apps/first_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"15944882","text":"#Wais Patrick Assignment 3\n#This is the main branch, and I update it on GITHUB\n\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('input.csv',delimiter=';',header=None, decimal=\",\")\n\nnumber_cluster = df.loc[0,0]\nelements = df.loc[1,0]\n\narray = df.to_numpy()\narray = array[2:20,:]\n#Append column for cluster number\noutput = np.insert(array, 0, values=0, axis=1) # Insert column at position 0\n\nw, h = 2, int(number_cluster)\nk_means_array = [[0 for x in range(w)] for y in range(h)]\niteration = 0\n\n#Define random means\nfor i in range(0,3):\n k_means_array[i][0] = np.random.uniform(min(array[:,0]), max(array[:,0]))#X-coor\n k_means_array[i][1] = np.random.uniform(min(array[:,1]), max(array[:,1]))#Y-coor\n\n#Calculate manhattan distance\n#print('Array')\n#print(array[0,:])\n#print(\"KMEans 1\")\n#print(k_means_array[0][:])\n#print(\"KMEans 2\")\n#print(k_means_array[1][:])\n#print(\"KMEans 3\")\n#print(k_means_array[2][:])\nmean_array_k1X = []\nmean_array_k2X = []\nmean_array_k3X = []\nmean_array_k1Y = []\nmean_array_k2Y = []\nmean_array_k3Y = []\n\n\nflag = 0 #check if kmeans changes in new iteration\n\nwhile(flag == 0):\n\n iteration = iteration + 1\n mean_array_k1X.clear()\n mean_array_k1Y.clear()\n mean_array_k2X.clear()\n mean_array_k2Y.clear()\n mean_array_k3X.clear()\n mean_array_k3Y.clear()\n\n for i in range(0,18):\n #mean_array_k1X_old = k_means_array[0][0]\n sum1 = abs(array[i,0] - k_means_array[0][0]) + abs(array[i,1] - k_means_array[0][1])\n sum2 = abs(array[i,0] - k_means_array[1][0]) + abs(array[i,1] - k_means_array[1][1])\n sum3 = abs(array[i,0] - k_means_array[2][0]) + abs(array[i,1] - k_means_array[2][1])\n if(sum1 
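An aside on the k-means assignment step above: the hard-coded sum1/sum2/sum3 Manhattan distances generalize to any number of clusters with one vectorized numpy expression. A sketch with toy data (not the assignment's input.csv):

import numpy as np

points = np.array([[1.0, 2.0], [8.0, 9.0], [1.5, 1.8]])
centers = np.array([[1.0, 1.0], [9.0, 9.0], [5.0, 5.0]])
# |x - cx| + |y - cy| for every point/center pair, then pick the nearest center
dists = np.abs(points[:, None, :] - centers[None, :, :]).sum(axis=2)
print(dists.argmin(axis=1))  # -> [0 1 0]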
math.fabs(start) else math.fabs(start))\n inst.write(':SOUR:VOLT:RANG ' + str(voltage_range))\n inst.write(':SENS:CURR:PROT 0.04')\n inst.write(':SOUR:DEL ' + str(delay/1000))\n\n pyplot.ion()\n for i in range(step+1):\n V = start + stage * i\n inst.write(':SOUR:VOLT %s' % V)\n inst.write(':SOUR:DEL ' + str(delay/1000))\n inst.write('READ?')\n data = inst.read(':TRAC:DATA')\n I = float(data.split(',')[1]) * 1000\n r = os.popen(r'C:\\Users\\Administrator\\Desktop\\Debug\\SampleProgram.exe')\n Lv = float(r.readline().strip('\\r\\n').split(':')[1])\n\n current_list.append(I)\n voltage_list.append(V)\n luminance_list.append(Lv)\n\n # L-V Curve\n pyplot.subplot(2, 1, 1)\n pyplot.plot(voltage_list, luminance_list, 'c.-')\n pyplot.title(file_name)\n pyplot.ylabel('Luminance (cd/m2')\n\n # I-V Curve\n pyplot.subplot(2, 1, 2)\n pyplot.plot(voltage_list, current_list, 'b.-')\n pyplot.ylabel('Current (mA)')\n pyplot.xlabel('Voltage (V)')\n\n pyplot.pause(0.3)\n # if i == step:\n # savefig(file_path + '/' + file_name + '.png')\n pyplot.ioff()\n time.sleep(interval / 1000)\n save_data = pd.DataFrame({'Current (mA)': current_list,\n 'Voltage (V)': voltage_list,\n 'Luminance (cd/m2)': luminance_list})\n save_data.to_csv(file_path + '/' + file_name + '.csv', index=False, sep=\",\")\n\n close_inst()\n pyplot.show()\n\n\ndef SweepCurrent():\n start = float(start_current.get()) / 1000\n end = float(end_current.get()) / 1000\n step = int(current_steps.get())\n file_name = sweep_current_file_names.get()\n file_path = save_dir.get()\n\n delay = 50\n interval = 300\n stage = (end - start) / step\n\n current_list = list()\n voltage_list = list()\n luminance_list = list()\n\n pyplot.close()\n inst = connect_inst()\n current_range = 1.2 * (math.fabs(end) if math.fabs(end) > math.fabs(start) else math.fabs(start))\n\n inst.write(':SOUR:FUNC CURR')\n inst.write(':OUTP ON')\n inst.write(':SOUR:CURR:RANG ' + str(current_range))\n inst.write(':SOUR:DEL 0.05')\n inst.write(':SYST:KEY 15')\n\n pyplot.ion()\n for i in range(step+1):\n I = start + stage * i\n inst.write(':SOUR:CURR %s' % I)\n inst.write(':SOUR:DEL ' + str(delay / 1000))\n inst.write('read?')\n data = inst.read(\"TRAC:DATA\")\n\n V = float(data.split(',')[0])\n r = os.popen(r'C:\\Users\\Administrator\\Desktop\\Debug\\SampleProgram.exe')\n Lv = float(r.readline().strip('\\r\\n').split(':')[1])\n\n I1000 = I *1000\n current_list.append(I1000)\n voltage_list.append(V)\n luminance_list.append(Lv)\n\n # L-V Curve\n pyplot.subplot(2, 1, 1)\n pyplot.plot(current_list, luminance_list, 'c.-')\n pyplot.title(file_name)\n pyplot.ylabel('Luminance (cd/m2')\n\n # I-V Curve\n pyplot.subplot(2, 1, 2)\n pyplot.plot(current_list, voltage_list, 'b.-')\n pyplot.ylabel('Voltage (V)')\n pyplot.xlabel('Current (mA)')\n\n pyplot.pause(0.1)\n # if i == step:\n # savefig(file_path + '/' + file_name + '.png')\n pyplot.ioff()\n time.sleep(interval / 1000)\n save_data = pd.DataFrame({'Current (mA)': current_list,\n 'Voltage (V)': voltage_list,\n 'Luminance (cd/m2)': luminance_list})\n save_data.to_csv(file_path + '/' + file_name + '.csv', index=False, sep=\",\")\n\n close_inst()\n pyplot.show()\n\n\n# check file save directory\ndef FilePath():\n file_path = askdirectory()\n if save_dir != '':\n save_dir.delete(0, END)\n save_dir.insert(0, file_path)\n else:\n save_dir.insert(0, file_path)\n\n\nif __name__ == '__main__':\n App = Tk()\n App.title('IV Sweep')\n App.geometry('400x600+900+100')\n\n # View Model\n # Constant Voltage View\n Label(App, text='Constant Voltage (V):').grid(row=0, 
column=0, sticky=E, pady=10)\n constant_voltage = Entry(App, textvariable=DoubleVar)\n constant_voltage.grid(row=0, column=1)\n Button(App, text='ON', command=ConstantVoltage).grid(row=0, column=3, padx=10)\n Button(App, text='OFF', command=close_inst).grid(row=0, column=4)\n\n # Constant Current View\n Label(App, text='Constant Current (mA):').grid(row=1, column=0, sticky=E)\n constant_current = Entry(App, textvariable=DoubleVar)\n constant_current.grid(row=1, column=1)\n Button(App, text='ON', command=ConstantCurrent).grid(row=1, column=3)\n Button(App, text='OFF', command=close_inst).grid(row=1, column=4)\n\n # Sweep Voltage View\n Label(App, text='Voltage Sweep Model', font=20).grid(row=2, column=0, columnspan=4, pady=15)\n Label(App, text='Start Voltage (V):').grid(row=3, column=0, sticky=E)\n start_voltage = Entry(App, textvariable=DoubleVar)\n start_voltage.grid(row=3, column=1)\n Label(App, text='End Voltage (V):').grid(row=4, column=0, sticky=E, pady=5)\n end_voltage = Entry(App, textvariable=DoubleVar)\n end_voltage.grid(row=4, column=1)\n Label(App, text='Step:').grid(row=5, column=0, sticky=E)\n steps = Entry(App, textvariable=IntVar)\n steps.grid(row=5, column=1)\n Label(App, text='File Name:').grid(row=6, column=0, sticky=E, pady=5)\n sweep_voltage_file_names = Entry(App, textvariable=StringVar)\n sweep_voltage_file_names.grid(row=6, column=1)\n Button(App, text='ON', command=SweepVoltage).grid(row=6, column=3)\n Button(App, text='OFF', command=close_inst).grid(row=6, column=4)\n\n # Sweep Current View\n Label(App, text='Current Sweep Model', font=20).grid(row=7, column=0, columnspan=4, pady=15)\n Label(App, text='Start Current (mA):').grid(row=8, column=0, sticky=E)\n start_current = Entry(App, textvariable=DoubleVar)\n start_current.grid(row=8, column=1)\n Label(App, text='End Current (mA):').grid(row=9, column=0, sticky=E, pady=5)\n end_current = Entry(App, textvariable=DoubleVar)\n end_current.grid(row=9, column=1)\n Label(App, text='Step:').grid(row=10, column=0, sticky=E)\n current_steps = Entry(App, textvariable=IntVar)\n current_steps.grid(row=10, column=1)\n Label(App, text='File Name:').grid(row=11, column=0, sticky=E, pady=5)\n sweep_current_file_names = Entry(App, textvariable=StringVar)\n sweep_current_file_names.grid(row=11, column=1)\n Button(App, text='ON', command=SweepCurrent).grid(row=11, column=3)\n Button(App, text='OFF', command=close_inst).grid(row=11, column=4)\n\n # File path\n Label(App, text='Save Directory:').grid(row=12, column=0, sticky=E, pady=20)\n save_dir = Entry(App, textvariable=StringVar)\n save_dir.grid(row=12, column=1, columnspan=2)\n Button(App, text='Save Path', bg='cyan3', command=FilePath).grid(row=13, column=1)\n\n # Plot View\n # frame = Frame(width=400, height=400, bg=\"gray80\", colormap=\"new\")\n # frame.grid(row=0, column=5, rowspan=12, padx=20, pady=20)\n\n App.mainloop()\n","sub_path":"IVSweep.py","file_name":"IVSweep.py","file_ext":"py","file_size_in_byte":8866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"359174446","text":"from django.shortcuts import render,redirect,Http404,get_object_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom accounts.models import Profile,AdwordsApi\nfrom django.core.mail import send_mail\n# Create your views here.\nfrom .forms import UserCreateForm,UserUpdateForm,ProfileForm,ApiCreateForm,ApiUpdateForm\nfrom googleads import adwords\nfrom googleads import oauth2\nimport datetime\nfrom 
django.utils import timezone\nimport sys\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger('suds.transport').setLevel(logging.DEBUG)\n\n\nPAGE_SIZE = 500\n\n\nfrom googleads import adwords\n\n\nPAGE_SIZE = 500\n\n\ndef DisplayAccountTree(account, accounts, links, depth=0):\n \"\"\"Displays an account tree.\n\n Args:\n account: dict The account to display.\n accounts: dict Map from customerId to account.\n links: dict Map from customerId to child links.\n depth: int Depth of the current account in the tree.\n \"\"\"\n prefix = '-' * depth * 2\n print ('%s%s, %s' % (prefix, account['customerId'], account['name']))\n if account['customerId'] in links:\n for child_link in links[account['customerId']]:\n child_account = accounts[child_link['clientCustomerId']]\n DisplayAccountTree(child_account, accounts, links, depth + 1)\n\n\n#def get_all_campaign(request):\n\n#def get_campaign_index(request,id):\n\n\ndef customer_index_view(request,customer_id):\n\n if not request.user.is_superuser:\n raise Http404()\n\n adwords_api = AdwordsApi.objects.all();\n adwords_bilgi = adwords_api[0]\n\n oauth2_client = oauth2.GoogleRefreshTokenClient(\n adwords_bilgi.oauth_client_id, adwords_bilgi.oauth_client_secret,\n adwords_bilgi.refresh_token)\n\n adwords_client = adwords.AdWordsClient(\n adwords_bilgi.developer_token, oauth2_client,\n client_customer_id=customer_id)\n\n report_downloader = adwords_client.GetReportDownloader(version='v201708')\n\n report = {\n 'reportName': 'Last 7 days CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'LAST_7_DAYS',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'XML',\n 'selector': {\n 'fields': ['CampaignName', 'CampaignId', 'CampaignStatus',\n 'Impressions', 'Clicks', 'Conversions' ,'Cost', 'Amount', 'AverageCost', 'AverageCpc',\n 'BounceRate', 'ConversionRate', 'CostPerConversion']\n }\n }\n\n # You can provide a file object to write the output to. 
For this demonstration\n # we use sys.stdout to write the report to the screen.\n report_downloader.DownloadReport(\n report, sys.stdout, skip_report_header=False, skip_column_header=False,\n skip_report_summary=False, include_zero_impressions=True)\n\n sayi = 5\n return render(request, 'metadmin/customer_index.html', {'sayi': sayi})\n\n\n\ndef user_index_view(request):\n\n profiles = Profile.objects.all()\n print(profiles)\n return render(request, 'metadmin/index.html', {'profiles': profiles,\n })\n'''def user_index_view(request):\n\n if not request.user.is_superuser:\n raise Http404()\n\n adwords_api = AdwordsApi.objects.all();\n adwords_bilgi = adwords_api[1]\n print(adwords_bilgi.developer_token)\n print(adwords_bilgi.oauth_client_id)\n print(adwords_bilgi.customer_id)\n\n oauth2_client = oauth2.GoogleRefreshTokenClient(\n adwords_bilgi.oauth_client_id, adwords_bilgi.oauth_client_secret,\n adwords_bilgi.refresh_token)\n\n client = adwords.AdWordsClient(\n adwords_bilgi.developer_token, oauth2_client,\n client_customer_id=adwords_bilgi.customer_id)\n # Initialize appropriate service.\n managed_customer_service = client.GetService(\n 'ManagedCustomerService', version='v201708')\n\n # Construct selector to get all accounts.\n offset = 0\n selector = {\n 'fields': ['CustomerId', 'Name','CurrencyCode','DateTimeZone'],\n 'paging': {\n 'startIndex': str(offset),\n 'numberResults': str(PAGE_SIZE)\n }\n }\n more_pages = True\n accounts = {}\n child_links = {}\n parent_links = {}\n root_account = None\n\n while more_pages:\n # Get serviced account graph.\n page = managed_customer_service.get(selector)\n if 'entries' in page and page['entries']:\n # Create map from customerId to parent and child links.\n if 'links' in page:\n for link in page['links']:\n if link['managerCustomerId'] not in child_links:\n child_links[link['managerCustomerId']] = []\n child_links[link['managerCustomerId']].append(link)\n if link['clientCustomerId'] not in parent_links:\n parent_links[link['clientCustomerId']] = []\n parent_links[link['clientCustomerId']].append(link)\n # Map from customerID to account.\n for account in page['entries']:\n accounts[account['customerId']] = account\n offset += PAGE_SIZE\n selector['paging']['startIndex'] = str(offset)\n more_pages = offset < int(page['totalNumEntries'])\n\n # Find the root account.\n for customer_id in accounts:\n if customer_id not in parent_links:\n root_account = accounts[customer_id]\n\n # Display account tree.\n if root_account:\n print('CustomerId, Name','CurrencyCode','DateTimeZone')\n DisplayAccountTree(root_account, accounts, child_links, 0)\n else:\n print('Unable to determine a root account')\n\n sayi=5\n\n return render(request, 'metadmin/index.html', {\"sayi\":sayi})\n'''\n\n\ndef user_create_view(request):\n if not request.user.is_staff:\n raise Http404()\n\n form = UserCreateForm(request.POST or None)\n if form.is_valid():\n user = form.save(commit=False)\n sifre = \"123456\"\n user.set_password(sifre)\n user.save()\n print(sifre)\n '''send_mail(\n 'Üyelik Bilgileri',\n sifre,\n 'info@robotizma.com',\n ['sametmacit@outlook.com'],\n fail_silently=False,\n )'''\n\n messages.success(request,'Başarılı bir şekilde oluşturdunuz')\n\n return redirect('/metadmin/index')\n\n\n return render(request,'metadmin/form.html',{\n 'form': form,\n })\n\ndef user_update_view(request,id):\n if not request.user.is_staff:\n raise Http404()\n\n user = get_object_or_404(User, id=id)\n profile = get_object_or_404(Profile, user=user)\n form = UserUpdateForm(request.POST or 
None,instance=user)\n    profile_form = ProfileForm(request.POST or None,instance=profile)\n    if form.is_valid() and profile_form.is_valid():\n        user = form.save()\n        profile_form.save()\n\n        messages.success(request,'Başarılı bir güncellendi oluşturdunuz')\n\n        return redirect('/metadmin/index')\n\n\n    return render(request,'metadmin/update.html',{\n        'form': form,\n        'profile_form': profile_form,\n        })\n\ndef user_delete_view(request,id):\n    if not request.user.is_staff:\n        raise Http404()\n\n    user = get_object_or_404(User, id=id)\n    profile = get_object_or_404(Profile, user=user)\n    user.delete()\n    profile.delete()\n\n    messages.success(request,'Başarılı bir güncellendi oluşturdunuz')\n\n    return redirect('/metadmin/index')\n\ndef api_create_view(request):\n    if not request.user.is_staff:\n        raise Http404()\n\n    form = ApiCreateForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n\n        messages.success(request, 'Başarılı bir şekilde oluşturdunuz')\n\n        return redirect('/metadmin/index')\n\n    return render(request, 'metadmin/apiform.html', {\n        'form': form,\n    })\ndef api_update_view(request,id):\n    if not request.user.is_staff:\n        raise Http404()\n\n    api = get_object_or_404(AdwordsApi, id=id)\n    form = ApiUpdateForm(request.POST or None,instance=api)\n    if form.is_valid():\n        api = form.save()\n\n        messages.success(request,'Başarılı bir güncellendi oluşturdunuz')\n\n        return redirect('/metadmin/index')\n\n\n    return render(request,'metadmin/api_update.html',{\n        'form': form,\n    })\n\ndef api_delete_view(request,id):\n\n    if not request.user.is_staff:\n        raise Http404()\n\n    api = get_object_or_404(AdwordsApi, id=id)\n    api.delete()\n\n    messages.success(request,'Başarılı bir güncellendi oluşturdunuz')\n\n    return redirect('/metadmin/index')\n","sub_path":"metadmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"460525817","text":"#Some review on classes\nimport random \n\nnum = random.randint(0,100)\nprint(num)\n\nclass Dog:\n\n    def __init__(self, name, age):\n        self.name = name \n        self.age = age\n        self.happy = 0\n\n    def sit(self):\n        print(self.name + \" is sitting!\")\n\n    def roll_over(self):\n        print(self.name + \" is rolling over!\")\n\nclass Human:\n\n    def __init__(self, name):\n        self.name = name \n\n    def pet(self, animal):\n        print(self.name + \" pets \" + animal.name)\n        animal.happy += 2\n        print(animal.happy)\n\ndog = Dog(\"Dutch\", 13)\nperson = Human(\"Mike\")\ndog.sit()\nperson.pet(dog)\n\nchoice = input(\"Do you want to pet the dog?(y/n) \")\nif choice == \"y\":\n    person.pet(dog)\nelse:\n    print(\"Maybe another day!\")","sub_path":"nine/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"555369746","text":"\"\"\"\nHGVS language currently implemented.\n\nHGVS = ALLELE\n     | PREFIX_NAME : ALLELE\n\nPREFIX_NAME = TRANSCRIPT\n            | TRANSCRIPT '(' GENE ')'\n\nTRANSCRIPT = TRANSCRIPT_NAME\n           | TRANSCRIPT_NAME '.' TRANSCRIPT_VERSION\n\nTRANSCRIPT_VERSION = NUMBER\n\nALLELE = 'c.' CDNA_ALLELE    # cDNA\n       | 'g.' GENOMIC_ALLELE # genomic\n       | 'm.' MIT_ALLELE     # mitochondrial sequence\n       | 'n.' NC_ALLELE      # non-coding RNA reference sequence\n       | 'r.' RNA_ALLELE     # RNA sequence (like r.76a>u)\n       | 'p.' 
PROTEIN_ALLELE # protein sequence (like p.Lys76Asn)\n\nNC_ALLELE =\nRNA_ALLELE =\nCDNA_ALLELE = CDNA_COORD SINGLE_BASE_CHANGE\n | CDNA_COORD_RANGE MULTI_BASE_CHANGE\n\nGENOMIC_ALLELE =\nMIT_ALLELE = COORD SINGLE_BASE_CHANGE\n | COORD_RANGE MULTI_BASE_CHANGE\n\nSINGLE_BASE_CHANGE = CDNA_ALLELE = CDNA_COORD BASE '=' # no change\n | CDNA_COORD BASE '>' BASE # substitution\n | CDNA_COORD 'ins' BASE # 1bp insertion\n | CDNA_COORD 'del' BASE # 1bp deletion\n | CDNA_COORD 'dup' BASE # 1bp duplication\n | CDNA_COORD 'ins' # 1bp insertion\n | CDNA_COORD 'del' # 1bp deletion\n | CDNA_COORD 'dup' # 1bp duplication\n | CDNA_COORD 'del' BASE 'ins' BASE # 1bp indel\n | CDNA_COORD 'delins' BASE # 1bp indel\n\nMULTI_BASE_CHANGE = COORD_RANGE 'del' BASES # deletion\n | COORD_RANGE 'ins' BASES # insertion\n | COORD_RANGE 'dup' BASES # duplication\n | COORD_RANGE 'del' # deletion\n | COORD_RANGE 'dup' # duplication\n | COORD_RANGE 'del' BASES 'ins' BASES # indel\n | COORD_RANGE 'delins' BASES # indel\n\n\nAMINO1 = [GAVLIMFWPSTCYNQDEKRH]\n\nAMINO3 = 'Gly' | 'Ala' | 'Val' | 'Leu' | 'Ile' | 'Met' | 'Phe' | 'Trp' | 'Pro'\n | 'Ser' | 'Thr' | 'Cys' | 'Tyr' | 'Asn' | 'Gln' | 'Asp' | 'Glu' | 'Lys'\n | 'Arg' | 'His'\n\nPROTEIN_ALLELE = AMINO3 COORD '=' # no peptide change\n | AMINO1 COORD '=' # no peptide change\n | AMINO3 COORD AMINO3 PEP_EXTRA # peptide change\n | AMINO1 COORD AMINO1 PEP_EXTRA # peptide change\n | AMINO3 COORD '_' AMINO3 COORD PEP_EXTRA # indel\n | AMINO1 COORD '_' AMINO1 COORD PEP_EXTRA # indel\n | AMINO3 COORD '_' AMINO3 COORD PEP_EXTRA AMINO3 # indel\n | AMINO1 COORD '_' AMINO1 COORD PEP_EXTRA AMINO1 # indel\n\n# A genomic range:\nCOORD_RANGE = COORD '_' COORD\n\n# A cDNA range:\nCDNA_COORD_RANGE = CDNA_COORD '_' CDNA_COORD\n\n# A cDNA coordinate:\nCDNA_COORD = COORD_PREFIX COORD\n | COORD_PREFIX COORD OFFSET_PREFIX OFFSET\nCOORD_PREFIX = '' | '-' | '*'\nCOORD = NUMBER\nOFFSET_PREFIX = '-' | '+'\nOFFSET = NUMBER\n\n# Primatives:\nNUMBER = \\d+\nBASE = [ACGT]\nBASES = BASE+\n\n\"\"\"\n\nimport re\n\nfrom .cdna import CDNACoord\nfrom .variants import revcomp\n\nCHROM_PREFIX = 'chr'\n\n\nclass HGVSRegex(object):\n \"\"\"\n All regular expression for HGVS names.\n \"\"\"\n\n # DNA syntax\n # http://www.hgvs.org/mutnomen/standards.html#nucleotide\n BASE = \"[acgtbdhkmnrsvwyACGTBDHKMNRSVWY]|\\d+\"\n BASES = \"[acgtbdhkmnrsvwyACGTBDHKMNRSVWY]+|\\d+\"\n DNA_REF = \"(?P\" + BASES + \")\"\n DNA_ALT = \"(?P\" + BASES + \")\"\n\n # Mutation types\n EQUAL = \"(?P=)\"\n SUB = \"(?P>)\"\n INS = \"(?Pins)\"\n DEL = \"(?Pdel)\"\n DUP = \"(?Pdup)\"\n\n # Simple coordinate syntax\n COORD_START = \"(?P\\d+)\"\n COORD_END = \"(?P\\d+)\"\n COORD_RANGE = COORD_START + \"_\" + COORD_END\n\n # cDNA coordinate syntax\n CDNA_COORD = (\"(?P|-|\\*)(?P\\d+)\"\n \"((?P-|\\+)(?P\\d+))?\")\n CDNA_START = (\"(?P(?P|-|\\*)(?P\\d+)\"\n \"((?P-|\\+)(?P\\d+))?)\")\n CDNA_END = (r\"(?P(?P|-|\\*)(?P\\d+)\"\n \"((?P-|\\+)(?P\\d+))?)\")\n CDNA_RANGE = CDNA_START + \"_\" + CDNA_END\n\n # cDNA allele syntax\n CDNA_ALLELE = [\n # No change\n CDNA_START + EQUAL,\n CDNA_START + DNA_REF + EQUAL,\n\n # Substitution\n CDNA_START + DNA_REF + SUB + DNA_ALT,\n\n # 1bp insertion, deletion, duplication\n CDNA_START + INS + DNA_ALT,\n CDNA_START + DEL + DNA_REF,\n CDNA_START + DUP + DNA_REF,\n CDNA_START + DEL,\n CDNA_START + DUP,\n\n # Insertion, deletion, duplication\n CDNA_RANGE + INS + DNA_ALT,\n CDNA_RANGE + DEL + DNA_REF,\n CDNA_RANGE + DUP + DNA_REF,\n CDNA_RANGE + DEL,\n CDNA_RANGE + DUP,\n\n # Indels\n \"(?P\" + CDNA_START + 'del' + DNA_REF 
+ 'ins' + DNA_ALT + \")\",\n \"(?P\" + CDNA_RANGE + 'del' + DNA_REF + 'ins' + DNA_ALT + \")\",\n \"(?P\" + CDNA_START + 'delins' + DNA_ALT + \")\",\n \"(?P\" + CDNA_RANGE + 'delins' + DNA_ALT + \")\",\n ]\n\n CDNA_ALLELE_REGEXES = [re.compile(\"^\" + regex + \"$\")\n for regex in CDNA_ALLELE]\n\n # Peptide syntax\n PEP = \"([A-Z]([a-z]{2}))+\"\n PEP_REF = \"(?P\" + PEP + \")\"\n PEP_REF2 = \"(?P\" + PEP + \")\"\n PEP_ALT = \"(?P\" + PEP + \")\"\n\n PEP_EXTRA = \"(?P(|=|\\?)(|fs))\"\n\n # Peptide allele syntax\n PEP_ALLELE = [\n # No peptide change\n # Example: Glu1161=\n PEP_REF + COORD_START + PEP_EXTRA,\n\n # Peptide change\n # Example: Glu1161Ser\n PEP_REF + COORD_START + PEP_ALT + PEP_EXTRA,\n\n # Peptide indel\n # Example: Glu1161_Ser1164?fs\n \"(?P\" + PEP_REF + COORD_START + \"_\" + PEP_REF2 + COORD_END +\n PEP_EXTRA + \")\",\n \"(?P\" + PEP_REF + COORD_START + \"_\" + PEP_REF2 + COORD_END +\n PEP_ALT + PEP_EXTRA + \")\",\n ]\n\n PEP_ALLELE_REGEXES = [re.compile(\"^\" + regex + \"$\")\n for regex in PEP_ALLELE]\n\n # Genomic allele syntax\n GENOMIC_ALLELE = [\n # No change\n COORD_START + EQUAL,\n COORD_START + DNA_REF + EQUAL,\n\n # Substitution\n COORD_START + DNA_REF + SUB + DNA_ALT,\n\n # 1bp insertion, deletion, duplication\n COORD_START + INS + DNA_ALT,\n COORD_START + DEL + DNA_REF,\n COORD_START + DUP + DNA_REF,\n COORD_START + DEL,\n COORD_START + DUP,\n\n # Insertion, deletion, duplication\n COORD_RANGE + EQUAL,\n COORD_RANGE + INS + DNA_ALT,\n COORD_RANGE + DEL + DNA_REF,\n COORD_RANGE + DUP + DNA_REF,\n COORD_RANGE + DEL,\n COORD_RANGE + DUP,\n\n # Indels\n \"(?P\" + COORD_START + 'del' + DNA_REF + 'ins' + DNA_ALT + \")\",\n \"(?P\" + COORD_RANGE + 'del' + DNA_REF + 'ins' + DNA_ALT + \")\",\n \"(?P\" + COORD_START + 'delins' + DNA_ALT + \")\",\n \"(?P\" + COORD_RANGE + 'delins' + DNA_ALT + \")\",\n ]\n\n GENOMIC_ALLELE_REGEXES = [re.compile(\"^\" + regex + \"$\")\n for regex in GENOMIC_ALLELE]\n\n\n# The RefSeq standard for naming contigs/transcripts/proteins:\n# http://www.ncbi.nlm.nih.gov/books/NBK21091/table/ch18.T.refseq_accession_numbers_and_mole/?report=objectonly # nopep8\nREFSEQ_PREFIXES = [\n ('AC_', 'genomic',\n 'Complete genomic molecule, usually alternate assembly'),\n ('NC_', 'genomic',\n 'Complete genomic molecule, usually reference assembly'),\n ('NG_', 'genomic', 'Incomplete genomic region'),\n ('NT_', 'genomic', 'Contig or scaffold, clone-based or WGS'),\n ('NW_', 'genomic', 'Contig or scaffold, primarily WGS'),\n ('NS_', 'genomic', 'Environmental sequence'),\n ('NZ_', 'genomic', 'Unfinished WGS'),\n ('NM_', 'mRNA', ''),\n ('NR_', 'RNA', ''),\n ('XM_', 'mRNA', 'Predicted model'),\n ('XR_', 'RNA', 'Predicted model'),\n ('AP_', 'Protein', 'Annotated on AC_ alternate assembly'),\n ('NP_', 'Protein', 'Associated with an NM_ or NC_ accession'),\n ('YP_', 'Protein', ''),\n ('XP_', 'Protein', 'Predicted model, associated with an XM_ accession'),\n ('ZP_', 'Protein', 'Predicted model, annotated on NZ_ genomic records'),\n]\n\n\nREFSEQ_PREFIX_LOOKUP = dict(\n (prefix, (kind, description))\n for prefix, kind, description in REFSEQ_PREFIXES\n)\n\n\ndef get_refseq_type(name):\n \"\"\"\n Return the RefSeq type for a refseq name.\n \"\"\"\n prefix = name[:3]\n return REFSEQ_PREFIX_LOOKUP.get(prefix, (None, ''))[0]\n\n\nclass InvalidHGVSName(ValueError):\n def __init__(self, name='', part='name', reason=''):\n if name:\n message = 'Invalid HGVS %s \"%s\"' % (part, name)\n else:\n message = 'Invalid HGVS %s' % part\n if reason:\n message += ': ' + reason\n 
super(InvalidHGVSName, self).__init__(message)\n\n self.name = name\n self.part = part\n self.reason = reason\n\n\nclass HGVSName(object):\n \"\"\"\n Represents a HGVS variant name.\n \"\"\"\n\n def __init__(self, name='', prefix='', chrom='', transcript='', gene='',\n kind='', mutation_type=None, start=0, end=0, ref_allele='',\n ref2_allele='', alt_allele='',\n cdna_start=None, cdna_end=None, pep_extra=''):\n\n # Full HGVS name.\n self.name = name\n\n # Name parts.\n self.prefix = prefix\n self.chrom = chrom\n self.transcript = transcript\n self.gene = gene\n self.kind = kind\n self.mutation_type = mutation_type\n self.start = start\n self.end = end\n self.ref_allele = ref_allele # reference allele\n self.ref2_allele = ref2_allele # reference allele at end of pep indel\n self.alt_allele = alt_allele # alternate allele\n\n # cDNA-specific fields\n self.cdna_start = cdna_start if cdna_start else CDNACoord()\n self.cdna_end = cdna_end if cdna_end else CDNACoord()\n\n # Protein-specific fields\n self.pep_extra = pep_extra\n\n if name:\n self.parse(name)\n\n def parse(self, name):\n \"\"\"Parse a HGVS name.\"\"\"\n # Does HGVS name have transcript/gene prefix?\n if ':' in name:\n prefix, allele = name.split(':', 1)\n else:\n prefix = ''\n allele = name\n\n self.name = name\n\n # Parse prefix and allele.\n self.parse_allele(allele)\n self.parse_prefix(prefix, self.kind)\n self._validate()\n\n def parse_prefix(self, prefix, kind):\n \"\"\"\n Parse a HGVS prefix (gene/transcript/chromosome).\n\n Some examples of full hgvs names with transcript include:\n NM_007294.3:c.2207A>C\n NM_007294.3(BRCA1):c.2207A>C\n BRCA1{NM_007294.3}:c.2207A>C\n \"\"\"\n\n self.prefix = prefix\n\n # No prefix.\n if prefix == '':\n self.chrom = ''\n self.transcript = ''\n self.gene = ''\n return\n\n # Transcript and gene given with parens.\n # example: NM_007294.3(BRCA1):c.2207A>C\n match = re.match(r\"^(?P[^(]+)\\((?P[^)]+)\\)$\", prefix)\n if match:\n self.transcript = match.group('transcript')\n self.gene = match.group('gene')\n return\n\n # Transcript and gene given with braces.\n # example: BRCA1{NM_007294.3}:c.2207A>C\n match = re.match(r\"^(?P[^{]+){(?P[^}]+)}$\", prefix)\n if match:\n self.transcript = match.group('transcript')\n self.gene = match.group('gene')\n return\n\n # Determine using Ensembl type.\n if prefix.startswith('ENST'):\n self.transcript = prefix\n return\n\n # Determine using LRG type.\n if prefix.startswith('LRG_'):\n self.transcript = prefix\n return\n\n # Determine using refseq type.\n refseq_type = get_refseq_type(prefix)\n if refseq_type in ('mRNA', 'RNA'):\n self.transcript = prefix\n return\n\n # Determine using refseq type.\n if prefix.startswith(CHROM_PREFIX) or refseq_type == 'genomic':\n self.chrom = prefix\n return\n\n # Assume gene name.\n self.gene = prefix\n\n def parse_allele(self, allele):\n \"\"\"\n Parse a HGVS allele description.\n\n Some examples include:\n cDNA substitution: c.101A>C,\n cDNA indel: c.3428delCinsTA, c.1000_1003delATG, c.1000_1001insATG\n No protein change: p.Glu1161=\n Protein change: p.Glu1161Ser\n Protein frameshift: p.Glu1161_Ser1164?fs\n Genomic substitution: g.1000100A>T\n Genomic indel: g.1000100_1000102delATG\n \"\"\"\n if '.' 
not in allele:\n raise InvalidHGVSName(allele, 'allele',\n 'expected kind \"c.\", \"p.\", \"g.\", etc')\n\n # Determine HGVS name kind.\n kind, details = allele.split('.', 1)\n self.kind = kind\n self.mutation_type = None\n\n if kind in (\"c\", 'n'):\n self.parse_cdna(details)\n if kind == 'n': # Ensure no 3'UTR or 5'UTR coords in non-coding\n if self.cdna_start.coord < 0:\n raise InvalidHGVSName(allele, 'allele',\n \"Non-coding transcript cannot contain negative (5'UTR) coordinates\")\n if self.cdna_start.landmark == 'cdna_stop' or self.cdna_end and self.cdna_end.landmark == 'cdna_stop':\n raise InvalidHGVSName(allele, 'allele',\n \"Non-coding transcript cannot contain '*' (3'UTR) coordinates\")\n elif kind == \"p\":\n self.parse_protein(details)\n elif kind in (\"g\", 'm'):\n self.parse_genome(details)\n else:\n raise NotImplementedError(\"unknown kind: %s\" % allele)\n\n def parse_cdna(self, details):\n \"\"\"\n Parse a HGVS cDNA name.\n\n Some examples include:\n Substitution: 101A>C,\n Indel: 3428delCinsTA, 1000_1003delATG, 1000_1001insATG\n \"\"\"\n for regex in HGVSRegex.CDNA_ALLELE_REGEXES:\n match = re.match(regex, details)\n if match:\n groups = match.groupdict()\n\n # Parse mutation type.\n if groups.get('delins'):\n self.mutation_type = 'delins'\n else:\n self.mutation_type = groups['mutation_type']\n\n # Parse coordinates.\n self.cdna_start = CDNACoord(string=groups.get('start'))\n if groups.get('end'):\n self.cdna_end = CDNACoord(string=groups.get('end'))\n else:\n self.cdna_end = CDNACoord(string=groups.get('start'))\n\n # Parse alleles.\n self.ref_allele = groups.get('ref', '')\n self.alt_allele = groups.get('alt', '')\n\n # Convert numerical allelles.\n if self.ref_allele.isdigit():\n self.ref_allele = \"N\" * int(self.ref_allele)\n if self.alt_allele.isdigit():\n self.alt_allele = \"N\" * int(self.alt_allele)\n\n # Convert duplication alleles.\n if self.mutation_type == \"dup\":\n self.alt_allele = self.ref_allele * 2\n\n # Convert no match alleles.\n if self.mutation_type == \"=\":\n self.alt_allele = self.ref_allele\n return\n\n raise InvalidHGVSName(details, 'cDNA allele')\n\n def parse_protein(self, details):\n \"\"\"\n Parse a HGVS protein name.\n\n Some examples include:\n No change: Glu1161=\n Change: Glu1161Ser\n Frameshift: Glu1161_Ser1164?fs\n \"\"\"\n for regex in HGVSRegex.PEP_ALLELE_REGEXES:\n match = re.match(regex, details)\n if match:\n groups = match.groupdict()\n\n # Parse mutation type.\n if groups.get('delins'):\n self.mutation_type = 'delins'\n else:\n self.mutation_type = '>'\n\n # Parse coordinates.\n self.start = int(groups.get('start'))\n if groups.get('end'):\n self.end = int(groups.get('end'))\n else:\n self.end = self.start\n\n # Parse alleles.\n self.ref_allele = groups.get('ref', '')\n if groups.get('ref2'):\n self.ref2_allele = groups.get('ref2')\n self.alt_allele = groups.get('alt', '')\n else:\n # If alt is not given, assume matching with ref\n self.ref2_allele = self.ref_allele\n self.alt_allele = groups.get(\n 'alt', self.ref_allele)\n\n self.pep_extra = groups.get('extra')\n return\n\n raise InvalidHGVSName(details, 'protein allele')\n\n def parse_genome(self, details):\n \"\"\"\n Parse a HGVS genomic name.\n\n Som examples include:\n Substitution: 1000100A>T\n Indel: 1000100_1000102delATG\n \"\"\"\n\n for regex in HGVSRegex.GENOMIC_ALLELE_REGEXES:\n match = re.match(regex, details)\n if match:\n groups = match.groupdict()\n\n # Parse mutation type.\n if groups.get('delins'):\n self.mutation_type = 'delins'\n else:\n self.mutation_type 
= groups['mutation_type']\n\n # Parse coordinates.\n self.start = int(groups.get('start'))\n if groups.get('end'):\n self.end = int(groups.get('end'))\n else:\n self.end = self.start\n\n # Parse alleles.\n self.ref_allele = groups.get('ref', '')\n self.alt_allele = groups.get('alt', '')\n\n # Convert numerical alleles.\n if self.ref_allele.isdigit():\n self.ref_allele = \"N\" * int(self.ref_allele)\n if self.alt_allele.isdigit():\n self.alt_allele = \"N\" * int(self.alt_allele)\n\n # Convert duplication alleles.\n if self.mutation_type == \"dup\":\n self.alt_allele = self.ref_allele * 2\n\n # Convert no match alleles.\n if self.mutation_type == \"=\":\n self.alt_allele = self.ref_allele\n return\n\n raise InvalidHGVSName(details, 'genomic allele')\n\n def _validate(self):\n \"\"\"\n Check for internal inconsistencies in representation\n \"\"\"\n if self.start > self.end:\n raise InvalidHGVSName(reason=\"Coordinates are nonincreasing\")\n\n def __repr__(self):\n try:\n return \"HGVSName('%s')\" % self.format()\n except NotImplementedError:\n return \"HGVSName('%s')\" % self.name\n\n def __unicode__(self):\n return self.format()\n\n def format(self, use_prefix=True, use_gene=True, use_counsyl=False):\n \"\"\"Generate a HGVS name as a string.\"\"\"\n\n if self.kind in ('c', 'n'):\n allele = self.kind + '.' + self.format_cdna()\n elif self.kind == 'p':\n allele = 'p.' + self.format_protein()\n elif self.kind in ('g', 'm'):\n allele = self.kind + '.' + self.format_genome()\n else:\n raise NotImplementedError(\"not implemented: '%s'\" % self.kind)\n\n prefix = self.format_prefix(use_gene=use_gene) if use_prefix else ''\n\n if prefix:\n return prefix + ':' + allele\n else:\n return allele\n\n def format_prefix(self, use_gene=True):\n \"\"\"\n Generate HGVS trancript/gene prefix.\n\n Some examples of full hgvs names with transcript include:\n NM_007294.3:c.2207A>C\n NM_007294.3(BRCA1):c.2207A>C\n \"\"\"\n\n if self.kind in ('g', 'm'):\n if self.chrom:\n return self.chrom\n\n if self.transcript:\n if use_gene and self.gene:\n return '%s(%s)' % (self.transcript, self.gene)\n else:\n return self.transcript\n else:\n if use_gene:\n return self.gene\n else:\n return ''\n\n def format_cdna_coords(self):\n \"\"\"\n Generate HGVS cDNA coordinates string.\n \"\"\"\n # Format coordinates.\n if self.cdna_start == self.cdna_end:\n return str(self.cdna_start)\n else:\n return \"%s_%s\" % (self.cdna_start, self.cdna_end)\n\n def format_dna_allele(self):\n \"\"\"\n Generate HGVS DNA allele.\n \"\"\"\n if self.mutation_type == '=':\n # No change.\n # example: 101A=\n return self.ref_allele + '='\n\n if self.mutation_type == '>':\n # SNP.\n # example: 101A>C\n return self.ref_allele + '>' + self.alt_allele\n\n elif self.mutation_type == 'delins':\n # Indel.\n # example: 112_117delAGGTCAinsTG, 112_117delinsTG\n return 'del' + self.ref_allele + 'ins' + self.alt_allele\n\n elif self.mutation_type in ('del', 'dup'):\n # Delete, duplication.\n # example: 1000_1003delATG, 1000_1003dupATG\n return self.mutation_type + self.ref_allele\n\n elif self.mutation_type == 'ins':\n # Insert.\n # example: 1000_1001insATG\n return self.mutation_type + self.alt_allele\n\n elif self.mutation_type == 'inv':\n return self.mutation_type\n\n else:\n raise AssertionError(\n \"unknown mutation type: '%s'\" % self.mutation_type)\n\n def format_cdna(self):\n \"\"\"\n Generate HGVS cDNA allele.\n\n Some examples include:\n Substitution: 101A>C,\n Indel: 3428delCinsTA, 1000_1003delATG, 1000_1001insATG\n \"\"\"\n return 
self.format_cdna_coords() + self.format_dna_allele()\n\n def format_protein(self):\n \"\"\"\n Generate HGVS protein name.\n\n Some examples include:\n No change: Glu1161=\n Change: Glu1161Ser\n Frameshift: Glu1161_Ser1164?fs\n \"\"\"\n if (self.start == self.end and\n self.ref_allele == self.ref2_allele == self.alt_allele):\n # Match.\n # Example: Glu1161=\n pep_extra = self.pep_extra if self.pep_extra else '='\n return self.ref_allele + str(self.start) + pep_extra\n\n elif (self.start == self.end and\n self.ref_allele == self.ref2_allele and\n self.ref_allele != self.alt_allele):\n # Change.\n # Example: Glu1161Ser\n return (self.ref_allele + str(self.start) +\n self.alt_allele + self.pep_extra)\n\n elif self.start != self.end:\n # Range change.\n # Example: Glu1161_Ser1164?fs\n return (self.ref_allele + str(self.start) + '_' +\n self.ref2_allele + str(self.end) +\n self.pep_extra)\n\n else:\n raise NotImplementedError('protein name formatting.')\n\n def format_coords(self):\n \"\"\"\n Generate HGVS cDNA coordinates string.\n \"\"\"\n # Format coordinates.\n if self.start == self.end:\n return str(self.start)\n else:\n return \"%s_%s\" % (self.start, self.end)\n\n def format_genome(self):\n \"\"\"\n Generate HGVS genomic allele.\n\n Som examples include:\n Substitution: 1000100A>T\n Indel: 1000100_1000102delATG\n \"\"\"\n return self.format_coords() + self.format_dna_allele()\n\n def get_raw_coords(self, transcript=None):\n \"\"\" return genomic coordinates \"\"\"\n if self.kind in ('c', 'n'):\n chrom = transcript.tx_position.chrom\n start = transcript.cdna_to_genomic_coord(self.cdna_start)\n end = transcript.cdna_to_genomic_coord(self.cdna_end)\n\n if not transcript.tx_position.is_forward_strand:\n start, end = end, start\n\n if start > end:\n raise AssertionError(\n \"cdna_start cannot be greater than cdna_end\")\n elif self.kind in ('g', 'm'):\n chrom = self.chrom\n start = self.start\n end = self.end\n else:\n raise NotImplementedError(\n 'Coordinates are not available for this kind of HGVS name \"%s\"'\n % self.kind)\n\n # Check coordinate span is equal to reference bases\n if self.ref_allele:\n coordinate_span = end - start + 1 # Ref will always be >=1 base\n ref_length = len(self.ref_allele)\n if coordinate_span != ref_length:\n raise InvalidHGVSName(\"Coordinate span (%d) not equal to ref length %d\" % (coordinate_span, ref_length))\n\n return chrom, start, end\n\n def get_ref_coords(self, transcript=None):\n \"\"\"Return genomic coordinates of reference allele.\"\"\"\n\n chrom, start, end = self.get_raw_coords(transcript)\n\n if self.mutation_type == \"ins\":\n # Inserts have empty interval.\n if start < end:\n start += 1\n end -= 1\n else:\n end = start - 1\n\n elif self.mutation_type == \"dup\":\n end = start - 1\n return chrom, start, end\n\n def get_vcf_coords(self, transcript=None):\n \"\"\"Return genomic coordinates of reference allele in VCF-style.\"\"\"\n chrom, start, end = self.get_ref_coords(transcript)\n\n # Inserts and deletes require left-padding by 1 base\n if self.mutation_type in (\"=\", \">\"):\n pass\n elif self.mutation_type in (\"del\", \"ins\", \"dup\", \"delins\"):\n # Indels have left-padding.\n start -= 1\n else:\n raise NotImplementedError(\"Unknown mutation_type '%s'\" %\n self.mutation_type)\n return chrom, start, end\n\n def get_ref_alt(self, is_forward_strand=True, raw_dup_alleles=False):\n \"\"\" Return reference and alternate alleles.\n Original code was for representation - ie it altered dup to look like an insert\n pass raw_dup_alleles=True to 
get the raw values \"\"\"\n if self.kind == 'p':\n raise NotImplementedError(\n 'get_ref_alt is not implemented for protein HGVS names')\n alleles = [self.ref_allele, self.alt_allele]\n\n # Represent duplications are inserts.\n if not raw_dup_alleles and self.mutation_type == \"dup\":\n alleles[0] = \"\"\n alleles[1] = alleles[1][:len(alleles[1]) // 2]\n\n if is_forward_strand:\n return alleles\n else:\n return tuple(map(revcomp, alleles))\n","sub_path":"pyhgvs/models/hgvs_name.py","file_name":"hgvs_name.py","file_ext":"py","file_size_in_byte":27322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"128303098","text":"a = []\nn = int(input(\"Enter the size of list\\n\"))\nprint(\"Enter the list elements\\n\")\nfor i in range(n):\n num = int(input())\n a.append(num)\nprint(\"The postive numbers are\\n\")\nfor num in a:\n if(num > 0):\n print(num)\n","sub_path":"coding_solutions/Python/positiveNumbers.py","file_name":"positiveNumbers.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"352025833","text":"from Management.Sensors import GPIOManager\n\ndef initialize():\n \"\"\" upon start of the drone, run this program \"\"\"\n manager = GPIOManager()\n manager.addOutConnections([1,2])\n manager.addInConnections([[3, \"UP\"]])\ninitialize()\n\n\na = [\n [\n [1,2], \"OUT\"\n ], \n [\n [3,4], \"IN\"\n ]\n]","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"84361463","text":"import pandas as pd\nfrom unidecode import unidecode\n\nimport os\nimport re\n# from numba import cuda\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nfo = pd.read_excel(dir_path + '/' +'data/karakter.xlsx', sheet_name='Sheet1')\nx = fo['karakter'].tolist()\ny = fo['replace'].tolist()\n\n\ndef normalisasikarakter(str, x=x, y=y):\n for i in range(len(x)):\n if i == 0:\n n_word = str\n n_word = n_word.replace(x[i],y[i])\n return unidecode(n_word)","sub_path":"Filter chi square/modul/replace_karakter.py","file_name":"replace_karakter.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"447585165","text":"# import required modules\nimport os;\nimport getpass as passExtractor;\nimport time;\nimport re as regex;\nfrom datetime import datetime;\nimport locale;\nfrom account import ATMAccount;\n\n# declare ATM Class\nclass ATM:\n # ATM Class control fields\n authacc = None; \n # current action choosen\n act_option = 1;\n\n def initialize(self):\n locale.setlocale(locale.LC_ALL, '');\n if self.authacc is None:\n self.requestAuth();\n else:\n while(self.act_option < 8):\n self.drawMenu();\n self.act_option = input(\"Escolha uma opção [ENTER]: \");\n if(self.act_option == ''):\n self.act_option = 0;\n continue;\n else:\n self.act_option = int(self.act_option);\n if(self.act_option > 8 or self.act_option < 0):\n print(\" Opção não disponível, favor selecione uma opção entre as listadas.\")\n time.sleep(1);\n self.act_option = 1;\n elif(self.act_option == 1):\n self.displayBalance();\n elif(self.act_option == 2):\n self.displayHistory();\n elif(self.act_option == 3):\n # deposit money on current account\n self.depositMoney();\n elif(self.act_option == 4):\n # deposit money on another account (that's what means 'True')\n self.depositMoney(True);\n 
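# added note (editorial comment, not in the original file): options 5-7 below\n                    # dispatch to withdrawMoney, transferMoney and editPersonalInfo; in this\n                    # snapshot withdrawMoney and editPersonalInfo are stubs that just return ''.\n                    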
elif(self.act_option == 5):\n self.withdrawMoney();\n elif(self.act_option == 6):\n self.transferMoney();\n elif(self.act_option == 7):\n self.editPersonalInfo();\n else:\n self.logout();\n time.sleep(1);\n\n def drawWelcome(self):\n self.clearScreen();\n print(\"=====================================================================================\");\n print(\" PyLanguage ATM \");\n print(\"=====================================================================================\");\n print(\" *** Bem vindo ao PyLanguage ATM ***\");\n print(\"-------------------------------------------------------------------------------------\");\n\n def drawMenu(self):\n self.drawWelcome();\n print(\" Bem vindo, \"+self.authacc.getUserFullname()+\".\");\n print(\" Ag: \"+self.authacc.getAgency());\n print(\" C/C: \"+self.authacc.getAccountNumber());\n print(\"-------------------------------------------------------------------------------------\");\n print(\" Opções disponíveis:\");\n print(\"\"\"\n 1 - Consultar saldo\n 2 - Emitir extrato\n 3 - Depositar\n 4 - Depositar em outra c/c\n 5 - Sacar\n 6 - Transferência\n 7 - Alterar dados cadastrais\n 8 - Sair\n \"\"\");\n\n def displayBalance(self):\n self.clearScreen();\n print(\"---------------------------------------------------------------------------------------------\");\n print(\" PyLanguage ATM - Saldo de conta corrente (Ag: \"+self.authacc.getAgency()+\" C/C:\"+self.authacc.getAccountNumber()+\")\");\n print(\"---------------------------------------------------------------------------------------------\");\n print(\" * Saldo atual: \"+locale.currency(self.authacc.getBalance(), grouping=True));\n print(\"---------------------------------------------------------------------------------------------\");\n input(\"Pressione [ENTER] para continuar ...\");\n\n def displayHistory(self):\n self.clearScreen();\n print(\"----------------------------------------------------------------------------------------------\");\n print(\" PyLanguage ATM - Extrato de conta corrente (Ag: \"+self.authacc.getAgency()+\" C/C:\"+self.authacc.getAccountNumber()+\")\");\n print(\"----------------------------------------------------------------------------------------------\");\n print(\" Data \\t Tipo \\t Valor\");\n print(\"==============================================================================================\");\n hists = self.authacc.getMovimentHistory();\n for mov in hists:\n movtip = str(mov[1]);\n if(movtip == 'D'):\n movtip = 'Depósito';\n elif(movtip == 'DO'):\n movtip = 'Depósito outra c/c';\n elif(movtip == 'T'):\n movtip = 'Transferência';\n elif (movtip == 'TO'):\n movtip = 'Transferência outra c/c';\n elif(movtip == 'S'):\n movtip = 'Saque';\n\n dt_reg = regex.match(r\"([0-9]{4})-([0-9]{2})-([0-9]{2}) ([0-9]{2}:[0-9]{2}:[0-9]{2})\", str(mov[0]));\n dt_mov = dt_reg.group(3)+\"/\"+dt_reg.group(2)+\"/\"+dt_reg.group(1)+\" \"+dt_reg.group(4);\n mov_val =locale.currency(mov[2], grouping=True);\n print(\" \"+dt_mov+\" \\t\"+movtip.ljust(19)+\" \\t\"+mov_val);\n print(\"-----------------------------------------------------------------------------------------------\");\n print(\" Total de \"+str(len(hists))+\" registros\\n\");\n input(\" Pressione [ENTER] para continuar ...\");\n \n def depositMoney(self,another_account=False):\n self.clearScreen();\n print(\"----------------------------------------------------------------------------------------------\");\n print(\" \"+(\"PyLanguage ATM - depositar em outra conta corrente\" if another_account else \"PyLanguage ATM - 
depositar em conta corrente\")); \n print(\"----------------------------------------------------------------------------------------------\");\n print(\"AG: \"+self.authacc.getAgency());\n print(\"C/C: \"+self.authacc.getAccountNumber());\n print(\"-----------------------------------------------------------------------------------------------\");\n deposit_amount = float(input(\" Informe o valor à ser depositado: \")); \n if(another_account):\n ag_deposit = str(input(\" Informe a agencia que deseja depositar: \"));\n acc_deposit = str(input(\" Informe a conta que deseja depositar: \"));\n deposit_ok = self.authacc.deposit(deposit_amount, ag_deposit, acc_deposit);\n else:\n deposit_ok = self.authacc.deposit(deposit_amount);\n \n if(deposit_ok):\n print(\"\\n\\n Depósito realizado com sucesso, aguarde recarregamento ...\");\n time.sleep(2);\n\n\n def withdrawMoney(self):\n return '';\n \n def transferMoney(self):\n print(\"----------------------------------------------------------------------------------------------\");\n print(\" PyLanguage ATM - transferência \"); \n print(\"----------------------------------------------------------------------------------------------\");\n print(\"AG: \"+self.authacc.getAgency());\n print(\"C/C: \"+self.authacc.getAccountNumber());\n print(\"-----------------------------------------------------------------------------------------------\");\n ag_transf = str(input(\"Informe a agencia para a transferência: \"));\n acc_transf = str(input(\"Informe a conta para a transferência: \"));\n transf_amount = float(input(\"Informe a quantia à ser transferida: \"));\n \n self.authacc.transfer(ag_transf, acc_transf, transf_amount);\n print(\" \\n\\n Trasnferência realizada com sucesso, aguarde recarregamento ...\");\n time.sleep(2);\n\n def editPersonalInfo(self):\n return '';\n\n def requestAuth(self):\n self.drawWelcome();\n auth_ag = str(input(\" Informe sua agência: \"));\n auth_account = str(input(\" Informe sua c/c: \"));\n auth_pwd = str(passExtractor.getpass());\n print(\"\\n => Validando informações, aguade ...\");\n time.sleep(2);\n self.authacc = ATMAccount();\n # check if authentication succeds\n if self.authacc.authenticate(auth_ag, auth_account, auth_pwd):\n self.initialize();\n else:\n print(\" Desculpe, não foi possivel realizar a autenticação de seus dados. 
Reiniciando processo, aguarde ...\");\n time.sleep(3);\n self.initialize();\n \n def logout(self):\n self.clearScreen();\n print(\"----------------------------------------------------------------------------------------------\");\n print(\" PyLanguage ATM - sair (Ag: \"+self.authacc.getAgency()+\" C/C:\"+self.authacc.getAccountNumber()+\")\");\n print(\"----------------------------------------------------------------------------------------------\");\n conf_logout = 'x';\n while(conf_logout != 'S' and conf_logout != 'N'):\n print(\"\\n\"+self.authacc.getUserFullname()+\" deseja realmente sair ?\");\n conf_logout = input(\"\\nPressione [S/N]: \");\n if(conf_logout == 'N'):\n self.act_option = 0;\n elif(conf_logout == 'S'):\n self.authacc.closeDB();\n print(\"Obrigado pela visita, volte sempre !\");\n time.sleep(1); \n \n def clearScreen(self):\n if os.name == 'nt':\n os.system('cls');\n else:\n os.system('clear');\n\n\n\n\n\n \n ","sub_path":"atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"155963994","text":"# /*\n# * Copyright (C) 2020 ACTUAL Systems, Inc.\n# *\n# * http://www.actualhq.com\n# *\n# * In collaboration with UNICEF/Giga: https://gigaconnect.org\n# *\n# * Licensed under the Apache License, Version 2.0 (the \"License\");\n# * you may not use this file except in compliance with the License.\n# * You may obtain a copy of the License at\n# *\n# * http://www.apache.org/licenses/LICENSE-2.0\n# *\n# * Unless required by applicable law or agreed to in writing, software\n# * distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n# */\n\n#General project inputs framework, customized for Rwanda\n#Create a new file for each country\n#To be extended with more granular regional\n\nimport pandas as pd\n\n#Dictionary set up each variable as an array with [median, -3 std range, +3 std %age]\n#Site specific project Inputs\n\n#Configuration - are these elements used in the project?\nconfiguration = {\n \"P0\" : 1,\n \"P1\" : 1\n}\n\nusage = {\n\n \"EMIS_allowableTransferTime\" : 4, #hrs\n \"allowableWebsiteLoadingTime\" : 20, #seconds\n \"Allowable Document Loading Time\" : 60, #seconds\n \"Allowable Completed Assignments Loading Time\" : 10, #seconds\n \"Peak Hours\" : 9,\n \"Size of Website\" : 700, #KB\n \"Size of Document\" : 200, #KB\n \"Google Docs Bandwidth\" : 20, #kbps\n \"Internet Browsing Bandwidth\" : 1, #Mbps\n \"Video Data Rate (480p)\" : 0.59, #Mbps\n \"Teacher Prep Hours\" : 4 #Hours\n}\n\nAssignments = {\n \"Student Prep Time\" : 4, #hours\n \"Teacher Research Time\" : 0.25, #Hours\n \"Number of Daily Assignments Per Student\" : 2,\n \"Student Research Time\" : 1, #Hours\n \"Student Assignments Time\" : 2, #hours\n \"Time to Grade One Assignment\" : 5 #minutes, per assignment\n}\n\nCommunity = {\n \"Fraction of Community Using School Internet\" : 0.2,\n \"Session Length\" : 30, #mins\n \"Weekly Sessions\" : 2,\n \"Community Access Hours\" : 8, #hrs, daily\n \"Contention\" : 25 #number of people sharing a \"slot\" of bandwidth\n}\n\nLessonPlanning = {\n \"Weekly Planning Time\" : 5, # hrs\n \"Fraction of Planning Time Browsing\" : 0.2\n}\n\nTelemedicine = {\n \"Annual Checkups\" : 1, # well-visits\n \"Illness per Year\" : 2, #number of times annually that 
community members needs a consult\n \"Consults per Illness\" : 3, #average number of consults when someone gets ill - assume onset, mid-course correction, closeout\n \"Consult time\" : 0.17, #hrs/patient; 10 minutes per consult\n \"Consult hours\" : 2 #daily number of hours that doctors are available for consultation\n}\n\nEMIS = {\n\n #[Nominal, -3 SD %age, +3 SD %age, frequency, unit of measure]\n \"adminEnrollment\" : pd.Series([500,0.1,0.1,12,'School']), # Size of enrollment data`\n \"adminCohort\" : pd.Series([100,0.1,0.2,4,'Students']), #Class-by-class (cohort) data\n \"adminBehavioral\" : pd.Series([100,0.1,0.3,12,'Students']), #Disciplnary records, etc\n \"adminSpecialNeeds\" : pd.Series([100,0.1,1,12,'Students']), #\n \"adminAdministrativeIndicators\" : pd.Series([1000,0.1,0.7,4,'School']), #\n \"adminFinancialData\" : pd.Series([10000,0.1,0.5,12,'School']), #High level finances\n\n \"financialBudget\" : pd.Series([10000,0.3,0.3,12,'School']), #\n \"financialSchool Fees\" : pd.Series([100,0.2,0.2,4,'Students']), #\n \"financialSupply and inventory\" : pd.Series([1000,0.2,0.2,12,'School']), #\n\n \"hrSalaries\" : pd.Series([10,0.1,0.1,4,'Employees']), #\n \"hrEmployee profiles\" : pd.Series([100,0.1,0.1,4,'Employees']), #\n \"hrProfessional development data\" : pd.Series([100,0.1,0.3,12,'Teachers']), #\n \"hrCertification and training data\" : pd.Series([100,0.1,0.7,4,'Teachers']), #\n \"hrDisciplinary records\" : pd.Series([100,0.1,0.1,12,'Employees']), #\n\n \"outcomesGrades\" : pd.Series([100,0.1,1,12,'Students']), #\n \"outcomesNational assessments\" : pd.Series([100,0.5,1,4,'Students']), #\n \"outcomesClassroom assessments\" : pd.Series([1000,0.2,1,4,'Classroom']) #\n}\n\nPortal = {\n\n \"Voter Registration\" : pd.Series([1200,0.1,0.1,'Clustered',20,'Individuals']),\n \"ID Renewal\" : pd.Series([800,0.1,0.1,'Uniform',365,'Individuals']),\n \"Annual Taxes\" : pd.Series([4500,0.1,0.1,'Clustered',5,'Households']),\n \"Bill payments\" : pd.Series([500,0.1,0.1,'Uniform',2,'Households']),\n \"Complaints and Reporting\" : pd.Series([500,0.1,0.1,'Uniform',365,'Individuals']),\n \"e-Petitions\" : pd.Series([500,0.1,0.1,'Uniform',60,'Individuals']),\n}\n\n\ndemo = {\n \"studentTeacherRatio\" : 30,\n \"teacherClassroomRatio\" : 1.2, #20% of teachers not teaching during a given period\n \"schoolAgeFraction\" : 0.354, #35% of Rwandans between ages 5-19 - https://www.populationpyramid.net/rwanda/2020/\n \"schoolEnrollmentFraction\" : 0.966, #96.6% of eligible Rwandans enrolled in school\n \"peoplePerHousehold\" : 6,\n \"labor_cost_skilled\" : 15, #usd/hr\n \"labor_cost_regular\" : 1 #usd/hr\n}\n","sub_path":"projectInputsRwanda.py","file_name":"projectInputsRwanda.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"619595088","text":"import json\n\nfrom django.test import Client\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.conf import settings\n\nfrom tworaven_apps.ta2_interfaces.ta2_util import format_info_for_request\nfrom tworaven_apps.utils.msg_helper import msgt\nfrom tworaven_apps.ta2_interfaces.models import STATUS_VAL_OK,\\\n STATUS_VAL_FAILED_PRECONDITION\nfrom tworaven_apps.ta2_interfaces.req_start_session import ERR_MSG_NO_USER_AGENT\nfrom tworaven_apps.ta2_interfaces.req_end_session import ERR_NO_SESSION_ID\nfrom tworaven_apps.raven_auth.models import User\n\nclass DescribeDataFlowTest(TestCase):\n\n def setUp(self):\n # Set it to internal testing 
mode\n settings.TA2_STATIC_TEST_MODE = True\n\n # test client\n self.client = Client()\n\n user_obj = User.objects.get_or_create(username='dev_admin')[0]\n self.client.force_login(user_obj)\n\n def test_10_good_request(self):\n \"\"\"(10) Test endpoint used by UI, with successful result\"\"\"\n msgt(self.test_10_good_request.__doc__)\n\n # url and info for call\n #\n pipeline_id = 'pipeline_222'\n url = reverse('DescribeDataflow')\n info = dict(context=dict(sessionId='session_01'),\n pipelineId=pipeline_id)\n\n response = self.client.post(url,\n json.dumps(info),\n content_type=\"application/json\")\n\n # 200 response\n #\n self.assertEqual(response.status_code, 200)\n\n # convert to JSON\n #\n json_resp = response.json()\n #print('json_resp', json_resp)\n\n # status code 'OK'\n #\n self.assertEqual(json_resp['responseInfo']['status']['code'],\n STATUS_VAL_OK)\n\n\n # pipelineId matches\n #\n self.assertEqual(json_resp['pipelineId'],\n pipeline_id)\n\n # 2 modules found\n #\n self.assertTrue(len(json_resp['modules']), 2)\n\n # 2 connections found\n #\n self.assertTrue(len(json_resp['connections']), 2)\n\n\n\n def test_20_send_badvar_name(self):\n \"\"\"(20) Forget pipeline id and fail\"\"\"\n msgt(self.test_20_send_badvar_name.__doc__)\n\n\n # url and info for call\n #\n url = reverse('DescribeDataflow')\n info = dict(context=dict(xsessionId='session_01'))\n\n response = self.client.post(url,\n json.dumps(info),\n content_type=\"application/json\")\n\n # 200 response\n #\n self.assertEqual(response.status_code, 200)\n\n # convert to JSON\n #\n json_resp = response.json()\n #print('json_resp', json.dumps(json_resp, indent=4))\n\n # status code 'FAILED_PRECONDITION'\n #\n self.assertEqual(json_resp['responseInfo']['status']['code'],\n STATUS_VAL_FAILED_PRECONDITION)\n\n # error message found\n #\n err_snippet = (\"Message type \\\"SessionContext\\\"\"\n \" has no field named \\\"xsessionId\\\"\")\n idx = json_resp['responseInfo']['status']['details'].find(err_snippet)\n self.assertTrue(idx > -1)\n","sub_path":"tworaven_apps/ta2_interfaces/testing/test_describe_data_flow.py","file_name":"test_describe_data_flow.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"442094116","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nsearch.py\n\nProvides functions to interact with the search feature of the IMDb iPhone API.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport urllib\n\nfrom .base import instance\n\ndef search(term, extra=None):\n i = instance()\n\n arg = {\"q\": term}\n if extra:\n arg.update(extra)\n result_list = i.make_request('/find', arg)['results']\n results = {}\n for r in result_list:\n results[r['label']] = r['list']\n return results\n\ndef title_search(term, extra=None):\n res = search(term, extra=extra)\n for key in res.keys():\n if 'Title' not in key:\n del res[key]\n return res\n \ndef name_search(term, extra=None):\n \"\"\"Search names only\"\"\"\n res = search(term, extra=extra)\n for key in res.keys():\n if 'Name' not in key:\n del res[key]\n return res\n","sub_path":"imdb_api/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"252863688","text":"# coding=utf-8\r\n\"\"\"\r\nRLCw Model\r\nchecked by xurj\r\n2018/6/3\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport cPickle 
as pkl\r\nimport copy\r\n\r\nimport utils\r\nimport config\r\n\r\n\r\nclass Chatbot(object):\r\n \"\"\"\r\n RLCw model\r\n \"\"\"\r\n\r\n def __init__(self, dim_wordvec, n_words, dim_hidden, batch_size,\r\n n_encode_lstm_step, n_decode_lstm_step, bias_init_vector=None,\r\n lr=0.0001, n_step=config.n_step, ex_batch=3):\r\n \"\"\"\r\n h_: hyper parameter\r\n v_: tensorflow variable\r\n :param dim_wordvec:\r\n :param n_words:\r\n :param dim_hidden:\r\n :param batch_size:\r\n :param n_encode_lstm_step:\r\n :param n_decode_lstm_step:\r\n :param bias_init_vector:\r\n :param lr:\r\n :param n_step: number of simulated training turns minus 1\r\n :param ex_batch: number of samples for the reward baseline; set to 1 when not training\r\n \"\"\"\r\n '''\r\n set up hyper parameters\r\n '''\r\n self.h_dim_wordvec = dim_wordvec\r\n self.h_dim_hidden = dim_hidden\r\n self.h_batch_size = batch_size\r\n self.h_ex_batch_size = ex_batch * batch_size\r\n self.h_n_words = n_words\r\n self.h_n_encode_lstm_step = n_encode_lstm_step\r\n self.h_n_decode_lstm_step = n_decode_lstm_step\r\n self.h_lr = lr\r\n self.h_n_step = n_step\r\n\r\n '''\r\n set up variables\r\n '''\r\n # with tf.device(\"/cpu:0\"):\r\n self.v_word2vector = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1),\r\n name='Wemb')\r\n self.v_lstm1 = tf.contrib.rnn.BasicLSTMCell(dim_hidden, state_is_tuple=False)\r\n self.v_lstm2 = tf.contrib.rnn.BasicLSTMCell(dim_hidden, state_is_tuple=False)\r\n self.v_encode_vector_W = tf.Variable(tf.random_uniform([dim_wordvec, dim_hidden], -0.1, 0.1),\r\n name='encode_vector_W')\r\n self.v_encode_vector_b = tf.Variable(tf.zeros([dim_hidden]),\r\n name='encode_vector_b')\r\n self.v_embed_word_W = tf.Variable(tf.random_uniform([dim_hidden, n_words], -0.1, 0.1),\r\n name='embed_word_W')\r\n if bias_init_vector is not None:\r\n self.v_embed_word_b = tf.Variable(bias_init_vector.astype(np.float32),\r\n name='embed_word_b')\r\n else:\r\n self.v_embed_word_b = tf.Variable(tf.zeros([n_words]),\r\n name='embed_word_b')\r\n\r\n '''\r\n keyword related\r\n '''\r\n # keywords hyper parameters\r\n self.h_n_kw = config.n_kw\r\n self.h_i2kw = {}\r\n self.h_kw2i = {}\r\n\r\n # keywords variables\r\n self.v_keywords_W = tf.Variable(tf.random_uniform([dim_wordvec, dim_hidden], -0.1, 0.1),\r\n name='keywords_W')\r\n self.v_keywords_b = tf.Variable(tf.zeros([dim_hidden]),\r\n name='keywords_b')\r\n self.v_keyword2vector = tf.get_variable(\r\n 'keyword_emb', shape=(self.h_n_kw, dim_wordvec), dtype=tf.float32) # trainable, can be reloaded\r\n\r\n # load keyword pickle\r\n with open(config.kw_path, 'rb') as f:\r\n kw_emb = pkl.load(f)\r\n kws = kw_emb.keys() # key order from the same pickle is consistent across loads, not random\r\n # output top 20 keywords in stored order\r\n for kw in kws[:20]:\r\n assert type(kw) == unicode\r\n print(kw.encode('utf-8'))\r\n # set up keyword dictionary\r\n for i, kw in enumerate(kws):\r\n assert type(kw) == unicode\r\n self.h_i2kw[i] = kw\r\n self.h_kw2i[kw] = i\r\n\r\n def build_model(self):\r\n \"\"\"\r\n mp_: model placeholder\r\n mr_: model result\r\n\r\n build training part of this model\r\n responsible for probability gathering\r\n calculate entropy without gradient as part of reward TODO not reasonable\r\n calculate total_loss\r\n can act as an independent module\r\n use keyword sampled by build_generator\r\n :return: total_loss and ordinary cross entropy for tensorboard summary\r\n \"\"\"\r\n self.mp_batch_x = tf.placeholder(tf.float32,\r\n [self.h_ex_batch_size, self.h_n_encode_lstm_step, self.h_dim_wordvec])\r\n self.mp_rewards = 
tf.placeholder(tf.float32,\r\n [self.h_ex_batch_size])\r\n self.mp_caption = tf.placeholder(tf.int32,\r\n [self.h_ex_batch_size, self.h_n_decode_lstm_step + 1])\r\n self.mp_caption_mask = tf.placeholder(tf.float32,\r\n [self.h_ex_batch_size, self.h_n_decode_lstm_step + 1])\r\n\r\n word_vectors_flat = tf.reshape(self.mp_batch_x, [-1, self.h_dim_wordvec])\r\n wordvec_emb = tf.nn.xw_plus_b(word_vectors_flat, self.v_encode_vector_W, self.v_encode_vector_b)\r\n wordvec_emb = tf.reshape(wordvec_emb, [self.h_ex_batch_size, self.h_n_encode_lstm_step, self.h_dim_hidden])\r\n\r\n state1 = tf.zeros([self.h_ex_batch_size, self.v_lstm1.state_size])\r\n state2 = tf.zeros([self.h_ex_batch_size, self.v_lstm2.state_size])\r\n padding = tf.zeros([self.h_ex_batch_size, self.h_dim_hidden])\r\n\r\n '''\r\n Encoding Stage\r\n '''\r\n for i in range(0, self.h_n_encode_lstm_step):\r\n with tf.variable_scope(\"LSTM1\", reuse=True):\r\n output1, state1 = self.v_lstm1(wordvec_emb[:, i, :], state1)\r\n with tf.variable_scope(\"LSTM2\", reuse=True):\r\n output2, state2 = self.v_lstm2(tf.concat([padding, output1], 1), state2)\r\n\r\n '''\r\n Keyword Stage\r\n '''\r\n kw_info, self.mp_kw_index, _, self.mr_kw_probs, self.mp_kw_emb_history, self.mr_kw_avg_sim_log \\\r\n = self.get_kw(output1, reuse=True, b_simulate=False)\r\n\r\n '''\r\n Decoding Stage\r\n '''\r\n batch_entropy = tf.zeros([self.h_ex_batch_size])\r\n for i in range(0, self.h_n_decode_lstm_step):\r\n # with tf.device(\"/cpu:0\"):\r\n current_embed = tf.nn.embedding_lookup(self.v_word2vector, self.mp_caption[:, i])\r\n with tf.variable_scope(\"LSTM1\", reuse=True):\r\n output1, state1 = self.v_lstm1(kw_info, state1)\r\n with tf.variable_scope(\"LSTM2\", reuse=True):\r\n output2, state2 = self.v_lstm2(tf.concat([current_embed, output1], 1), state2)\r\n\r\n # get logit\r\n labels = tf.expand_dims(self.mp_caption[:, i + 1], 1)\r\n indices = tf.expand_dims(tf.range(0, self.h_ex_batch_size), 1)\r\n concat_indices = tf.concat([indices, labels], 1)\r\n onehot_labels = tf.sparse_to_dense(concat_indices, tf.stack([self.h_ex_batch_size, self.h_n_words]),\r\n 1.0, 0.0)\r\n logit_words = tf.nn.xw_plus_b(output2, self.v_embed_word_W, self.v_embed_word_b)\r\n\r\n # calculate entropy\r\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit_words, labels=onehot_labels)\r\n batch_entropy += cross_entropy * self.mp_caption_mask[:, i]\r\n\r\n '''\r\n Loss and Training Stage\r\n '''\r\n # [ex_batch_size]\r\n length = tf.reduce_sum(self.mp_caption_mask, axis=-1)\r\n # [ex_batch_size]\r\n self.mr_entropy = batch_entropy / length\r\n # [ex_batch_size]\r\n self.mr_entropy = tf.stop_gradient(self.mr_entropy)\r\n # [ex_batch_size]\r\n self.multi_reward = self.mp_rewards - self.mr_entropy\r\n # [batch_size] reward bias\r\n reward_avg = tf.reduce_mean(tf.reshape(self.multi_reward, [3, self.h_batch_size]), axis=0)\r\n # [ex_batch_size]\r\n self.reward_avg = tf.concat([reward_avg, reward_avg, reward_avg], axis=0)\r\n # [ex_batch_size] reward after apply bias\r\n self.reward = self.multi_reward - self.reward_avg\r\n # []\r\n loss = tf.reduce_sum(-tf.log(self.mr_kw_probs) * self.reward) / self.h_ex_batch_size\r\n # []\r\n true_loss = tf.reduce_sum(batch_entropy) / self.h_ex_batch_size\r\n\r\n with tf.variable_scope(tf.get_variable_scope(), reuse=False):\r\n # train_op = tf.train.RMSPropOptimizer(self.lr).minimize(total_loss)\r\n # train_op = tf.train.GradientDescentOptimizer(self.lr).minimize(total_loss)\r\n optimizer = tf.train.AdamOptimizer(self.h_lr)\r\n train_op = 
optimizer.minimize(loss)\r\n\r\n grads8vars = optimizer.compute_gradients(loss)\r\n print('[INFORMATION: gradient of defined total_loss]')\r\n [print(g, v) for g, v in grads8vars]\r\n print('[INFORMATION END]')\r\n\r\n self.train_op = train_op\r\n self.loss = loss\r\n self.true_loss = true_loss\r\n return loss, true_loss\r\n\r\n def build_generator(self, b_simulate=True):\r\n \"\"\"\r\n build generating part of this model\r\n calculate keyword similarity as part of reward\r\n do not calculate total_loss\r\n act as a single graph (can be run by SESS.run)\r\n sample keyword\r\n :param b_simulate: for test / simulation, use argmax rather than multinomial for action\r\n :return: no return\r\n \"\"\"\r\n self.gp_batchx = tf.placeholder(tf.float32,\r\n [self.h_ex_batch_size, self.h_n_encode_lstm_step, self.h_dim_wordvec])\r\n word_vectors_flat = tf.reshape(self.gp_batchx, [-1, self.h_dim_wordvec])\r\n wordvec_emb = tf.nn.xw_plus_b(word_vectors_flat, self.v_encode_vector_W, self.v_encode_vector_b)\r\n wordvec_emb = tf.reshape(wordvec_emb, [self.h_ex_batch_size, self.h_n_encode_lstm_step, self.h_dim_hidden])\r\n\r\n state1 = tf.zeros([self.h_ex_batch_size, self.v_lstm1.state_size])\r\n state2 = tf.zeros([self.h_ex_batch_size, self.v_lstm2.state_size])\r\n padding = tf.zeros([self.h_ex_batch_size, self.h_dim_hidden])\r\n\r\n '''\r\n Encoding Stage\r\n '''\r\n for i in range(0, self.h_n_encode_lstm_step):\r\n with tf.variable_scope(\"LSTM1\", reuse=i > 0):\r\n output1, state1 = self.v_lstm1(wordvec_emb[:, i, :], state1)\r\n with tf.variable_scope(\"LSTM2\", reuse=i > 0):\r\n output2, state2 = self.v_lstm2(tf.concat([padding, output1], 1), state2)\r\n\r\n '''\r\n Keyword Stage\r\n '''\r\n self.gr_kw_info, self.gr_kw_index, self.gr_kw_emb, self.gr_probs, self.gp_kw_emb_history, \\\r\n self.gr_kw_avg_sim_log = self.get_kw(output1, reuse=None, b_simulate=b_simulate)\r\n '''\r\n Decoding Stage\r\n '''\r\n generated_words = []\r\n for i in range(0, self.h_n_decode_lstm_step):\r\n if i == 0:\r\n # with tf.device('/cpu:0'):\r\n current_embed = tf.nn.embedding_lookup(self.v_word2vector,\r\n tf.ones([self.h_ex_batch_size], dtype=tf.int64))\r\n with tf.variable_scope(\"LSTM1\", reuse=True):\r\n output1, state1 = self.v_lstm1(self.gr_kw_info, state1)\r\n with tf.variable_scope(\"LSTM2\", reuse=True):\r\n output2, state2 = self.v_lstm2(tf.concat([current_embed, output1], 1), state2)\r\n logit_words = tf.nn.xw_plus_b(output2, self.v_embed_word_W, self.v_embed_word_b)\r\n max_prob_index = tf.argmax(logit_words, 1)\r\n\r\n # with tf.device(\"/cpu:0\"):\r\n current_embed = tf.nn.embedding_lookup(self.v_word2vector, max_prob_index)\r\n\r\n '''\r\n generated sentence sample gathering\r\n '''\r\n generated_words.append(max_prob_index)\r\n\r\n # [batch_size, stc_len]\r\n self.gr_words = tf.stack(generated_words, axis=1)\r\n\r\n def get_kw(self, output1, reuse=None, b_simulate=False):\r\n \"\"\"\r\n keyword getting process\r\n act as part of graph in build_model and build_generator\r\n 1 placeholders here: kw_index (only for build_model because only sample once)\r\n :param output1: last hidden state after context+query passing through first encoder layer\r\n :param reuse: tf variable unify reuse management, True for build_model, None for generator\r\n (because we build generator first)\r\n :param b_simulate: set true when simulate. When True, use argmax instead of multinomial\r\n :return: keyword related information.\r\n note that we do not create class attribute (self....) 
here in case of naming conflict\r\n \"\"\"\r\n # [ex_batch_size, dim_hidden]\r\n output1 = tf.stop_gradient(output1) # block the gradient\r\n\r\n with tf.variable_scope(\"get_kw\", reuse=reuse):\r\n\r\n ''' calculate logits and probs '''\r\n # [ex_batch_size, n_kw]\r\n logits = tf.layers.dense(output1, self.h_n_kw, name='kw_prob_dense', activation=tf.nn.tanh,\r\n kernel_initializer=tf.contrib.layers.xavier_initializer())\r\n # [ex_batch_size, n_kw]\r\n prob = tf.nn.softmax(logits)\r\n\r\n ''' get keyword index '''\r\n if reuse: # build_model sets reuse=True\r\n kw_index = tf.placeholder(dtype=tf.int64, shape=[self.h_ex_batch_size, 1])\r\n else: # build_generator sets reuse=None\r\n if b_simulate:\r\n # [ex_batch_size]\r\n kw_index_1d = tf.argmax(logits, axis=1)\r\n # [ex_batch_size, 1]\r\n kw_index = tf.expand_dims(kw_index_1d, axis=1)\r\n else:\r\n # [ex_batch_size, 1]\r\n kw_index = tf.multinomial(logits, 1)\r\n\r\n ''' get keyword embeddings '''\r\n # [ex_batch_size, dim_wordvec]\r\n kw_emb = tf.nn.embedding_lookup(self.v_keyword2vector, tf.reshape(kw_index, [self.h_ex_batch_size]))\r\n\r\n ''' get p(keyword) '''\r\n # [self.h_ex_batch_size, 1]\r\n kw_batch_loc = tf.constant([[e] for e in range(self.h_ex_batch_size)], dtype=tf.int64)\r\n # [self.h_ex_batch_size, 2]\r\n kw_indices = tf.concat([kw_batch_loc, kw_index], axis=-1)\r\n # [self.h_ex_batch_size]\r\n kw_probs = tf.gather_nd(prob, kw_indices)\r\n\r\n ''' generate keyword information '''\r\n kw_info = tf.nn.xw_plus_b(kw_emb, self.v_keywords_W, self.v_keywords_b)\r\n\r\n ''' calculate keyword-keyword similarity in one turn '''\r\n kw_emb_history = tf.placeholder(tf.float32, [None, self.h_ex_batch_size, self.h_dim_wordvec])\r\n # [1, ex_batch_size, dim_wordvec]\r\n kw_emb_3d = tf.expand_dims(kw_emb, axis=0)\r\n # [None, ex_batch_size]\r\n kw_sims = self.cosine_similarity(kw_emb_3d, kw_emb_history)\r\n # [ex_batch_size]\r\n kw_avg_sim = tf.reduce_mean(kw_sims, axis=0)\r\n # [ex_batch_size]\r\n kw_avg_sim_log = tf.log((kw_avg_sim + 1.) / 2 + 1e-33)\r\n\r\n return kw_info, kw_index, kw_emb, kw_probs, kw_emb_history, kw_avg_sim_log\r\n\r\n @staticmethod\r\n def cosine_similarity(batch_a, batch_b):\r\n \"\"\"\r\n calculate cosine similarity. It can deal with batches; 
only the last dimension is reduced.\r\n acts only as part of a graph; use it anywhere you like\r\n :param batch_a: tensor with shape [(..., batch_size,) dim_size], at least 1 dim\r\n :param batch_b: tensor with shape [(..., batch_size,) dim_size], must have the same dimensions as batch_a\r\n :return: cosine similarity result with shape [(..., batch_size)]\r\n \"\"\"\r\n numerator = tf.reduce_sum(batch_a * batch_b, axis=-1)\r\n denominator1 = tf.reduce_sum(batch_a * batch_a, axis=-1)\r\n denominator2 = tf.reduce_sum(batch_b * batch_b, axis=-1)\r\n result = numerator / tf.sqrt(denominator1 * denominator2 + 1e-20)\r\n return result\r\n\r\n def kw_stc_sim(self):\r\n \"\"\"\r\n calculate similarity between keyword and sentences (word emb level + reduce max)\r\n 3 placeholders here\r\n act as a single graph without any variables (can be run by SESS.run)\r\n :return: no return\r\n \"\"\"\r\n self.sp_kw_embs = tf.placeholder(\r\n tf.float32, [self.h_n_step + 1, self.h_ex_batch_size, self.h_dim_wordvec])\r\n self.sp_queries = tf.placeholder(\r\n tf.float32, [self.h_n_step + 2, self.h_ex_batch_size, self.h_n_encode_lstm_step / 2, self.h_dim_wordvec])\r\n self.sp_queries_mask = tf.placeholder(\r\n tf.float32, [self.h_n_step + 2, self.h_ex_batch_size, self.h_n_encode_lstm_step / 2])\r\n\r\n result = tf.zeros([self.h_ex_batch_size])\r\n mag = 1. # reward multiplier, decays with each turn\r\n for i in range(self.h_n_step + 1):\r\n # [batch, words, dim]\r\n query_embeddings = self.sp_queries[i, :, :, :]\r\n # [batch, words]\r\n query_mask = self.sp_queries_mask[i, :, :]\r\n # [batch, words, dim]\r\n response_embeddings = self.sp_queries[i + 1, :, :, :]\r\n # [batch, words]\r\n response_mask = self.sp_queries_mask[i + 1, :, :]\r\n # [batch, dim]\r\n kw_emb = self.sp_kw_embs[i, :, :]\r\n q_sims = []\r\n r_sims = []\r\n for j in range(self.h_n_encode_lstm_step / 2):\r\n # [batch, dim]\r\n q_emb = query_embeddings[:, j, :]\r\n # [batch]\r\n q_emb_mask = query_mask[:, j]\r\n # [batch, dim]\r\n r_emb = response_embeddings[:, j, :]\r\n # [batch]\r\n r_emb_mask = response_mask[:, j]\r\n # [batch]\r\n q_sims.append(tf.nn.relu(self.cosine_similarity(q_emb, kw_emb) * q_emb_mask))\r\n # [batch]\r\n r_sims.append(tf.nn.relu(self.cosine_similarity(r_emb, kw_emb) * r_emb_mask))\r\n\r\n q_sims = tf.stack(q_sims, axis=1)\r\n r_sims = tf.stack(r_sims, axis=1)\r\n # [batch_size]\r\n q_sim = tf.reduce_max(q_sims, axis=1)\r\n # [batch_size]\r\n r_sim = tf.reduce_max(r_sims, axis=1)\r\n # [batch_size]\r\n result += (tf.log(q_sim + 1e-33) + tf.log(r_sim + 1e-33)) * mag\r\n\r\n mag *= config.decay_factor # reward multiplier, decays with each turn\r\n\r\n self.sr_kw_sim_result = result / (self.h_n_step + 1)\r\n\r\n '''\r\n for training and validation\r\n '''\r\n\r\n def run(self, sess, summary, dr, i2w, w2i, word_vector, output, _is_train=True):\r\n \"\"\"\r\n train or valid procedure\r\n :param sess:\r\n :param summary:\r\n :param dr: data reader\r\n :param i2w: index to word\r\n :param w2i: word to index\r\n :param word_vector: word to vector\r\n :param output: output debug message or not\r\n :param _is_train: train or valid\r\n :return: loss result, summary result, all selected keyword\r\n \"\"\"\r\n # reward multiplier, decays with each turn\r\n sim_mag = config.decay_factor\r\n\r\n '''\r\n make init batch\r\n '''\r\n batch_x, batch_y, query, _ = dr.generate_training_batch_with_former(self.h_batch_size)\r\n assert type(batch_x[0]) == unicode\r\n assert type(batch_y[0]) == unicode\r\n assert type(query[0]) == unicode\r\n\r\n # for multi sample\r\n batch_x = batch_x + copy.deepcopy(batch_x) + 
copy.deepcopy(batch_x)\r\n batch_y = batch_y + copy.deepcopy(batch_y) + copy.deepcopy(batch_y)\r\n query = query + copy.deepcopy(query) + copy.deepcopy(query)\r\n\r\n feats = utils.make_batch_X(batch_x, self.h_n_encode_lstm_step, self.h_dim_wordvec, word_vector)\r\n caption_matrix, caption_masks = utils.make_batch_Y(batch_y, w2i, self.h_n_decode_lstm_step)\r\n\r\n # reserve sample and first batch x embeddings for real run (real run means run with train op)\r\n samples = [[x.encode('utf-8'), q.encode('utf-8')] for x, q in zip(batch_x, query)]\r\n ori_feats = copy.deepcopy(feats)\r\n\r\n '''\r\n first turn\r\n '''\r\n generated_words_index, kw_ix, kw_emb, g_kw_probs = sess.run(\r\n [self.gr_words, self.gr_kw_index, self.gr_kw_emb, self.gr_probs],\r\n feed_dict={\r\n self.gp_batchx: feats\r\n })\r\n kw_history_list = [kw_emb]\r\n kw_ixs = copy.deepcopy(kw_ix)\r\n kw_avg_sim = np.zeros([self.h_ex_batch_size])\r\n\r\n # reserve selected first turn keyword for real run\r\n f_kw_ix = copy.deepcopy(kw_ix)\r\n\r\n for i in range(self.h_n_step + 1):\r\n\r\n '''\r\n generate next batch and get generated reply sample\r\n '''\r\n new_sentences = []\r\n\r\n for idx, gw in enumerate(generated_words_index):\r\n\r\n words = []\r\n for index in gw:\r\n if index == 2:\r\n break\r\n word = i2w[index]\r\n words.append(word)\r\n\r\n sent = ' '.join(words)\r\n new_sentences.append(sent)\r\n if type(query[idx]) == unicode:\r\n query[idx] = query[idx].encode('utf-8')\r\n assert type(sent) == str\r\n batch_x[idx] = query[idx] + ' ' + sent\r\n\r\n for s, q in zip(samples, new_sentences):\r\n s.append(q)\r\n\r\n query = new_sentences\r\n\r\n '''\r\n the reply of the last turn must also be extracted, but no new generation round is started\r\n '''\r\n if i >= self.h_n_step:\r\n break\r\n\r\n '''\r\n generate a new turn\r\n '''\r\n feats = utils.make_batch_X(batch_x, self.h_n_encode_lstm_step, self.h_dim_wordvec, word_vector)\r\n generated_words_index, kw_ix, kw_emb, kw_sim_log = sess.run(\r\n [self.gr_words, self.gr_kw_index, self.gr_kw_emb, self.gr_kw_avg_sim_log],\r\n feed_dict={\r\n self.gp_batchx: feats,\r\n self.gp_kw_emb_history: kw_history_list\r\n })\r\n assert type(kw_sim_log) == np.ndarray\r\n assert len(kw_sim_log) == self.h_ex_batch_size\r\n kw_avg_sim += kw_sim_log\r\n kw_history_list.append(kw_emb)\r\n kw_ixs = np.concatenate((kw_ixs, kw_ix), axis=1)\r\n sim_mag *= config.decay_factor\r\n\r\n '''\r\n calculate keyword-sentence & keyword-keyword similarity\r\n '''\r\n all_queries = [['' for _ in range(self.h_ex_batch_size)] for _ in range(self.h_n_step + 2)]\r\n assert len(samples) == self.h_ex_batch_size\r\n for i, abatch in enumerate(samples):\r\n assert len(abatch) == self.h_n_step + 3\r\n for j, aquery in enumerate(abatch[1:]):\r\n assert type(aquery) == str\r\n all_queries[j][i] = aquery\r\n qs_emb_batches = [[] for _ in range(self.h_n_step + 2)]\r\n qs_emb_batches_mask = [[] for _ in range(self.h_n_step + 2)]\r\n assert len(all_queries) == self.h_n_step + 2\r\n for i, query_batch in enumerate(all_queries):\r\n assert len(query_batch) == self.h_ex_batch_size\r\n qs_emb_batches[i] = utils.make_batch_X(query_batch,\r\n self.h_n_encode_lstm_step / 2, self.h_dim_wordvec, word_vector)\r\n q_emb_mask = [[] for _ in range(self.h_ex_batch_size)]\r\n for j, aquery in enumerate(query_batch):\r\n assert type(aquery) == str\r\n mask_len = len(aquery.split())\r\n q_emb_mask[j] = [1. if k < mask_len else 0. 
for k in range(self.h_n_encode_lstm_step / 2)]\r\n assert len(q_emb_mask[j]) == self.h_n_encode_lstm_step / 2\r\n assert len(q_emb_mask) == self.h_ex_batch_size\r\n qs_emb_batches_mask[i] = q_emb_mask\r\n f_qs_emb_batches = np.asarray(qs_emb_batches, np.float32)\r\n f_qs_emb_batches_mask = np.array(qs_emb_batches_mask, np.float32)\r\n f_kw_embs = np.asarray(kw_history_list, np.float32)\r\n stc_kw_sims = sess.run(self.sr_kw_sim_result,\r\n feed_dict={\r\n self.sp_kw_embs: f_kw_embs,\r\n self.sp_queries: f_qs_emb_batches,\r\n self.sp_queries_mask: f_qs_emb_batches_mask\r\n })\r\n assert type(stc_kw_sims) == np.ndarray\r\n assert len(stc_kw_sims) == self.h_ex_batch_size\r\n kw_avg_sim /= self.h_n_step + 1.\r\n reward = stc_kw_sims + kw_avg_sim\r\n\r\n '''\r\n real run: real run means run with train op\r\n '''\r\n if _is_train:\r\n _, loss_val, t_summary, true_loss_val, entropies, kw_probs, m_kw_index \\\r\n , final_reward = sess.run(\r\n [self.train_op, self.loss, summary, self.true_loss, self.mr_entropy, self.mr_kw_probs,\r\n self.mp_kw_index, self.reward],\r\n feed_dict={\r\n self.mp_batch_x: ori_feats,\r\n self.mp_caption: caption_matrix,\r\n self.mp_caption_mask: caption_masks,\r\n self.mp_rewards: reward,\r\n self.mp_kw_index: f_kw_ix\r\n })\r\n else:\r\n loss_val, t_summary, true_loss_val, entropies, kw_probs, m_kw_index \\\r\n , final_reward = sess.run(\r\n [self.loss, summary, self.true_loss, self.mr_entropy, self.mr_kw_probs,\r\n self.mp_kw_index, self.reward],\r\n feed_dict={\r\n self.mp_batch_x: ori_feats,\r\n self.mp_caption: caption_matrix,\r\n self.mp_caption_mask: caption_masks,\r\n self.mp_rewards: reward,\r\n self.mp_kw_index: f_kw_ix\r\n })\r\n\r\n '''\r\n output debug message\r\n '''\r\n if output:\r\n sample_id = 0\r\n print('=====')\r\n for e in samples[sample_id]:\r\n print(e)\r\n for e in kw_ixs[sample_id]:\r\n print('kw: {}'.format(self.h_i2kw[e].encode('utf-8')))\r\n print('stc_kw_sim: {}'.format(stc_kw_sims[sample_id]))\r\n print('entropy: {}'.format(entropies[sample_id]))\r\n print('kw prob: {}'.format(kw_probs[sample_id]))\r\n print('stc_kw_sim: {}'.format(stc_kw_sims[sample_id]))\r\n print('avg_kw_sim: {}'.format(kw_avg_sim[sample_id]))\r\n print('final_reward: {}'.format(final_reward[sample_id]))\r\n\r\n print('->true_loss: {}'.format(true_loss_val))\r\n\r\n '''\r\n return result to train.py\r\n '''\r\n ret_kw_strs = [[self.h_i2kw[kw].encode('utf-8') for kw in case_kw_ixs] for case_kw_ixs in kw_ixs]\r\n return loss_val, t_summary, ret_kw_strs\r\n\r\n def valid(self, sess, summary, dr, ixtoword, wordtoix, word_vector, output):\r\n\r\n return self.run(sess, summary, dr, ixtoword, wordtoix, word_vector, output, _is_train=False)\r\n\r\n def test(self, sess, args_tuple, _):\r\n\r\n feats, kw = args_tuple\r\n generated_words_index, kw_index = sess.run(\r\n [self.gr_words, self.gr_kw_index],\r\n feed_dict={\r\n self.gp_batchx: feats\r\n })\r\n return generated_words_index, [self.h_i2kw[k[0]].encode('utf-8') for k in kw_index]\r\n","sub_path":"python/rlcw_model.py","file_name":"rlcw_model.py","file_ext":"py","file_size_in_byte":27563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"122191985","text":"import ctypes as C\nlib=C.cdll.LoadLibrary('./libenergy.so')\nlib.energy.restype=C.c_double\n\ndef energy(Lx, Ly, J1, J2, J3, Sz):\n S=0\n for x,ss in enumerate(Sz):\n for y,s in enumerate(ss):\n if s==1:\n mask=1<<(y*(Lx+1)+x)\n S=S|mask\n# S=[(C.c_int*(len(Sz[i])))(*Sz[i]) for i in range(len(Sz))]\n# 
S=(C.POINTER(C.c_int)*len(S))(*S)\n r=lib.energy(C.c_int(Lx), C.c_int(Ly), C.c_double(J1), C.c_double(J2), C.c_double(J3), C.c_ulonglong(S))\n return r\n\ndef genpattern(L,N,fname):\n lib=C.cdll.LoadLibrary('./libgenerate.so')\n lib.generate(C.c_int(L),C.c_int(N),C.c_int(0),C.c_char_p(fname))\n\nLx=3\nLy=2\n\nSz=[[-1 for i in range(Ly+1)] for j in range(Lx+1)]\nSz[0][0]=Sz[3][1]=Sz[1][0]=1\nr=energy(Lx, Ly, 1.0, 1.0, 0.0, Sz)\nprint(r)\n\ngenpattern(5,2,\"pat.csv\")","sub_path":"call.py","file_name":"call.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"513238991","text":"import random\nimport math\nimport numpy as np\nimport scipy.spatial\nimport matplotlib.pyplot as plt\nimport contextlib\nfrom matplotlib import animation as anim\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nfrom math import *\nimport time\nfrom mpl_toolkits import mplot3d\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# parameter\nN_SAMPLE = 100 # number of sample_points\nN_KNN = 15 # number of edges from one sampled point\nMAX_EDGE_LEN = 15.0 # [m] Maximum edge length\n\nshow_animation = True\n\n# ax = plt.axes(projection='3d')\nfig = plt.figure()\nax = fig.add_subplot(111, projection = '3d')\nclass Node:\n \"\"\"\n Node class for dijkstra search\n \"\"\"\n\n def __init__(self, x, y, z, cost, pind):\n self.x = x\n self.y = y\n self.z = z\n self.cost = cost\n self.pind = pind\n\n def __str__(self):\n return str(self.x) + \",\" + str(self.y) + \",\" + str(self.z) + \",\" + str(self.cost) + \",\" + str(self.pind)\n\n\nclass KDTree:\n \"\"\"\n Nearest neighbor search class with KDTree\n \"\"\"\n\n def __init__(self, data):\n # store kd-tree\n self.tree = scipy.spatial.cKDTree(data)\n\n def search(self, inp, k=1):\n \"\"\"\n Search NN\n\n inp: input data, single frame or multi frame\n\n \"\"\"\n\n if len(inp.shape) >= 2: # multi input\n index = []\n dist = []\n\n for i in inp.T:\n idist, iindex = self.tree.query(i, k=k)\n index.append(iindex)\n dist.append(idist)\n\n return index, dist\n\n dist, index = self.tree.query(inp, k=k)\n return index, dist\n\n def search_in_distance(self, inp, r):\n \"\"\"\n find points within a distance r\n \"\"\"\n\n index = self.tree.query_ball_point(inp, r)\n return index\n\n\ndef PRM_planning(sx, sy, sz, gx, gy, gz, ox, oy, oz, rr):\n time = 0\n while(1):\n time+=1\n obkdtree = KDTree(np.vstack((ox, oy, oz)).T)\n sample_x, sample_y, sample_z = sample_points(sx, sy, sz, gx, gy, gz, rr, ox, oy, oz, obkdtree)\n if show_animation:\n # ax.scatter3D(sample_x, sample_y, sample_z, color='yellow', s=10);\n ax.scatter(sample_x, sample_y, sample_z, color='y', marker=\"o\")\n # plt.draw()\n # plt.pause(10)\n if(time>2):\n print(len(ox))\n road_map = generate_roadmap(sample_x, sample_y, sample_z, rr, obkdtree)\n print(len(road_map))\n\n rx, ry, rz = dijkstra_planning(\n sx, sy, sz, gx, gy, gz, ox, oy, oz, rr, road_map, sample_x, sample_y, sample_z)\n if(len(rx)>0):\n break\n\n return rx, ry, rz\n\n\ndef is_collision(sx, sy, sz, gx, gy, gz, rr, okdtree):\n x = sx\n y = sy\n z = sz\n dx = gx - sx\n dy = gy - sy\n dz = gz - sz\n # yaw = math.atan2(gy - sy, gx - sx)\n theta = math.acos(dz/math.sqrt(dx**2+dy**2+dz**2))\n phi = math.atan2(dy, dx) # atan2 keeps the sign of dy (acos(dx/...) would lose it) and avoids division by zero\n d = math.sqrt(dx**2+dy**2+dz**2)\n\n if d >= MAX_EDGE_LEN:\n return True\n\n D = rr\n nstep = round(d / D)\n\n for i in range(nstep):\n idxs, dist = okdtree.search(np.array([x, y, z]).reshape(3, 1))\n if 
dist[0] <= rr:\n return True # collision\n x += D * math.sin(theta) * math.cos(phi) \n y += D * math.sin(theta) * math.sin(phi)\n z += D * math.cos(theta)\n\n # goal point check\n idxs, dist = okdtree.search(np.array([gx, gy, gz]).reshape(3, 1))\n if dist[0] <= rr:\n return True # collision\n\n return False # OK\n\n\ndef generate_roadmap(sample_x, sample_y, sample_z, rr, obkdtree):\n \"\"\"\n Road map generation\n\n sample_x: [m] x positions of sampled points\n sample_y: [m] y positions of sampled points\n rr: Robot Radius[m]\n obkdtree: KDTree object of obstacles\n \"\"\"\n\n road_map = []\n nsample = len(sample_x)\n skdtree = KDTree(np.vstack((sample_x, sample_y, sample_z)).T)\n\n for (i, ix, iy, iz) in zip(range(nsample), sample_x, sample_y, sample_z):\n\n index, dists = skdtree.search(\n np.array([ix, iy, iz]).reshape(3, 1), k=nsample)\n inds = index[0]\n edge_id = []\n # print(index)\n\n for ii in range(1, len(inds)):\n nx = sample_x[inds[ii]]\n ny = sample_y[inds[ii]]\n nz = sample_z[inds[ii]]\n\n if not is_collision(ix, iy, iz, nx, ny, nz, rr, obkdtree):\n edge_id.append(inds[ii])\n\n if len(edge_id) >= N_KNN:\n break\n\n road_map.append(edge_id)\n\n # plot_road_map(road_map, sample_x, sample_y)\n\n return road_map\n\n\ndef dijkstra_planning(sx, sy, sz, gx, gy, gz, ox, oy, oz, rr, road_map, sample_x, sample_y, sample_z):\n \"\"\"\n sx: start x position [m]\n sy: start y position [m]\n gx: goal x position [m]\n gy: goal y position [m]\n ox: x position list of Obstacles [m]\n oy: y position list of Obstacles [m]\n rr: robot radius [m]\n road_map: ??? [m]\n sample_x: ??? [m]\n sample_y: ??? [m]\n\n @return: Two lists of path coordinates ([x1, x2, ...], [y1, y2, ...]), empty list when no path was found\n \"\"\"\n # ax = plt.axes(projection='3d')\n nstart = Node(sx, sy, sz, 0.0, -1)\n ngoal = Node(gx, gy, gz, 0.0, -1)\n\n openset, closedset = dict(), dict()\n openset[len(road_map) - 2] = nstart\n path_found = True\n\n while True:\n # print(len(openset))\n if not openset:\n print(\"openset empty\")\n path_found = False\n break\n\n c_id = min(openset, key=lambda o: openset[o].cost)\n current = openset[c_id]\n\n # show graph\n # if show_animation and len(closedset.keys()) % 2 == 0:\n # # for stopping simulation with the esc key.\n # # plt.gcf().canvas.mpl_connect('key_release_event',\n # # lambda event: [exit(0) if event.key == 'escape' else None])\n # # ax.scatter3D(current.x, current.y, current.z, color='blue', s=10);\n # ax.scatter(current.x, current.y, current.z, color='blue', marker=\"o\");\n # # .plot(current.x, current.y, \"xg\")\n # # plt.pause(0.001)\n\n if c_id == (len(road_map) - 1):\n print(\"goal is found!\")\n ngoal.pind = current.pind\n ngoal.cost = current.cost\n break\n\n # Remove the item from the open set\n del openset[c_id]\n # Add it to the closed set\n closedset[c_id] = current\n\n # expand search grid based on motion model\n for i in range(len(road_map[c_id])):\n n_id = road_map[c_id][i]\n dx = sample_x[n_id] - current.x\n dy = sample_y[n_id] - current.y\n dz = sample_z[n_id] - current.z\n d = math.sqrt(dx**2 + dy**2 + dz**2)\n node = Node(sample_x[n_id], sample_y[n_id], sample_z[n_id], current.cost + d, c_id)\n\n if n_id in closedset:\n continue\n # Otherwise if it is already in the open set\n if n_id in openset:\n if openset[n_id].cost > node.cost:\n openset[n_id].cost = node.cost\n openset[n_id].pind = c_id\n else:\n openset[n_id] = node\n\n if path_found is False:\n return [], [], []\n\n # generate final course\n rx, ry, rz = [ngoal.x], [ngoal.y], [ngoal.z]\n pind 
= ngoal.pind\n while pind != -1:\n n = closedset[pind]\n rx.append(n.x)\n ry.append(n.y)\n rz.append(n.z)\n pind = n.pind\n\n return rx, ry, rz\n\n\ndef plot_road_map(road_map, sample_x, sample_y): # pragma: no cover\n\n for i, _ in enumerate(road_map):\n for ii in range(len(road_map[i])):\n ind = road_map[i][ii]\n ###########################################\n # ax.scatter3D(goal[0], goal[1], goal[2], color='green', s=10);\n # plt.plot([sample_x[i], sample_x[ind]],\n # [sample_y[i], sample_y[ind]], \"-k\")\n\n\ndef sample_points(sx, sy, sz, gx, gy, gz, rr, ox, oy, oz, obkdtree):\n maxx = gx+3\n maxy = gy+3\n maxz = gz+3\n minx = sx-3\n miny = sy-3\n minz = sz-3\n\n sample_x, sample_y, sample_z = [], [], []\n\n while len(sample_x) <= N_SAMPLE:\n tx = (random.random() * (maxx - minx)) + minx\n ty = (random.random() * (maxy - miny)) + miny\n tz = (random.random() * (maxz - minz)) + minz\n\n index, dist = obkdtree.search(np.array([tx, ty, tz]).reshape(3, 1))\n\n if dist[0] >= rr:\n sample_x.append(tx)\n sample_y.append(ty)\n sample_z.append(tz)\n\n sample_x.append(sx)\n sample_y.append(sy)\n sample_z.append(sz)\n sample_x.append(gx)\n sample_y.append(gy)\n sample_z.append(gz)\n\n return sample_x, sample_y, sample_z\n\ndef move_obs(ox,oy,oz):\n ox[-250:-1]+=0.10\n oy[-250:-1]+=0.10\n oz[-250:-1]+=0.10\n ox[-500:-250]-=0.10\n oy[-500:-250]-=0.10\n oz[-500:-250]-=0.10\n return ox,oy,oz\n\ndef main():\n print(__file__ + \" start!!\")\n\n # start and goal position\n sx = 10.0 # [m]\n sy = 10.0 # [m]\n sz = 10.0\n gx = 50.0 # [m]\n gy = 50.0 # [m]\n gz = 50.0\n robot_size = 5.0 # [m]\n\n ox = []\n oy = []\n oz = []\n\n\n for i in range(60):\n for j in range(60):\n oy.append(i)\n ox.append(0.00)\n oz.append(j)\n for i in range(60):\n for j in range(60):\n oy.append(i)\n ox.append(60.00)\n oz.append(j)\n\n for i in range(60):\n for j in range(2,12):\n for k in range(20,25):\n oy.append(j)\n ox.append(i)\n oz.append(k)\n\n for i in range(60):\n for j in range(55,60):\n for k in range(40,50):\n oy.append(j)\n ox.append(i)\n oz.append(k)\n\n for i in range(20,30):\n for j in range(25,30):\n for k in range(20,25):\n oy.append(j)\n ox.append(i)\n oz.append(k)\n\n for i in range(30,40):\n for j in range(30,35):\n for k in range(35,40):\n oy.append(j)\n ox.append(i)\n oz.append(k)\n\n ox = np.array(ox).astype(float)\n oy = np.array(oy).astype(float)\n oz = np.array(oz).astype(float)\n\n while(1):\n ox,oy,oz = move_obs(ox,oy,oz)\n # print(ox[-1:-250])\n go = [gx,gy,gz]\n current_point1 = [sx,sy,sz]\n print(current_point1)\n dis = np.linalg.norm(np.asarray(go)-np.asarray(current_point1))\n print(dis)\n if(dis<5):\n print(\"Goal Reached\")\n break\n # gx = ((ogx-sx)*15)/(dis) + sx\n # gy = ((ogy-sy)*15)/(dis) + sy\n # gz = ((ogz-sz)*15)/(dis) + sz\n plt.cla()\n if show_animation:\n ax.scatter(ox, oy, oz, color='g', marker = \"o\")\n ax.scatter(sx, sy, sz, color='r', marker = \"^\");\n ax.scatter(gx, gy, gz, color='r', marker = \"^\");\n u = np.linspace(0, np.pi, 10)\n v = np.linspace(0, 2 * np.pi, 10)\n x = sx+15*np.outer(np.sin(u), np.sin(v))\n y = sy+15*np.outer(np.sin(u), np.cos(v))\n z = sz+15*np.outer(np.cos(u), np.ones_like(v))\n ax.plot_wireframe(x, y, z, color=\"b\")\n nox = []\n noy = []\n noz = []\n for i in range(len(ox)):\n obs = [ox[i],oy[i],oz[i]]\n current_point1 = [sx,sy,sz]\n if(np.linalg.norm(np.asarray(current_point1)-np.asarray(obs))<15):\n nox.append(ox[i])\n noy.append(oy[i])\n noz.append(oz[i])\n\n print(len(ox),len(nox))\n rx, ry, rz= PRM_planning(sx, sy, sz, gx, gy, gz, nox, noy, noz, 
robot_size)\n nx=rx[-2]\n ny=ry[-2]\n nz=rz[-2]\n sx = ((nx-sx)*15)/(dis) + sx\n sy = ((ny-sy)*15)/(dis) + sy\n sz = ((nz-sz)*15)/(dis) + sz\n if show_animation:\n ax.plot(np.array(rx),np.array(ry),np.array(rz))\n # print(rx)\n # ax.scatter3D(rx, ry, rz, color='red', s=10)\n ax.scatter(rx, ry, rz, color='r', marker = \"o\")\n # plt.figure()\n plt.draw()\n plt.pause(0.1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"PRM/local_prm3d.py","file_name":"local_prm3d.py","file_ext":"py","file_size_in_byte":12242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"355072950","text":"from collections import deque\nn=int(input())\ninfo=list()\nd=[[1, 0], [-1, 0], [0, 1], [0, -1]]\nvisit=[[0]*n for i in range(n)]\nfor i in range(n):\n info.append(list(input()))\nq=deque()\nq.append((0, 0))\nvisit[0][0]=1\nwhile q:\n now_i, now_j=q.popleft()\n for di, dj in d:\n nxt_i=now_i+di\n nxt_j=now_j+dj\n if 0<=nxt_iself._num_examples:\n # end of epoch\n self._epochs_completed+=1\n #Shuffle the data\n perm=np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images=self._images[perm]\n self._labels=self._labels[perm]\n # start the next epoch\n start=0\n self._index_in_epoch=batch_size\n assert batch_size<=self._num_examples\n end=self._index_in_epoch\n return self._images[start:end],self._labels[start:end]\n\ndef read_data_sets(fake_data=False,one_hot=False):\n class DataSets(object):\n pass\n data_sets=DataSets()\n if fake_data:\n data_sets.train=DataSet([],[],fake_data=True)\n data_sets.validation=DataSet([],[],fake_data=True)\n data_sets.test=DataSet([],[],fake_data=True)\n return data_sets\n train_data,validation_data,test_data=load_data()\n train_images,train_labels=train_data\n train_images=np.asarray(np.reshape(train_images,(train_images.shape[0],28,28,1)),dtype='float32')\n if one_hot:\n train_labels=dense_to_one_hot(train_labels)\n\n validation_images,validation_labels=validation_data\n validation_images=np.asarray(np.reshape(validation_images,(validation_images.shape[0],28,28,1)),dtype=\"float32\")\n if one_hot:\n validation_labels=dense_to_one_hot(validation_labels)\n\n test_images,test_labels=test_data\n test_images=np.asarray(np.reshape(test_images,(test_images.shape[0],28,28,1)),dtype=\"float32\")\n if one_hot:\n test_labels=dense_to_one_hot(test_labels)\n\n data_sets.train=DataSet(train_images,train_labels)\n data_sets.validation=DataSet(validation_images,validation_labels)\n data_sets.test=DataSet(test_images,test_labels)\n return data_sets\n\n\n\n\n\nif __name__==\"__main__\":\n train_set,valid_set,test_set=load_data()\n x=train_set[0]\n y=train_set[1]\n x=np.asarray(np.reshape(x,(x.shape[0],28,28,1)),dtype=\"float32\")\n one_hot=dense_to_one_hot(y)\n print(x.shape)\n \n data_sets=read_data_sets(one_hot=True)\n train=data_sets.train\n data,label=train.next_batch(20)\n \n pylab.imshow(np.reshape(x[10],(28,28)))\n \n\n","sub_path":"examples/MLP/mnist_data.py","file_name":"mnist_data.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"364083966","text":"# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport glob\nimport os\nimport xml.etree.ElementTree as ET\nfrom abc import abstractmethod\nfrom functools import 
total_ordering\n\nfrom pants.base.mustache import MustacheRenderer\nfrom pants.util import desktop\nfrom pants.util.dirutil import safe_mkdir_for\nfrom pants.util.meta import AbstractClass\nfrom pants.util.strutil import ensure_binary\n\n\n@total_ordering\nclass ReportTestSuite(object):\n \"\"\"Data object for a JUnit test suite\"\"\"\n\n def __init__(self, name, tests, errors, failures, skipped, time, testcases):\n self.name = name\n self.tests = int(tests)\n self.errors = int(errors)\n self.failures = int(failures)\n self.skipped = int(skipped)\n self.time = float(time)\n self.testcases = testcases\n\n def __lt__(self, other):\n if (self.errors, self.failures) > (other.errors, other.failures):\n return True\n elif (self.errors, self.failures) < (other.errors, other.failures):\n return False\n else:\n return self.name.lower() < other.name.lower()\n\n @staticmethod\n def success_rate(test_count, error_count, failure_count, skipped_count):\n if test_count:\n unsuccessful_count = error_count + failure_count + skipped_count\n return '{:.2f}%'.format((test_count - unsuccessful_count) * 100.0 / test_count)\n return '0.00%'\n\n @staticmethod\n def icon_class(test_count, error_count, failure_count, skipped_count):\n icon_class = 'test-passed'\n if test_count == skipped_count:\n icon_class = 'test-skipped'\n elif error_count > 0:\n icon_class = 'test-error'\n elif failure_count > 0:\n icon_class = 'test-failure'\n return icon_class\n\n def as_dict(self):\n d = self.__dict__\n d['success'] = ReportTestSuite.success_rate(self.tests, self.errors, self.failures,\n self.skipped)\n d['icon_class'] = ReportTestSuite.icon_class(self.tests, self.errors, self.failures,\n self.skipped)\n d['testcases'] = map(lambda tc: tc.as_dict(), self.testcases)\n return d\n\n\nclass ReportTestCase(object):\n \"\"\"Data object for a JUnit test case\"\"\"\n\n def __init__(self, name, time, failure, error, skipped):\n self.name = name\n self.time = float(time)\n self.failure = failure\n self.error = error\n self.skipped = skipped\n\n def icon_class(self):\n icon_class = 'test-passed'\n if self.skipped:\n icon_class = 'test-skipped'\n elif self.error:\n icon_class = 'test-error'\n elif self.failure:\n icon_class = 'test-failure'\n return icon_class\n\n def as_dict(self):\n d = {\n 'name': self.name,\n 'time': self.time,\n 'icon_class': self.icon_class()\n }\n if self.error:\n d['message'] = self.error['message']\n elif self.failure:\n d['message'] = self.failure['message']\n return d\n\n\nclass JUnitHtmlReportInterface(AbstractClass):\n \"\"\"The interface JUnit html reporters must support.\"\"\"\n\n @abstractmethod\n def report(self):\n \"\"\"Generate the junit test result report and return its path.\"\"\"\n\n @abstractmethod\n def maybe_open_report(self):\n \"\"\"Open the junit test result report if requested by the end user.\"\"\"\n\n\nclass NoJunitHtmlReport(JUnitHtmlReportInterface):\n \"\"\"JUnit html reporter that never produces a report.\"\"\"\n\n def report(self):\n return None\n\n def maybe_open_report(self):\n pass\n\n\nclass JUnitHtmlReport(JUnitHtmlReportInterface):\n \"\"\"Generates an HTML report from JUnit TEST-*.xml files\"\"\"\n\n @classmethod\n def create(cls, xml_dir, logger):\n return cls(xml_dir=xml_dir, report_dir=os.path.join(xml_dir, 'reports'), logger=logger)\n\n def __init__(self, xml_dir, report_dir, logger):\n self._xml_dir = xml_dir\n self._report_file_path = os.path.join(report_dir, 'junit-report.html')\n self._logger = logger\n\n def report(self):\n self._logger.debug('Generating JUnit HTML 
report...')\n testsuites = self._parse_xml_files(self._xml_dir)\n safe_mkdir_for(self._report_file_path)\n with open(self._report_file_path, 'wb') as fp:\n fp.write(ensure_binary(self._generate_html(testsuites)))\n self._logger.debug('JUnit HTML report generated to {}'.format(self._report_file_path))\n return self._report_file_path\n\n def maybe_open_report(self):\n desktop.ui_open(self._report_file_path)\n\n @classmethod\n def _parse_xml_files(cls, xml_dir):\n testsuites = []\n for xml_file in glob.glob(os.path.join(xml_dir, 'TEST-*.xml')):\n testsuites += cls._parse_xml_file(xml_file)\n testsuites.sort()\n return testsuites\n\n @staticmethod\n def _parse_xml_file(xml_file):\n testsuites = []\n root = ET.parse(xml_file).getroot()\n\n testcases = []\n for testcase in root.iter('testcase'):\n failure = None\n for f in testcase.iter('failure'):\n failure = {\n 'type': f.attrib['type'],\n 'message': f.text\n }\n error = None\n for e in testcase.iter('error'):\n error = {\n 'type': e.attrib['type'],\n 'message': e.text\n }\n skipped = False\n for _s in testcase.iter('skipped'):\n skipped = True\n\n testcases.append(ReportTestCase(\n testcase.attrib['name'],\n testcase.attrib.get('time', 0),\n failure,\n error,\n skipped\n ))\n\n for testsuite in root.iter('testsuite'):\n testsuites.append(ReportTestSuite(\n testsuite.attrib['name'],\n testsuite.attrib['tests'],\n testsuite.attrib['errors'],\n testsuite.attrib['failures'],\n testsuite.attrib.get('skipped', 0),\n testsuite.attrib['time'],\n testcases\n ))\n return testsuites\n\n @staticmethod\n def _generate_html(testsuites):\n values = {\n 'total_tests': 0,\n 'total_errors': 0,\n 'total_failures': 0,\n 'total_skipped': 0,\n 'total_time': 0.0\n }\n\n for testsuite in testsuites:\n values['total_tests'] += testsuite.tests\n values['total_errors'] += testsuite.errors\n values['total_failures'] += testsuite.failures\n values['total_skipped'] += testsuite.skipped\n values['total_time'] += testsuite.time\n\n values['total_success'] = ReportTestSuite.success_rate(values['total_tests'],\n values['total_errors'],\n values['total_failures'],\n values['total_skipped'])\n values['summary_icon_class'] = ReportTestSuite.icon_class(values['total_tests'],\n values['total_errors'],\n values['total_failures'],\n values['total_skipped'])\n values['testsuites'] = map(lambda ts: ts.as_dict(), testsuites)\n\n package_name, _, _ = __name__.rpartition('.')\n renderer = MustacheRenderer(package_name=package_name)\n html = renderer.render_name('junit_report.html', values)\n return html\n","sub_path":"src/python/pants/backend/jvm/tasks/reports/junit_html_report.py","file_name":"junit_html_report.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"239124454","text":"#C:\\Python33\n\nfrom sys import argv\nfrom lib import *\nfrom email.parser import BytesParser, Parser\nfrom email.policy import default\n\ndef extractdata(filename, actions):\n # with open(filename, 'rb') as file:\n # headers = BytesParser(policy=default).parse(fp)\n\n file = open(filename, \"r\")\n\n # out = \"\"\n # if \"to\" in actions:\n # out += 'To: {}\\n'.format(headers['to'])\n # if \"from\" in actions:\n # out += 'From: {}\\n'.format(headers['from'])\n # if \"subject\" in actions:\n # out += 'Subject: {}\\n'.format(headers['subject'])\n\n fromtext = \"\"\n totext = \"\"\n subjecttext = \"\"\n datetext = \"\"\n\n for line in file:\n if len(line) == 1:\n break\n\n if line[:6].lower() == \"From: 
\".lower():\n fromtext = line[:-1].replace(',', '.')\n if line[:4].lower() == \"To: \".lower():\n totext = line[:-1].replace(',', '.')\n if line[:9].lower() == \"Subject: \".lower():\n subjecttext = line[:-1].replace(',', '.')\n if line[:6].lower() == \"Date: \".lower():\n datetext = line[:-1].replace(',', '.')\n\n out = datetext + \",\" + subjecttext + \",\" + totext + \",\" + fromtext\n return out\n\nif __name__ == \"__main__\":\n if len(argv) < 3:\n print(\"usage: emailtools.py <folder|file|list> <path> <fields...>\")\n exit()\n \n if argv[1] == \"folder\":\n foo = list_dir(argv[2])\n datain = []\n for each in foo:\n datain.append(argv[2] + \"/\" + each)\n elif argv[1] == \"file\":\n datain = [argv[2]] # wrap in a list so the loop below iterates over one file, not its characters\n elif argv[1] == \"list\":\n datain = open(argv[2], \"r\")\n datain = parse_file(datain)\n\n else:\n print(\"no listing available.\")\n exit()\n\n dataout = open(\"out.csv\", \"w+\")\n actions = argv[3:]\n\n for filename in datain:\n out = filename[4:]+ \",\"\n out += extractdata(filename, actions)\n\n print(out)\n dataout.write(out)\n dataout.write(\"\\n\")","sub_path":"emailtools.py","file_name":"emailtools.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"13640001","text":"from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7\n\n#import kratos core and applications\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.StructuralMechanicsApplication import *\nfrom KratosMultiphysics.ExternalSolversApplication import *\nfrom KratosMultiphysics.ShapeOptimizationApplication import *\n\n# For time measures\nimport time as timer\n\n# ======================================================================================================================================\n# Model part & solver\n# ======================================================================================================================================\n\n#import define_output\nparameter_file = open(\"ProjectParameters.json\",'r')\nProjectParameters = Parameters( parameter_file.read())\n\n#set echo level\necho_level = ProjectParameters[\"problem_data\"][\"echo_level\"].GetInt()\n\n#defining the model_part\nmain_model_part = ModelPart(ProjectParameters[\"problem_data\"][\"model_part_name\"].GetString())\nmain_model_part.ProcessInfo.SetValue(DOMAIN_SIZE, ProjectParameters[\"problem_data\"][\"domain_size\"].GetInt())\n\n###TODO replace this \"model\" for real one once available in kratos core\nModel = {ProjectParameters[\"problem_data\"][\"model_part_name\"].GetString() : main_model_part}\n\n# Create an optimizer\n# Note that internally variables related to the optimizer are added to the model part\noptimizerFactory = __import__(\"optimizer_factory\")\noptimizer = optimizerFactory.CreateOptimizer( main_model_part, ProjectParameters[\"optimization_settings\"] )\n\n# Create solver for all response functions specified in the optimization settings\n# Note that internally variables related to the individual functions are added to the model part\nresponseFunctionFactory = __import__(\"response_function_factory\")\nlistOfResponseFunctions = responseFunctionFactory.CreateListOfResponseFunctions( main_model_part, ProjectParameters[\"optimization_settings\"] )\n\n# Create solver to perform structural analysis\nsolver_module = __import__(ProjectParameters[\"structure_solver_settings\"][\"solver_type\"].GetString())\nCSM_solver = solver_module.CreateSolver(main_model_part, 
ProjectParameters[\"structure_solver_settings\"])\nCSM_solver.AddVariables()\nCSM_solver.ImportModelPart()\n\n# Add degrees of freedom\nCSM_solver.AddDofs()\n\n# Build sub_model_parts or submeshes (rearrange parts for the application of custom processes)\n## Get the list of the submodel part in the object Model\nfor i in range(ProjectParameters[\"structure_solver_settings\"][\"processes_sub_model_part_list\"].size()):\n part_name = ProjectParameters[\"structure_solver_settings\"][\"processes_sub_model_part_list\"][i].GetString()\n if( main_model_part.HasSubModelPart(part_name) ):\n Model.update({part_name: main_model_part.GetSubModelPart(part_name)})\n\n# ======================================================================================================================================\n# Analyzer\n# ======================================================================================================================================\n\nclass kratosCSMAnalyzer( (__import__(\"analyzer_base\")).analyzerBaseClass ):\n\n # --------------------------------------------------------------------------\n def initializeBeforeOptimizationLoop( self ):\n self.__initializeGIDOutput()\n self.__initializeProcesses()\n self.__initializeSolutionLoop()\n\n# --------------------------------------------------------------------------\n def analyzeDesignAndReportToCommunicator( self, currentDesign, optimizationIteration, communicator ):\n\n # Calculation of value of strain energy\n if communicator.isRequestingValueOf(\"strain_energy\"):\n\n print(\"\\n> Starting StructuralMechanicsApplication to solve structure\")\n startTime = timer.time()\n self.__solveStructure( optimizationIteration )\n print(\"> Time needed for solving the structure = \",round(timer.time() - startTime,2),\"s\")\n\n print(\"\\n> Starting calculation of strain energy\")\n startTime = timer.time()\n listOfResponseFunctions[\"strain_energy\"].CalculateValue()\n print(\"> Time needed for calculation of strain energy = \",round(timer.time() - startTime,2),\"s\")\n\n communicator.reportValue(\"strain_energy\", listOfResponseFunctions[\"strain_energy\"].GetValue())\n\n # Calculation of value of mass\n if communicator.isRequestingValueOf(\"mass\"):\n\n print(\"\\n> Starting calculation of value of mass\")\n startTime = timer.time()\n listOfResponseFunctions[\"mass\"].CalculateValue()\n constraintValue = listOfResponseFunctions[\"mass\"].GetValue()\n print(\"> Time needed for calculation of value of mass = \",round(timer.time() - startTime,2),\"s\")\n\n communicator.reportValue(\"mass\", constraintValue)\n\n # Calculation of gradients of strain energy\n if communicator.isRequestingGradientOf(\"strain_energy\"):\n\n print(\"\\n> Starting calculation of gradient of strain energy\")\n startTime = timer.time()\n listOfResponseFunctions[\"strain_energy\"].CalculateGradient()\n print(\"> Time needed for calculating gradient of strain energy = \",round(timer.time() - startTime,2),\"s\")\n\n gradientForCompleteModelPart = listOfResponseFunctions[\"strain_energy\"].GetGradient()\n communicator.reportGradient(\"strain_energy\", gradientForCompleteModelPart)\n\n # Calculation of gradients of mass\n if communicator.isRequestingGradientOf(\"mass\"):\n\n print(\"\\n> Starting calculation of gradient of mass\")\n startTime = timer.time()\n listOfResponseFunctions[\"mass\"].CalculateGradient()\n print(\"> Time needed for calculating gradient of mass = \",round(timer.time() - startTime,2),\"s\")\n\n gradientForCompleteModelPart = 
listOfResponseFunctions[\"mass\"].GetGradient()\n communicator.reportGradient(\"mass\", gradientForCompleteModelPart)\n\n # --------------------------------------------------------------------------\n def finalizeAfterOptimizationLoop( self ):\n for process in self.list_of_processes:\n process.ExecuteFinalize()\n self.gid_output.ExecuteFinalize()\n\n # --------------------------------------------------------------------------\n def __initializeProcesses( self ):\n\n import process_factory\n #the process order of execution is important\n self.list_of_processes = process_factory.KratosProcessFactory(Model).ConstructListOfProcesses( ProjectParameters[\"constraints_process_list\"] )\n self.list_of_processes += process_factory.KratosProcessFactory(Model).ConstructListOfProcesses( ProjectParameters[\"loads_process_list\"] )\n if(ProjectParameters.Has(\"problem_process_list\")):\n self.list_of_processes += process_factory.KratosProcessFactory(Model).ConstructListOfProcesses( ProjectParameters[\"problem_process_list\"] )\n if(ProjectParameters.Has(\"output_process_list\")):\n self.list_of_processes += process_factory.KratosProcessFactory(Model).ConstructListOfProcesses( ProjectParameters[\"output_process_list\"] )\n\n #print list of constructed processes\n if(echo_level>1):\n for process in self.list_of_processes:\n print(process)\n\n for process in self.list_of_processes:\n process.ExecuteInitialize()\n\n # --------------------------------------------------------------------------\n def __initializeGIDOutput( self ):\n\n computing_model_part = CSM_solver.GetComputingModelPart()\n problem_name = ProjectParameters[\"problem_data\"][\"problem_name\"].GetString()\n\n from gid_output_process import GiDOutputProcess\n output_settings = ProjectParameters[\"output_configuration\"]\n self.gid_output = GiDOutputProcess(computing_model_part, problem_name, output_settings)\n\n self.gid_output.ExecuteInitialize()\n\n # --------------------------------------------------------------------------\n def __initializeSolutionLoop( self ):\n\n ## Sets strategies, builders, linear solvers, schemes and solving info, and fills the buffer\n CSM_solver.Initialize()\n CSM_solver.SetEchoLevel(echo_level)\n\n for responseFunctionId in listOfResponseFunctions:\n listOfResponseFunctions[responseFunctionId].Initialize()\n\n # Start process\n for process in self.list_of_processes:\n process.ExecuteBeforeSolutionLoop()\n\n ## Set results when they are written in a single file\n self.gid_output.ExecuteBeforeSolutionLoop()\n\n # --------------------------------------------------------------------------\n def __solveStructure( self, optimizationIteration ):\n\n # processes to be executed at the beginning of the solution step\n for process in self.list_of_processes:\n process.ExecuteInitializeSolutionStep()\n\n self.gid_output.ExecuteInitializeSolutionStep()\n\n # Actual solution\n CSM_solver.Solve()\n\n # processes to be executed at the end of the solution step\n for process in self.list_of_processes:\n process.ExecuteFinalizeSolutionStep()\n\n # processes to be executed before writing the output\n for process in self.list_of_processes:\n process.ExecuteBeforeOutputStep()\n\n # write output results GiD: (frequency writing is controlled internally)\n if(self.gid_output.IsOutputStep()):\n self.gid_output.PrintOutput()\n\n self.gid_output.ExecuteFinalizeSolutionStep()\n\n # processes to be executed after writing the output\n for process in self.list_of_processes:\n process.ExecuteAfterOutputStep()\n\n # 
--------------------------------------------------------------------------\n\nstructureAnalyzer = kratosCSMAnalyzer()\n\n# ======================================================================================================================================\n# Optimization\n# ======================================================================================================================================\n\noptimizer.importAnalyzer( structureAnalyzer )\noptimizer.optimize()\n\n# ======================================================================================================================================","sub_path":"applications/ShapeOptimizationApplication/test_examples/01_Strain_Energy_Minimization_3D_Hook/run_optimization.py","file_name":"run_optimization.py","file_ext":"py","file_size_in_byte":10444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"482679892","text":"import pika\nimport RabbitListener\nimport pieVisualizer\n\n\nclass ResultsReceiver:\n\n messageBrokerIp = '127.0.0.1'\n messageBrokerUser = 'bunny'\n messageBrokerPwd = 'bunny1234'\n messageBrokerVirtualHost = 'bunny_host'\n exchangeName = 'bunny_exchange'\n exchangeType = 'fanout'\n\n\n def __init__(self, votings_names, chart_type):\n self.votings_names = votings_names\n self.chart_type = chart_type\n\n self.channel = self.create_channel()\n self.consume_results()\n\n def consume_results(self):\n rl = RabbitListener.RabbitListener(self.channel,\n pieVisualizer.PieVisualizer(self.votings_names, self.chart_type))\n rl.start_consuming()\n\n def create_channel(self):\n user_credentials = pika.credentials.PlainCredentials(self.messageBrokerUser, self.messageBrokerPwd)\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=self.messageBrokerIp,\n virtual_host=self.messageBrokerVirtualHost,\n credentials=user_credentials)\n )\n channel = connection.channel()\n channel.exchange_declare(exchange=self.exchangeName, exchange_type=self.exchangeType)\n return channel\n","sub_path":"voting-system-visualizer/resultsReceiver.py","file_name":"resultsReceiver.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"229721201","text":"from __future__ import print_function\nfrom __future__ import division\nimport cv2\nimport math\nimport numpy as np\n\n\ndef locate2d(img):\n try:\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n #cv2.imshow('hsv', hsv)\n cv2.waitKey(0)\n upper_color = np.array([35, 255, 255])\n lower_color = np.array([10, 50, 150])\n mask = cv2.inRange(hsv, lower_color, upper_color)\n #cv2.imshow('mask',mask);cv2.waitKey(0)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n res = cv2.bitwise_and(img, img, mask=mask)\n gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n gray = cv2.medianBlur(gray, 5)\n #print gray.shape\n # cv2.imshow('mask1',mask);cv2.waitKey(0)\n edges = cv2.Canny(gray, 100, 200)\n # cv2.imshow('edge',edges)\n # cv2.waitKey(2000)\n cv2.destroyWindow(\"edge\")\n # HoughCircles returns None when nothing is found; indexing [0] here would raise\n # and skip the centroid fallback below, so keep the raw result and check it first\n circles = cv2.HoughCircles(mask, cv2.HOUGH_GRADIENT, 2, 200,\n param1=100, param2=10, minRadius=0, maxRadius=200)\n return_x = []\n return_y = []\n # i = circles[np.lexsort(-circles.T)][0]\n if circles is not None:\n for i in circles[0]:\n cv2.circle(img, (i[0], i[1]), i[2], (0, 0, 255), 2)\n cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)\n return_x.append(i[0])\n return_y.append(i[1])\n return return_x[0], return_y[0]\n else:\n rows = 
gray.shape[0]\n cols = gray.shape[1]\n loca_x = []\n loca_y = []\n for i in range(0,rows-1):\n for j in range(0,cols-1):\n if gray[i,j] != 0:\n loca_x.append(i)\n loca_y.append(j)\n loca_x = np.array(loca_x,dtype='int')\n loca_y = np.array(loca_y, dtype='int')\n x = np.mean(loca_x)\n y = np.mean(loca_y)\n return x,y\n except Exception:\n return None\n\n\ndef locate3d(img2, yaw0, pitch0):\n # parameter for camera\n f_y = 271.8194\n f_x = 271.7887\n\n x, y, yaw2, pitch2 = None, None, None, None\n p2 = locate2d(img2)\n if p2 is not None:\n pitch2 = math.atan((p2[1] - 120) / f_y) + 0.6928957 + pitch0\n yaw2 = math.atan((160 - p2[0]) / f_x) + yaw0\n x = (300+126.5 + 17.74 * math.cos(pitch0) + 50.71 * math.sin(pitch0)) / math.tan(pitch2) \\\n + 50.71 * math.cos(pitch0) - 17.74 * math.sin(pitch0)\n y = x * math.tan(yaw2)\n x = x/1000.0\n y = y/1000.0\n else:\n print('Cannot locate.')\n\n print('p =', p2, ', x =', x, ', y =', y)\n return x, y, yaw2, pitch2\n\n\ndef main():\n img1 = cv2.imread('1.bmp', 1)\n # locate3d takes an initial yaw and pitch and returns four values;\n # assume zero initial yaw with the camera pitched down 7 degrees\n x, y, yaw, pitch = locate3d(img1, 0., -7 / 180 * math.pi)\n return x, y\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"user_locate.py","file_name":"user_locate.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
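A quick usage sketch for the locator above (not part of the original script: the image file name and camera angles are assumed; locate2d returns the pixel centre of the orange blob or None on failure, and locate3d converts it to metres):

import math
import cv2

frame = cv2.imread('ball.bmp', 1)                    # hypothetical test frame
if locate2d(frame) is not None:
    # camera assumed level in yaw and tilted 7 degrees down in pitch
    x, y, yaw, pitch = locate3d(frame, 0., -7 / 180 * math.pi)
    print('target at {:.3f} m forward, {:.3f} m sideways'.format(x, y))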
{"seq_id":"302044364","text":"#!/usr/bin/env python\nPKG = 'embodied_attention'\nimport roslib; roslib.load_manifest(PKG)\n\nimport rospy\nimport cv2 as cv\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom std_msgs.msg import Float32MultiArray\nfrom geometry_msgs.msg import Point\nfrom embodied_attention.srv import ResetSaccade\nimport sys\nimport os\nimport numpy as np \nfrom scipy import misc\n\n## functions\ndef gauss(x, y, X, Y, sigma):\n return np.exp(-(np.power(x-X, 2)+np.power(y-Y, 2))/(2.*np.power(sigma ,2)))\ndef f(x): return np.maximum(x, 0.)\n\nclass Saccade():\n def __init__(self):\n \n ## parameters\n self.N = 1600 # number of neurons per type (visual, movement)\n self.theta = 11. # decision threshold\n sig_lat = .25 # width of Gaussian lateral inhibition\n self.sig_IoR = .05 # width of Gaussian spread of inhibition of return\n sig_noise = .2 # width of Gaussian noise\n self.k = .0175 # passive decay rate (movement neurons)\n self.g = .33 # input threshold\n self.G = .2 # scaling factor for lateral inhibition\n \n ## setup\n # dimensions and coordinate systems\n self.Ns = int(np.sqrt(self.N)) \n n = 1./(self.Ns*.5)\n r = np.linspace(-1., 1., self.Ns)\n self.X, self.Y = np.meshgrid(r, r)\n self.X = np.reshape(self.X, [self.N, ])\n self.Y = np.reshape(self.Y, [self.N, ])\n \n # lateral weights \n self.W = np.zeros([self.N, self.N])\n for i in range(self.N):\n self.W[:, i] = gauss(self.X[i], self.Y[i], self.X, self.Y, sig_lat)\n self.W[i, i] = 0.\n \n #self.dt = .1\n self.dt = .75\n self.tau = 20.\n \n # noise propagation\n self.dsig_v = np.sqrt(self.dt/self.tau)*sig_noise # input (visual neuron) noise\n self.dsig_m = np.sqrt(self.dt)*sig_noise # movement neuron noise\n \n # (state) variables\n self.M = np.zeros(self.N) # movement neurons\n self.V = np.zeros(self.N) # visual neurons\n \n self.saliency_sub = rospy.Subscriber(\"/saliency_map\", Float32MultiArray, self.saliency_map_callback, queue_size=1, buff_size=2**24)\n\n self.target_pub = rospy.Publisher(\"/saccade_target\", Point, queue_size=1)\n self.potential_target_pub = rospy.Publisher(\"/saccade_potential_target\", Point, queue_size=1)\n\n self.cv_bridge = CvBridge()\n\n self.reset_saccade_serv = rospy.Service('/reset_saccade', ResetSaccade, self.handle_reset_saccade)\n\n # numerical integration (simple Euler)\n def saliency_map_callback(self, saliency_map):\n\n # handle input\n lo = saliency_map.layout\n sal = np.asarray(saliency_map.data[lo.data_offset:]).reshape(lo.dim[0].size, lo.dim[1].size)\n\n sal = misc.imresize(sal, [self.Ns, self.Ns])\n sal = np.reshape(sal, [self.N, ])/235.*0.55+.2\n\n # update\n self.V += self.dt*(-self.V + sal)/self.tau + self.dsig_v*np.random.randn(self.N)\n self.M += self.dt*(-self.k*self.M + f(self.V - self.g) - self.G*np.dot(self.W, f(self.M))) + self.dsig_m*np.random.randn(self.N)\n\n ID = np.argmax(self.M)\n\n # transform to coordinates in saliency map\n x = np.mod(ID, self.Ns) + 0.5\n y = int(ID/self.Ns) + 0.5\n x_scaled = int(float(lo.dim[0].size)/self.Ns * x)\n y_scaled = int(float(lo.dim[1].size)/self.Ns * y)\n rospy.loginfo(\"Potential target: %3d, %3d: %f\" % (x_scaled, y_scaled, self.M[ID]))\n\n # publish potential target\n self.potential_target_pub.publish(Point(x_scaled, y_scaled, self.M[ID]))\n\n # check if target\n if (self.M[ID] >= self.theta):\n rospy.loginfo(\"\\tis actual target\")\n\n # publish target\n self.target_pub.publish(Point(x_scaled, y_scaled, self.M[ID]))\n\n # reset\n self.M[ID] = 0.\n\n # inhibition of return\n self.V = self.V - gauss(self.X[ID], self.Y[ID], self.X, self.Y, self.sig_IoR)\n\n def handle_reset_saccade(self, req):\n rospy.loginfo(\"Resetting node\")\n self.saliency_sub.unregister()\n self.reset_saccade_serv.shutdown()\n self.__init__()\n return True\n\ndef main(args):\n rospy.init_node(\"saccade\")\n saccade = Saccade()\n rospy.spin()\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scripts/saccade.py","file_name":"saccade.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
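The saliency_map_callback above is a plain Euler step of two coupled leaky integrators. A stripped-down sketch of the same dynamics (toy sizes and inputs; lateral inhibition and noise dropped so the winner is deterministic):

import numpy as np

dt, tau, k, g = .75, 20., .0175, .33
V = np.zeros(4)                        # four toy visual neurons
M = np.zeros(4)                        # four toy movement neurons
sal = np.array([.2, .2, .2, .6])       # constant toy saliency input
for _ in range(200):
    V += dt * (-V + sal) / tau                     # V relaxes towards the input
    M += dt * (-k * M + np.maximum(V - g, 0.))     # M charges only where V exceeds g
print(np.argmax(M))                    # 3: the most salient location wins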
{"seq_id":"139752354","text":"import os\nimport sys\nimport json\n# Packages\nfrom flask_testing import TestCase\nfrom graphene.test import Client\nfrom alembic import command, config\n\n# Setup\nfrom app import create_app\nfrom schema import schema\nfrom helpers.database import engine, db_session, Base\n\n# Models\nfrom api.user.models import User\nfrom api.party.models import Party\nfrom api.office.models import Office\nfrom api.candidate.model import Candidate\n\n\n# Fixtures\nfrom fixtures.token.token_fixture import (\n CITIZEN_TOKEN,\n ADMIN_TOKEN, POLITICIAN_TOKEN,\n DUP_POLITICIAN_TOKEN\n)\nsys.path.append(os.getcwd())\n\n\nclass BaseTestCase(TestCase):\n\n def create_app(self):\n app = create_app('testing')\n self.base_url = 'https://127.0.0.1:5000/graphql'\n self.headers = {'content-type': 'application/json'}\n self.client = Client(schema)\n return app\n\n def setUp(self):\n app = self.create_app()\n self.app_test = app.test_client()\n with app.app_context():\n Base.metadata.create_all(bind=engine)\n admin_user = User(first_name=\"doe\",\n last_name=\"jon\",\n other_names=\"smith\",\n email=\"admin@yahoo.com\",\n password=\"12345678\",\n picture=\"https://picsum.photos/200\",\n user_type=\"admin\")\n\n admin_user.save()\n citizen_user = User(first_name=\"doe\",\n last_name=\"jon\",\n other_names=\"smith\",\n email=\"citizen@yahoo.com\",\n password=\"12345678\",\n picture=\"https://picsum.photos/200\",\n user_type=\"citizen\")\n\n citizen_user.save()\n politician_user = User(first_name=\"doe\",\n last_name=\"jon\",\n other_names=\"smith\",\n email=\"politician@yahoo.com\",\n password=\"12345678\",\n picture=\"https://picsum.photos/200\",\n user_type=\"politician\")\n\n politician_user.save()\n dup_politician_user = User(first_name=\"doe\",\n last_name=\"jon\",\n other_names=\"smith\",\n email=\"dup_politician@yahoo.com\",\n password=\"12345678\",\n picture=\"https://picsum.photos/200\",\n user_type=\"politician\")\n\n dup_politician_user.save()\n party = Party(party_name=\"party\",\n hq_address=\"5 City Of Power Avenue, Somolu, Lagos, Nigeria\", # noqa\n logo_url=\"www.ipsum/pic\")\n party.save()\n party_2 = Party(party_name=\"second_party\",\n hq_address=\"5 City Of Power Avenue, Somolu, Lagos, Nigeria\", # noqa\n logo_url=\"www.ipsum/pic\")\n party_2.save()\n office = Office(office_name=\"office\",\n office_type=\"state\",\n age_limit=50,\n description=\"my testing office\")\n office.save()\n candidate = Candidate(user_id=4,\n office_id=1,\n party_id=1)\n candidate.save()\n db_session.commit()\n\n def tearDown(self):\n app = self.create_app()\n with app.app_context():\n db_session.remove()\n Base.metadata.drop_all(bind=engine)\n\n\nclass CommonTestCases(BaseTestCase):\n \"\"\"Common test cases throughout the code.\n This code is used to reduce duplication\n :params\n - loggedin_CITIZEN_TOKEN_assert_equal\n \"\"\"\n\n def citizen_token_assert_equal(self, query, expected_response):\n \"\"\"\n Make a request with verified citizen token and use assertEqual\n to compare the values\n :params\n - query, expected_response\n \"\"\"\n\n headers = {\"Authorization\": \"Bearer\" + \" \" + CITIZEN_TOKEN}\n response = self.app_test.post(\n '/graphql?query=' + query, headers=headers)\n actual_response = json.loads(response.data)\n self.assertEqual(actual_response, expected_response)\n\n def politician_token_assert_equal(self, query, expected_response):\n \"\"\"\n Make a request with verified politician token and use assertEqual\n to compare the values\n :params\n - query, expected_response\n \"\"\"\n\n headers = {\"Authorization\": \"Bearer\" + \" \" + POLITICIAN_TOKEN}\n response = self.app_test.post(\n '/graphql?query=' + query, headers=headers)\n actual_response = json.loads(response.data)\n self.assertEqual(actual_response, expected_response)\n\n def dup_politician_token_assert_equal(self, query, expected_response):\n \"\"\"\n Make a request with verified politician token and use assertEqual\n to compare the values\n :params\n - query, expected_response\n \"\"\"\n\n headers = {\"Authorization\": \"Bearer\" + \" \" + DUP_POLITICIAN_TOKEN}\n response = self.app_test.post(\n '/graphql?query=' + query, headers=headers)\n actual_response = json.loads(response.data)\n self.assertEqual(actual_response, expected_response)\n\n def admin_token_assert_equal(self, query, expected_response):\n \"\"\"\n Make a request with verified admin token and use assertEqual\n to compare the values\n :params\n - query, expected_response\n \"\"\"\n\n headers = {\"Authorization\": \"Bearer\" + \" \" + ADMIN_TOKEN}\n response = self.app_test.post(\n '/graphql?query=' + query, headers=headers)\n actual_response = json.loads(response.data)\n self.assertEqual(actual_response, expected_response)\n","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
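The *_token_assert_equal helpers above are meant to be called from concrete test cases. A hypothetical subclass for illustration (the query string and schema fields are made up; only the helper usage is taken from the source):

class OfficeQueryTestCase(CommonTestCases):
    def test_admin_can_list_offices(self):
        query = 'query { offices { officeName } }'                   # hypothetical schema
        expected = {'data': {'offices': [{'officeName': 'office'}]}}
        self.admin_token_assert_equal(query, expected)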
{"seq_id":"279684801","text":"# My-Tensorflow-Practice\n# 00-Basic-Operations\n# Author: OddNo7\n# Last modified: 2017/04/13\n\n# I wrote this for getting a hold of Tensorflow and for fun. For official\n# documentations, please go to https://www.tensorflow.org\n\nimport tensorflow as tf\nimport numpy as np\n\n# Constant addition, subtraction, multiplication and division\na = tf.constant(3, dtype=tf.float32, name='a')\nb = tf.constant(4, dtype=tf.float32, name='b')\nc = a + b\nd = a - b\ne = a * b\nf = a / b\n\n# Run the computational graph\nsess = tf.Session()\nprint(sess.run([c, d, e, f]))\n\n# A placeholder must be fed with data. On definition of a placeholder, dtype must be specified. One can also specify\n# name and shape.\n# The feeding usually goes with the 'feed_dict' argument.\na = tf.placeholder(tf.float32)\nb = tf.placeholder(tf.float32)\nwith tf.Session() as sess:\n print(sess.run(a + b, feed_dict={a: 3, b: 4}))\n\n# A variable is a value that can be trained during the running of a graph.\n# A variable needs to be designated an initial value when created. On\n# computation, this initial value needs to be assigned as an initialization\n# process.\nW = tf.Variable(2.0, tf.float32, name='W')\nb = tf.Variable(3.5, tf.float32, name='b')\nx = tf.placeholder(tf.float32, name='x')\nline = W * x + b\ninit = tf.global_variables_initializer()\n# For initialization, one can call a variable's method 'initializer' to\n# initiate the value, or use the global initializer.\n\nwith tf.Session() as sess:\n sess.run(init)\n print(sess.run(line, feed_dict={x: [1, 2, 3, 4]}))\n\n# Loss function. To evaluate how good a model is, we need to find a loss function.\n# With a loss function we can try to train the variables so that the model\n# fits the fed data.\ny = tf.placeholder(tf.float32, name='y')\nwith tf.name_scope('Loss'):\n MSE = tf.reduce_mean(tf.square(line - y), name='MSE')\nwith tf.name_scope('SGD'):\n optimizer = tf.train.GradientDescentOptimizer(0.02).minimize(MSE)\n\nxtr = np.array([1.0, 2.0, 3.0, 4.0])\nytr = np.array([2.0, 3.0, 4.0, 5.0])\n# Now one needs to minimize the loss function. To do that, we need to call some optimizer.\n# tf.train provides several optimizers. We can use the basic one and build\n# an instance, and call its minimize method.\nwith tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n for epoch in range(2000):\n if epoch % 100 == 0:\n print('Reaching {}00-th epoch'.format(epoch // 100))\n print(sess.run(MSE, feed_dict={x: xtr, y: ytr}))\n sess.run(optimizer, feed_dict={x: xtr, y: ytr})\n print(sess.run([W, b]))\n","sub_path":"00-Basic-Operations/00-Basic-Operations.py","file_name":"00-Basic-Operations.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
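One note on the optimizer choice in that script: GradientDescentOptimizer is the simplest member of tf.train, and any other optimizer from the same module drops in unchanged. A sketch reusing the MSE, x, y, xtr and ytr defined above (TF 1.x API):

optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(MSE)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(500):
        sess.run(optimizer, feed_dict={x: xtr, y: ytr})
    print(sess.run([W, b]))    # the data is y = x + 1, so W and b should approach 1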
{"seq_id":"192682381","text":"import iris.coord_categorisation\nimport iris\nimport glob\nimport os\nfrom cftime import datetime\n\n############################################\n# Define variables and set up environment\n#############################################\nroot_fp = \"/nfs/a319/gy17m2a/\"\n#root_fp = \"C:/Users/gy17m2a/OneDrive - University of Leeds/PhD/DataAnalysis/\"\nos.chdir(root_fp)\n\nem = '01'\n\n#############################################\n## Load in the data\n#############################################\nfilenames = []\n# Create filepath to correct folder using ensemble member and year\ngeneral_filename = 'datadir/UKCP18/2.2km/{}/1980_2001/pr_rcp85_land-cpm_uk_2.2km_{}_1hr_*'.format(em, em)\n#print(general_filename)\n# Find all files in directory which start with this string\nfor filename in glob.glob(general_filename):\n #print(filename)\n filenames.append(filename)\nprint(len(filenames))\n \nmonthly_cubes_list = iris.load(filenames,'lwe_precipitation_rate')\nfor cube in monthly_cubes_list:\n for attr in ['creation_date', 'tracking_id', 'history']:\n if attr in cube.attributes:\n del cube.attributes[attr]\n\n# Concatenate the cubes into one\nconcat_cube = monthly_cubes_list.concatenate_cube()\n\n# Remove ensemble member dimension\nconcat_cube = concat_cube[0,:,:,:]\n\n# Create time constraint (only keeps hours between 1990 and 2000)\ntime_constraint = iris.Constraint(time=lambda c: c.point.year >= 1990 and c.point.year <= 2000)\ntime_constraint_cube = concat_cube.extract(time_constraint)\n\n## Check times covered\ntimes = time_constraint_cube.coord('yyyymmddhh')\n","sub_path":"GlobalFunctions/time_constraint.py","file_name":"time_constraint.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"523403502","text":"from .DictionaryNumOper import thousands, numbers_till_hundred, operations\nimport re\n\nclass HumanCalc:\n def convert(self, string): \n string_normal = self._prepare_string(string) # prepare for split\n split = string_normal.split()\n # check string length\n if len(split) % 2 != 1 or len(split) < 1:\n return 'invalid input'\n # the following code will not be called\n raise TypeError(\"Please make sure the expression is correct\")\n \n expression = []\n\n try:\n if len(split) != 1:\n for k in range((len(split)-1)//2):\n expression.append(num_to_text(split[k*2]))\n expression.append(ope_to_text(split[k*2 + 1]))\n else:\n expression.append(num_to_text(split[k*2 + 2]))\n else:\n expression.append(num_to_text(split[0]))\n except:\n return 'invalid input'\n # the following code will not be called\n raise TypeError(\"Please make sure the expression is correct\")\n\n return ' '.join(expression)\n \n @staticmethod\n def _prepare_string(string):\n ''' normalise the string, remove abundant spaces while adding space when missing,\n prepare to split\n 
'3243 +3423 - 342= 23'\n to\n '3243 + 3423 - 342 = 23'\n '''\n pattern = r'\\s*([\\*\\+\\-\\=\\/])\\s*'\n return re.sub(pattern, r' \\g<1> ', string)\n \n\ndef ope_to_text(operation):\n return operations[operation]\n\ndef num_to_text(number):\n '''the number is separated into blocks \n 3 digits each with names\n 5,389,276,538\n block names\n b,mil,tho,\n and 3 digit numbers are being converted to a string\n '''\n number = int(number)\n if number == 0:\n return numbers_till_hundred[0]\n \n split = []\n thousands_it = iter(thousands)\n while number:\n reminder = number % 10**3\n thousands_block = next(thousands_it)\n if reminder == 0:\n number = (number - 0) / 10**3\n # there was a huge bug: unreachable code, continue was before number change\n continue # we don't mention anything if 3 digit block is missing\n if reminder == 1:\n # we get rid of 's' at the end\n split.insert(0, (_3digit_to_text(reminder), thousands_block[:-1]))\n number = (number - 1) / 10**3\n continue\n # general case\n split.insert(0, (_3digit_to_text(reminder), thousands_block))\n number = (number - reminder) / 10**3\n\n return ' '.join([item for tupl in split for item in tupl if item != ''])\n\ndef _3digit_to_text(number):\n txtnum = []\n number = int(number)\n if number == 0:\n return numbers_till_hundred[0]\n \n hundreds = number // 100\n if hundreds:\n if hundreds == 1:\n txtnum.extend([numbers_till_hundred[hundreds], numbers_till_hundred[100]])\n else:\n txtnum.extend([numbers_till_hundred[hundreds], numbers_till_hundred[100] + 's'])\n\n tens_ones = number - hundreds * 100\n\n if not tens_ones:\n # if tens_ones is 0 we write nothing\n pass\n elif tens_ones in numbers_till_hundred:\n txtnum.append(numbers_till_hundred[tens_ones])\n else:\n ones = tens_ones % 10\n tens = tens_ones - ones\n txtnum.append(numbers_till_hundred[tens] + '-' + numbers_till_hundred[ones])\n \n return ' '.join(txtnum)\n\n","sub_path":"HumanCalculator/HumanCalculator.py","file_name":"HumanCalculator.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"363697033","text":"\"\"\"long lat\n\nRevision ID: b72de3508458\nRevises: 0f6a991b4ac5\nCreate Date: 2019-10-13 11:41:13.319666\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b72de3508458'\ndown_revision = '0f6a991b4ac5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('airport', sa.Column('latitude', sa.Float(), nullable=True))\n op.add_column('airport', sa.Column('longitude', sa.Float(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('airport', 'longitude')\n op.drop_column('airport', 'latitude')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/b72de3508458_long_lat.py","file_name":"b72de3508458_long_lat.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"628503766","text":"import shlex\nimport textwrap\nimport traceback\nfrom typing import Dict, List\n\nfrom communicator import Communicator\nfrom commands.get_rating import GetRatingCommand\nfrom commands.handler import CommandHandler\nfrom commands.list_buildings import ListBuildingsCommand\nfrom commands.list_campuses import ListCampusesCommand\nfrom commands.list_fountains import ListFountainsCommand\nfrom commands.login import LoginCommand\nfrom commands.new_user import NewUserCommand\nfrom commands.rate_fountain import RateFountainCommand\nfrom context import ClientContext\n\n\nclass Shell:\n \"\"\"Manages user interaction with the client.\"\"\"\n done: bool\n context: ClientContext\n handlers: Dict[str, CommandHandler]\n\n def __init__(self, host: str, port: int):\n self.done = False\n self.handlers = {}\n self.context = ClientContext(\n communicator=Communicator(host=host, port=port),\n )\n self._register_handlers()\n\n def _register_handlers(self):\n handler_list: List[CommandHandler] = [\n GetRatingCommand(self.context),\n ListBuildingsCommand(self.context),\n ListCampusesCommand(self.context),\n ListFountainsCommand(self.context),\n LoginCommand(self.context),\n NewUserCommand(self.context),\n RateFountainCommand(self.context),\n ]\n\n for handler in handler_list:\n handler.context = self.context\n self.handlers[handler.command_name.lower()] = handler\n\n def display_help(self):\n print(\"Good-Water client supports the following commands:\")\n\n msgs = [(n, h.help_text) for n, h in self.handlers.items()]\n msgs += [\n (\"help\", \"Displays this help message.\"),\n (\"exit\", \"Terminates the client.\"),\n ]\n msgs.sort()\n\n for name, help_text in msgs:\n print(name)\n text = textwrap.dedent(help_text).strip()\n help_text = \"\\n\".join(\n \"\\n\".join(textwrap.wrap(\n text=t,\n width=100,\n initial_indent=\" \" * 4,\n )) for t in text.splitlines()\n )\n print(help_text)\n\n def read_command(self, cmd: str):\n cmd = cmd.strip()\n\n if cmd == \"\":\n return\n\n args = shlex.split(cmd)\n cmd_name = args[0].lower()\n\n if cmd_name == \"exit\":\n self.done = True\n elif cmd_name == \"help\":\n self.display_help()\n else:\n if cmd_name in self.handlers:\n handler = self.handlers[cmd_name]\n try:\n handler.run(args)\n except Exception as ex:\n print(\n f\"{type(ex).__name__} raised\"\n f\" during '{cmd_name}' command.\"\n )\n traceback.print_exc()\n else:\n print(f\"Unknown command: {cmd_name}\")\n\n def loop(self):\n \"\"\"Handle user input until the program ends.\"\"\"\n print(\"Welcome to the Good-Water client!\")\n print(\"Type 'help' for usage information.\")\n\n while not self.done:\n self.read_command(input(\"> \"))\n","sub_path":"Client/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"257101041","text":"import os\nfrom argparse import ArgumentParser\n\nimport numpy as np\nimport torch\n\nfrom classifier_id import Resnet\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateLogger\nfrom pytorch_lightning.profiler import 
AdvancedProfiler\n\n\n\ndef main(hparams):\n print(hparams.dataset)\n model = Resnet(hparams)\n if hparams.checkpoint != None:\n model = Resnet.load_from_checkpoint(hparams.checkpoint)\n model.train()\n\n os.makedirs(hparams.log_dir, exist_ok=True)\n try:\n log_dir = sorted(os.listdir(hparams.log_dir))[-1]\n except IndexError:\n log_dir = os.path.join(hparams.log_dir, 'version_0')\n\n checkpoint_callback = ModelCheckpoint(\n # monitor = 'loss',\n filepath=os.path.join(log_dir, 'checkpoints_older'),\n save_top_k=1,\n verbose=True,\n )\n stop_callback = EarlyStopping(\n monitor='val_loss',\n mode='min',\n patience=60000,\n verbose=True,\n\n )\n\n lr_logger = LearningRateLogger()\n\n trainer = Trainer(\n gpus=1,\n checkpoint_callback=checkpoint_callback,\n early_stop_callback=stop_callback,\n callbacks= [lr_logger],\n accumulate_grad_batches=1,\n # resume_from_checkpoint=hparams.checkpoint,\n benchmark=True,\n # overfit_batches=10,\n # val_check_interval=0.250,\n # auto_scale_batch_size='binsearch',\n #gradient_clip_val=100,\n #amp_level='O2',\n #precision=16,\n )\n\n\n\n trainer.fit(model)\n\n\nif __name__ == '__main__':\n parent_parser = ArgumentParser(add_help=False)\n parent_parser.add_argument('--dataset', required=True)\n parent_parser.add_argument('--log_dir', default='lightning_logs')\n parent_parser.add_argument('--checkpoint', default=None)\n parent_parser.add_argument('--batch_size', type=int, default=1)\n parser = Resnet.add_model_specific_args(parent_parser)\n hparams = parser.parse_args()\n\n main(hparams)\n","sub_path":"detection/training/train_classifier_id.py","file_name":"train_classifier_id.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"266455237","text":"import numpy as np\nfrom datetime import datetime\nimport datetime as dt\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef Homepage():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Welcome to Hawaii Climate Analysis API
\"\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/start
\"\n f\"/api/v1.0/start/end\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n \"\"\"Convert the query results to a Dictionary using date as the key and prcp as the value.\"\"\"\n \n \n # Query to retrieve the data and precipitation scores\n results = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date).all()\n\n # Create a dictionary from the row data and append to a list of all_precipitation\n all_precipitation = []\n for precip in results:\n precip_dict = {}\n precip_dict[\"date\"] = precip.date\n precip_dict[\"prcp\"]= precip.prcp\n \n all_precipitation.append(precip_dict)\n\n return jsonify(all_precipitation)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n #Query to retrieve all stations\n stations=session.query(Station.station,Station.name).all()\n\n all_stations = []\n for station in stations:\n stat_dict = {}\n stat_dict[\"name\"] = station.name\n\n all_stations.append(stat_dict)\n\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n \"\"\"Query for the dates and temperature observations from a year from the last data point.\n Return a JSON list of Temperature Observations (tobs) for the previous year.\"\"\"\n #Latest Date in the database\n \n end_date=session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n\n for date in end_date:\n split_last_date=date.split('-')\n \n split_last_date\n last_year=int(split_last_date[0])\n last_month=int(split_last_date[1]) \n last_day=int(split_last_date[2])\n\n # Calculate the date 1 year ago from the last data point in the database\n query_date = dt.date(last_year, last_month, last_day) - dt.timedelta(days=365)\n\n # Query for dates and temperature observations from year ago\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date>=query_date).order_by(Measurement.date).all()\n\n all_tobs = []\n for row in results:\n tobs_dict = {}\n tobs_dict[\"date\"]= row.date\n tobs_dict[\"tobs\"] = row.tobs\n\n all_tobs.append(tobs_dict)\n\n return jsonify(all_tobs)\n\n@app.route(\"/api/v1.0/\")\n\n#Return a JSON list of the minimum temperature,\n#the average temperature, and the max temperature for a given start or start-end range.\n#When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and\n#equal to the start date.\n#When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates in the start and end date inclusive.\n\ndef calc_temp_start(start_date):\n \n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).all()\n \n calc_tobs=[]\n for row in results:\n calc_tobs_dict = {}\n calc_tobs_dict[\"TMIN\"] = row[0]\n calc_tobs_dict[\"TAVG\"] = row[1]\n calc_tobs_dict[\"TMAX\"] = row[2]\n calc_tobs.append(calc_tobs_dict)\n\n return jsonify(calc_tobs)\n@app.route(\"/api/v1.0//\")\n\ndef calc_temp_startend(start_date,end_date): \n \n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs),func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n # Convert the query results to a Dictionary using date as the key and tobs as the value.\n calc_tobs=[]\n for row in results:\n calc_tobs_dict = {}\n calc_tobs_dict[\"TMIN\"] = row[0]\n calc_tobs_dict[\"TAVG\"] = row[1]\n calc_tobs_dict[\"TMAX\"] = row[2]\n calc_tobs.append(calc_tobs_dict)\n\n return jsonify(calc_tobs)\n\nif __name__ == '__main__':\n 
{"seq_id":"165973325","text":"\"\"\"\nRead an integer value, which is the duration in seconds of a given event in a factory, and report it expressed in the format hours:minutes:seconds.\n\"\"\"\n\n# Input\ns = int(input())\n\n# Calculation\nif s < 3600:\n h = 0\n m = s // 60\n s = s % 60\n \nelse:\n h = s // 3600\n m = (s - h * 3600) // 60\n s = s - (h * 3600) - (m * 60)\n \n# Output\nprint('{}:{}:{}'.format(h,m,s))\n\n\n","sub_path":"Iniciante/1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"317999090","text":"'''\nDATA INPUT\n'''\n\n# FOLDER 2 - VIDEO 18 - INPUT MIN 04:50\n\nnome = input(\"What is your name? \")\nidade = input(\"How old are you? \")\nano_nasc = 2021-int(idade)\nprint()\nprint(f'{nome} is {idade} years old')\nprint(f'{nome} was born in {ano_nasc}')","sub_path":"2.pythonBasico/aula9/aula9.py","file_name":"aula9.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"299749187","text":"# Control flow - if, for, while\n# \"... one fork in the road leads to honesty, another path leads to the path of love.\"\n#\n# See also (advanced): built-in functions range(), zip()\n#\n# Important and very useful, but I don't expect you to know them until next class.\n# https://docs.python.org/3/tutorial/datastructures.html#looping-techniques\n#\n# Stanley H.I. Lio\n# hlio@hawaii.edu\n# OCN318, S18, S19\n\n\n\n# IF\nif True:\n print('do something')\nelse:\n print('do something else')\n\n# combined with the \"in\" operator\n# memorize this one. we use it a lot.\nif 'proof' in 'the puddin':\n print('yup')\nelse:\n print('nope')\n\n# our old friend the floats.\nif 0.1 + 0.1 + 0.1 == 0.3:\n print('I was playing with my phone')\nelse:\n print('I paid attention')\n\n\n\n\n\n\n# FOR\n# do something to each and every item in order\nfor s in ['call', 'write']:\n print('you never {}'.format(s))\n\n# repeat something multiple times\nfor i in range(5):\n print(i)\n# Observations: i starts from 0; i never reaches 5; loop ran 5 times nonetheless\n\n\n\n\n\nA = ['Born', 'Christened', 'Married', 'Took ill', 'Grew Worse', 'Died', 'Buried']\nB = ['a Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n# note: \"list()\" is for illustrative purposes. Look up \"iterable\" (advanced) if you want to learn more.\nfor a,b in list(zip(A, B)):\n print(a + ' on ' + b)\n\n\n\n\n\n# WHILE\n# \"repeat something as long as condition is True\"\n#while True:\n# print('broken record')\n# Ctrl + C to break\n\n\n\n\n\nenergy = 10000\nwhile energy > 9000:\n print(\"{}: IT'S OVER NINE THOUSAND!\".format(energy))\n energy = energy - 200\n\n\n\n# you can stop the loop midway:\nfor i in range(10):\n print(i)\n if i > 5: # note the 6. why?\n break\n\n\n\n\n\n# \"What's the point of all this?\"\n# Example 1\ntags = ['timestamp', 'temperature', 'pressure']\nreadings = ['2018-01-26 03:08:09', 25.00, 101.325]\nd = dict(zip(tags, readings))\n# now you can refer to a reading by its name\n# if you pass this to another program / person, they would know what the values mean because they are all named\nprint('Temperature = {} Deg.C, pressure = {} kPa'.format(d['temperature'], d['pressure']))\n\n\n\n\n# Example 2\nreading = {'timestamp':'2018-01-26 03:08:09', 'temperature':25.00, 'pressure':101.325}\ncalibration_factors = {'temperature':1.038, 'pressure':1.000083}\n# for \"something\" in \"some collection\" - we see this pattern a lot\nfor key in calibration_factors:\n print(reading[key]*calibration_factors[key])\n\n\n# Reading:\n# read the sections on int(), float(), sum(), bytearray()\n#https://docs.python.org/3/library/functions.html\n","sub_path":"S19/class4/jabuticaba5.py","file_name":"jabuticaba5.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
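In the same spirit as the looping-techniques link in that handout, two more patterns worth memorizing (plain standard library, nothing assumed beyond the examples above):

# enumerate() hands you the index and the item together
for i, s in enumerate(['call', 'write', 'text']):
    print(i, s)

# dict.items() walks key/value pairs directly
for key, value in {'temperature': 25.00, 'pressure': 101.325}.items():
    print(key, value)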
{"seq_id":"564585629","text":"from pathlib import Path\n\ndef create_dirs(filepath):\n cwd = Path.cwd()\n # print(f'[CURRENT DIRECTORY] {cwd}')\n # print(f'[DIRECTORY TO CREATE] {filepath}')\n # check if the last dir/file indicated in the path exists\n # if true, return False\n # else, do nothing\n if filepath.exists():\n return False\n # check if filepath is an absolute path\n # if true, create a new path relative to the current directory\n # else, do nothing\n if filepath.is_absolute():\n filepath = filepath.relative_to(cwd)\n # print(f'[RELATIVE TO CURRENT DIRECTORY] {filepath}')\n\n dir_list = []\n # separate filepath into parts and loop through it\n path_parts = filepath.parts\n for part in path_parts:\n dir_list.append(part)\n new_path = Path('\\\\'.join(dir_list))\n\n # print(f'[EVALUATING] {new_path}')\n # if the preceding path does not exist, create it\n if not new_path.exists():\n if new_path.suffix == '':\n # create folder if path is a directory\n new_path.mkdir()\n print(f'[DIRECTORY CREATED] {new_path}')\n else:\n # create file if path is a file\n new_path.touch()\n # print(f'[FILE CREATED] {new_path}')\n # else:\n # print(f'[PATH EXISTS] {part}')\n return True\n\n\ndef plog(title, msg=''):\n \"\"\"print in a specific format\"\"\"\n title = ''.join([''.join(['[', str(i), ']']) for i in title]).upper()\n print(f'{title} {str(msg)}')","sub_path":"Scripts/Downloader/Novel/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
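A usage sketch for the helpers above (hypothetical path; note that create_dirs joins parts with backslashes, so as written it is Windows-oriented):

from pathlib import Path

created = create_dirs(Path('data/raw/2021/notes.txt'))   # makes data, raw, 2021, then the file
plog(['setup', 'dirs'], 'created' if created else 'already present')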
{"seq_id":"5698564","text":"# 2021.07.01 modified\r\n# Build a training model for each individual player\r\nimport pymysql\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nimport pickle\r\nimport joblib\r\n\r\n\r\ndef _for_predict():\r\n conn = pymysql.connect(host='localhost',\r\n user='root',\r\n password='chldlstns1!',\r\n db='baseball',\r\n charset='utf8')\r\n cursor = conn.cursor(pymysql.cursors.DictCursor)\r\n conn.close()\r\n\r\n\r\ndef _all_players_list():\r\n conn = pymysql.connect(host='localhost',\r\n user='root',\r\n password='chldlstns1!',\r\n db='baseball',\r\n charset='utf8')\r\n cursor = conn.cursor()\r\n sql = \"SELECT player_id FROM players;\"\r\n cursor.execute(sql)\r\n rows = cursor.fetchall()\r\n players_list = []\r\n for row in rows:\r\n players_list += row\r\n \r\n return players_list\r\n\r\n\r\ndef _get_pof(player_id):\r\n conn = pymysql.connect(host='localhost',\r\n user='root',\r\n password='chldlstns1!',\r\n db='baseball',\r\n charset='utf8')\r\n cursor = conn.cursor(pymysql.cursors.DictCursor)\r\n sql = \"SELECT * FROM pof WHERE player_id=%s;\"\r\n cursor.execute(sql, player_id)\r\n rows = cursor.fetchall()\r\n player_list = []\r\n for row in rows:\r\n tmp_list = [row['player_id'],\r\n row['temper'],\r\n row['humidity'],\r\n row['rain_prob'],\r\n row['wind'],\r\n row['stadium_prob'],\r\n row['home_away'],\r\n row['oppo_team'],\r\n row['last_7day'],\r\n row['last_30day'],\r\n row['weekly'],\r\n row['night'],\r\n row['first'],\r\n row['second'],\r\n row['third'],\r\n row['fourth'],\r\n row['fifth'],\r\n row['sixth'],\r\n row['seventh'],\r\n row['eighth'],\r\n row['ba'],\r\n row['hit_num'],\r\n row['is_hit']]\r\n player_list.append(tmp_list)\r\n\r\n tmp_arr = [list(x) for x in zip(*player_list)]\r\n player_dic = dict(zip(['player_id',\r\n 'temper',\r\n 'humidity',\r\n 'rain_prob',\r\n 'wind',\r\n 'stadium_prob',\r\n 'home_away',\r\n 'oppo_team',\r\n 'last_7day',\r\n 'last_30day',\r\n 'weekly',\r\n 'night',\r\n 'first',\r\n 'second',\r\n 'third',\r\n 'fourth',\r\n 'fifth',\r\n 'sixth',\r\n 'seventh',\r\n 'eighth',\r\n 'ba',\r\n 'hit_num',\r\n 'is_hit'\r\n ], tmp_arr))\r\n conn.close()\r\n return player_dic\r\n\r\n\r\ndef _train(player_dic=None, player_id=None):\r\n if not player_dic:\r\n return\r\n df = pd.DataFrame(player_dic)\r\n try:\r\n X = df.drop(['player_id', 'is_hit'], axis=1)\r\n y = df['is_hit']\r\n\r\n # data split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)\r\n rf = RandomForestClassifier(random_state=0)\r\n rf.fit(X_train, y_train)\r\n pred = rf.predict(X_test)\r\n \r\n # persist the fitted classifier (not the predictions) under the model name\r\n model_name = 'players_model/%d_all.pkl' % player_id\r\n joblib.dump(rf, model_name)\r\n print(accuracy_score(y_test, pred))\r\n\r\n # extract feature_importances\r\n feature_importance = rf.feature_importances_\r\n\r\n # sort\r\n # print(df.sort_values(by=0, ascending=False))\r\n\r\n return feature_importance\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\n# analyze players feature importances\r\ndef _statistics(players_array):\r\n players_df = DataFrame(players_array, columns=['temper', 'humidity', 'rain_prob', 'wind', 'stadium_prob', 'home_away', 'oppo_team', 'last_7days', 'last_30days', 'weekly', 'night', 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ba', 'hit_num']).dropna()\r\n print('features importances')\r\n print(players_df)\r\n print(players_df.mean().sort_values(ascending=False))\r\n\r\n\r\ndef main():\r\n players_list = _all_players_list()\r\n players_array = np.zeros(shape=(len(players_list), 21))\r\n idx = 0\r\n for player in players_list:\r\n tmp = _train(_get_pof(player), player)\r\n players_array[idx] = tmp\r\n idx += 1\r\n\r\n _statistics(players_array)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"baseball_predict/sort_by_players.py","file_name":"sort_by_players.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
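Since _train stores a fitted forest per player under players_model/, it can be reloaded and inspected later. A sketch (the player id is hypothetical; the pickle is assumed to hold the fitted classifier, and the feature order follows _statistics):

import joblib

rf = joblib.load('players_model/12345_all.pkl')
names = ['temper', 'humidity', 'rain_prob', 'wind', 'stadium_prob', 'home_away',
         'oppo_team', 'last_7days', 'last_30days', 'weekly', 'night', 'first', 'second',
         'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ba', 'hit_num']
# print the five most informative features for this player
for name, score in sorted(zip(names, rf.feature_importances_),
                          key=lambda p: p[1], reverse=True)[:5]:
    print(name, round(score, 3))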
{"seq_id":"146130510","text":"# created by ray, andre, and khoa, luca\n# created on october 3, 2017\n# created for isc3u\n# created for unit 3-01 daily assignment\n# this program has a mystery number which the user has to guess\n\nimport ui\n\n# constant\n\nMYSTERYNUM = 5\n\ndef check_number_button_touch_up_inside(sender):\n # this function checks if the guess is correct\n \n # input\n number_entered = int(view['number_textfield'].text)\n \n # process\n if number_entered == MYSTERYNUM:\n \n # output\n view['correct_label'].text = \"Correct!\"\n\nview = ui.load_view()\nview.present('sheet')\n","sub_path":"Unit3-01.py","file_name":"Unit3-01.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
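A small extension of the guessing handler above (a sketch only: it assumes the same Pythonista UI layout and adds a hypothetical hint branch for wrong guesses):

def check_number_button_touch_up_inside(sender):
    number_entered = int(view['number_textfield'].text)
    if number_entered == MYSTERYNUM:
        view['correct_label'].text = "Correct!"
    elif number_entered < MYSTERYNUM:
        view['correct_label'].text = "Too low - try again"
    else:
        view['correct_label'].text = "Too high - try again"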
{"seq_id":"337612619","text":"#!/usr/bin/python\n# -*- coding: utf_8 -*-\n\n# By Dennis Drescher (sparkycbr at gmail dot com)\n\n###############################################################################\n######################### Description/Documentation ###########################\n###############################################################################\n\n# This class will handle page elements that are used for diagnosing format\n# issues in the document. Elements like lines for leading lines and other\n# indicators that help with formatting tasks can be added to the output.\n\n###############################################################################\n################################ Component Class ##############################\n###############################################################################\n# Firstly, import all the standard Python modules we need for\n# this process\n\nimport os, subprocess, shutil, tempfile, codecs\n\n# Load the local classes\nfrom rapuma.core.tools import Tools\nfrom rapuma.core.user_config import UserConfig\nfrom rapuma.project.proj_config import Config\nfrom rapuma.core.proj_local import ProjLocal\nfrom rapuma.core.proj_log import ProjLog\n\n\nclass ProjDiagnose (object) :\n\n def __init__(self, pid, gid = None) :\n '''Initiate the whole class and create the object.'''\n\n# import pdb; pdb.set_trace()\n\n self.pid = pid\n self.gid = gid\n self.local = ProjLocal(pid, gid)\n self.tools = Tools()\n self.proj_config = Config(pid, gid)\n self.proj_config.getProjectConfig()\n self.proj_config.getLayoutConfig()\n self.layoutConfig = self.proj_config.layoutConfig\n self.user = UserConfig()\n self.userConfig = self.user.userConfig\n self.log = ProjLog(pid)\n\n # conversion factor from [mm] to [px] is 72/25.4 \n self.mmToPx = 72 / 25.4\n # page width [px]\n self.paperPxWidth = round(self.mmToPx * float(self.layoutConfig['PageLayout']['pageWidth']),1)\n # page height [px]\n self.paperPxHeight = round(self.mmToPx * float(self.layoutConfig['PageLayout']['pageHeight']),1)\n\n # Log messages for this module\n self.errorCodes = {\n\n '0000' : ['MSG', 'Placeholder message'],\n '1310' : ['WRN', 'Failed to add diagnostic component: [<<1>>] with error: [<<2>>]']\n\n }\n\n\n###############################################################################\n############################### Create Functions ##############################\n###############################################################################\n######################## Error Code Block Series = 1000 #######################\n###############################################################################\n\n\n def turnOnDiagnostic (self) :\n '''Change the layout config settings to turn on the diagnostic layer.'''\n\n if not self.tools.str2bool(self.layoutConfig['DocumentFeatures']['useDiagnostic']) :\n self.layoutConfig['DocumentFeatures']['useDiagnostic'] = True\n self.tools.writeConfFile(self.layoutConfig)\n\n\n def turnOffDiagnostic (self) :\n '''Change the layout config settings to turn off the diagnostic layer.'''\n\n if self.tools.str2bool(self.layoutConfig['DocumentFeatures']['useDiagnostic']) :\n self.layoutConfig['DocumentFeatures']['useDiagnostic'] = False\n self.tools.writeConfFile(self.layoutConfig)\n\n\n def addTransparency (self, target, force = False) :\n '''Add a transparent layer to a rendered PDF file. This will\n aid in diagnosing format issues. Using force will cause any\n existing layer file to be remade.'''\n\n # Do a quick check if the transparency needs to be remade\n # The transparency normally is not remade if one already exists.\n # If one is there, it can be remade in two ways, with a force\n # or a regenerate command.\n if force :\n self.createDiagnostic()\n elif self.tools.str2bool(self.layoutConfig['DocumentFeatures']['regenerateTransparency']) :\n self.createDiagnostic()\n else :\n # If there isn't one, make it\n if not os.path.exists(self.local.diagnosticFile) :\n self.createDiagnostic()\n\n # Create a special temp named file for the target\n tmpTarget = tempfile.NamedTemporaryFile().name\n # Copy the target to the tmpTarget\n shutil.copy(target, tmpTarget)\n # Overlay the transparency diagnostic file over the tmpTarget\n self.tools.mergePdfFilesPdftk(tmpTarget, self.local.diagnosticFile)\n\n # Create a special name for the file with the background\n # Then merge and save it\n viewFile = self.tools.alterFileName(target, 'view')\n\n # Copy the results back to the target (should be done now)\n shutil.copy(tmpTarget, viewFile)\n\n # Not returning a file name would mean it failed\n if os.path.exists(viewFile) :\n return viewFile\n\n\n def createDiagnostic (self) :\n '''Create a diagnostic transparency (file) that will be\n superimposed over the page contents to help diagnose format\n issues. This will overwrite any existing transparency file and\n will add each recognized diagnostic type found in the \n diagnosticComponents config setting.'''\n\n# import pdb; pdb.set_trace()\n\n self.createBlankTransparency()\n\n # Add each component to the blank transparency file\n for comp in self.layoutConfig['DocumentFeatures']['diagnosticComponents'] :\n try :\n getattr(self, 'merge' + comp.capitalize())()\n except Exception as e :\n self.log.writeToLog(self.errorCodes['1310'],[comp,str(e)])\n pass\n\n return True\n\n\n def createBlankTransparency (self) :\n '''Create a blank background page according to the trim size\n specified.'''\n\n # Set the temp svg file name\n svgFile = tempfile.NamedTemporaryFile().name\n\n # Be sure there is an illustrations folder in place\n if not os.path.isdir(self.local.projIllustrationFolder) :\n os.mkdir(self.local.projIllustrationFolder)\n\n # Write out SVG document text \n with codecs.open(svgFile, 'wb') as fbackgr : # open file for writing \n fbackgr.write( '''\n ''')\n\n shutil.copy(self.tools.convertSvgToPdfRsvg(svgFile), self.local.diagnosticFile)\n\n\n###############################################################################\n############################# Component Functions #############################\n###############################################################################\n######################## Error Code Block Series = 2000 #######################\n###############################################################################\n\n def mergeLeading (self) :\n '''Create a diagnostic page component that has lines to indicate\n the text leading. 
This will be superimposed over the contents of\n the trim page, not in the background like a watermark, etc.'''\n\n # Initialize the process\n svgFile = tempfile.NamedTemporaryFile().name\n\n # PAGE DIMENSIONS\n # The page dimensions extracted from layoutConfig are in [mm] and\n # must be converted to pixels [px]; the conversion factor for [mm] to [px] is 72/25.4\n # bodyFontSize [px]\n bodyFontSize = self.layoutConfig['TextElements']['bodyFontSize']\n bodyFontPxSize = round(float(bodyFontSize) * 72/72.27,3)\n # bodyTextLeading [px]\n bodyTextLeading = self.layoutConfig['TextElements']['bodyTextLeading']\n bodyTextPxLeading = round(float(bodyTextLeading) * 72/72.27,3)\n # top margin [px]\n topMargin = self.layoutConfig['PageLayout']['topMargin']\n topPxMargin = round(self.mmToPx * float(topMargin),1)\n # outside margin [px]\n outsideMargin = self.layoutConfig['PageLayout']['outsideMargin']\n outsidePxMargin = round(self.mmToPx * float(outsideMargin),1)\n # inside margin [px]\n insideMargin = self.layoutConfig['PageLayout']['insideMargin']\n insidePxMargin = round(self.mmToPx * float(insideMargin),1)\n # bottom margin [px]\n bottomMargin = self.layoutConfig['PageLayout']['bottomMargin']\n bottomPxMargin = round(self.mmToPx * float(bottomMargin),1)\n # width of the body text\n textPxWidth = self.paperPxWidth - (outsidePxMargin + insidePxMargin)\n\n # Create the svg file\n with codecs.open(svgFile, 'wb') as fbackgr : # open file for writing \n # starting lines of SVG xml\n fbackgr.write( '''\n \\n \\n''')\n fbackgr.write( '''\n \\n \\n''')\n fbackgr.write( '''\n \\n \\n''')\n # add line number '1' to top line just left of margin\n fbackgr.write( ''' 1''')\n # add line numbers to all lines down to bottom margin, starting with line number\n # counter linecount = 2, the distance counter runs from '0' till one short of \n # the quotient (distance between top and bottom margin)/bodyTextPxLeading\n num = 0 # line counter\n linenumber = 2 # line number\n while (num < int(round(self.paperPxHeight - bottomPxMargin - topPxMargin)/bodyTextPxLeading)):\n fbackgr.write( '''''' + str(linenumber) + '''''') \n linenumber = linenumber +1 \n num = num +1\n fbackgr.write(''' \n \\n \n page size: ''' + str(int(self.paperPxWidth/72*25.4+.5)) + ''' x ''' + str(int(self.paperPxHeight/72*25.4+.5)) + ''' mm ; font size: ''' + str(bodyFontSize) + ''' pt; leading: ''' + str(bodyTextLeading) + ''' pt\n \\n \n \n \n ''')\n \n # Convert the lines background component to PDF\n leadingPdf = self.tools.convertSvgToPdfRsvg(svgFile)\n\n # Merge leadingPdf with existing transparency\n results = self.tools.mergePdfFilesPdftk(self.local.diagnosticFile, leadingPdf)\n # Test and return if good\n if os.path.isfile(results) :\n return True\n\n \n","sub_path":"lib/rapuma/project/proj_diagnose.py","file_name":"proj_diagnose.py","file_ext":"py","file_size_in_byte":13377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"117902701","text":"# -*- coding: utf-8 -*-\n\nimport KBEngine\nfrom KBEDebug import *\nfrom IdManager import IDManager\nfrom consts import MatchArgs, SpaceType\n\n\nclass SpaceRoom(KBEngine.Space):\n\tdef __init__(self):\n\t\tKBEngine.Space.__init__(self)\n\n\t\tself._wait_to_match_avatar_set = set()\n\t\tself._avatar_dict = {} # all avatars in this space\n\t\tself._waitToEnterList = list() # avatars waiting to enter this space\n\n\t\tself.spaceMgr = KBEngine.globalData['SpaceMgr']\n\n\tdef enter(self, avatar_entity_call):\n\t\tif self.uType == 
SpaceType.SPACE_TYPE_HALL:\n\t\t\tavatar_entity_call.createCellEntity(self.cell)\n\t\telse:\n\t\t\t# entering the battlefield from the hall\n\t\t\t# leave the hall first\n\t\t\tavatar_entity_call.curHallSpace.leave(avatar_entity_call.id)\n\t\t\tavatar_entity_call.cell.onTeleportSpaceCB(self, self.cell, (0, 0, 0), (0, 0, 0))\n\n\t\tself._avatar_dict[avatar_entity_call.id] = avatar_entity_call\n\n\t\tavatar_entity_call.onEnterSpace(self.id, self.uType)\n\n\tdef leave(self, avatar_id):\n\t\tavatar = self._avatar_dict.get(avatar_id, None)\n\t\tif avatar:\n\t\t\tdel self._avatar_dict[avatar_id]\n\n\t\tself.cell and self.cell.leave(avatar_id)\n\n\tdef onGetCell(self):\n\t\tDEBUG_MSG(\"[Space], onGetCell, id=%i, type=%i\" % (self.id, self.uType))\n\n\t\tKBEngine.globalData[\"SpaceMgr\"].onSpaceGetCell(self.id, self.uType, self)\n\n\t\t# process the queue of entities waiting to enter this space\n\t\tfor entityCall in self._waitToEnterList:\n\t\t\tself.enter(entityCall)\n\t\t# clear the list\n\t\tself._waitToEnterList = []\n\n\tdef onLoseCell(self):\n\t\tDEBUG_MSG(\"[Space], onLoseCell, id=%i, type=%i\", (self.id, self.uType))\n\n\t\tKBEngine.globalData[\"SpaceMgr\"].onSpaceLoseCell(self.id, self.uType)\n\n\tdef addWaitToEnter(self, avatarEntityCall):\n\t\t\"\"\"\n\t\tWhen the space has not finished being created, add the target entity to the list of entities waiting to enter the room\n\t\t:param avatarEntityCall: the target entity that wants to enter the room\n\t\t:return:\n\t\t\"\"\"\n\t\tINFO_MSG(\"[SpaceRoom], %i, %i, avatar:%s addWaitToEnter.\" % (self.id, self.uType, avatarEntityCall))\n\t\tself._waitToEnterList.append(avatarEntityCall)\n\n\tdef avatarReqMatch(self, avatarID):\n\t\tINFO_MSG(\"[SpaceRoom], %i, %i, avatar:%s reqMatch.\" % (self.id, self.uType, avatarID))\n\n\t\tself._wait_to_match_avatar_set.add(avatarID)\n\n\t\tmatchNum = len(self._wait_to_match_avatar_set)\n\t\tif matchNum >= MatchArgs.BATTLE_ROOM_MATCH_PLAYER_NUM:\n\t\t\tINFO_MSG(\"[SpaceRoom], %i, %i, ready to enter battle room\" % (self.id, self.uType))\n\n\t\t\t_readyAvatarSet = set()\n\t\t\tfor _ava_id in self._wait_to_match_avatar_set:\n\t\t\t\t_avatar = self._avatar_dict.get(_ava_id, None)\n\t\t\t\t_avatar and _readyAvatarSet.add(_avatar)\n\n\t\t\tself.spaceMgr.enterSpace(_readyAvatarSet, SpaceType.SPACE_TYPE_BATTLE)\n\n\t\t\tself._wait_to_match_avatar_set = set()\n\t\t\treturn\n\n\t\tfor ava_id in self._wait_to_match_avatar_set:\n\t\t\t_avatar = self._avatar_dict.get(ava_id, None)\n\t\t\tif not _avatar:\n\t\t\t\tcontinue\n\n\t\t\t_avatar.client.onMatch(matchNum)\n\n\n\n\n\n","sub_path":"Server/assets/scripts/base/SpaceRoom.py","file_name":"SpaceRoom.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"104343452","text":"import os\nimport time\nimport argparse\n\n# Get terminal inputs\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"--type\", dest=\"AM_type\", help=\"LF or MF\", type=str)\nparser.add_argument(\"--cut\", dest=\"cut\", help=\"Cut defining this catalog\", type=str)\nparser.add_argument(\"--name\", dest=\"name\", help=\"Name of this run\", type=str)\nparser.add_argument(\"--nd_gal\", dest=\"nd_gal\", help=\"nd_gal filename, assumes it is in ../Data/\", type=str)\nparser.add_argument(\"--openMPInthreads\", dest=\"openMPInthreads\", help=\"Number of threads for calc. CFs\", type=str)\nparser.add_argument(\"--MPInthreads\", dest=\"MPInthreads\", help=\"Number of threads for gen. 
AM mocks\", type=str)\n\nargs = parser.parse_args()\n\nalpha_min, alpha_max = -5.0, -0.5\nscatter_min, scatter_max = 0.005, 0.25\nNgrid = 20\n\nfname_test = '/mnt/extraspace/rstiskalek/Catalogs/AMmocks_COMPLETE_{}.npy'.format(args.cut)\n# Check if some old AM mocks already generated with this name, if yes delete them\nif os.path.exists(fname_test):\n command = \"rm {}\".format(fname_test)\n\n\n# Generate AM mocks\ns1 = \"addqueue -q berg -n {} -m 8 /usr/bin/python3 Gen_AMmocks.py \".format(args.MPInthreads)\ns2 = \"--type '{}' --cut {} --name '{}' --nd_gal '{}' \".format(args.AM_type, args.cut, args.name, args.nd_gal)\ns3 = \"--alpha_min {} --alpha_max {} --scatter_min {} --scatter_max {} --Ngrid {}\".format(alpha_min, alpha_max, scatter_min, scatter_max, Ngrid)\ncommand = s1 + s2 + s3\nprint(\"Going to generate AM mocks\")\nos.system(command)\n\n# Have a 1 minute sleep period\nperiod = 60\ntime.sleep(period*10)\n\nwhile True:\n if os.path.exists(fname_test):\n command = \"addqueue -s -q berg -n 1x{} -m 4 /usr/bin/python3 Calc_fit.py --cut {} --name {} --nthreads {} --Ngrid {}\".format(args.openMPInthreads, args.cut, args.name, args.openMPInthreads, Ngrid)\n print(\"Going to calculate the likelihoods\")\n os.system(command)\n\n command = \"rm {}\".format(fname_test)\n os.system(command)\n break\n else:\n time.sleep(period)\n","sub_path":"src/Old/Run_cut.py","file_name":"Run_cut.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475009373","text":"from tts_modules.encoder.models.dVecModel import DVecModel\nfrom tts_modules.encoder.data.WavPreprocessor import StandardAudioPreprocessor\nfrom tts_modules.encoder.data.Wav2MelTransform import StandardWav2MelTransform\n\nimport torch\nimport numpy as np\nimport yaml\nimport os\n\n\nclass SpeakerEncoderManager:\n def __init__(self, configs, model, checkpoint_path, preprocessor=None, wav2mel=None):\n self.configs = configs\n self.preprocessor = preprocessor\n if preprocessor is None:\n self.preprocessor = StandardAudioPreprocessor(configs[\"AudioConfig\"])\n self.wav2mel = wav2mel\n if wav2mel is None:\n self.wav2mel = StandardWav2MelTransform(configs[\"AudioConfig\"])\n\n self.checkpoint_path = checkpoint_path\n self.current_embed = None\n with open(configs[\"AudioConfig\"], \"r\") as ymlfile:\n self.AudioConfig = yaml.load(ymlfile)\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n if model is None:\n self.__init_dvec_model()\n self.__load_model()\n self.model = self.model.to(self.device)\n\n def __init_dvec_model(self):\n with open(self.configs[\"SpeakerEncoderConfig\"], \"r\") as ymlfile:\n self.SpeakerEncoderConfig = yaml.load(ymlfile)\n self.model = DVecModel(self.device, self.device, self.SpeakerEncoderConfig)\n\n def process_speaker(self, speaker_speech_path, save_embeddings_path=None,\n save_embeddings_speaker_name=\"test_speaker\"):\n processed_wav = self.preprocessor.preprocess_wav(speaker_speech_path)\n\n embed = self.embed_utterance(processed_wav)\n self.current_embed = embed\n if save_embeddings_path is not None:\n self.save_embeddings(self, save_embeddings_path, save_embeddings_speaker_name)\n\n return embed\n\n def save_embeddings(self, save_embeddings_path,save_embeddings_speaker_name):\n np.save(os.path.join(save_embeddings_path,save_embeddings_speaker_name), self.current_embed)\n\n def __load_model(self):\n checkpoint = torch.load(self.checkpoint_path, 
map_location=self.device)\n self.model.load_state_dict(checkpoint[\"model_state\"])\n self.model.eval()\n\n def embed_utterance(self, wav, using_partials=True, return_partials=False):\n\n # Process the entire utterance if not using partials\n if not using_partials:\n # processed_wav = self.preprocessor.preprocess_wav(wav)\n frames = self.wav2mel.Wav2Mel(wav)\n frames = torch.from_numpy(frames[None, ...]).to(self.device)\n embed = self.model.forward(frames).detach().cpu().numpy()\n self.current_embed = embed[0]\n if return_partials:\n return embed[0], None, None\n\n return embed[0]\n\n # Compute where to split the utterance into partials and pad if necessary\n wave_slices, mel_slices = self.compute_partial_slices(len(wav))\n max_wave_length = wave_slices[-1].stop\n if max_wave_length >= len(wav):\n wav = np.pad(wav, (0, max_wave_length - len(wav)), \"constant\")\n\n # Split the utterance into partials\n # processed_wav = self.preprocessor.preprocess_wav(wav)\n frames = self.wav2mel.Wav2Mel(wav)\n frames_batch = np.array([frames[s] for s in mel_slices])\n frames = torch.from_numpy(frames_batch).to(self.device)\n partial_embeds = self.model.forward(frames).detach().cpu().numpy()\n\n # Compute the utterance embedding from the partial embeddings\n raw_embed = np.mean(partial_embeds, axis=0)\n embed = raw_embed / np.linalg.norm(raw_embed, 2)\n self.current_embed = embed\n if return_partials:\n return embed, partial_embeds, wave_slices\n return embed\n\n def compute_partial_slices(self, n_samples, min_pad_coverage=0.75, overlap=0.5):\n \"\"\"\n Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain\n partial utterances of each. Both the waveform and the mel\n spectrogram slices are returned, so as to make each partial utterance waveform correspond to\n its spectrogram. This function assumes that the mel spectrogram parameters used are those\n defined in params_data.py.\n\n The returned ranges may be indexing further than the length of the waveform. It is\n recommended that you pad the waveform with zeros up to wave_slices[-1].stop.\n\n :param n_samples: the number of samples in the waveform\n :param partial_utterance_n_frames: (read from the audio config) the number of mel spectrogram frames in each partial\n utterance\n :param min_pad_coverage: when reaching the last partial utterance, it may or may not have\n enough frames. If at least min_pad_coverage of partial_utterance_n_frames are present,\n then the last partial utterance will be considered, as if we padded the audio. Otherwise,\n it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial\n utterance, this parameter is ignored so that the function always returns at least 1 slice.\n :param overlap: by how much the partial utterance should overlap. If set to 0, the partial\n utterances are entirely disjoint.\n :return: the waveform slices and mel spectrogram slices as lists of array slices. Index\n respectively the waveform and the mel spectrogram with these slices to obtain the partial\n utterances.\n \"\"\"\n config = self.AudioConfig\n sampling_rate = config[\"SAMPLING_RATE\"]\n mel_window_step = config[\"MEL_WINDOW_STEP\"]\n partial_utterance_n_frames = config[\"PARTIAL_UTTERANCE_N_FRAMES\"]\n\n assert 0 <= overlap < 1\n assert 0 < min_pad_coverage <= 1\n\n samples_per_frame = int((sampling_rate * mel_window_step / 1000))\n n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))\n frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)\n\n # Compute the slices\n wav_slices, mel_slices = [], []\n steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1)\n for i in range(0, steps, frame_step):\n mel_range = np.array([i, i + partial_utterance_n_frames])\n wav_range = mel_range * samples_per_frame\n mel_slices.append(slice(*mel_range))\n wav_slices.append(slice(*wav_range))\n\n # Evaluate whether extra padding is warranted or not\n last_wav_range = wav_slices[-1]\n coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)\n if coverage < min_pad_coverage and len(mel_slices) > 1:\n mel_slices = mel_slices[:-1]\n wav_slices = wav_slices[:-1]\n\n return wav_slices, mel_slices\n\n","sub_path":"src/tts_modules/encoder/SpeakerEncoderManager.py","file_name":"SpeakerEncoderManager.py","file_ext":"py","file_size_in_byte":6967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
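A worked example of compute_partial_slices, assuming typical values in the audio config (16 kHz sampling, 10 ms mel step, 160 frames per partial, the default 50% overlap): samples_per_frame = 16000*10/1000 = 160; a 3-second clip gives n_frames = ceil(48001/160) = 301 and frame_step = 80, so partials start at frames 0, 80 and 160, and the last one covers 87.5% of real audio, which clears min_pad_coverage:

wavs, mels = manager.compute_partial_slices(48000)   # 'manager' is a SpeakerEncoderManager
print(len(mels))      # 3
print(mels[0])        # slice(0, 160)
print(wavs[0])        # slice(0, 25600)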
config = self.AudioConfig\n        sampling_rate = config[\"SAMPLING_RATE\"]\n        mel_window_step = config[\"MEL_WINDOW_STEP\"]\n        partial_utterance_n_frames = config[\"PARTIAL_UTTERANCE_N_FRAMES\"]\n\n        assert 0 <= overlap < 1\n        assert 0 < min_pad_coverage <= 1\n\n        samples_per_frame = int((sampling_rate * mel_window_step / 1000))\n        n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))\n        frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)\n\n        # Compute the slices\n        wav_slices, mel_slices = [], []\n        steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1)\n        for i in range(0, steps, frame_step):\n            mel_range = np.array([i, i + partial_utterance_n_frames])\n            wav_range = mel_range * samples_per_frame\n            mel_slices.append(slice(*mel_range))\n            wav_slices.append(slice(*wav_range))\n\n        # Evaluate whether extra padding is warranted or not\n        last_wav_range = wav_slices[-1]\n        coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)\n        if coverage < min_pad_coverage and len(mel_slices) > 1:\n            mel_slices = mel_slices[:-1]\n            wav_slices = wav_slices[:-1]\n\n        return wav_slices, mel_slices\n","sub_path":"src/tts_modules/encoder/SpeakerEncoderManager.py","file_name":"SpeakerEncoderManager.py","file_ext":"py","file_size_in_byte":6967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"384299756","text":"\"\"\"A Python client for Mozilla's basket service.\"\"\"\nfrom basket.base import ( # noqa: F401\n    BasketException,\n    BasketNetworkException,\n    confirm,\n    confirm_email_change,\n    debug_user,\n    get_newsletters,\n    lookup_user,\n    request,\n    send_recovery_message,\n    send_sms,\n    start_email_change,\n    subscribe,\n    unsubscribe,\n    update_user,\n    user,\n)\n\nVERSION = \"1.1.0\"\n","sub_path":"src/basket/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"196535185","text":"import random\n\nwords = []\n\nwith open('txt/constitution.txt', 'r') as f:\n    for line in f:\n        # Remove whitespace from the ends\n        stripped_line = line.strip()\n        # Split the line into a list of words\n        line_words = stripped_line.split()\n        # Add each word to our master list\n        words.extend(line_words)\n\nnext_options = {}\n\nfor i in range(len(words)-2):\n    bigram = (words[i], words[i+1])\n    next_word = words[i+2]\n    if bigram not in next_options:\n        next_options[bigram] = [next_word]\n    else:\n        next_options[bigram].append(next_word)\n\n# Uncomment the next two lines to see a full\n# print of the entire probabilistic model\n# for key in sorted(next_options):\n#     print('{}: {}'.format(key, next_options[key]))\n\ndef generate_sentence(n):\n    # Declare current bigram, starting from a random key of the model\n    current = random.choice(list(next_options))\n    # Print words in current bigram\n    print(current[0], current[1], end=' ')\n    # Loop n times:\n    for _ in range(n):\n        # Stop early if the current bigram never occurred mid-text\n        if current not in next_options:\n            break\n        # Use current bigram to look up list of possible next words, and pick random next word from that list\n        next_word = random.choice(next_options[current])\n        # Print the chosen word\n        print(next_word, end=' ')\n        # Use the chosen word to create new current bigram\n        current = (current[1], next_word)\n    print()\n\ngenerate_sentence(20)\n","sub_path":"Unit3-Applications/hw9-text-gen/text-gen.py","file_name":"text-gen.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"548249459","text":"from selenium import 
webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import WebDriverException\nimport time\nfrom django.test import LiveServerTestCase\n\n\nMAX_WAIT = 10\nglobal_urls_storage = {}\n\n\nclass NewVisitorTest(LiveServerTestCase):\n def setUp(self):\n self.browser = webdriver.Firefox()\n self.test_points_elis = [\n \"Купить павлиньи перья\",\n \"Сделать мушку из павлиньих перьев\",\n ]\n self.test_points_frenk = [\"Купить молоко\"]\n self.list_url = f\"{self.live_server_url}/lists/\"\n\n def tearDown(self):\n self.browser.quit()\n\n def wait_for_row_in_list_table(self, row_text):\n start_time = time.time()\n while True:\n table = self.browser.find_element_by_id(\"id_list_table\")\n rows = table.find_elements_by_tag_name(\"tr\")\n try:\n self.assertIn(row_text, [row.text for row in rows])\n except AssertionError as e:\n if time.time() - start_time > MAX_WAIT:\n raise e\n time.sleep(0.2)\n except WebDriverException as e:\n print(\"\\n\\nERROR\\n\\n\")\n raise e\n else:\n return\n\n def test_1_list_for_one_user(self):\n self.browser.get(self.list_url)\n self.assertIn(\"To-Do\", self.browser.title)\n header_text = self.browser.find_element_by_tag_name(\"h1\").text\n self.assertIn(\"To-Do\", header_text)\n\n inputbox = self.browser.find_element_by_id(\"id_new_item\")\n self.assertEqual(inputbox.get_attribute(\"placeholder\"), \"Enter a to-do item\")\n inputbox.send_keys(self.test_points_elis[0])\n inputbox.send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table(f\"1: {self.test_points_elis[0]}\")\n\n inputbox = self.browser.find_element_by_id(\"id_new_item\")\n inputbox.send_keys(self.test_points_elis[1])\n inputbox.send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table(f\"1: {self.test_points_elis[0]}\")\n self.wait_for_row_in_list_table(f\"2: {self.test_points_elis[1]}\")\n\n def test_2_multiple_users_lists_at_different_urls(self):\n self.browser.get(self.list_url)\n inputbox = self.browser.find_element_by_id(\"id_new_item\")\n inputbox.send_keys(self.test_points_elis[0])\n inputbox.send_keys(Keys.ENTER)\n global_urls_storage[\"elis\"] = self.browser.current_url\n self.wait_for_row_in_list_table(f\"1: {self.test_points_elis[0]}\")\n self.assertRegex(self.browser.current_url, f\"{self.list_url}.+\")\n\n def test_3_new_user_new_list(self):\n self.browser.get(self.list_url)\n page_test = self.browser.find_element_by_tag_name(\"body\").text\n self.assertNotIn(self.test_points_elis[0], page_test)\n self.assertNotIn(self.test_points_elis[1], page_test)\n\n inputbox = self.browser.find_element_by_id(\"id_new_item\")\n inputbox.send_keys(self.test_points_frenk[0])\n inputbox.send_keys(Keys.ENTER)\n self.wait_for_row_in_list_table(f\"1: {self.test_points_frenk[0]}\")\n\n global_urls_storage[\"frenk\"] = self.browser.current_url\n self.assertRegex(self.browser.current_url, f\"{self.list_url}.+\")\n self.assertNotEqual(\n global_urls_storage.get(\"elis\"), global_urls_storage.get(\"frenk\")\n )\n\n page_test = self.browser.find_element_by_tag_name(\"body\").text\n self.assertNotIn(self.test_points_elis[0], page_test)\n self.assertNotIn(self.test_points_elis[1], page_test)\n","sub_path":"mlm/mlm/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"176424910","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n 
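# builds on the previous auto-generated growlerdeals migration\n        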
('growlerdeals', '0010_auto_20150601_1851'),\n    ]\n\n    operations = [\n        migrations.RemoveField(\n            model_name='brewery',\n            name='address_line2',\n        ),\n        migrations.AddField(\n            model_name='brewery',\n            name='website',\n            field=models.CharField(max_length=50, blank=True),\n        ),\n        migrations.AlterField(\n            model_name='brewery',\n            name='address_line1',\n            field=models.CharField(max_length=50, verbose_name=b'Address line 1', blank=True),\n        ),\n    ]\n","sub_path":"growlerdeals/migrations/0011_auto_20150602_1722.py","file_name":"0011_auto_20150602_1722.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"485505047","text":"from typing import List, Tuple\n\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom tasks.bert_utils import convert_pair_to_feature, pad_sequences\nfrom tokenizer import BaseTokenizer, Vocab\n\n\nclass PAWSDataset(Dataset):\n    \"\"\"\n    Each item of this Dataset is an input tuple laid out as follows.\n    Index 0: input token ids\n    Index 1: attention mask\n    Index 2: token type ids\n    Index 3: labels\n    \"\"\"\n\n    def __init__(\n        self,\n        sentence_as: List[str],\n        sentence_bs: List[str],\n        labels: List[int],\n        vocab: Vocab,\n        tokenizer: BaseTokenizer,\n        max_sequence_length: int,\n    ):\n        self.sentence_as = sentence_as\n        self.sentence_bs = sentence_bs\n        self.labels = torch.tensor(labels)\n\n        self.vocab = vocab\n        self.tokenizer = tokenizer\n        self.max_sequence_length = max_sequence_length\n\n        self.bert_inputs = self._prepare_data(sentence_as, sentence_bs)\n\n    def __len__(self) -> int:\n        return self.labels.size(0)\n\n    def __getitem__(self, item) -> Tuple[torch.Tensor, ...]:\n        batch = (\n            self.bert_inputs[0][item],\n            self.bert_inputs[1][item],\n            self.bert_inputs[2][item],\n            self.labels[item],\n        )\n        return batch\n\n    def _prepare_data(self, sentence_as: List[str], sentence_bs: List[str]) -> Tuple[torch.Tensor, ...]:\n        input_features = [\n            convert_pair_to_feature(sentence_a, sentence_b, self.tokenizer, self.vocab, self.max_sequence_length)\n            for sentence_a, sentence_b in zip(sentence_as, sentence_bs)\n        ]\n\n        padded_token_ids = torch.tensor(\n            pad_sequences(\n                [feature[0] for feature in input_features],\n                padding_value=self.vocab.pad_token_id,\n                max_length=self.max_sequence_length,\n            ),\n            dtype=torch.long,\n        )\n        padded_attention_mask = torch.tensor(\n            pad_sequences(\n                [feature[1] for feature in input_features], padding_value=0, max_length=self.max_sequence_length\n            ),\n            dtype=torch.long,\n        )\n        padded_token_type_ids = torch.tensor(\n            pad_sequences(\n                [feature[2] for feature in input_features], padding_value=0, max_length=self.max_sequence_length\n            ),\n            dtype=torch.long,\n        )\n\n        return (\n            padded_token_ids,\n            padded_attention_mask,\n            padded_token_type_ids,\n        )\n","sub_path":"tasks/paws/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"7886276","text":"import asyncio\nimport random\nimport smtplib\nimport string\nfrom datetime import date\nfrom email.message import EmailMessage\nfrom email.mime.text import MIMEText\n\nfrom sanic import Blueprint, response\nfrom sanic.exceptions import Forbidden, NotFound, ServerError\nfrom sanic.request import Request\n\nfrom core.utils import (add_message, disable_xss, get_school_week,\n                        login_required, open_db_connection, render_template)\n\nroot = Blueprint('root')\n\n
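# Middleware: make sure every session carries the keys the routes below rely on\n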
@root.middleware('request')\nasync def setup_session_dict(request: Request):\n    \"\"\"Sets up session attributes if they do not exist already\"\"\"\n    if request.ctx.session.get('logged_in', None) is None:\n        request.ctx.session['logged_in'] = False\n\n    if request.ctx.session.get('messages', None) is None:\n        request.ctx.session['messages'] = []\n\n@root.get('/')\nasync def index(request: Request):\n    async with request.app.ctx.aiohttp.get('https://api.github.com/users/SharpBit/events/public') as resp:\n        info = await resp.json()\n    recent_commits = filter(lambda x: x['type'] == 'PushEvent', info)\n    return await render_template('index', request, title=\"Home Page\", description='Home Page', recent=recent_commits)\n\n@root.get('/repo/<name>')\nasync def repo(request: Request, name: str):\n    return response.redirect(f'https://github.com/SharpBit/{name}')\n\n@root.get('/login')\nasync def login(request: Request):\n    if request.ctx.session['logged_in']:\n        return response.redirect('/')\n    return response.redirect(request.app.ctx.oauth.discord_login_url)\n\n@root.get('/callback')\nasync def callback(request: Request):\n    app = request.app\n    code = request.args.get('code')\n    access_token = await app.ctx.oauth.get_access_token(code)\n    user = await app.ctx.oauth.get_user_json(access_token)\n    if user.get('message'):\n        return await render_template('unauthorized', request, description='Discord Oauth Unauthorized.')\n\n    if user.get('avatar'):\n        avatar = f\"https://cdn.discordapp.com/avatars/{user['id']}/{user['avatar']}.png\"\n    else: # in case of default avatar users\n        avatar = f\"https://cdn.discordapp.com/embed/avatars/{user['discriminator'] % 5}.png\"\n\n    async with open_db_connection(request.app) as conn:\n        await conn.executemany(\n            '''INSERT INTO users(id, name, discrim, avatar) VALUES ($1, $2, $3, $4)\n            ON CONFLICT (id) DO UPDATE SET id=$1, name=$2, discrim=$3, avatar=$4''',\n            [\n                (user['id'], user['username'], user['discriminator'], avatar),\n                (user['id'], user['username'], user['discriminator'], avatar)\n            ]\n        )\n\n    request.ctx.session['logged_in'] = True\n    request.ctx.session['id'] = user['id']\n\n    return response.redirect('/dashboard')\n\n@root.get('/logout')\nasync def logout(request: Request):\n    del request.ctx.session['logged_in']\n    del request.ctx.session['id']\n    return response.redirect('/')\n\n@root.get('/dashboard')\n@login_required()\nasync def dashboard_home(request: Request):\n    async with open_db_connection(request.app) as conn:\n        urls = await conn.fetch('SELECT * FROM urls WHERE user_id = $1', request.ctx.session['id'])\n        pastes = await conn.fetch('SELECT * FROM pastebin WHERE user_id = $1', request.ctx.session['id'])\n    return await render_template(\n        template='dashboard',\n        request=request,\n        title=\"Dashboard\",\n        description='Dashboard for your account.',\n        urls=urls,\n        pastes=pastes\n    )\n\n@root.get('/urlshortener')\nasync def url_shortener_home(request: Request):\n    return await render_template('url_shortener', request, title='URL Shortener', description='Shorten a URL!')\n\n@root.post('/url/create')\n# @authorized()\nasync def create_url(request: Request):\n    chars = string.ascii_letters + string.digits\n    code = ''.join(random.choice(chars) for i in range(8))\n    try:\n        url = request.form['url'][0]\n    except KeyError:\n        return add_message(request, 'error', 'Enter a URL to redirect to.', '/urlshortener')\n    account = request.ctx.session.get('id', 'no_account')\n\n    async with open_db_connection(request.app) as conn:\n        if request.form.get('code'):\n            code = request.form['code'][0]\n            existing = await conn.fetchrow('SELECT * FROM urls WHERE code = $1', code)\n            if existing:\n                return add_message(request, 
'error', 'That code is already taken. Try another one.', '/urlshortener')\n        await conn.execute('INSERT INTO urls(user_id, code, url) VALUES ($1, $2, $3)', account, code, url)\n    secure = 's' if not request.app.config.DEV else ''\n    return add_message(\n        request,\n        'success',\n        f\"Shortened URL created at \"\n        f\"http{secure}://{request.app.config.DOMAIN}/{code}\",\n        '/urlshortener'\n    )\n\n@root.get('/<code>')\nasync def existing_code(request: Request, code: str):\n    async with open_db_connection(request.app) as conn:\n        res = await conn.fetchrow('SELECT * FROM urls WHERE code = $1', code)\n    if not res:\n        raise NotFound(message=f'Requested URL {request.path} not found')\n    return response.redirect(res['url'])\n\n@root.get('/pastebin')\nasync def pastebin_home(request: Request):\n    return await render_template('pastebin', request, title=\"Pastebin\",\n                                 description='Paste some code for easy access later!')\n\n@root.post('/pastebin/create')\n# @authorized()\nasync def create_pastebin(request: Request):\n    chars = string.ascii_letters + string.digits\n    code = ''.join(random.choice(chars) for i in range(8))\n    try:\n        text = request.form['text'][0]\n    except KeyError:\n        return add_message(request, 'error', 'Paste some code in to save.', '/pastebin')\n    account = request.ctx.session.get('id', 'no_account')\n    async with open_db_connection(request.app) as conn:\n        await conn.execute('INSERT INTO pastebin(user_id, code, text) VALUES ($1, $2, $3)', account, code, text)\n    return response.redirect(f'/pastebin/{code}')\n\n@root.get('/pastebin/<code>')\nasync def existing_pastebin(request: Request, code: str):\n    async with open_db_connection(request.app) as conn:\n        res = await conn.fetchrow('SELECT * FROM pastebin WHERE code = $1', code)\n    if not res:\n        raise NotFound(message=f'Requested URL {request.path} not found')\n    text = disable_xss(res['text'])\n    return await render_template(\n        template='saved_pastebin',\n        request=request,\n        title=\"Pastebin - Saved\",\n        description=\"Saved Pastebin\",\n        code=text\n    )\n\n@root.get('/brawlstats/<endpoint:path>')\nasync def brawlstats_tests_proxy(request: Request, endpoint: str):\n    endpoint = '/'.join(request.url.split('/')[4:])\n    if not request.token:\n        raise Forbidden('Invalid authorization')\n    headers = {\n        'Authorization': f'Bearer {request.token}',\n        'Accept-Encoding': 'gzip'\n    }\n    try:\n        async with request.app.ctx.aiohttp.get(\n            f'https://api.brawlstars.com/v1/{endpoint}',\n            timeout=30,\n            headers=headers\n        ) as resp:\n            return response.json(await resp.json(), status=resp.status)\n    except asyncio.TimeoutError:\n        raise ServerError('Request failed', status_code=503)\n\n@root.get('/schoolweek')\nasync def schoolweektoday(request: Request):\n    return response.redirect(f'/schoolweek/{date.today()}')\n\n@root.get('/schoolweek/<requested_date_str>')\nasync def schoolweek(request: Request, requested_date_str: str):\n    requested_date = date(*map(int, requested_date_str.split('-')))\n    first_day = date(2020, 9, 8)\n    if not first_day <= requested_date <= date(2021, 3, 11):\n        
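# the requested date falls outside the 2020-21 school year covered by the app\n        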
raise NotFound(f'Requested URL {request.path} not found. Maybe try a date between 9/8/2020 and 3/11/2021?')\n\n    week_fmt = await get_school_week(requested_date, first_day, week=True)\n\n    return await render_template(\n        template='schoolweek',\n        request=request,\n        week=week_fmt,\n        requested_date=requested_date,\n        title='School Week',\n        description='This week\\'s maroon and gray A and B days.'\n    )\n\n\n@root.post('/schoolweek/subscribe')\n# @authorized()\nasync def email_subscribe(request):\n    try:\n        email = request.form['email'][0]\n    except KeyError:\n        return add_message(request, 'error', 'Enter an email in the field.', '/schoolweek')\n\n    async with open_db_connection(request.app) as conn:\n        existing = await conn.fetchrow('SELECT * FROM mailing_list WHERE email = $1', email)\n        if existing:\n            return add_message(request, 'error', 'Email already subscribed.', '/schoolweek')\n\n    msg = EmailMessage()\n    msg['Subject'] = 'Thank you for subscribing to GCHS Daily Updates!'\n    msg['From'] = request.app.config.CUSTOM_EMAIL\n    msg['To'] = email\n    secure = 's' if not request.app.config.DEV else ''\n    unsubscribe_url = f\"http{secure}://{request.app.config.DOMAIN}/schoolweek/unsubscribe/{email}\"\n    body = f\"If this wasn't you, <a href='{unsubscribe_url}'>click here</a> to unsubscribe.\"\n    msg.set_content(body, subtype='html')\n\n    with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n        smtp.login(request.app.config.NOREPLY_EMAIL, request.app.config.EMAIL_APP_PASSWORD)\n\n        smtp.send_message(msg)\n\n    async with open_db_connection(request.app) as conn:\n        await conn.execute('INSERT INTO mailing_list(email) VALUES ($1)', email)\n\n    return add_message(request, 'success', 'Your email has been added to the mailing list.', '/schoolweek')\n\n@root.get('/schoolweek/unsubscribe/<email>')\nasync def email_unsubscribe(request, email):\n    async with open_db_connection(request.app) as conn:\n        await conn.execute('DELETE FROM mailing_list WHERE email = $1', email)\n    return add_message(request, 'success', 'Your email has been removed from the mailing list.', '/schoolweek')\n\n@root.get('/japanese-conjugation-practice')\nasync def jap_conj(request):\n    return await render_template(\n        template='jap-conj',\n        request=request\n    )\n","sub_path":"core/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":10085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"138932701","text":"import re\nfrom pathlib import Path\nimport setuptools\n\n\ndef getLongDescription():\n    with open(\"README.md\", \"r\") as fh:\n        longDescription = fh.read()\n    return longDescription\n\n\ndef getRequirements():\n    requirements = []\n    with open(\"requirements.txt\") as f:\n        requirements = f.read().splitlines()\n    return requirements\n\n\ndef getVersion():\n    path = Path(__file__).parent.resolve() / \"sizebot\" / \"__init__.py\"\n    with open(path, \"r\") as fp:\n        version_file = fp.read()\n    version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n    if not version_match:\n        raise RuntimeError(\"Unable to find version string.\")\n    version = version_match.group(1)\n    return version\n\n\nsetuptools.setup(\n    name=\"sizebot\",\n    version=getVersion(),\n    author=\"DigiDuncan\",\n    author_email=\"digiduncan@gmail.com\",\n    description=\"SizeBot3, Cogs Edition, rewritten.\",\n    long_description=getLongDescription(),\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/sizedev/SizeBot3\",\n    python_requires=\">=3.7\",\n    install_requires=getRequirements(),\n    packages=setuptools.find_packages(),\n    include_package_data=True,\n    classifiers=[\n        \"Programming Language :: Python :: 3.7\",\n        \"Operating System :: OS Independent\",\n        \"License :: OSI 
Approved :: GNU General Public License v3 (GPLv3)\"\n ],\n entry_points={\n \"console_scripts\": [\n \"sizebot=sizebot.main:main\"\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"606091019","text":"import numpy as np\nimport h5py, os, re\npjoin = os.path.join\nfrom data_parsing import get_trials, simulate_choices\n\n# Params\nbase_path = '/jukebox/wang/deverett/puffs'\ncohort = 1\ncpath = pjoin(base_path, 'cohort_{}'.format(cohort))\n\n# Select file\nrestr = re.compile(r'data_?(\\d{0,2}).hdf5')\nreg = [restr.match(f) for f in os.listdir(cpath)]\nmaxx = max([int(r.group(1)) for r in reg if r and r.group(1)])\ndpath = pjoin(cpath, 'data_{:02}.hdf5'.format(maxx))\n\n# Cache trials\ndata = h5py.File(dpath, 'r')\ntrials = get_trials(data, subs=data.keys(), after_date=20150101) \nnp.save(pjoin(cpath,'trials.npy'),trials)\n\n","sub_path":"analysis/c0/cacher.py","file_name":"cacher.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"286823852","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nimport numpy as np\nimport argparse\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nfrom helpers import get_device, rotate_img, one_hot_embedding\n#from data import dataloaders, digit_one\nfrom train import train_model\nfrom test import test_data\nfrom losses import edl_mse_loss, edl_digamma_loss, edl_log_loss, relu_evidence\nfrom net import Net_30\nfrom src.classification.load_data_30_turn import load_all_data, load_test_data_new\nfrom sklearn.model_selection import train_test_split\n\ndef main():\n features = ['cog']\n dim = len(features)\n timesteps = 60 # number of sequential features per sample\n CLASSES = 2\n # load data\n\n parser = argparse.ArgumentParser()\n mode_group = parser.add_mutually_exclusive_group(required=False)\n mode_group.add_argument(\"--train\", default=False, action=\"store_true\",\n help=\"To train the network.\")\n mode_group.add_argument(\"--test\", default=True, action=\"store_true\",\n help=\"To test the network.\")\n parser.add_argument(\"--epochs\", default=50, type=int,\n help=\"Desired number of epochs.\")\n parser.add_argument(\"--dropout\", default=True, action=\"store_true\",\n help=\"Whether to use dropout or not.\")\n parser.add_argument(\"--uncertainty\", default = True, action=\"store_true\",\n help=\"Use uncertainty or not.\")\n uncertainty_type_group = parser.add_mutually_exclusive_group()\n uncertainty_type_group.add_argument(\"--mse\", default = True, action=\"store_true\",\n help=\"Set this argument when using uncertainty. Sets loss function to Expected Mean Square Error.\")\n uncertainty_type_group.add_argument(\"--digamma\", default = False, action=\"store_true\",\n help=\"Set this argument when using uncertainty. Sets loss function to Expected Cross Entropy.\")\n uncertainty_type_group.add_argument(\"--log\", default = False, action=\"store_true\",\n help=\"Set this argument when using uncertainty. 
Sets loss function to Negative Log of the Expected Likelihood.\")\n args = parser.parse_args()\n\n\n\n if args.train:\n num_epochs = args.epochs\n use_uncertainty = args.uncertainty\n num_classes = CLASSES\n\n model = Net_30(dropout=args.dropout)\n\n if use_uncertainty:\n if args.digamma:\n criterion = edl_digamma_loss\n elif args.log:\n criterion = edl_log_loss\n elif args.mse:\n criterion = edl_mse_loss\n else:\n parser.error(\n \"--uncertainty requires --mse, --log or --digamma.\")\n else:\n criterion = nn.CrossEntropyLoss()\n\n optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=0.005)\n\n exp_lr_scheduler = optim.lr_scheduler.StepLR(\n optimizer, step_size=7, gamma=0.1)\n\n device = get_device()\n model = model.to(device)\n\n data_normal, Y_data = load_all_data(timesteps, dim, features, CLASSES)\n np.random.seed(1234)\n x_train, x_test, y_train, y_test = train_test_split(data_normal, Y_data, test_size=0.10)\n\n model, metrics = train_model(model, x_train, x_test, y_train, y_test, num_classes, criterion,\n optimizer, scheduler=exp_lr_scheduler, num_epochs=num_epochs,\n device=device, uncertainty=use_uncertainty)\n\n state = {\n \"epoch\": num_epochs,\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n }\n\n if use_uncertainty:\n if args.digamma:\n torch.save(state, \"./results/model_uncertainty_digamma.pt\")\n print(\"Saved: ./results/model_uncertainty_digamma.pt\")\n if args.log:\n torch.save(state, \"./results/model_uncertainty_log.pt\")\n print(\"Saved: ./results/model_uncertainty_log.pt\")\n if args.mse:\n torch.save(state, \"./results/turn_30_model_uncertainty_mse_epochs_\"+str(num_epochs)+\".pt\")\n print(\"Saved: ./results/turn_30_model_uncertainty_mse_epochs_\"+str(num_epochs)+\".pt\")\n torch.save(metrics, \"./results/turn_30_metrics_mse_epochs_\"+str(num_epochs)+\".pt\")\n print(\"Saved: ./results/turn_30_metrics_mse_epochs_\" + str(num_epochs) + \".pt\")\n\n else:\n torch.save(state, \"./results/model.pt\")\n print(\"Saved: ./results/model.pt\")\n\n elif args.test:\n\n use_uncertainty = args.uncertainty\n device = get_device()\n model = Net_30()\n model = model.to(device)\n optimizer = optim.Adam(model.parameters())\n\n if use_uncertainty:\n if args.digamma:\n checkpoint = torch.load(\n \"./results/model_uncertainty_digamma.pt\")\n\n if args.log:\n checkpoint = torch.load(\"./results/model_uncertainty_log.pt\")\n\n if args.mse:\n checkpoint = torch.load(\"./results/turn_30_model_uncertainty_mse_epochs_50.pt\")\n metrics = torch.load(\"./results/turn_30_metrics_mse_epochs_50.pt\")\n print(\"testing with metrics of epochs 50\",)\n\n else:\n checkpoint = torch.load(\"./results/model.pt\")\n\n\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n\n model.eval()\n x_test, y_test = load_test_data_new(timesteps, dim, features, CLASSES)\n test_data(\"turn_30\", model, metrics, x_test, y_test, uncertainty=use_uncertainty, device=None)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"classification/torch_codes/main_30.py","file_name":"main_30.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"372463810","text":"from django.forms.models import model_to_dict\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom 
django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth import authenticate, login, logout\nfrom techTrend.models import *\nfrom career.models import *\nfrom django.contrib import messages\nfrom successStories.models import *\n\nfrom .models import *\nimport json\n\nfrom .form import * \n\n\n@login_required(login_url='login')\ndef dashboard(request):\n    degree = UserDegree.objects.filter(user=request.user)\n    degreeCount = UserDegree.objects.filter(user=request.user).count()\n    workExp = WorkExperience.objects.filter(user=request.user)\n    workExpCount = WorkExperience.objects.filter(user=request.user).count()\n    profileCount=Profile.objects.filter(user=request.user).count()\n    workExpCountFlag = True if workExpCount > 0 else False\n    degreeCountFlag = True if degreeCount > 0 else False\n    profileFlag = True if profileCount > 0 else False\n    rechord = UserDegree.objects.all()\n    department = Department.objects.all()\n\n    if profileCount == 0:\n        context = {\n            'degree': degree, 'workExp': workExp,\n            'workExpCountFlag': workExpCountFlag, 'degreeCountFlag': degreeCountFlag,\n            'profileFlag': profileFlag, \n            'rechord': rechord,\n            'department': department,\n        }\n        return render(request,'dashboard.html',context)\n\n    profile = Profile.objects.get(user=request.user)\n    \n    context = {\n        'degree': degree, 'workExp': workExp,\n        'workExpCountFlag': workExpCountFlag, 'degreeCountFlag': degreeCountFlag,\n        'profileFlag': profileFlag, 'profile': profile,\n        'rechord': rechord,\n        'department': department,\n    }\n\n    return render(request,'dashboard.html',context)\n\n@login_required(login_url='login')\ndef createprofile(request):\n    if request.method == 'POST':\n        form=ProfileForm(request.POST,request.FILES)\n\n        if form.is_valid():\n            userForm=form.save(commit=False)\n            userForm.user=request.user\n            userForm.save()\n            messages.success(request, 'Profile Information added successfully.')\n            return redirect('dashboard')\n\n    form=ProfileForm()\n    context={'form':form}\n    return render(request,'userProfile.html',context)\n\n\n@login_required(login_url='login')\ndef updateProfile(request):\n\n    if request.method==\"POST\":\n        form=ProfileForm(request.POST,request.FILES,instance=Profile.objects.get(user=request.user))\n        if form.is_valid():\n            userForm=form.save(commit=False)\n            userForm.user=request.user\n            userForm.save()\n            return redirect('dashboard')\n\n    profile = Profile.objects.get(user=request.user)\n    profileCount = Profile.objects.get(user=request.user)\n    profileFlag=True\n\n    form=ProfileForm(instance=request.user.profile)\n    context = {'profile': profile, 'profileFlag': profileFlag,'form':form}\n\n    return render(request, 'userProfile.html', context)\n\n@login_required(login_url='login')\ndef getDegree(request, brand):\n    brand=brand.replace('@',' ')\n    \n    current_brand = Department.objects.get(name=brand)\n    models = Degree.objects.all().filter(department=current_brand).values()\n    data = (list(models))\n\n    return JsonResponse(data, safe=False)\n\n\n@login_required(login_url='login')\ndef getDegreeByDepartment(request):\n    dep=request.POST.get('brand')\n    current_dep = Department.objects.get(name=dep)\n    degree = Degree.objects.all().filter(department=current_dep).values()\n    data = (list(degree))\n    return JsonResponse(data, safe=False)\n\n\n@login_required(login_url='login')\ndef createUserDegree(request):\n    if request.method == \"POST\":\n        degree = request.POST.get('degree')\n        dateStarted = request.POST.get('dateStarted')\n        dateFinished = 
request.POST.get('dateFinished')\n userDeg = UserDegree.objects.create(\n degree=Degree.objects.get(name=degree), user=request.user, dateStarted=dateStarted, dateFinished=dateFinished)\n userDeg.save()\n messages.success(request, 'User Degree has been added successfully.')\n return redirect('dashboard')\n\n department = Department.objects.all()\n context = { 'department': department}\n return render(request,'userDegree.html',context)\n\n@login_required(login_url='login')\ndef updateUserDegree(request,pk):\n try:\n user = UserDegree.objects.get(pk=pk)\n except:\n return HttpResponse('
<h1>404 Not Found</h1>
')\n if request.method == \"POST\":\n degree = request.POST.get('degree')\n dateStarted = request.POST.get('dateStarted')\n dateFinished = request.POST.get('dateFinished')\n UserDegree.objects.filter(pk=pk).update(\n degree=Degree.objects.get(name=degree), user=request.user, dateStarted=dateStarted, dateFinished=dateFinished)\n messages.success(request, 'User Degree has been Update successfully.')\n return redirect('dashboard')\n userDeg = UserDegree.objects.get(pk=pk)\n department = Department.objects.all()\n flag=True\n context = {'userDeg': userDeg, 'department': department, 'flag': flag}\n return render(request, 'userDegree.html', context)\n\n@login_required(login_url='login')\ndef deleteUserDegree(request, pk):\n instance = UserDegree.objects.get(pk=pk)\n instance.delete()\n messages.success(request, 'User Degree has been Deleted successfully.')\n return redirect('dashboard')\n\n@login_required(login_url='login')\ndef getUserDegree(request):\n\n return HttpResponse('okk')\n\n@login_required(login_url='login')\ndef createWorkExp(request):\n if request.method == \"POST\":\n companyName = request.POST.get('companyName')\n experienceTime = request.POST.get('experienceTime')\n workingPosition = request.POST.get('workingPosition')\n workingWebsite = request.POST.get('workingWebsite')\n obj = WorkExperience.objects.create(\n companyName=companyName, experienceTime=experienceTime,\n position=workingPosition,portfolioWebsite=workingWebsite,user=request.user\n )\n obj.save()\n messages.success(request, 'Work Experience has been added successfully.')\n return redirect('dashboard') \n return render(request, 'userWorkExp.html')\n\n\n@login_required(login_url='login')\ndef updateWorkExp(request,pk):\n try:\n user = WorkExperience.objects.get(pk=pk)\n except:\n return HttpResponse('
<h1>404 Not Found</h1>
')\n if request.method == \"POST\":\n companyName = request.POST.get('companyName')\n experienceTime = request.POST.get('experienceTime')\n workingPosition = request.POST.get('workingPosition')\n workingWebsite = request.POST.get('workingWebsite')\n WorkExperience.objects.filter(pk=pk).update(\n companyName=companyName, experienceTime=experienceTime,\n position=workingPosition, portfolioWebsite=workingWebsite\n )\n messages.success(request, 'Work Experience has been Update successfully.')\n return redirect('dashboard')\n workExp = WorkExperience.objects.get(pk=pk)\n flag=True\n context = {'workExp': workExp, 'flag': flag}\n return render(request, 'userWorkExp.html', context)\n \n\n@login_required(login_url='login')\ndef deleteWorkExp(request, pk):\n instance = WorkExperience.objects.get(pk=pk)\n instance.delete()\n messages.success(request, 'WorkExperience has been Deleted successfully.')\n return redirect('dashboard')\n\n\n@login_required(login_url='login')\ndef showMarketTrend(request):\n data = TechTrend.objects.filter(author=request.user)\n dataCount = TechTrend.objects.filter(author=request.user).count()\n techFlag = True if dataCount == 0 else False\n context = {'techdata': data, 'name': 'Market Trend Posts', 'techsFlag': techFlag}\n print(techFlag)\n return render(request, 'techData.html', context)\n \n\n\n@login_required(login_url='login')\ndef showSuccessStories(request):\n data = SuccessStories.objects.filter(author=request.user)\n dataCount = SuccessStories.objects.filter(author=request.user).count()\n profileFlag = True if dataCount == 0 else False\n context = {'success': data, 'name': 'Success Stories Posts', 'successFlag': profileFlag}\n return render(request, 'successData.html', context)\n\n\n@login_required(login_url='login')\ndef showCareer(request):\n data = CareerPost.objects.filter(author=request.user)\n dataCount = CareerPost.objects.filter(author=request.user).count()\n profileFlag = True if dataCount == 0 else False\n context = {'career': data, 'name': 'Job-Intership Posts', 'careerFlag': profileFlag}\n return render(request, 'careerData.html', context)\n \n@login_required(login_url='login')\ndef UserInfo(request):\n try:\n if request.method == 'POST':\n username=request.POST.get('username')\n email=request.POST.get('email')\n oldPass = request.POST.get('oldPass')\n pass1 = request.POST.get('newPass')\n\n \n if request.user.check_password(oldPass):\n user = User.objects.get(id=request.user.id)\n user.email = email\n \n user.username = username\n user.set_password(pass1)\n user.save()\n user = authenticate(username=email, password=pass1)\n if user is not None:\n if user.is_active:\n login(request, user)\n messages.success(request, 'User Object has been updated successfullly')\n return redirect('dashboard')\n\n else:\n messages.success(request, 'Old-Password is not Matching existing Passord')\n return redirect('UserInfo')\n except:\n messages.success(request, 'Another user is register with same \"username\" please Try another!')\n\n \n return render(request,'userInfo.html')\n \n\ndef UserView(request,username):\n user = User.objects.get(username=username)\n try:\n degree = UserDegree.objects.filter(user=user)\n profile=Profile.objects.get(user=user)\n degreeCount = UserDegree.objects.filter(user=user).count()\n workExp = WorkExperience.objects.filter(user=user)\n workExpCount = WorkExperience.objects.filter(user=user).count()\n profileCount=Profile.objects.filter(user=user).count()\n workExpCountFlag = True if workExpCount > 0 else False\n degreeCountFlag = True if 
degreeCount > 0 else False\n profileFlag = True if profileCount > 0 else False\n rechord = UserDegree.objects.all()\n if profileCount == 0:\n context = {\n 'degree': degree, 'workExp': workExp,\n 'workExpCountFlag': workExpCountFlag, 'degreeCountFlag': degreeCountFlag,\n 'profileFlag': profileFlag, \n 'rechord': rechord,\n 'profile':profile\n }\n return render(request,'userView.html',context)\n\n profile = Profile.objects.get(user=user)\n context = {\n 'degree': degree, 'workExp': workExp,\n 'workExpCountFlag': workExpCountFlag, 'degreeCountFlag': degreeCountFlag,\n 'profileFlag': profileFlag, 'profile': profile,\n 'rechord': rechord,\n }\n return render(request,'userView.html',context)\n\n except:\n context={'flagProfileData':True}\n return render(request,'userView.html' ,context)\n\n return render(request,'userView.html',context)\n ","sub_path":"profileDashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"373479661","text":"from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import reverse\n\nfrom aliss.tests.fixtures import Fixtures\nfrom aliss.models import *\n\n# class PlacesViewTestCase(TestCase):\nfixtures = ['categories.json', 'service_areas.json', ]\n\ndef setUp(self):\n self.service = Fixtures.create()\n self.category = Category.objects.get(slug=\"conditions\")\n self.service.categories.add(self.category)\n self.service.save()\n self.postcode = Postcode.objects.get(place_name='Glasgow')\n self.postcode.generate_place_name_slug()\n\ndef test_valid_landing_page_place_category(self):\n response = self.client.get('/places/glasgow/conditions/')\n self.assertEqual(self.service.categories.first().slug,\n self.category.slug)\n self.assertEqual(self.postcode.slug, 'glasgow')\n self.assertEqual(response.status_code, 200)\n\ndef test_response_contains_blurb(self):\n response = self.client.get('/places/glasgow/conditions/')\n self.assertContains(response, \"Help and support with Conditions in\")\n self.assertContains(response, \"Glasgow\")\n\ndef test_response_result(self):\n response = self.client.get('/places/glasgow/conditions/')\n self.assertContains(response, \"My First Service\")\n\ndef test_response_with_error_status_code(self):\n response = self.client.get('/places/borkington/borks/')\n self.assertEqual(response.status_code, 200)\n\ndef test_response_with_error_status_code(self):\n response = self.client.get('/places/borkington/borks/')\n self.assertContains(\n response, \"
<p>Sorry, borkington or borks doesn't appear to be a valid search.</p>
\")\n\ndef test_valid_search_filter_link(self):\n response = self.client.get('/places/glasgow/conditions/')\n self.assertContains(\n response, \"/search/?category=conditions&postcode=G2+4AA\")\n\ndef test_valid_search_category_link(self):\n response = self.client.get('/places/glasgow/conditions/')\n self.assertContains(\n response, \"/search/?category=goods&postcode=G2+4AA\")\n\ndef test_valid_landing_page_place_content_block(self):\n custom_content = \"
<h1>Edinburgh title test</h1><p>New landing page content for a place.</p>
\"\n ContentBlock.objects.create(\n slug='places-edinburgh', body=custom_content)\n response = self.client.get('/places/edinburgh/')\n self.assertContains(response, custom_content)\n\ndef test_valid_landing_page_title_content_block(self):\n custom_content = \"Landing page for Edinburgh\"\n ContentBlock.objects.create(\n slug='places-edinburgh', body=\"
<p>New landing page content for a place.</p>
\")\n ContentBlock.objects.create(\n slug='places-edinburgh-title', body=custom_content)\n response = self.client.get('/places/edinburgh/')\n self.assertContains(response, custom_content)\n\ndef test_valid_landing_page_meta_content_blocks(self):\n custom_meta = \"meta description for edinburgh\"\n custom_title = \"meta title for edinburgh\"\n ContentBlock.objects.create(\n slug='places-edinburgh', body=\"
<p>New landing page content for a place.</p>
\")\n ContentBlock.objects.create(\n slug='places-edinburgh-meta-description', body=custom_meta)\n ContentBlock.objects.create(\n slug='places-edinburgh-meta-title', body=custom_title)\n response = self.client.get('/places/edinburgh/')\n self.assertContains(response, custom_meta)\n self.assertContains(response, custom_title)\n\ndef test_valid_redirect_no_custom_content_when_placename_exists(self):\n response = self.client.get('/places/glasgow/')\n self.assertEqual(response.status_code, 302)\n #self.assertRedirects(response, reverse('search', kwargs={'postcode': 'G2 4AA'}))\n\ndef test_valid_redirect_no_custom_content_no_placename(self):\n response = self.client.get('/places/musselburgh/')\n self.assertEqual(response.status_code, 404)\n\ndef test_valid_landing_page_place_category_with_content(self):\n custom_content = \"
<p>Conditions services for Glasgow</p>
\"\n custom_title = \"Glasgow: conditions and wellbeing services - ALISS\"\n custom_meta_title = \"Glasgow conditions / wellbeing services\"\n custom_meta_desc = \"Find services on ALISS\"\n ContentBlock.objects.create(\n slug='places-glasgow-conditions', body=custom_content)\n ContentBlock.objects.create(\n slug='places-glasgow-conditions-title', body=custom_title)\n ContentBlock.objects.create(\n slug='places-glasgow-conditions-meta-title', body=custom_meta_title)\n ContentBlock.objects.create(\n slug='places-glasgow-conditions-meta-description', body=custom_meta_desc)\n response = self.client.get('/places/glasgow/conditions/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, custom_content)\n self.assertContains(response, custom_title)\n self.assertContains(response, custom_meta_desc)\n self.assertContains(response, custom_meta_title)\n\ndef test_valid_landing_page_uppercase_placename(self):\n custom_content = \"
<h1>Edinburgh title test</h1><p>New landing page content for a place.</p>
\"\n ContentBlock.objects.create(\n slug='places-edinburgh', body=custom_content)\n response = self.client.get('/places/Edinburgh/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, custom_content)\n\ndef test_invalid_landing_page_uppercase_valid_placename_redirect_search(self):\n place_name_slug = self.postcode.slug\n response = self.client.get('/places/Glasgow/')\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, '/search/?postcode=G2+4AA')\n\ndef test_valid_landing_page_uppercase_placename_uppercase_category(self):\n custom_content = \"
<p>Conditions services for Glasgow</p>
\"\n ContentBlock.objects.create(\n slug='places-glasgow-conditions', body=custom_content)\n response = self.client.get('/places/Glasgow/Conditions/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, custom_content)\n\ndef test_invalid_landing_page_uppercase_placename_uppercase_category_standard_copy(self):\n response = self.client.get('/places/Glasgow/Conditions/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Glasgow')\n self.assertContains(response, 'Conditions')\n\ndef tearDown(self):\n for block in ContentBlock.objects.all():\n block.delete()","sub_path":"aliss/tests/views/test_places_view.py","file_name":"test_places_view.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"415984122","text":"# -*- coding: utf-8 -*-\n# @File : Helper_Encoder_Json.py\n# @AUTH : swxs\n# @Time : 2018/5/16 9:56\n\nimport json\nfrom functools import partial\nfrom .Helper_Encoder import CONVERTERS, encoder\n\n__all__ = [\n \"dump\",\n \"dumps\",\n \"load\",\n \"loads\",\n]\n\n\nclass ComplexEncoder(json.JSONEncoder):\n def default(self, obj):\n try:\n return encoder(obj)\n except Exception:\n return super(ComplexEncoder, self).default(obj)\n\n\ndef json_object_hook(obj):\n _spec_type = obj.get('_spec_type')\n if not _spec_type:\n return obj\n\n if _spec_type in CONVERTERS:\n return CONVERTERS[_spec_type](obj['val'])\n else:\n raise Exception('Unknown {}'.format(_spec_type))\n\n\ndump = partial(json.dump, cls=ComplexEncoder)\ndumps = partial(json.dumps, cls=ComplexEncoder)\nload = partial(json.load, object_hook=json_object_hook)\nloads = partial(json.loads, object_hook=json_object_hook)\n","sub_path":"commons/Helpers/Helper_Encoder_Json.py","file_name":"Helper_Encoder_Json.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"527192639","text":"print(\"--------------------------\")\n\n\ndef seq_search(elt, tab):\n \"\"\"\n Find an element elt in an array tab and returns its index.\n If the array doesn't contains the element, returns -1\n\n :param elt: the element to search\n :param tab: the array\n :return: the index of the element or -1\n \"\"\"\n n = len(tab)\n i = 0\n while i < n and elt != tab[i]:\n i += 1\n return -1 if i == n else i\n\n\nprint(\"------- seq_search -------\")\nprint(\"Trying to search 5 in [1, 3, 5, 7, 9], expected result is 2\")\nprint(\"Result: \", seq_search(5, [1, 3, 5, 7, 9]))\nprint(\"Trying to search 13 in [3, 4, 5, 10, 13], expected result is 4\")\nprint(\"Result: \", seq_search(13, [3, 4, 5, 10, 13]))\nprint(\"Trying to search 1 in [1, 3, 5, 7, 9], expected result is 0\")\nprint(\"Result: \", seq_search(1, [1, 3, 5, 7, 9]))\n\n\ndef average(tab):\n \"\"\"\n Get the average value of an array\n\n :param tab: the array\n :return: the average of the array\n \"\"\"\n n = len(tab)\n s = 0\n for i in range(n):\n s = s + tab[i]\n return s / n\n\n\nprint(\"--------- average --------\")\nprint(\"Trying to do the average of [1, 3, 5, 7, 9], expected result is 5\")\nprint(\"Result: \", average([1, 3, 5, 7, 9]))\nprint(\"Trying to do the average of [1, 2, 4, 5, 8, 10], expected result is 5\")\nprint(\"Result: \", average([1, 2, 4, 5, 8, 10]))\n\n\ndef find_min(tab):\n \"\"\"\n Find the smallest value of an array\n\n :param tab: the array\n :return: the smallest value\n \"\"\"\n n = len(tab)\n low = tab[0]\n for i in range(n):\n if tab[i] < 
low:\n            low = tab[i]\n    return low\n\n\nprint(\"-------- find_min --------\")\nprint(\"Trying with [3, 7, 12, 2, 7, 19], expected result is 2\")\nprint(\"Result: \", find_min([3, 7, 12, 2, 7, 19]))\n\n\ndef sort_by_selection(tab):\n    \"\"\"\n    Sort an array using the selection method\n\n    :param tab: the array to sort\n    :return: the sorted array\n    \"\"\"\n    n = len(tab)\n    for i in range(n):\n        low = i\n        for j in range(i + 1, n):\n            if tab[j] < tab[low]:\n                low = j\n        if low != i:\n            tab[low], tab[i] = tab[i], tab[low]\n    return tab\n\n\nprint(\"--- sort_by_selection ----\")\nprint(\"Trying with [3, 8, 1, 2, 5, 0, 8, 1, 1, 2, 14], expected result is [0, 1, 1, 1, 2, 2, 3, 5, 8, 8, 14]\")\nprint(\"Result: \", sort_by_selection([3, 8, 1, 2, 5, 0, 8, 1, 1, 2, 14]))\n\n\ndef sort_by_insertion(tab):\n    \"\"\"\n    Sort an array using the insertion method\n\n    :param tab: the array to sort\n    :return: the sorted array\n    \"\"\"\n    n = len(tab)\n    for i in range(1, n):\n        key = tab[i]\n        j = i - 1\n        while j >= 0 and tab[j] > key:\n            tab[j + 1] = tab[j]\n            j -= 1\n        tab[j + 1] = key\n    return tab\n\n\nprint(\"--- sort_by_insertion ----\")\nprint(\"Trying with [3, 8, 1, 2, 5, 0, 8, 1, 1, 2, 14], expected result is [0, 1, 1, 1, 2, 2, 3, 5, 8, 8, 14]\")\nprint(\"Result: \", sort_by_insertion([3, 8, 1, 2, 5, 0, 8, 1, 1, 2, 14]))\n\n\ndef find_by_dichotomy(elt: int, tab):\n    \"\"\"\n    Find an element in an array using the dichotomy method\n\n    :param elt: the element to search\n    :param tab: the array\n    :return: the index of the element or False if it was not found\n    \"\"\"\n    g, m = 0, 0\n    d = len(tab)\n    while g < d - 1:\n        m = (d + g) // 2\n        if elt < tab[m]:\n            d = m\n        else:\n            g = m\n    return g if tab[g] == elt else False\n\n\nprint(\"---- find_by_dichotomy ---\")\nprint(\"Trying to search 5 in [1, 3, 5, 7, 9], expected result is 2\")\nprint(\"Result: \", find_by_dichotomy(5, [1, 3, 5, 7, 9]))\nprint(\"Trying to search 13 in [3, 4, 5, 10, 13], expected result is 4\")\nprint(\"Result: \", find_by_dichotomy(13, [3, 4, 5, 10, 13]))\nprint(\"Trying to search 13 in [1, 3, 5, 7, 9], expected result is False\")\nprint(\"Result: \", find_by_dichotomy(13, [1, 3, 5, 7, 9]))\n\n\ndef return_change(money: int, tab):\n    \"\"\"\n    convert a sum of money into a number of bills and coins according to the monetary system\n\n    :param money: the sum of money to convert\n    :param tab: the monetary system\n    :return: the bills and coins to return\n    \"\"\"\n    lst = []\n    i = len(tab) - 1\n    while money > 0:\n        val = tab[i]\n        if money < val:\n            i = i - 1\n        else:\n            lst.append(val)\n            money -= val\n\n    return lst\n\n\nprint(\"------ return_change -----\")\nprint(\"Trying to convert 133 with [1, 2, 5, 10, 20, 50, 100, 200, 500], expected result is [100, 20, 10, 2, 1]\")\nprint(\"Result: \", return_change(133, [1, 2, 5, 10, 20, 50, 100, 200, 500]))\nprint(\"--------------------------\")\n","sub_path":"sequences/sequence01/Algorithmes.py","file_name":"Algorithmes.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"127450124","text":"import numpy as np\nimport os\nimport pandas as pd\n\npatch_root = \"/infodev1/non-phi-data/junjiang/OvaryCancer/StromaReaction/PatchSampling\"\ntesting_result_dir = \"/infodev1/non-phi-data/junjiang/OvaryCancer/StromaReaction/VGG16_Classification_ROIs\"\neval_out_dir = \"/infodev1/non-phi-data/junjiang/OvaryCancer/StromaReaction/eval\"\n\nall_class_score_txt = [\"score0\",\"score1\",\"score2\"]\nall_class_list = [\"Fibrosis\", \"Cellularity\", 
\"Orientation\"]\ntesting_cases = [\"Wo-1-A5_RIO1338_HE\", \"Wo-1-C4_RIO1338_HE\", \"Wo-1-F1_RIO1338_HE\", \"Wo-2-B4_RIO1338_HE\",\n \"Wo-2-F1_RIO1338_HE\"]\n\n# annotation_csv = \"/infodev1/non-phi-data/junjiang/OvaryCancer/StromaReaction/PatchSampling/all_samples.csv\"\nannotation_csv = \"/infodev1/non-phi-data/junjiang/OvaryCancer/StromaReaction/PatchSampling/validation_five_cases.csv\"\n\n\nanno_df = pd.read_csv(annotation_csv, header=0)\nfile_list = pd.Series.get(anno_df, \"img_fn\").tolist()\nFibrosis_scores_list = pd.Series.get(anno_df, \"Fibrosis\").tolist()\nCellularity_scores_list = pd.Series.get(anno_df, \"Cellularity\").tolist()\nOrientation_scores_list = pd.Series.get(anno_df, \"Orientation\").tolist()\n\n# save_all_testing_score_list = os.path.join(eval_out_dir, \"all_testing_scores.npy\")\nsave_all_testing_score_list = os.path.join(eval_out_dir, \"validation_scores.npy\")\n\nif os.path.exists(save_all_testing_score_list):\n all_testing_score_list = np.load(save_all_testing_score_list)\nelse:\n all_testing_score_list = []\n\n testing_df_list = []\n for case in testing_cases: # case ID\n case_df_list = []\n for class_txt in all_class_list: # metric ID\n csv_name = \"eval\" + \"_\" + case + \"_\" + class_txt + \".csv\"\n csv_full_name = os.path.join(testing_result_dir, csv_name)\n testing_df = pd.read_csv(csv_full_name, header=0)\n case_df_list.append(testing_df)\n testing_df_list.append(case_df_list)\n\n for img_fn in file_list:\n ele = os.path.split(img_fn)[1].split(\"_\")\n patch_loc_x = int(ele[-2])\n patch_loc_y = int(ele[-1][0:-4])\n roi_loc_x = int(ele[-6])\n roi_loc_y = int(ele[-5])\n\n case_id = ele[0] + \"_\" + ele[1] + \"_\" + ele[2]\n\n testing_scores_list = []\n for c_idx, class_txt in enumerate(all_class_list):\n df = testing_df_list[testing_cases.index(case_id)][c_idx]\n testing_patch_loc_x_list = pd.Series.get(df, \"location_x\").tolist()\n testing_patch_loc_y_list = pd.Series.get(df, \"location_y\").tolist()\n testing_patch_scores = pd.Series.get(df, all_class_score_txt)\n\n Found = False\n for idx, loc_x in enumerate(testing_patch_loc_x_list):\n loc_y = testing_patch_loc_y_list[idx]\n if (loc_x == roi_loc_x + patch_loc_x) and (loc_y == roi_loc_y + patch_loc_y):\n # print(\"match\")\n Found = True\n\n scores = testing_patch_scores.iloc[idx].tolist()\n testing_scores_list.append(scores.index(max(scores)))\n break\n if not Found:\n raise Exception(\"not found\")\n #\n # testing_df_list = []\n # for class_txt in all_class_list:\n # csv_name = \"eval\" + \"_\" + ele[0] + \"_\" + ele[1] + \"_\" + ele[2] + \"_\" + class_txt + \".csv\"\n # csv_full_name = os.path.join(testing_result_dir, csv_name)\n # testing_df = pd.read_csv(csv_full_name, header=0)\n # testing_df_list.append(testing_df)\n #\n # testing_scores_list = []\n # for c_idx, df in enumerate(testing_df_list):\n # testing_patch_loc_x_list = pd.Series.get(df, \"location_x\").tolist()\n # testing_patch_loc_y_list = pd.Series.get(df, \"location_y\").tolist()\n # testing_patch_scores = pd.Series.get(df, all_class_score_txt[c_idx]).tolist()\n # Found = False\n # for idx, loc_x in enumerate(testing_patch_loc_x_list):\n # loc_y = testing_patch_loc_y_list[idx]\n # if (loc_x == roi_loc_x + patch_loc_x) and (loc_y == roi_loc_y + patch_loc_y):\n # # print(\"match\")\n # Found = True\n # testing_scores_list.append(testing_patch_scores[idx])\n # break\n # if not Found:\n # raise Exception(\"not found\")\n\n all_testing_score_list.append(testing_scores_list)\n np.save(save_all_testing_score_list, 
np.array(all_testing_score_list))\n\n# calculate confusion matrix\nfrom sklearn.metrics import confusion_matrix\nimport seaborn\nimport matplotlib.pyplot as plt\n\ndef plot_confusion_matrix(data, labels, title, output_filename):\n \"\"\"Plot confusion matrix using heatmap.\n\n Args:\n data (list of list): List of lists with confusion matrix data.\n labels (list): Labels which will be plotted across x and y axis.\n output_filename (str): Path to output file.\n\n \"\"\"\n seaborn.set(color_codes=True)\n plt.figure(1, figsize=(9, 6))\n\n plt.title(title)\n\n seaborn.set(font_scale=1.4)\n ax = seaborn.heatmap(data, annot=True, cmap=\"YlGnBu\", cbar_kws={'label': 'Scale'})\n\n ax.set_xticklabels(labels)\n ax.set_yticklabels(labels)\n\n ax.set(ylabel=\"True Label\", xlabel=\"Predicted Label\")\n\n plt.savefig(output_filename, bbox_inches='tight', dpi=300)\n plt.close()\n\nlabels = [\"0\", \"1\", \"2\"]\nanno_score_list = [Fibrosis_scores_list, Cellularity_scores_list, Orientation_scores_list]\nfor idx, y_true in enumerate(anno_score_list):\n print(all_class_list[idx])\n y_pred = all_testing_score_list[:, idx]\n cm = confusion_matrix(y_true, y_pred)\n print(cm)\n title = all_class_list[idx] + \" Confusion Matrix\"\n output_filename = os.path.join(eval_out_dir, all_class_list[idx] + \".jpg\")\n plot_confusion_matrix(cm, labels, title, output_filename)\n\n cm = confusion_matrix(y_true, y_pred, normalize='true')\n title = all_class_list[idx] + \" Normalized Confusion Matrix\"\n output_filename = os.path.join(eval_out_dir, all_class_list[idx] + \"_normalized.jpg\")\n plot_confusion_matrix(cm, labels, title, output_filename)\n print(cm)\n\nprint(\"OK\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"stromaReaction/calculate_concordance.py","file_name":"calculate_concordance.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"188649232","text":"import csv\nimport os\nimport psycopg2\nimport datetime\nimport shutil\n\ntoday_is_the_day = datetime.datetime.now()\n\nfilePath = f'./data/postgres/{today_is_the_day.year}-{today_is_the_day.month}-{today_is_the_day.day}/'\nfilePathCsv = f'./data/csv/{today_is_the_day.year}-{today_is_the_day.month}-{today_is_the_day.day}/'\n# Database connection variable.\nconnect = None\n\n# Check if the file path exists.\nif not os.path.exists(filePath):\n os.mkdir(filePath)\n try:\n\n # Connect to database.\n connect = psycopg2.connect(\n host=\"0.0.0.0\",\n database=\"northwind\",\n user=\"northwind_user\",\n password=\"thewindisblowing\")\n\n except psycopg2.DatabaseError as e:\n\n # Confirm unsuccessful connection and stop program execution.\n print(\"Database connection unsuccessful.\")\n quit()\n\n # Cursor to execute query.\n cursor = connect.cursor()\n\n cursor.execute(\"select table_name from information_schema.tables where table_schema='public'\")\n query_response = cursor.fetchall()\n\n table_names = [name[0] for name in query_response]\n # SQL to select data from the person table.\n\n try:\n for table_name in table_names:\n sqlSelect = f\"SELECT * FROM {table_name}\"\n\n # Execute query.\n cursor.execute(sqlSelect)\n\n # Fetch the data returned.\n results = cursor.fetchall()\n\n # Extract the table headers.\n headers = [i[0] for i in cursor.description]\n\n # Open CSV file for writing.\n csvFile = csv.writer(open(f'{filePath}{table_name}.csv', 'w', newline=''),\n delimiter=',', lineterminator='\\r\\n',\n quoting=csv.QUOTE_NONE, escapechar='\\\\')\n\n # Add the 
headers and data to the CSV file.\n            csvFile.writerow(headers)\n            csvFile.writerows(results)\n\n            # Message stating export successful.\n            print(f\"Data export successful from {table_name}\")\n\n    except psycopg2.DatabaseError as e:\n\n        # Message stating export unsuccessful.\n        print(\"Data export unsuccessful.\")\n        quit()\n\n    finally:\n        print(\"Run app.py again to retrieve today's backup of order_details.csv\" + \"\\n\")\n        print(\"Each table of the Northwind db is backed up in this dir: data/postgres/[current-year-month-day]\")\n        # Close database connection.\n        connect.close()\n\n\nelif not os.path.exists(filePathCsv):\n    os.mkdir(filePathCsv)\n    orders_details_bkp = shutil.copy2('./data/order_details.csv', filePathCsv)\n    print(\"Data export successful from order_details.csv\" + \"\\n\")\n    print(\"Your backup is at: data/csv/[current-year-month-day]\")\nelse:\n    print(\"You already have today's backup\" + \"\\n\")\n    print(\"Starting services to join all latest backup to destination database...\" + \"\\n\")\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"353098921","text":"def gcContent(filename):\r\n    #read the file\r\n    file = open(filename, 'r')\r\n\r\n    #separate id into an array and dna string into another\r\n    id = [] \r\n    gc = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"] #at most 10 strings\r\n    id_count=0 \r\n    for line in file:\r\n        if line.startswith('>'):\r\n            id.append(line)\r\n            id_count += 1\r\n        else:\r\n            gc[id_count-1] += line.strip() #strip newlines so multi-line sequences are counted correctly\r\n    gc = gc[0:len(id)] #removing \"\" to match length of id\r\n    #print(\"id\" + str(id))\r\n    #print(\"gc\" + str(gc))\r\n\r\n    #analysis\r\n    gcArr=[]\r\n    for i in gc:\r\n        # newlines were stripped on read, so divide by the full sequence length\r\n        gcArr.append((i.count('C') + i.count('G')) / len(i) * 100)\r\n    gc_index = gcArr.index(max(gcArr))\r\n    print(id[gc_index])\r\n    print(max(gcArr))\r\n    \r\n#test/output \r\nfilename = \"rosalind_gc.txt\"\r\ngcContent(filename)\r\n\r\n\r\n# when file has '>', we want to move to the i+14 position and \r\n# all the way to the next '>'. 
Store all that in an array as a string\r\n# Then, when the whole file is parsed,\r\n# compute the string with the greatest gc content","sub_path":"005-Computing GC Content/gc.py","file_name":"gc.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"558462432","text":"# Locally Linear Embedding\n# a distance retaining dimensionality reduction method\nfrom __future__ import unicode_literals, print_function, division\n\n# Common Imports\nimport os\nimport numpy as np\n\n# ML Imports\nfrom sklearn.manifold import LocallyLinearEmbedding, MDS, Isomap, TSNE\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.datasets import make_swiss_roll, fetch_mldata\n\n# Graph Imports\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Config\nPROJECT_ROOT_DIR = \".\"\n\n\n# Declare Functions\ndef image_path(fig_id):\n if not os.path.exists('images'):\n os.makedirs('images')\n return os.path.join(PROJECT_ROOT_DIR, 'images', fig_id)\n\n\ndef save_fig(fig_id, tight_layout=True):\n print(\"Saving\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(image_path(fig_id) + \".png\", format=\"png\", dpi=300)\n\n\n# Create Datasets\n\nX, t = make_swiss_roll(n_samples=1000, noise=0.2, random_state=42)\nmnist = fetch_mldata(\"MNIST original\")\n\n# Manifold\nlle = LocallyLinearEmbedding(n_components=2, n_neighbors=10, random_state=42)\nX_reduced = lle.fit_transform(X)\n\nplt.title(\"Unrolled swiss roll using LLE\", fontsize=14)\nplt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=t, cmap=plt.cm.hot)\nplt.xlabel(\"$z_1$\", fontsize=18)\nplt.ylabel(\"$z_2$\", fontsize=18)\nplt.axis([-0.065, 0.065, -0.1, 0.12])\nplt.grid(True)\n\nsave_fig(\"lle_unrolling_plot\")\nplt.show()\n\nmds = MDS(n_components=2, random_state=42)\nX_reduced_mds = mds.fit_transform(X)\n\nisomap = Isomap(n_components=2)\nX_reduced_isomap = isomap.fit_transform(X)\n\ntsne = TSNE(n_components=2, random_state=42)\nX_reduced_tsne = tsne.fit_transform(X)\n\nlda = LinearDiscriminantAnalysis(n_components=2)\nX_mnist = mnist[\"data\"]\ny_mnist = mnist[\"target\"]\nlda.fit(X_mnist, y_mnist)\nX_reduced_lda = lda.transform(X_mnist)\n\ntitles = [\"MDS\", \"Isomap\", \"t-SNE\"]\n\nplt.figure(figsize=(11, 4))\n\nfor subplot, title, X_reduced in zip(\n (131, 132, 133), titles,\n (X_reduced_mds, X_reduced_isomap, X_reduced_tsne)):\n\n plt.subplot(subplot)\n plt.title(title, fontsize=14)\n plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=t, cmap=plt.cm.hot)\n plt.xlabel(\"$z_1$\", fontsize=18)\n if subplot == 131:\n plt.ylabel(\"$z_2$\", fontsize=18, rotation=0)\n plt.grid(True)\n\nsave_fig(\"other_dim_reduction_plot\")\nplt.show()\n","sub_path":"dimensionality_reduction/examples/lle.py","file_name":"lle.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"622822972","text":"# -*- encoding=utf-8 -*-\nimport datetime\nimport re\nimport linecache\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nimport pandas as pd\n\n\ndef deal(listStr, i):\n # list转dataframe\n column = 'L2Voltage';\n df = pd.DataFrame(listStr, columns=[column])\n\n # 保存到本地excel\n IndexFile = 'L2Voltage'+'.xlsx'\n df.to_excel(IndexFile, index=False)\n\n\nif __name__ == '__main__':\n dataall = []\n for i in range(1, 100):\n indexFile = \"dnn\" + str(i) + 
\".mt0\"\n s = linecache.getline(indexFile, 5)\n print(s)\n strAfter = re.sub(' +', ',', s)\n data = strAfter.split(',')\n print(data)\n datalist = data[1025: 1537]\n print(datalist)\n dataall = dataall + datalist\n deal(dataall, i)\n","sub_path":"DNN网络对于波动快速下降的研究/ReRAM Array/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"369051828","text":"import os\r\nimport subprocess\r\nimport multiprocessing\r\nimport json\r\nimport numpy as np\r\nimport random\r\n\r\nnum_gpus = 1\r\n\r\n#COD CIFS from materials science journals without H (Richard specified no H)\r\nCIFS_DIR = r\"\\\\flexo.ads.warwick.ac.uk\\shared41\\Microscopy\\Jeffrey-Ede\\crystal_structures\\standardized_inorganic_no_H\"\r\ncif_filepaths = [r\"Z:\\Jeffrey-Ede\\crystal_structures\\standardized_inorganic_no_H\\666.cif\"]\r\n\r\nPARENT_DIR = r\"\\\\flexo.ads.warwick.ac.uk\\shared41\\Microscopy\\Jeffrey-Ede\\models\\wavefunctions\"\r\ndefault_json_filepath = os.path.join(PARENT_DIR, \"default.json\")\r\nfailed_file_filepath = os.path.join(PARENT_DIR, \"failed_files.txt\")\r\n\r\nEXE_DIR = r\"\\\\flexo.ads.warwick.ac.uk\\shared41\\Microscopy\\Jeffrey-Ede\\models\\wavefunctions\\clTEM_files\"\r\nexe_filepath = os.path.join(EXE_DIR, \"clTEM_cmd.exe\")\r\n\r\nOUTPUT_DIR = os.path.join(PARENT_DIR, \"output_single\")\r\nif not os.path.exists(OUTPUT_DIR):\r\n os.makedirs(OUTPUT_DIR)\r\n\r\nNUM_REPEATS = 5000\r\n\r\nCONFIG_DIR = os.path.join(PARENT_DIR, \"temp_single\")\r\nconfig_filepath = os.path.join(CONFIG_DIR, \"current_config_single.json\")\r\nif not os.path.exists(CONFIG_DIR):\r\n os.makedirs(CONFIG_DIR)\r\n\r\nwith open(default_json_filepath, \"r\") as f:\r\n default_config = json.load(f)\r\n\r\n\r\nfailed_paths = []\r\nwith open(failed_file_filepath, 'r') as ff:\r\n lines = ff.readlines()\r\n for l in lines:\r\n failed_paths.append(l.rstrip())\r\n\r\n# print(default_config)\r\n\r\ndef random_config():\r\n \"\"\"Change default configuration to random configuration.\"\"\"\r\n\r\n config = default_config.copy()\r\n \r\n # Things to randomise\r\n # Voltage (use some presets)\r\n # aperture size\r\n # convergence\r\n # defocus spread\r\n voltages = [300, 200, 80]\r\n\r\n config[\"microscope\"][\"voltage\"] = random.choice(voltages)\r\n config[\"microscope\"][\"aperture\"] = np.random.uniform(5, 30)\r\n config[\"microscope\"][\"delta\"] = np.random.uniform(0, 20)\r\n config[\"microscope\"][\"alpha\"] = np.random.uniform(0.1, 2)\r\n\r\n # aberrations\r\n config[\"microscope\"][\"aberrations\"][\"C10\"][\"val\"] = np.random.uniform(-30, 30)\r\n\r\n config[\"microscope\"][\"aberrations\"][\"C12\"][\"mag\"] = np.random.uniform(-50, 50)\r\n config[\"microscope\"][\"aberrations\"][\"C12\"][\"ang\"] = np.random.uniform(0, 180)\r\n\r\n config[\"microscope\"][\"aberrations\"][\"C21\"][\"mag\"] = np.random.uniform(-1000, 1000)\r\n config[\"microscope\"][\"aberrations\"][\"C21\"][\"ang\"] = np.random.uniform(0, 180)\r\n\r\n config[\"microscope\"][\"aberrations\"][\"C23\"][\"mag\"] = np.random.uniform(-1000, 1000)\r\n config[\"microscope\"][\"aberrations\"][\"C23\"][\"ang\"] = np.random.uniform(0, 180)\r\n\r\n config[\"microscope\"][\"aberrations\"][\"C30\"][\"val\"] = np.random.uniform(-500, 500)\r\n\r\n return config\r\n\r\ndef do_sim(cif_filepath):\r\n #\r\n # This is a real bodge to match the device to the thread....\r\n #\r\n device = 1 #int(multiprocessing.current_process().name[-1]) - 1\r\n\r\n device_string = 
\"0:%s\" % device\r\n\r\n if cif_filepath in failed_paths:\r\n return\r\n\r\n out_paths = []\r\n\r\n for repetition in range(NUM_REPEATS):\r\n cif_name = os.path.splitext(os.path.basename(cif_filepath))[0]\r\n out_filepath = os.path.join(OUTPUT_DIR, cif_name)\r\n out_repeat_filepath = os.path.join(out_filepath, str(repetition))\r\n\r\n if os.path.exists(os.path.join(out_repeat_filepath, 'Image.tif')):\r\n continue # get out this loop as we already have data here\r\n\r\n out_paths.append(out_repeat_filepath)\r\n\r\n if len(out_paths) == 0:\r\n return\r\n\r\n print(\"\\n\\nSimulating on device:\" + device_string + \" using file: \" + cif_filepath)\r\n #print(\"\\n\\n\\n\")\r\n\r\n # for repetition in range(NUM_REPEATS): # Number of times to go through CIFs\r\n # #\r\n # # Create output folder\r\n # #\r\n\r\n # # make a folder for each cif\r\n # cif_name = os.path.splitext(os.path.basename(cif_filepath))[0]\r\n # out_filepath = os.path.join(OUTPUT_DIR, cif_name)\r\n \r\n # # make a folder for each repetition\r\n # out_repeat_filepath = os.path.join(out_filepath, str(repetition))\r\n\r\n # # while os.path.exists(out_repeat_filepath):\r\n # # counter += 1\r\n # # out_repeat_filepath = os.path.join(out_filepath, str(counter))\r\n\r\n # if os.path.exists(os.path.join(out_repeat_filepath, 'Image.tif')):\r\n # continue # get out this loop as we already have data here\r\n\r\n for out_path in out_paths:\r\n \r\n try:\r\n\r\n if not os.path.exists(out_path):\r\n os.makedirs(out_path)\r\n\r\n #\r\n # Randomise the simulation parameters\r\n #\r\n\r\n # Save random configuration\r\n config = random_config()\r\n with open(config_filepath, \"w\") as f:\r\n json.dump(config, f)\r\n\r\n #\r\n # Randomise the structure inputs\r\n #\r\n\r\n # randomise the cell depth (between 5 nm and 100 nm)\r\n cell_depth = np.random.uniform(50, 1000)\r\n cell_widths = np.random.uniform(50, 100)\r\n cell_string = \"%s,%s,%s\" % (cell_widths, cell_widths, cell_depth)\r\n\r\n # randomise the zone axis (only up to 2)\r\n zone_h = np.random.randint(0, 3)\r\n zone_k = np.random.randint(0, 3)\r\n zone_l = np.random.randint(0, 3)\r\n zone_string = \"%s,%s,%s\" % (zone_h, zone_k, zone_l)\r\n\r\n # random tilt perturbations (normal distribution)\r\n tilt_a = np.random.normal(scale=0.1)\r\n tilt_b = np.random.normal(scale=0.1)\r\n tilt_c = np.random.normal(scale=0.1)\r\n tilt_string = \"%s,%s,%s\" % (tilt_a, tilt_b, tilt_c)\r\n\r\n #\r\n # Do the simulation\r\n #\r\n\r\n # FNULL = open(os.devnull, 'w') # used to suppress the output\r\n subprocess.call([exe_filepath, cif_filepath, \"-s\"+cell_string, \"-z\"+zone_string, \"-t\"+tilt_string, \"-o\" + out_path, \"-d\"+device_string, \"-c\" + config_filepath])#, stdout=FNULL)\r\n\r\n #if not os.path.exists(os.path.join(out_path, 'Image.tif')):\r\n # # file didn't simulate\r\n # with open(failed_file_filepath, \"a\") as ff:\r\n # ff.write(cif_filepath + \"\\n\")\r\n\r\n # return\r\n except:\r\n continue\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # with multiprocessing.Pool(num_gpus) as p:\r\n # p.map(do_sim, cif_filepaths)\r\n\r\n with open(failed_file_filepath, \"a\") as ff:\r\n ff.write(\"\\n*\\nStarting run\\n*\\n\")\r\n\r\n for cf in cif_filepaths:\r\n do_sim(cf)\r\n","sub_path":"wavefunctions/run_simulations_single.py","file_name":"run_simulations_single.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"569274557","text":"# CST 205 Final Project\n# Central Coast Solutions, Shannon 
Davis and Kyle Luoma\n# 13 December, 2016\n\ndef runProgram():\n# Run this function to see the full program in action.\n# Be sure the resource directory is present with all files!\n marioify(generateAudio(\"song.csv\"))\n\n\n# ----- Audio Generation -----\n\n#Libraries:\nimport math\nimport random\n\n#Global variables:\nresDirectory = \"D:\\\\Python Projects\\\\CST 205\\\\res\\\\\"\ninstList = [\"SINE\", \"SQUARE\", \"HSINE\"]\nrobust = True \n\ndef generateAudio(someFile):\n \n generatedAudio = makeATune(someFile)\n play(generatedAudio)\n writeSoundTo(generatedAudio, resDirectory + \"jingleBells.wav\")\n return generatedAudio\n \ndef makeATune(fileName):\n global resDirectory\n global instList\n \n instrument = None\n note = None\n \n trackList = []\n sampleRate = 22050\n amplitude = 15000\n \n # Tempo settings:\n beatPerMin = 120 \n count = 4\n \n beatPerSec = beatPerMin / float(60)\n ticksPerSec = beatPerSec * count\n tickLength = 1 / float(ticksPerSec)\n \n # Open the notes file and make into a dictionary with note/octave as key:\n noteFreqs = dict()\n noteFreqs[\"\"] = 1\n noteFile = open(resDirectory + 'noteFrequencies.csv')\n noteText = noteFile.read()\n noteList = noteText.split('\\n')\n for note in noteList:\n tempList = note.split(',')\n if len(tempList) == 2:\n noteFreqs[tempList[0]] = int(tempList[1])\n \n # Open the song csv and make into a 2d list:\n songFile = open(resDirectory + fileName, 'rt')\n songText = songFile.read()\n tickList = songText.split('\\n')\n for i in range(0, len(tickList)):\n tickList[i] = tickList[i].split(',')\n \n trackLength = tickLength * len(tickList)\n \n # Build a four track song:\n for i in range(0, 4):\n trackList.append(makeEmptySound(int(trackLength * sampleRate)))\n cursor = 0\n for tick in tickList:\n #Iterate through all ticks in a track i and generate sound based on instrument and note values in song\n if len(tick) >= 2:\n instrument = tick[1 + (i * 2)]\n note = tick[2 + (i * 2)]\n if robust:\n print(\"On track \" + str(i + 1) + \" tick number \" + str(tick[0]) + \" the instrument is \" + instrument + \" and note \" + note + \".\")\n if instrument == \"SINE\":\n tickSound = sinSynth(noteFreqs[note], amplitude, tickLength)\n elif instrument == \"SQUARE\":\n tickSound = squareSynth(noteFreqs[note], amplitude, tickLength)\n elif instrument == \"HSINE\":\n tickSound = halfSinSynth(noteFreqs[note], amplitude, tickLength)\n else:\n tickSound = makeEmptySound(int(tickLength * sampleRate))\n \n trackList[i] = copy(tickSound, trackList[i], cursor)\n cursor += int(tickLength * sampleRate)\n\n if robust:\n print(\"Tick length: \" + str(tickLength))\n print(tickList)\n print(noteFreqs)\n print(trackList)\n \n return mergeTracks(trackList)\n\ndef copy(source, target, start):\n# Copies a source file onto a target file at the given start index\n for i in range(start, getLength(source) + start):\n sourceValue = getSampleValueAt(source, i - start)\n setSampleValueAt(target, i, sourceValue)\n \n return target\n\n\ndef mergeTracks(trackList):\n # Takes a list of at least two .wav files of equal length and merges them.\n numTracks = len(trackList)\n trackLength = getLength(trackList[0])\n mergedSound = makeEmptySound(trackLength)\n \n if len(trackList) < 2:\n print(\"Not enough tracks in list to merge tracks.\")\n return mergedSound\n \n for track in trackList:\n if getLength(track) != trackLength:\n print(\"Not all of the tracks are the same length. 
Cannot merge.\")\n return mergedSound\n \n for i in range (0, trackLength):\n for track in trackList:\n mergedSample = getSampleValueAt(mergedSound, i)\n trackSample = getSampleValueAt(track, i)\n mergedSample += (trackSample / numTracks)\n setSampleValueAt(mergedSound, i, mergedSample)\n \n return mergedSound\n \n\ndef squareSynth(hertz, amplitude, duration):\n #Generates a square waveform in .wav format\n \n sampleRate = 22050\n cycleLength = sampleRate / hertz\n cursor = 0\n \n squareSynth = makeEmptySound(int(duration * sampleRate))\n \n while cursor <= (sampleRate * duration) - cycleLength:\n for i in range(-1 , 2, 2):\n for j in range(0 , cycleLength / 2):\n setSampleValueAt(squareSynth, cursor + j, i * amplitude)\n cursor += (cycleLength / 2)\n \n #explore(squareSynth)\n return squareSynth\n \n \ndef sinSynth(hertz, amplitude, duration):\n #Generates a sine waveform in .wav format\n \n sampleRate = 22050\n cycleLength = sampleRate / hertz\n cursor = 0\n \n sinSynth = makeEmptySound(int(duration * sampleRate))\n while cursor <= (sampleRate * duration) - cycleLength:\n for i in range(0, cycleLength):\n setSampleValueAt(sinSynth, cursor + i, amplitude * sin((i * (2 * pi)) / cycleLength))\n cursor += cycleLength\n \n #explore(sinSynth)\n return sinSynth\n\ndef halfSinSynth(hertz, amplitude, duration):\n #Generates a half sine waveform in .wav format\n \n sampleRate = 22050\n cycleLength = sampleRate / hertz\n cursor = 0\n \n halfSinSynth = makeEmptySound(int(duration * sampleRate))\n while cursor <= (sampleRate * duration) - cycleLength:\n for i in range(0, cycleLength):\n setSampleValueAt(halfSinSynth, cursor + i, amplitude * sin((i * pi) / cycleLength))\n cursor += cycleLength\n \n #explore(halfSinSynth)\n return halfSinSynth\n\n\n# ----- Image Generation / Sound Visualization -----\n\nmario = makePicture(resDirectory + 'marioR.jpg')\nbackground = makePicture(resDirectory + 'background.jpg')\nflag = makePicture(resDirectory + 'flag.jpg')\n\ndef marioify(song):\n play(song)\n for i in range(0, 20):\n output_Pic = move_Image(background, mario, i * 25, 380)\n output_Pic = cheap_Reset(background, output_Pic)\n output_Pic = move_Image(output_Pic, flag, 650, 100)\n add_Text(output_Pic)\n\ndef add_Text(source):\n import java.awt.Font as Font\n font = makeStyle(sansSerif, bold, 80)\n addTextWithStyle(source, 100, 100, \"Fin!\", font, white)\n repaint(source)\n\ndef move_Image(dest, source, targetX, targetY):\n for x in range(0, getWidth(source)):\n for y in range(0, getHeight(source)):\n sourcePx = getPixel(source, x, y)\n destPx = getPixel(dest, x + targetX, y + targetY)\n setColor(destPx, getColor(sourcePx))\n for x in range(targetX, 0, -1):\n for y in range(380, 580):\n destPx = getPixel(dest, x, y)\n setColor(destPx, black)\n repaint(dest)\n return dest\n\ndef cheap_Reset(newImage, oldImage):\n show(newImage)\n return newImage\n\n","sub_path":"CST205_Final_Project.py","file_name":"CST205_Final_Project.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"397470234","text":"from gluon import current\nimport json\n\nimport datetime\nfrom datetime import timedelta\n\n#\n\nfrom applications.my_pms2.modules import common\nfrom applications.my_pms2.modules import logger\n\nclass Casereport:\n def __init__(self,db,providerid):\n self.db = db\n self.providerid = providerid\n return \n\n #this method creates a new case report\n def createcasereport(self, csrdata):\n db = self.db\n \n jsonresp = {}\n \n 
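# Illustrative sketch only (hypothetical payload values, not taken from the callers):\n        # csrdata is expected to look like the posted form dict, e.g.\n        #   {\"child_name\": \"A\", \"dob\": \"01/01/2015\", \"cavity_milk_teeth\": \"1\", ...}\n        # with dates as dd/mm/yyyy strings and checkbox flags as \"1\"/\"0\" strings,\n        # which is what the strptime/ternary conversions below assume.\n        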
csrid = 0\n \n try:\n #create new case report\n csrid = db.casereport.insert(\\\n child_name = csrdata[\"child_name\"],\n child_class = csrdata[\"child_class\"],\n parent_name = csrdata[\"parent_name\"],\n school_name = csrdata[\"school_name\"],\n admission_number = csrdata[\"admission_number\"],\n cell = csrdata[\"cell\"],\n email = csrdata[\"email\"],\n dob = datetime.datetime.strptime(csrdata[\"dob\"],\"%d/%m/%Y\"),\n gender = csrdata[\"gender\"],\n cavity_milk_teeth = True if(csrdata[\"cavity_milk_teeth\"]==\"1\") else False,\n cavity_perm_teeth = True if(csrdata[\"cavity_perm_teeth\"]==\"1\") else False,\n crooked_teeth = True if(csrdata[\"crooked_teeth\"]==\"1\") else False,\n gum_problems = True if(csrdata[\"gum_problems\"]==\"1\") else False,\n emergency_consult = True if(csrdata[\"emergency_consult\"]==\"1\") else False,\n priority_checkup = True if(csrdata[\"priority_checkup\"]==\"1\") else False,\n routine_checkup = True if(csrdata[\"routine_checkup\"]==\"1\") else False,\n fluoride_check = True if(csrdata[\"fluoride_check\"]==\"1\") else False,\n casereport = csrdata[\"doctor_notes\"],\n \n is_active = True,\n created_on = common.getISTFormatCurrentLocatTime(),\n created_by = 1,\n modified_on = common.getISTFormatCurrentLocatTime(),\n modified_by = 1\n \n )\n \n jsonresp = {\"casereport_id\":str(csrid), \"result\":\"success\",\"error_message\":\"\"}\n \n except Exception as e:\n logger.loggerpms2.info(\"Create Case Report Exception:\\n\" + str(e))\n jsonresp = {\n \"result\":\"fail\",\n \"error_message\":\"Create Case Report Exception:\\n\" + str(e)\n }\n \n return json.dumps(jsonresp)\n \n #this method gets a list of all case reports filtered on email and cell\n def get_casereport_list(self, email, cell):\n \n db = self.db\n \n jsonresp = {}\n csrobj = {}\n csrlist = []\n \n try:\n csrs = db((db.casereport.email == email) & (db.casereport.cell == cell) & (db.casereport.is_active == True)).select()\n \n \n for csr in csrs:\n csrobj = {}\n csrobj[\"id\"] = csr.id\n csrobj[\"child_name\"] = csr.child_name\n csrobj[\"child_class\"] = csr.child_class\n csrobj[\"parent_name\"] = csr.parent_name\n csrobj[\"school_name\"] = csr.school_name\n csrobj[\"admission_number\"] = csr.admission_number\n csrobj[\"cell\"] = csr.cell\n csrobj[\"email\"] = csr.email\n csrobj[\"dob\"] = (csr.dob).strftime(\"%d/%m/%Y\")\n csrobj[\"gender\"] = csr.gender\n csrobj[\"created_on\"] = (csr.created_on).strftime(\"%d/%m/%Y\")\n csrobj[\"modified_on\"] = (csr.modified_on).strftime(\"%d/%m/%Y\")\n \n csrlist.append(csrobj)\n \n jsonresp = {\n \"csrcount\":str(len(csrlist)),\n \"csrlist\":csrlist,\n \"result\":\"success\",\n \"error_message\":\"\",\n \n }\n \n \n except Exception as e:\n logger.loggerpms2.info(\"Get Case Report List Response Exception: \\n\" + str(e))\n jsonresp = {\n \"result\":\"fail\",\n \"error_message\":\"Get Case Report List Response Exception: \\n\" + str(e)\n }\n \n return json.dumps(jsonresp) \n \n #this method returns casereport\n def getcasereport(self,csrid):\n db = self.db\n \n jsonresp = {}\n csrobj = {}\n try:\n \n csr = db(db.casereport.id == csrid).select()\n csrobj[\"id\"] = csr[0].id\n csrobj[\"child_name\"] = csr[0].child_name\n csrobj[\"child_class\"] = csr[0].child_class\n csrobj[\"parent_name\"] = csr[0].parent_name\n csrobj[\"school_name\"] = csr[0].school_name\n csrobj[\"admission_number\"] = csr[0].admission_number\n csrobj[\"cell\"] = csr[0].cell\n csrobj[\"email\"] = csr[0].email\n csrobj[\"dob\"] = (csr[0].dob).strftime(\"%d/%m/%Y\")\n csrobj[\"gender\"] = 
csr[0].gender\n \n csrobj[\"cavity_milk_teeth\"] = \"1\" if(csr[0].cavity_milk_teeth == True) else \"0\"\n csrobj[\"cavity_perm_teeth\"] = \"1\" if(csr[0].cavity_perm_teeth == True) else \"0\"\n csrobj[\"crooked_teeth\"] = \"1\" if(csr[0].crooked_teeth == True) else \"0\"\n csrobj[\"gum_problems\"] = \"1\" if(csr[0].gum_problems == True) else \"0\"\n csrobj[\"emergency_consult\"] = \"1\" if(csr[0].emergency_consult == True) else \"0\"\n csrobj[\"priority_checkup\"] = \"1\" if(csr[0].priority_checkup == True) else \"0\"\n csrobj[\"routine_checkup\"] = \"1\" if(csr[0].routine_checkup == True) else \"0\"\n csrobj[\"fluoride_check\"] = \"1\" if(csr[0].fluoride_check == True) else \"0\"\n \n csrobj[\"doctor_notes\"] = csr[0].casereport\n \n csrobj[\"created_on\"] = (csr[0].created_on).strftime(\"%d/%m/%Y\")\n csrobj[\"modified_on\"] = (csr[0].modified_on).strftime(\"%d/%m/%Y\") \n \n csrobj[\"result\"] = \"success\"\n csrobj[\"error_message\"] = \"\"\n \n jsonresp = csrobj\n except Exception as e:\n logger.loggerpms2.info(\"Get Case Report Response Exception: \\n\" + str(e))\n jsonresp = {\n \"result\":\"fail\",\n \"error_message\":\"Get Case Report Response Exception: \\n\" + str(e)\n }\n \n return json.dumps(jsonresp)\n \n \n #this method update the case report\n def updatecasereport(self,csrdata):\n \n db = self.db\n \n jsonresp = {}\n \n try:\n csrid = int(common.getid(csrdata[\"id\"]))\n db(db.casereport.id == csrid).update(\\\n child_name = csrdata[\"child_name\"],\n child_class = csrdata[\"child_class\"],\n parent_name = csrdata[\"parent_name\"],\n school_name = csrdata[\"school_name\"],\n admission_number = csrdata[\"admission_number\"],\n cell = csrdata[\"cell\"],\n email = csrdata[\"email\"],\n dob = datetime.datetime.strptime(csrdata[\"dob\"],\"%d/%m/%Y\"),\n gender = csrdata[\"gender\"],\n cavity_milk_teeth = True if(csrdata[\"cavity_milk_teeth\"]==\"1\") else False,\n cavity_perm_teeth = True if(csrdata[\"cavity_perm_teeth\"]==\"1\") else False,\n crooked_teeth = True if(csrdata[\"crooked_teeth\"]==\"1\") else False,\n gum_problems = True if(csrdata[\"gum_problems\"]==\"1\") else False,\n emergency_consult = True if(csrdata[\"emergency_consult\"]==\"1\") else False,\n priority_checkup = True if(csrdata[\"priority_checkup\"]==\"1\") else False,\n routine_checkup = True if(csrdata[\"routine_checkup\"]==\"1\") else False,\n fluoride_check = True if(csrdata[\"fluoride_check\"]==\"1\") else False,\n casereport = csrdata[\"doctor_notes\"],\n is_active = True,\n modified_on = common.getISTFormatCurrentLocatTime(),\n modified_by = 1\n )\n jsonresp = {\"casereport_id\":str(csrid), \"result\":\"success\",\"error_message\":\"\"} \n except Exception as e:\n logger.loggerpms2.info(\"Update Case Report Response Exception: \" + str(e))\n jsonresp = {\n \"result\":\"fail\",\n \"error_message\":\"updatecasereport:\\n\" + str(e)\n }\n \n return json.dumps(jsonresp)\n \n \n \n","sub_path":"modules/mdpcasereport.py","file_name":"mdpcasereport.py","file_ext":"py","file_size_in_byte":7373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"460911004","text":"#!/usr/bin/env python3\nimport platform, optparse, subprocess\n\ndef checkOS():\n if \"Linux\" in platform.system():\n return True\n else:\n return False\n\ndef get_arguments():\n parser = optparse.OptionParser()\n parser.add_option(\"-i\", \"--interface\", dest=\"interface\", help=\"Interface to change its MAC address\")\n parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\", help=\"New MAC 
address\")\n (options, arguments) = parser.parse_args()\n if not options.interface:\n parser.error(\"[-] Please specify an interface, use --help for more info\")\n if not options.new_mac:\n parser.error(\"[-] Please specify a new mac address, use --help for more info\")\n return options\n \n\ndef macChanger(interface_name, new_mac):\n subprocess.call([\"sudo\", \"ifconfig\", interface_name, \"down\"])\n subprocess.call([\"sudo\", \"ifconfig\", interface_name, \"hw\", \"ether\", new_mac])\n subprocess.call([\"sudo\", \"ifconfig\", interface_name, \"up\"])\n\n\nif __name__ == \"__main__\":\n ch = checkOS()\n if ch:\n options = get_arguments()\n macChanger(options.interface, options.new_mac)\n else:\n print(\"[-] Your OS is not linux!\")","sub_path":"veryBasicMacChanger.py","file_name":"veryBasicMacChanger.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"337396353","text":"class Solution:\n\n def twoSum(nums, target):\n look_for = {}\n for n, x in enumerate(nums, 0):\n try:\n return look_for[x], n\n except KeyError:\n look_for.setdefault(target - x, n)\n # \"\"\"\n # :type nums: List[int]\n # :type target: int\n # :rtype: List[int]\n # \"\"\"\n\n\nnums_list = [2, 7, 11, 15]\ntarget = 9\nresult = Solution.twoSum(nums_list, target)\nprint(result)\n","sub_path":"two_sums.py","file_name":"two_sums.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"633379355","text":"#Time O(V+E), Where V is number of vertices and E is number of edges in the graph.\n#Representation by Adjacency List.\n\nfrom collections import defaultdict\n\n\nclass Graph:\n def __init__(self):\n self.graph = defaultdict(list)\n\n # Function to add an edge to graph.\n def addEdge(self, u, v):\n self.graph[u].append(v)\n #For undirected graph.\n self.graph[v].append(u)\n\n # Functin to print a BFS of graph.\n def BFS(self, source):\n\n # Marked all the verteces as not visited.\n visited = [False] * (len(self.graph)+1)\n\n # Create a queue for BFS\n queue = []\n\n #Mark the source node as visited and enqueue it.\n queue.append(source)\n visited[source] = True\n\n while queue:\n\n # Dequeue a vertex from queue and print it.\n source = queue.pop(0)\n print(source, end = \" \")\n\n # Get all adjacent vertices of the dequeued vertex source.\n # If a adjacent has not been visited, then mark it visited and enqueue it.\n for i in self.graph[source]:\n if visited[i] == False:\n queue.append(i)\n visited[i] = True\n\nif __name__ == \"__main__\":\n g = Graph()\n g.addEdge(1, 2)\n g.addEdge(1, 3)\n g.addEdge(2, 4)\n g.addEdge(2, 5)\n g.addEdge(3, 5)\n g.addEdge(4, 5)\n g.addEdge(4, 6)\n g.addEdge(5, 2)\n g.addEdge(5, 4)\n g.addEdge(5, 6)\n g.addEdge(6, 4)\n g.addEdge(6, 5)\n\n print (\"Following is Breadth First Traversal(starting from vertex 1)\")\n g.BFS(1)\n","sub_path":"Graph/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"640492857","text":"# https://qiita.com/oppasiri330/items/0f9526a1c507ae170a56\nimport requests\nfrom bs4 import BeautifulSoup\n\nr = requests.get('https://pythonhow.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/')\nsoup = 
BeautifulSoup(r.content,'html.parser')\n\n# get the number of result pages\npage_nr=soup.find_all(\"a\",{\"class\":\"Page\"})[-1].text\n\nbase_url=\"https://pythonhow.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s=\"\n\nl = []\nfor page in range(0, int(page_nr)*10, 10):\n    url=base_url+str(page)+\".html\"\n    print(url)\n    r = requests.get(url)\n    c = r.content\n    soup = BeautifulSoup(c, \"html.parser\")\n    all = soup.find_all(\"div\", {\"class\":\"propertyRow\"}) # pass class=value etc. as a dict {}\n\n    # parse this page's listings while its soup is still in scope\n    # (this block used to sit outside the loop, so only the last page was scraped)\n    for item in all:\n        d = {}\n\n        price = item.find_all(\"h4\", {\"class\": \"propPrice\"})[0].text.replace(\"\\n\", \"\")\n        d[\"Price\"] = price\n\n        address = item.find_all(\"span\", {\"class\": \"propAddressCollapse\"})\n        try:\n            d[\"Address\"] = address[0].text\n        except:\n            d[\"Address\"] = None\n        try:\n            d[\"Locality\"] = address[1].text\n        except:\n            d[\"Locality\"] = None\n\n        l.append(d)\n\nimport pandas as pd\n# build a DataFrame from the list of dicts\ndf = pd.DataFrame(l)\ndf.to_csv(\"century21.csv\")","sub_path":"scraping/century21_bs4.py","file_name":"century21_bs4.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"145188354","text":"\"\"\"\n    COCO keypoint detection (2D multiple human pose estimation) dataset (for IBPPose).\n\"\"\"\n\nimport os\nimport json\n# import math\nimport cv2\n# from operator import itemgetter\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom .dataset_metainfo import DatasetMetaInfo\n\n\nclass CocoHpe3Dataset(data.Dataset):\n    \"\"\"\n    COCO keypoint detection (2D multiple human pose estimation) dataset.\n\n    Parameters\n    ----------\n    root : string\n        Path to `annotations`, `train2017`, and `val2017` folders.\n    mode : string, default 'train'\n        'train', 'val', 'test', or 'demo'.\n    transform : callable, optional\n        A function that transforms the image.\n    \"\"\"\n    def __init__(self,\n                 root,\n                 mode=\"train\",\n                 transform=None):\n        super(CocoHpe3Dataset, self).__init__()\n        self._root = os.path.expanduser(root)\n        self.mode = mode\n        self.transform = transform\n\n        mode_name = \"train\" if mode == \"train\" else \"val\"\n        annotations_dir_path = os.path.join(root, \"annotations\")\n        annotations_file_path = os.path.join(annotations_dir_path, \"person_keypoints_\" + mode_name + \"2017.json\")\n        with open(annotations_file_path, \"r\") as f:\n            self.file_names = json.load(f)[\"images\"]\n        self.image_dir_path = os.path.join(root, mode_name + \"2017\")\n        self.annotations_file_path = annotations_file_path\n\n    def __str__(self):\n        return self.__class__.__name__ + \"(\" + self._root + \")\"\n\n    def __len__(self):\n        return len(self.file_names)\n\n    def __getitem__(self, idx):\n        file_name = self.file_names[idx][\"file_name\"]\n        image_file_path = os.path.join(self.image_dir_path, file_name)\n        image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)\n        # image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)\n\n        max_downsample = 64\n        pad_value = 128\n        image, pad = self.pad_right_down_corner(image, max_downsample, pad_value)\n        image = np.float32(image / 255)\n        image = image.transpose((2, 0, 1)) # HWC -> CHW; must run before adding the leading axis (transpose axes would no longer match)\n        image = image[None, ...]\n        image = torch.from_numpy(image)\n\n        image_id = int(os.path.splitext(os.path.basename(file_name))[0])\n\n        # label packs the numeric image id, a constant 1.0 and the four pad sizes\n        label = np.array([image_id, 1.0] + pad, np.float32)\n        label = torch.from_numpy(label)\n\n        return image, label\n\n    @staticmethod\n    def pad_right_down_corner(img,\n                              stride,\n                              pad_value):\n        h = img.shape[0]\n        w = img.shape[1]\n\n        pad = 4 * [None]\n        pad[0] = 0 # up\n        pad[1] = 0 # left\n        pad[2] = 0 if (h % stride == 0) 
else stride - (h % stride) # down\n pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right\n\n img_padded = img\n pad_up = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1))\n img_padded = np.concatenate((pad_up, img_padded), axis=0)\n pad_left = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1))\n img_padded = np.concatenate((pad_left, img_padded), axis=1)\n pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1))\n img_padded = np.concatenate((img_padded, pad_down), axis=0)\n pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1))\n img_padded = np.concatenate((img_padded, pad_right), axis=1)\n\n return img_padded, pad\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n\n\nclass CocoHpe2ValTransform(object):\n def __init__(self,\n ds_metainfo):\n self.ds_metainfo = ds_metainfo\n\n def __call__(self, src, label):\n return src, label\n\n\ndef recalc_pose(pred,\n label):\n # label_img_id = label[:, 0].astype(np.int32)\n # label_score = label[:, 1]\n\n # pad = label[:, 2:6].astype(np.int32)\n #\n # paf_layers = 30\n # num_layers = 50\n # stride = 4\n #\n # output_blob = pred[0].transpose((1, 2, 0))\n # output_blob0 = output_blob[:, :, :paf_layers]\n # output_blob1 = output_blob[:, :, paf_layers:num_layers]\n #\n # output_blob0_avg = output_blob0\n # output_blob1_avg = output_blob1\n #\n # heatmap = cv2.resize(output_blob1_avg, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)\n #\n # heatmap = heatmap[pad[0]:imageToTest_padded.shape[0] - pad[2], pad[1]:imageToTest_padded.shape[1] - pad[3], :]\n # heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_CUBIC)\n #\n # # output_blob0 is PAFs\n # paf = cv2.resize(output_blob0_avg, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)\n #\n # paf = paf[pad[0]:imageToTest_padded.shape[0] - pad[2], pad[1]:imageToTest_padded.shape[1] - pad[3], :]\n # paf = cv2.resize(paf, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_CUBIC)\n #\n # heatmap_avg = heatmap_avg + heatmap / (len(multiplier) * len(rotate_angle))\n # paf_avg = paf_avg + paf / (len(multiplier) * len(rotate_angle))\n\n pred_pts_score = []\n pred_person_score = []\n label_img_id_ = []\n\n return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score)[0], np.array(label_img_id_[0])\n\n# ---------------------------------------------------------------------------------------------------------------------\n\n\nclass CocoHpe3MetaInfo(DatasetMetaInfo):\n def __init__(self):\n super(CocoHpe3MetaInfo, self).__init__()\n self.label = \"COCO\"\n self.short_label = \"coco\"\n self.root_dir_name = \"coco\"\n self.dataset_class = CocoHpe3Dataset\n self.num_training_samples = None\n self.in_channels = 3\n self.num_classes = 17\n self.input_image_size = (368, 368)\n self.train_metric_capts = None\n self.train_metric_names = None\n self.train_metric_extra_kwargs = None\n self.val_metric_capts = None\n self.val_metric_names = None\n self.test_metric_capts = [\"Val.CocoOksAp\"]\n self.test_metric_names = [\"CocoHpeOksApMetric\"]\n self.test_metric_extra_kwargs = [\n {\"name\": \"OksAp\",\n \"coco_annotations_file_path\": None,\n \"use_file\": False,\n \"pose_postprocessing_fn\": lambda x, y: recalc_pose(x, y)}]\n self.saver_acc_ind = 0\n self.do_transform = True\n self.val_transform = CocoHpe2ValTransform\n self.test_transform = CocoHpe2ValTransform\n self.ml_type = \"hpe\"\n 
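# Note: the mean_rgb/std_rgb values below are the standard ImageNet channel\n        # statistics; backbones pretrained on ImageNet typically expect inputs\n        # normalized with exactly these numbers.\n        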
self.net_extra_kwargs = {}\n self.mean_rgb = (0.485, 0.456, 0.406)\n self.std_rgb = (0.229, 0.224, 0.225)\n self.load_ignore_extra = False\n\n def add_dataset_parser_arguments(self,\n parser,\n work_dir_path):\n \"\"\"\n Create python script parameters (for ImageNet-1K dataset metainfo).\n\n Parameters:\n ----------\n parser : ArgumentParser\n ArgumentParser instance.\n work_dir_path : str\n Path to working directory.\n \"\"\"\n super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)\n parser.add_argument(\n \"--input-size\",\n type=int,\n nargs=2,\n default=self.input_image_size,\n help=\"size of the input for model\")\n parser.add_argument(\n \"--load-ignore-extra\",\n action=\"store_true\",\n help=\"ignore extra layers in the source PyTroch model\")\n\n def update(self,\n args):\n \"\"\"\n Update ImageNet-1K dataset metainfo after user customizing.\n\n Parameters:\n ----------\n args : ArgumentParser\n Main script arguments.\n \"\"\"\n super(CocoHpe3MetaInfo, self).update(args)\n self.input_image_size = args.input_size\n self.load_ignore_extra = args.load_ignore_extra\n\n def update_from_dataset(self,\n dataset):\n \"\"\"\n Update dataset metainfo after a dataset class instance creation.\n\n Parameters:\n ----------\n args : obj\n A dataset class instance.\n \"\"\"\n self.test_metric_extra_kwargs[0][\"coco_annotations_file_path\"] = dataset.annotations_file_path\n","sub_path":"pytorch/datasets/coco_hpe3_dataset.py","file_name":"coco_hpe3_dataset.py","file_ext":"py","file_size_in_byte":8310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"432532696","text":"# To use this code, make sure you\n#\n# import json\n#\n# and then, to convert JSON from a string, do\n#\n# result = ControllerStatistics_from_dict(json.loads(json_string))\nimport json\nimport sys\nimport threading\nfrom enum import Enum\nfrom typing import Any, Optional, Dict, List, TypeVar, Callable, Type, cast\nfrom dataclasses import dataclass\nfrom typing import Dict, Any, TypeVar, Callable, Type, cast\n\nimport numpy as np\n\nimport ConfigConst as CC\nimport logging\nlogger = logging.getLogger('ResultParser')\nhdlr = logging.FileHandler(CC.RESULT_PROCESSOR_LOG_FILE_PATH)\nformatter = logging.Formatter('[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s','%m-%d %H:%M:%S')\nhdlr.setFormatter(formatter)\nlogger.addHandler(hdlr)\nlogging.StreamHandler(stream=None)\nlogger.setLevel(logging.INFO)\n\nT = TypeVar(\"T\")\n\n\ndef from_float(x: Any) -> float:\n #print('Inside from float. 
data is '+str(x)+\" type is \"+str(type(x)))\n try:\n assert isinstance(x, (float, int)) and not isinstance(x, bool)\n except AssertionError as a:\n #print(\"Assertion error occured\",a)\n error =a\n return float(x)\n\n\ndef from_int(x: Any) -> int:\n assert isinstance(x, int) and not isinstance(x, bool)\n return x\n\n\ndef to_float(x: Any) -> float:\n assert isinstance(x, float)\n return x\n\n\ndef from_none(x: Any) -> Any:\n assert x is None\n return x\n\n\ndef from_union(fs, x):\n for f in fs:\n try:\n return f(x)\n except:\n pass\n assert False\n\n\ndef from_bool(x: Any) -> bool:\n assert isinstance(x, bool)\n return x\n\n\ndef from_dict(f: Callable[[Any], T], x: Any) -> Dict[str, T]:\n assert isinstance(x, dict)\n return { k: f(v) for (k, v) in x.items() }\n\n\ndef to_class(c: Type[T], x: Any) -> dict:\n assert isinstance(x, c)\n return cast(Any, x).to_dict()\n\n\ndef from_list(f: Callable[[Any], T], x: Any) -> List[T]:\n assert isinstance(x, list)\n return [f(y) for y in x]\n\n\ndef from_str(x: Any) -> str:\n assert isinstance(x, str)\n return x\n\n\n@dataclass\nclass CPUUtilizationPercent:\n host_total: float\n host_user: float\n host_system: float\n remote_total: float\n remote_user: float\n remote_system: int\n\n @staticmethod\n def from_dict(obj: Any) -> 'CPUUtilizationPercent':\n assert isinstance(obj, dict)\n host_total = from_float(obj.get(\"host_total\"))\n host_user = from_float(obj.get(\"host_user\"))\n host_system = from_float(obj.get(\"host_system\"))\n remote_total = from_float(obj.get(\"remote_total\"))\n remote_user = from_float(obj.get(\"remote_user\"))\n remote_system = from_float(obj.get(\"remote_system\"))\n return CPUUtilizationPercent(host_total, host_user, host_system, remote_total, remote_user, remote_system)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"host_total\"] = to_float(self.host_total)\n result[\"host_user\"] = to_float(self.host_user)\n result[\"host_system\"] = to_float(self.host_system)\n result[\"remote_total\"] = to_float(self.remote_total)\n result[\"remote_user\"] = to_float(self.remote_user)\n result[\"remote_system\"] = from_int(self.remote_system)\n return result\n\n\n@dataclass\nclass SumReceived:\n start: float\n end: float\n seconds: float\n bytes: int\n bits_per_second: float\n socket: Optional[int] = None\n retransmits: Optional[int] = None\n omitted: Optional[bool] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'SumReceived':\n assert isinstance(obj, dict)\n start = from_float(obj.get(\"start\"))\n end = from_float(obj.get(\"end\"))\n seconds = from_float(obj.get(\"seconds\"))\n bytes = from_int(obj.get(\"bytes\"))\n bits_per_second = from_float(obj.get(\"bits_per_second\"))\n socket = from_union([from_int, from_none], obj.get(\"socket\"))\n retransmits = from_union([from_int, from_none], obj.get(\"retransmits\"))\n omitted = from_union([from_bool, from_none], obj.get(\"omitted\"))\n return SumReceived(start, end, seconds, bytes, bits_per_second, socket, retransmits, omitted)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"start\"] = to_float(self.start)\n result[\"end\"] = to_float(self.end)\n result[\"seconds\"] = to_float(self.seconds)\n result[\"bytes\"] = from_int(self.bytes)\n result[\"bits_per_second\"] = to_float(self.bits_per_second)\n result[\"socket\"] = from_union([from_int, from_none], self.socket)\n result[\"retransmits\"] = from_union([from_int, from_none], self.retransmits)\n result[\"omitted\"] = from_union([from_bool, from_none], self.omitted)\n return result\n\n\n@dataclass\nclass 
EndStream:\n sender: Dict[str, float]\n receiver: SumReceived\n\n @staticmethod\n def from_dict(obj: Any) -> 'EndStream':\n assert isinstance(obj, dict)\n sender = from_dict(from_float, obj.get(\"sender\"))\n receiver = SumReceived.from_dict(obj.get(\"receiver\"))\n return EndStream(sender, receiver)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"sender\"] = from_dict(to_float, self.sender)\n result[\"receiver\"] = to_class(SumReceived, self.receiver)\n return result\n\n\n@dataclass\nclass End:\n streams: List[EndStream]\n sum_sent: SumReceived\n sum_received: SumReceived\n cpu_utilization_percent: CPUUtilizationPercent\n sender_tcp_congestion: str\n receiver_tcp_congestion: str\n\n @staticmethod\n def from_dict(obj: Any) -> 'End':\n assert isinstance(obj, dict)\n streams = from_list(EndStream.from_dict, obj.get(\"streams\"))\n sum_sent = SumReceived.from_dict(obj.get(\"sum_sent\"))\n sum_received = SumReceived.from_dict(obj.get(\"sum_received\"))\n cpu_utilization_percent = CPUUtilizationPercent.from_dict(obj.get(\"cpu_utilization_percent\"))\n sender_tcp_congestion = from_str(obj.get(\"sender_tcp_congestion\"))\n receiver_tcp_congestion = from_str(obj.get(\"receiver_tcp_congestion\"))\n return End(streams, sum_sent, sum_received, cpu_utilization_percent, sender_tcp_congestion, receiver_tcp_congestion)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"streams\"] = from_list(lambda x: to_class(EndStream, x), self.streams)\n result[\"sum_sent\"] = to_class(SumReceived, self.sum_sent)\n result[\"sum_received\"] = to_class(SumReceived, self.sum_received)\n result[\"cpu_utilization_percent\"] = to_class(CPUUtilizationPercent, self.cpu_utilization_percent)\n result[\"sender_tcp_congestion\"] = from_str(self.sender_tcp_congestion)\n result[\"receiver_tcp_congestion\"] = from_str(self.receiver_tcp_congestion)\n return result\n\n\n@dataclass\nclass IntervalStream:\n socket: int\n start: float\n end: float\n seconds: float\n bytes: int\n bits_per_second: float\n retransmits: int\n snd_cwnd: int\n rtt: int\n rttvar: int\n pmtu: int\n omitted: bool\n\n @staticmethod\n def from_dict(obj: Any) -> 'IntervalStream':\n assert isinstance(obj, dict)\n socket = from_int(obj.get(\"socket\"))\n start = from_float(obj.get(\"start\"))\n end = from_float(obj.get(\"end\"))\n seconds = from_float(obj.get(\"seconds\"))\n bytes = from_int(obj.get(\"bytes\"))\n bits_per_second = from_float(obj.get(\"bits_per_second\"))\n retransmits = from_int(obj.get(\"retransmits\"))\n snd_cwnd = from_int(obj.get(\"snd_cwnd\"))\n rtt = from_int(obj.get(\"rtt\"))\n rttvar = from_int(obj.get(\"rttvar\"))\n pmtu = from_int(obj.get(\"pmtu\"))\n omitted = from_bool(obj.get(\"omitted\"))\n return IntervalStream(socket, start, end, seconds, bytes, bits_per_second, retransmits, snd_cwnd, rtt, rttvar, pmtu, omitted)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"socket\"] = from_int(self.socket)\n result[\"start\"] = to_float(self.start)\n result[\"end\"] = to_float(self.end)\n result[\"seconds\"] = to_float(self.seconds)\n result[\"bytes\"] = from_int(self.bytes)\n result[\"bits_per_second\"] = to_float(self.bits_per_second)\n result[\"retransmits\"] = from_int(self.retransmits)\n result[\"snd_cwnd\"] = from_int(self.snd_cwnd)\n result[\"rtt\"] = from_int(self.rtt)\n result[\"rttvar\"] = from_int(self.rttvar)\n result[\"pmtu\"] = from_int(self.pmtu)\n result[\"omitted\"] = from_bool(self.omitted)\n return result\n\n\n@dataclass\nclass Interval:\n streams: List[IntervalStream]\n sum: SumReceived\n\n 
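# For orientation, a sketch of the iperf3 JSON fragment this mirrors (field\n    # names follow iperf3 --json output; the concrete values are made up):\n    #   {\"streams\": [{\"socket\": 5, \"start\": 0.0, \"end\": 1.0, \"bytes\": 131072, ...}],\n    #    \"sum\": {\"start\": 0.0, \"end\": 1.0, \"seconds\": 1.0, \"bytes\": 131072, \"bits_per_second\": 1048576.0}}\n    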
@staticmethod\n def from_dict(obj: Any) -> 'Interval':\n assert isinstance(obj, dict)\n streams = from_list(IntervalStream.from_dict, obj.get(\"streams\"))\n sum = SumReceived.from_dict(obj.get(\"sum\"))\n return Interval(streams, sum)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"streams\"] = from_list(lambda x: to_class(IntervalStream, x), self.streams)\n result[\"sum\"] = to_class(SumReceived, self.sum)\n return result\n\n\n@dataclass\nclass Connected:\n socket: int\n local_host: str\n local_port: int\n remote_host: str\n remote_port: int\n\n @staticmethod\n def from_dict(obj: Any) -> 'Connected':\n assert isinstance(obj, dict)\n socket = from_int(obj.get(\"socket\"))\n local_host = from_str(obj.get(\"local_host\"))\n local_port = from_int(obj.get(\"local_port\"))\n remote_host = from_str(obj.get(\"remote_host\"))\n remote_port = from_int(obj.get(\"remote_port\"))\n return Connected(socket, local_host, local_port, remote_host, remote_port)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"socket\"] = from_int(self.socket)\n result[\"local_host\"] = from_str(self.local_host)\n result[\"local_port\"] = from_int(self.local_port)\n result[\"remote_host\"] = from_str(self.remote_host)\n result[\"remote_port\"] = from_int(self.remote_port)\n return result\n\n\n@dataclass\nclass ConnectingTo:\n host: str\n port: int\n\n @staticmethod\n def from_dict(obj: Any) -> 'ConnectingTo':\n assert isinstance(obj, dict)\n host = from_str(obj.get(\"host\"))\n port = from_int(obj.get(\"port\"))\n return ConnectingTo(host, port)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"host\"] = from_str(self.host)\n result[\"port\"] = from_int(self.port)\n return result\n\n\n@dataclass\nclass TestStart:\n protocol: str\n num_streams: int\n blksize: int\n omit: int\n duration: int\n bytes: int\n blocks: int\n reverse: int\n tos: int\n\n @staticmethod\n def from_dict(obj: Any) -> 'TestStart':\n assert isinstance(obj, dict)\n protocol = from_str(obj.get(\"protocol\"))\n num_streams = from_int(obj.get(\"num_streams\"))\n blksize = from_int(obj.get(\"blksize\"))\n omit = from_int(obj.get(\"omit\"))\n duration = from_int(obj.get(\"duration\"))\n bytes = from_int(obj.get(\"bytes\"))\n blocks = from_int(obj.get(\"blocks\"))\n reverse = from_int(obj.get(\"reverse\"))\n tos = from_int(obj.get(\"tos\"))\n return TestStart(protocol, num_streams, blksize, omit, duration, bytes, blocks, reverse, tos)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"protocol\"] = from_str(self.protocol)\n result[\"num_streams\"] = from_int(self.num_streams)\n result[\"blksize\"] = from_int(self.blksize)\n result[\"omit\"] = from_int(self.omit)\n result[\"duration\"] = from_int(self.duration)\n result[\"bytes\"] = from_int(self.bytes)\n result[\"blocks\"] = from_int(self.blocks)\n result[\"reverse\"] = from_int(self.reverse)\n result[\"tos\"] = from_int(self.tos)\n return result\n\n\n@dataclass\nclass Timestamp:\n time: str\n timesecs: int\n\n @staticmethod\n def from_dict(obj: Any) -> 'Timestamp':\n assert isinstance(obj, dict)\n time = from_str(obj.get(\"time\"))\n timesecs = from_int(obj.get(\"timesecs\"))\n return Timestamp(time, timesecs)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"time\"] = from_str(self.time)\n result[\"timesecs\"] = from_int(self.timesecs)\n return result\n\n\n@dataclass\nclass Start:\n connected: List[Connected]\n version: str\n system_info: str\n timestamp: Timestamp\n connecting_to: ConnectingTo\n cookie: str\n tcp_mss: int\n sock_bufsize: int\n 
sndbuf_actual: int\n    rcvbuf_actual: int\n    test_start: TestStart\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'Start':\n        assert isinstance(obj, dict)\n        connected = from_list(Connected.from_dict, obj.get(\"connected\"))\n        version = from_str(obj.get(\"version\"))\n        system_info = from_str(obj.get(\"system_info\"))\n        timestamp = Timestamp.from_dict(obj.get(\"timestamp\"))\n        connecting_to = ConnectingTo.from_dict(obj.get(\"connecting_to\"))\n        cookie = from_str(obj.get(\"cookie\"))\n        tcp_mss = from_int(obj.get(\"tcp_mss\"))\n        sock_bufsize = from_int(obj.get(\"sock_bufsize\"))\n        sndbuf_actual = from_int(obj.get(\"sndbuf_actual\"))\n        rcvbuf_actual = from_int(obj.get(\"rcvbuf_actual\"))\n        test_start = TestStart.from_dict(obj.get(\"test_start\"))\n        return Start(connected, version, system_info, timestamp, connecting_to, cookie, tcp_mss, sock_bufsize, sndbuf_actual, rcvbuf_actual, test_start)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"connected\"] = from_list(lambda x: to_class(Connected, x), self.connected)\n        result[\"version\"] = from_str(self.version)\n        result[\"system_info\"] = from_str(self.system_info)\n        result[\"timestamp\"] = to_class(Timestamp, self.timestamp)\n        result[\"connecting_to\"] = to_class(ConnectingTo, self.connecting_to)\n        result[\"cookie\"] = from_str(self.cookie)\n        result[\"tcp_mss\"] = from_int(self.tcp_mss)\n        result[\"sock_bufsize\"] = from_int(self.sock_bufsize)\n        result[\"sndbuf_actual\"] = from_int(self.sndbuf_actual)\n        result[\"rcvbuf_actual\"] = from_int(self.rcvbuf_actual)\n        result[\"test_start\"] = to_class(TestStart, self.test_start)\n        return result\n\n\n@dataclass\nclass IPerfResult:\n    start: Start\n    intervals: List[Interval]\n    end: End\n    srcName: str\n    dstName: str\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'IPerfResult':\n        assert isinstance(obj, dict)\n        start = Start.from_dict(obj.get(\"start\"))\n        intervals = from_list(Interval.from_dict, obj.get(\"intervals\"))\n        end = End.from_dict(obj.get(\"end\"))\n        return IPerfResult(start, intervals, end, srcName=None, dstName=None)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"start\"] = to_class(Start, self.start)\n        result[\"intervals\"] = from_list(lambda x: to_class(Interval, x), self.intervals)\n        result[\"end\"] = to_class(End, self.end)\n        return result\n\n    def getResultsummary(self):\n        # Returns the summary section: total bytes sent/received, average send/receive\n        # bandwidth and total time are all kept in End.\n        return self.end\n\n    def getEndTimeInSec(self):\n        return float(self.start.timestamp.timesecs+float(self.end.sum_received.seconds))\n\n    def getRcvrSideThroughput(self):\n        # end.streams[0].sender is a plain dict here, so read the parsed receive summary instead\n        return self.end.sum_received.bits_per_second\n\n    def getSenderSideThroughput(self):\n        # Start has no streams attribute; the send-side summary lives in End\n        return self.end.sum_sent.bits_per_second\n\n    def getLocalFCT(self):\n        '''\n        This gives the time required to send the data to server from sender\n        :return:\n        '''\n        return self.end.sum_sent.end\n\n    def getRemoteFCT(self):\n        '''\n        This gives the time required to send the data to server from sender. 
the time is from rcver side\n :return:\n '''\n return self.end.sum_received.end\n def getTotalBytesSent(self):\n return self.end.sum_sent.bytes\n def getTotalBytesRcvd(self):\n return self.end.sum_received.bytes\n def getTotalRetransmits(self):\n return self.end.sum_sent.retransmits\n def getMinRTTForEachInterval(self):\n #This function creates a list of time vs min rtt\n pass\n\n def getMaxRTTForEachInterval(self):\n #This function creates a list of time vs max rtt\n pass\n\n def getAvgRTTForEachInterval(self):\n #This function creates a list of time vs avg rtt\n pass\n\n def getTimeVsCumulativeBytes(self):\n #This function creates a list of time vs total data sent in cummulative fashion\n # for example if at 1 st send m bytes and 2 nd second n bytes are transferrred. This list will contain [(1,m), (2, m+n)]\n pass\n def setSrcDestName(self, src, dst):\n self.srcName = src\n self.dstName = dst\n\ndef IPerfResult_from_dict(s: Any) -> IPerfResult:\n return IPerfResult.from_dict(s)\n\n\ndef IPerfResult_to_dict(x: IPerfResult) -> Any:\n return to_class(IPerfResult, x)\n\n\nclass IPerfResultObjectsForOneFolder():\n def __init__(self,folderPath, start_timer, iperfResults):\n self.folderPath = folderPath\n self.start_timer = start_timer\n self.iperfResults = iperfResults\n\n def __str__(self):\n print(\"Total Iperf Result objects in the folder are \"+str( len(self.iperfResults)))\n for r in self.iperfResults:\n r= r[0]\n print(\"sum _sent = \", r.end.sum_sent.bytes, \" sum_recevied = \", r.end.sum_received.bytes, \" loss = \", (r.end.sum_sent.bytes-r.end.sum_received.bytes) )\n\n\n\n\n#--------------classes for parsing controller stats\n\n# To use this code, make sure you\n#\n# import json\n#\n# and then, to convert JSON from a string, do\n#\n# result = ControllerStatistics_from_dict(json.loads(json_string))\n\n\n\n@dataclass\nclass PortStats:\n upward_port_egress_packet_counter: Dict[str, int]\n downward_port_egress_packet_counter: Dict[str, int]\n upward_port_ingress_packet_counter: Dict[str, int]\n downward_port_inress_packet_counter: Dict[str, int]\n cpu_port_ingress_packet_counter: int\n cpu_port_egress_packet_counter: int\n queue_rates: Dict[str, int]\n queue_depths: Dict[str, int]\n\n @staticmethod\n def from_dict(obj: Any) -> 'PortStats':\n assert isinstance(obj, dict)\n upward_port_egress_packet_counter = from_dict(from_int, obj.get(\"_upwardPortEgressPacketCounter\"))\n # downward_port_egress_packet_counter = from_dict(from_int, obj.get(\"_downwardPortEgressPacketCounter\"))\n # upward_port_ingress_packet_counter = from_dict(from_int, obj.get(\"_upwardPortIngressPacketCounter\"))\n # downward_port_inress_packet_counter = from_dict(from_int, obj.get(\"_downwardPortInressPacketCounter\"))\n # cpu_port_ingress_packet_counter = from_int(obj.get(\"_CPUPortIngressPacketCounter\"))\n # cpu_port_egress_packet_counter = from_int(obj.get(\"_CPUPortEgressPacketCounter\"))\n # queue_rates = from_dict(from_int, obj.get(\"queueRates\"))\n # queue_depths = from_dict(from_int, obj.get(\"queueDepths\"))\n return PortStats(upward_port_egress_packet_counter, None, None, None, None, None, None, None)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"_upwardPortEgressPacketCounter\"] = from_dict(from_int, self.upward_port_egress_packet_counter)\n result[\"_downwardPortEgressPacketCounter\"] = from_dict(from_int, self.downward_port_egress_packet_counter)\n result[\"_upwardPortIngressPacketCounter\"] = from_dict(from_int, self.upward_port_ingress_packet_counter)\n 
#--------------classes for parsing controller stats\n\n# To use this code, make sure you\n#\n#     import json\n#\n# and then, to convert JSON from a string, do\n#\n#     result = ControllerStatistics_from_dict(json.loads(json_string))\n\n\n\n@dataclass\nclass PortStats:\n    upward_port_egress_packet_counter: Dict[str, int]\n    downward_port_egress_packet_counter: Dict[str, int]\n    upward_port_ingress_packet_counter: Dict[str, int]\n    downward_port_inress_packet_counter: Dict[str, int]  # field name mirrors the \"_downwardPortInressPacketCounter\" key in the stats JSON\n    cpu_port_ingress_packet_counter: int\n    cpu_port_egress_packet_counter: int\n    queue_rates: Dict[str, int]\n    queue_depths: Dict[str, int]\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'PortStats':\n        assert isinstance(obj, dict)\n        # Only the upward egress counter is parsed for now; the remaining fields\n        # stay None until they are needed.\n        upward_port_egress_packet_counter = from_dict(from_int, obj.get(\"_upwardPortEgressPacketCounter\"))\n        # downward_port_egress_packet_counter = from_dict(from_int, obj.get(\"_downwardPortEgressPacketCounter\"))\n        # upward_port_ingress_packet_counter = from_dict(from_int, obj.get(\"_upwardPortIngressPacketCounter\"))\n        # downward_port_inress_packet_counter = from_dict(from_int, obj.get(\"_downwardPortInressPacketCounter\"))\n        # cpu_port_ingress_packet_counter = from_int(obj.get(\"_CPUPortIngressPacketCounter\"))\n        # cpu_port_egress_packet_counter = from_int(obj.get(\"_CPUPortEgressPacketCounter\"))\n        # queue_rates = from_dict(from_int, obj.get(\"queueRates\"))\n        # queue_depths = from_dict(from_int, obj.get(\"queueDepths\"))\n        return PortStats(upward_port_egress_packet_counter, None, None, None, None, None, None, None)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"_upwardPortEgressPacketCounter\"] = from_dict(from_int, self.upward_port_egress_packet_counter)\n        result[\"_downwardPortEgressPacketCounter\"] = from_dict(from_int, self.downward_port_egress_packet_counter)\n        result[\"_upwardPortIngressPacketCounter\"] = from_dict(from_int, self.upward_port_ingress_packet_counter)\n        result[\"_downwardPortInressPacketCounter\"] = from_dict(from_int, self.downward_port_inress_packet_counter)\n        result[\"_CPUPortIngressPacketCounter\"] = from_int(self.cpu_port_ingress_packet_counter)\n        result[\"_CPUPortEgressPacketCounter\"] = from_int(self.cpu_port_egress_packet_counter)\n        result[\"queueRates\"] = from_dict(from_int, self.queue_rates)\n        result[\"queueDepths\"] = from_dict(from_int, self.queue_depths)\n        return result\n\n\n@dataclass\nclass SwitchPortStatistics:\n    keys: bool\n    ensure_ascii: bool\n    check_circular: bool\n    allow_nan: bool\n    sort_keys: bool\n    indent: None\n    port_stats: PortStats\n    time: float\n    dev_name: str\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'SwitchPortStatistics':\n        assert isinstance(obj, dict)\n        keys = from_bool(obj.get(\"skipkeys\"))\n        ensure_ascii = from_bool(obj.get(\"ensure_ascii\"))\n        check_circular = from_bool(obj.get(\"check_circular\"))\n        allow_nan = from_bool(obj.get(\"allow_nan\"))\n        sort_keys = from_bool(obj.get(\"sort_keys\"))\n        indent = from_none(obj.get(\"indent\"))\n        port_stats = PortStats.from_dict(obj.get(\"portStats\"))\n        time = from_float(obj.get(\"time\"))\n        dev_name = from_str(obj.get(\"devName\"))\n        return SwitchPortStatistics(keys, ensure_ascii, check_circular, allow_nan, sort_keys, indent, port_stats, time, dev_name)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"skipkeys\"] = from_bool(self.keys)\n        result[\"ensure_ascii\"] = from_bool(self.ensure_ascii)\n        result[\"check_circular\"] = from_bool(self.check_circular)\n        result[\"allow_nan\"] = from_bool(self.allow_nan)\n        result[\"sort_keys\"] = from_bool(self.sort_keys)\n        result[\"indent\"] = from_none(self.indent)\n        result[\"portStats\"] = to_class(PortStats, self.port_stats)\n        result[\"time\"] = to_float(self.time)\n        result[\"devName\"] = from_str(self.dev_name)\n        return result\n\n\ndef SwitchPortStatistics_from_dict(s: Any) -> SwitchPortStatistics:\n    return SwitchPortStatistics.from_dict(s)\n\n\ndef SwitchPortStatistics_to_dict(x: SwitchPortStatistics) -> Any:\n    return to_class(SwitchPortStatistics, x)\n\n
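\n# --- Added sketch, not in the original module: loading a stats dump. The file name\n# and one-JSON-object-per-line layout are assumptions for illustration.\n#\n#     import json\n#     with open(\"switch_stats.json\") as f:\n#         for line in f:\n#             stats = SwitchPortStatistics_from_dict(json.loads(line))\n#             print(stats.dev_name, stats.time, stats.port_stats.upward_port_egress_packet_counter)\n\n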
\n\n#==============config parser\n\n\nT = TypeVar(\"T\")\nEnumT = TypeVar(\"EnumT\", bound=Enum)\n\ndef from_str(x: Any) -> str:\n    assert isinstance(x, str)\n    return x\n\n\ndef from_int(x: Any) -> int:\n    assert isinstance(x, int) and not isinstance(x, bool)\n    return x\n\n\ndef from_list(f: Callable[[Any], T], x: Any) -> List[T]:\n    assert isinstance(x, list)\n    return [f(y) for y in x]\n\n\ndef to_class(c: Type[T], x: Any) -> dict:\n    assert isinstance(x, c)\n    return cast(Any, x).to_dict()\n\n\ndef to_enum(c: Type[EnumT], x: Any) -> EnumT:\n    assert isinstance(x, c)\n    return x.value\n\n\ndef from_dict(f: Callable[[Any], T], x: Any) -> Dict[str, T]:\n    assert isinstance(x, dict)\n    return { k: f(v) for (k, v) in x.items() }\n\n\n@dataclass\nclass Link:\n    node1: str\n    node2: str\n    port2: int\n    bw: int\n    port1: int\n\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'Link':\n        assert isinstance(obj, dict)\n        node1 = from_str(obj.get(\"node1\"))\n        node2 = from_str(obj.get(\"node2\"))\n        port1 = from_int(obj.get(\"port1\"))\n        port2 = from_int(obj.get(\"port2\"))\n        bw = from_int(obj.get(\"bw\"))\n        return Link(node1, node2, port2, bw, port1)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"node1\"] = from_str(self.node1)\n        result[\"node2\"] = from_str(self.node2)\n        result[\"port1\"] = from_int(self.port1)\n        result[\"port2\"] = from_int(self.port2)\n        result[\"bw\"] = from_int(self.bw)\n        return result\n\n\n@dataclass\nclass Alllinks:\n    links: List[Link]\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'Alllinks':\n        assert isinstance(obj, dict)\n        links = from_list(Link.from_dict, obj.get(\"links\"))\n        return Alllinks(links)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"links\"] = from_list(lambda x: to_class(Link, x), self.links)\n        return result\n\n\nclass Driver(Enum):\n    BMV2 = \"bmv2\"\n\n\nclass Pipeconf(Enum):\n    ORG_MEDIANET_DCN_TE_LEAF = \"org.medianet.dcn-te-leaf\"\n    ORG_MEDIANET_DCN_TE_SPINE = \"org.medianet.dcn-te-spine\"\n    ORG_MEDIANET_DCN_TE_SUPER_SPINE = \"org.medianet.dcn-te-super-spine\"\n\nclass DeviceType(Enum):\n    INVALID = -1\n    HOST = 0\n    LEAF_SWITCH = 1\n    SPINE_SWITCH = 2\n    SUPER_SPINE_SWITCH = 3\n\n    def __str__(self):\n        # Every member renders the same way, so the enum's own name suffices.\n        return \"DEV TYPE: \" + self.name + \" \"\n\n@dataclass\nclass DeviceBasic:\n    management_address: str\n    driver: Driver\n    pipeconf: Pipeconf\n    thirftPort: str  # spelling and string type follow the \"thirftPort\" key in the config JSON\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'DeviceBasic':\n        assert isinstance(obj, dict)\n        management_address = from_str(obj.get(\"managementAddress\"))\n        driver = Driver(obj.get(\"driver\"))\n        pipeconf = Pipeconf(obj.get(\"pipeconf\"))\n        thirftPort = from_str(obj.get(\"thirftPort\"))\n        return DeviceBasic(management_address, driver, pipeconf, thirftPort)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"managementAddress\"] = from_str(self.management_address)\n        result[\"driver\"] = to_enum(Driver, self.driver)\n        result[\"pipeconf\"] = to_enum(Pipeconf, self.pipeconf)\n        result[\"thirftPort\"] = from_str(self.thirftPort)\n        return result\n\n\nclass SwitchType(Enum):\n    LEAF = \"Leaf\"\n    SPINE = \"Spine\"\n    SUPER_SPINE = \"SuperSpine\"\n\n\n@dataclass\nclass FabricDeviceConfig:\n    my_station_mac: str\n    switch_type: SwitchType\n    switch_host_subnet_prefix: str\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'FabricDeviceConfig':\n        assert isinstance(obj, dict)\n        my_station_mac = from_str(obj.get(\"myStationMac\"))\n        switch_type = SwitchType(obj.get(\"switchType\"))\n        switch_host_subnet_prefix = from_str(obj.get(\"switchHostSubnetPrefix\"))\n        return FabricDeviceConfig(my_station_mac, switch_type, switch_host_subnet_prefix)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"myStationMac\"] = from_str(self.my_station_mac)\n        result[\"switchType\"] = to_enum(SwitchType, self.switch_type)\n        result[\"switchHostSubnetPrefix\"] = from_str(self.switch_host_subnet_prefix)\n        return result\n\n\n@dataclass\nclass BasicElement:\n    name: str\n    ips: List[str]\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'BasicElement':\n        assert isinstance(obj, dict)\n        name = from_str(obj.get(\"name\"))\n        ips = from_list(from_str, obj.get(\"ips\"))\n        return BasicElement(name, ips)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"name\"] = from_str(self.name)\n        result[\"ips\"] = from_list(from_str, self.ips)\n        return result\n\n\n@dataclass\nclass FabricHostConfig:\n    mac: str\n    location: str\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'FabricHostConfig':\n        assert isinstance(obj, dict)\n        mac = from_str(obj.get(\"mac\"))\n        location = from_str(obj.get(\"location\"))\n        return FabricHostConfig(mac, location)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"mac\"] = from_str(self.mac)\n        result[\"location\"] = from_str(self.location)\n        return result\n\n
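\n# --- Added sketch, not in the original module: the from_dict/to_dict pairs above are\n# designed to round-trip. The values below are hypothetical, for illustration only.\n#\n#     cfg = FabricDeviceConfig.from_dict({\"myStationMac\": \"00:aa:00:00:00:01\",\n#                                         \"switchType\": \"Leaf\",\n#                                         \"switchHostSubnetPrefix\": \"10.0.1.0/24\"})\n#     assert cfg.switch_type is SwitchType.LEAF\n#     assert cfg.to_dict()[\"switchType\"] == \"Leaf\"\n\n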
\n@dataclass\nclass Host:\n    hostName: str\n    basic: BasicElement\n    fabric_host_config: FabricHostConfig\n\n    def __init__(self, hostName, basic, fabric_host_config):\n        self.hostName = hostName\n        self.basic = basic\n        self.fabric_host_config = fabric_host_config\n        self.portToLeafSwitchMap = {}\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'Host':\n        assert isinstance(obj, dict)\n        basic = BasicElement.from_dict(obj.get(\"basic\"))\n        fabric_host_config = FabricHostConfig.from_dict(obj.get(\"fabricHostConfig\"))\n        return Host(basic.name, basic, fabric_host_config)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"basic\"] = to_class(BasicElement, self.basic)\n        result[\"fabricHostConfig\"] = to_class(FabricHostConfig, self.fabric_host_config)\n        return result\n    def getLocationIndexes(self):\n        # Host names follow the pattern h<host>p<pod>l<leaf>, e.g. \"h1p2l3\".\n        hostIndex = self.basic.name[self.basic.name.index(\"h\") + 1: self.basic.name.index(\"p\")]\n        podIndex = self.basic.name[self.basic.name.index(\"p\") + 1: self.basic.name.index(\"l\")]\n        leafSwitchIndex = self.basic.name[self.basic.name.index(\"l\") + 1: len(self.basic.name)]\n        return hostIndex, leafSwitchIndex, podIndex\n\n\n@dataclass\nclass Port:\n    interfaces: List[BasicElement]\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'Port':\n        assert isinstance(obj, dict)\n        interfaces = from_list(BasicElement.from_dict, obj.get(\"interfaces\"))\n        return Port(interfaces)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"interfaces\"] = from_list(lambda x: to_class(BasicElement, x), self.interfaces)\n        return result\n\n\n\n@dataclass\nclass Device:\n    devName: str\n    basic: DeviceBasic\n    fabric_device_config: FabricDeviceConfig\n\n    def __init__(self, devName, basic, fabric_device_config):\n        self.devName = devName\n        self.basic = basic\n        self.fabric_device_config = fabric_device_config\n        self.portToHostMap = {}\n        self.portToSpineSwitchMap = {}\n        self.portToLeafSwitchMap = {}\n        self.portToSuperSpineSwitchMap = {}\n        self.packetOutLock = threading.Lock()\n        self.cliLock = threading.Lock()\n        self.portToQueueRateMap = {}\n        self.portToQueueDepthMap = {}\n        self.maxPort = CC.MAX_PORT_NUMBER\n\n        # Parse the numeric device id and the gRPC address out of a management\n        # address of the form \"grpc://<host>:<port>?device_id=<id>\".\n        s = self.basic.management_address.index(\"device_id=\") + len(\"device_id=\")\n        tempString = self.basic.management_address[s:len(self.basic.management_address)]\n        self.device_id = int(tempString)\n        s = self.basic.management_address.index(\"grpc://\") + len(\"grpc://\")\n        e = self.basic.management_address.index(\"?device_id=\")\n        self.grpcAddress = self.basic.management_address[s:e]\n        self.election_id = (1, 0)\n\n    @staticmethod\n    def from_dict(devName, obj: Any) -> 'Device':\n        assert isinstance(obj, dict)\n        basic = DeviceBasic.from_dict(obj.get(\"basic\"))\n        fabric_device_config = FabricDeviceConfig.from_dict(obj.get(\"fabricDeviceConfig\"))\n        return Device(devName, basic, fabric_device_config)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"basic\"] = to_class(DeviceBasic, self.basic)\n        result[\"fabricDeviceConfig\"] = to_class(FabricDeviceConfig, self.fabric_device_config)\n        return result\n\n\n\n\nclass ConfigLoader():\n    def __init__(self, cfgFileName):\n        self.nameToSwitchMap = {}\n        self.nameToHostMap = {}\n        self.cfgFileName = cfgFileName\n        print(\"Starting Result Processor with config file \", cfgFileName)\n        self.loadCFG(cfgFileName)\n\n    def loadCFG(self, cfgfileName):\n        cfgFile = open(cfgfileName)\n        obj = json.load(fp=cfgFile)\n        for devName in obj[\"devices\"]:\n            try:\n                dev = Device.from_dict(devName, obj[\"devices\"][devName])\n                s = devName.index(\"device:\") + len(\"device:\")  # Strip the \"device:\" prefix from the device name; it was added for ONOS.
\n                devName = devName[s:len(devName)]\n                self.nameToSwitchMap[devName] = dev\n                logger.info(\"New dev is \" + str(dev))\n                #dev.initialSetup()\n            except Exception:\n                e = sys.exc_info()\n                logger.error(\"Error in initializing %s\", devName)\n                logger.error(\"Error is \" + str(e))\n        for portLoc in obj[\"ports\"]:\n            p = Port.from_dict(obj[\"ports\"][portLoc])\n            logger.info(\"New port is \" + str(p))\n            pass\n        for hostMac in obj[\"hosts\"]:\n            h = Host.from_dict(obj[\"hosts\"][hostMac])\n            self.nameToHostMap[h.basic.name] = h\n            logger.info(\"New host is \" + str(h))\n        for i in range(0, len(obj[\"alllinks\"][\"links\"])):\n            logger.info(\"Link processing is not required for result processing. So skipping...\")\n            pass\n        cfgFile.close()\n        logger.info(\"Finished reading and loading cfg\")\n        # print(self.nameToSwitchMap)\n        # print(self.nameToHostMap)\n\n\n
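# --- Added sketch, not part of the original module: ConfigLoader is driven with the\n# topology/config JSON and then queried by name. The file and device names below are\n# hypothetical.\n#\n#     loader = ConfigLoader(\"topology_config.json\")\n#     dev = loader.nameToSwitchMap[\"leaf1\"]\n#     print(dev.device_id, dev.grpcAddress)\n\n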
class PerTrafficClassSummaryResults:\n\n    def __init__(self, traficClassIdentifierFlowVolume):\n        '''\n        :param traficClassIdentifierFlowVolume: the flow volume used to decide that a flow\n        belongs to this traffic class. For example, if traficClassIdentifierFlowVolume = 50 KB,\n        then all results kept in this class have a flow volume of roughly 50 KB.\n        '''\n        self.traficClassIdentifierFlowVolume = traficClassIdentifierFlowVolume\n        self.iperfResults = []  # all the iperf results that belong to this traffic class\n\n    def getTraficClassIdentifierFlowVolume(self):\n        return self.traficClassIdentifierFlowVolume\n\n    def addIperfResult(self, iperfResult):\n        self.iperfResults.append(iperfResult)\n    def getNthPercentilieTCPThroughputInBPS(self, n):\n        throughputAsArray = []\n        for r in self.iperfResults:\n            throughputAsArray.append(r.end.sum_received.bits_per_second)  # we take the receiver-side throughput\n        return np.percentile(throughputAsArray, n)\n\n    def getSTDOfTCPThroughputInBPS(self):\n        throughputAsArray = []\n        for r in self.iperfResults:\n            throughputAsArray.append(r.end.sum_received.bits_per_second)  # we take the receiver-side throughput\n        return np.std(throughputAsArray)\n    def getAVGOfTCPThroughputInBPS(self):\n        throughputAsArray = []\n        for r in self.iperfResults:\n            throughputAsArray.append(r.end.sum_received.bits_per_second)  # we take the receiver-side throughput\n        return np.average(throughputAsArray)\n\n    def getNthPercentilieFCT(self, n):\n        '''\n        :param n: which percentile of FCT you want; for the 90th percentile, pass n=90\n        :return:\n        '''\n        fctAsArray = []\n        for r in self.iperfResults:\n            fctAsArray.append(r.end.sum_received.seconds)  # we take the receiver-side time for flow completion\n        return np.percentile(fctAsArray, n)\n\n    def getAvgFCT(self):\n        '''\n        Average FCT across all results in this traffic class.\n        :return:\n        '''\n        fctAsArray = []\n        for r in self.iperfResults:\n            fctAsArray.append(r.end.sum_received.seconds)  # we take the receiver-side time for flow completion\n        return np.average(fctAsArray)\n    def getSTDOfFCT(self):\n        '''\n        Standard deviation of FCT across all results in this traffic class.\n        :return:\n        '''\n        fctAsArray = []\n        for r in self.iperfResults:\n            fctAsArray.append(r.end.sum_received.seconds)  # we take the receiver-side time for flow completion\n        return np.std(fctAsArray)\n\n    def getNthPercentilieRetransmit(self, n):\n        '''\n        :param n: which percentile of retransmits you want; for the 90th percentile, pass n=90\n        :return:\n        '''\n        retransmitNumAsArray = []\n        for r in self.iperfResults:\n            retransmitNumAsArray.append(r.end.sum_sent.retransmits)\n        return np.percentile(retransmitNumAsArray, n)\n\n    def getSTDOfRetransmit(self):\n        retransmitNumAsArray = []\n        for r in self.iperfResults:\n            retransmitNumAsArray.append(r.end.sum_sent.retransmits)\n        return np.std(retransmitNumAsArray)\n    def getAVGOfRetransmit(self):\n        retransmitNumAsArray = []\n        for r in self.iperfResults:\n            retransmitNumAsArray.append(r.end.sum_sent.retransmits)\n        return np.average(retransmitNumAsArray)\n\n    def getNthPercentilieSuccessfulData(self, n):\n        '''\n        :param n: which percentile of successfully delivered data you want; for the 90th\n        percentile, pass n=90\n        :return: the n-th percentile in KB\n        '''\n        successfulDataSentAsArray = []\n        for r in self.iperfResults:\n            successfulDataSentAsArray.append(r.end.sum_received.bytes)\n        return np.percentile(successfulDataSentAsArray, n) / 1024\n\n    def getSTDOfSuccessfulData(self):\n        '''\n        Standard deviation of successfully delivered data (in bytes) across all results.\n        '''\n        successfulDataSentAsArray = []\n        for r in self.iperfResults:\n            successfulDataSentAsArray.append(r.end.sum_received.bytes)\n        return np.std(successfulDataSentAsArray)\n\n    def getAVGOfSuccessfulData(self):\n        '''\n        Average successfully delivered data (in bytes) across all results.\n        '''\n        successfulDataSentAsArray = []\n        for r in self.iperfResults:\n            successfulDataSentAsArray.append(r.end.sum_received.bytes)\n        return np.average(successfulDataSentAsArray)\n\n    def getNthPercentilieDataLoss(self, n):\n        '''\n        :param n: which percentile of data loss you want; for the 90th percentile, pass n=90\n        :return: the n-th percentile in KB\n        '''\n        dataLossAsArray = []\n        for r in self.iperfResults:\n            if (r.end.sum_sent.bytes < r.end.sum_received.bytes):\n                dataLossAsArray.append(0)\n            else:\n                dataLossAsArray.append(r.end.sum_sent.bytes - r.end.sum_received.bytes)\n        return np.percentile(dataLossAsArray, n) / 1024\n\n    def getSTDOfDataLoss(self):\n        '''\n        Standard deviation of data loss (in bytes) across all results.\n        '''\n        dataLossAsArray = []\n        for r in self.iperfResults:\n            if (r.end.sum_sent.bytes < r.end.sum_received.bytes):\n                dataLossAsArray.append(0)\n            else:\n                dataLossAsArray.append(r.end.sum_sent.bytes - r.end.sum_received.bytes)\n        return np.std(dataLossAsArray)\n\n    def getAvgDataLoss(self):\n        '''\n        Average data loss (in bytes) across all results in this traffic class.\n        :return:\n        '''
\n        dataLossAsArray = []\n        for r in self.iperfResults:\n            if (r.end.sum_sent.bytes < r.end.sum_received.bytes):\n                dataLossAsArray.append(0)\n            else:\n                dataLossAsArray.append(r.end.sum_sent.bytes - r.end.sum_received.bytes)\n        return np.average(dataLossAsArray)","sub_path":"testAndMeasurement/ResultParsers.py","file_name":"ResultParsers.py","file_ext":"py","file_size_in_byte":39112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"613923078","text":"\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence\n\nfrom Utils.WordVecs import WordVecs\nfrom Utils.utils import *\nfrom test_model import get_best_run\nimport numpy as np\n\n\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\n\nfrom collections import defaultdict\nfrom Utils.sst import SSTDataset\nfrom torch.utils.data import DataLoader\n\nimport os\nimport argparse\nimport pickle\n\nfrom hierarchical_model import *\n\nclass SetVocab(dict):\n    def __init__(self, vocab):\n        self.update(vocab)\n\n    def ws2ids(self, ws):\n        return [self[w] if w in self else 0 for w in ws]\n\n    def ids2sent(self, ids):\n        idx2w = dict([(i, w) for w, i in self.items()])\n        return [idx2w[int(i)] if i in idx2w else \"UNK\" for i in ids]\n\n
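# --- Added sketch, not in the original file: intended usage of the Vocab class\n# defined just below.\n#\n#     vocab = Vocab(train=True)\n#     ids = vocab.ws2ids([\"the\", \"movie\", \"was\", \"great\"])  # unseen words get fresh ids\n#     vocab.train = False\n#     unk_ids = vocab.ws2ids([\"zzzunseen\"])                 # now unknown words map to 0 (UNK)\n#     print(vocab.ids2sent(ids))\n\n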
class Vocab(defaultdict):\n    def __init__(self, train=True):\n        super().__init__(lambda: len(self))\n        self.train = train\n        self.UNK = \"UNK\"\n        # set UNK token to 0 index\n        self[self.UNK]\n\n    def ws2ids(self, ws):\n        \"\"\" If train, you can use the default dict to add tokens\n            to the vocabulary, given these will be updated during\n            training. Otherwise, we replace them with UNK.\n        \"\"\"\n        if self.train:\n            return [self[w] for w in ws]\n        else:\n            return [self[w] if w in self else 0 for w in ws]\n\n    def ids2sent(self, ids):\n        idx2w = dict([(i, w) for w, i in self.items()])\n        return [idx2w[int(i)] if i in idx2w else \"UNK\" for i in ids]\n\n\ndef prepare_sequence(seq, to_ix):\n    idxs = [to_ix[w] for w in seq]\n    return torch.tensor(idxs, dtype=torch.long)\n\n\ndef train_model(vocab,\n                new_matrix,\n                tag_to_ix,\n                num_labels,\n                task2label2id,\n                embedding_dim,\n                hidden_dim,\n                num_lstm_layers,\n                train_embeddings,\n                auxiliary_trainX,\n                auxiliary_trainY,\n                auxiliary_testX,\n                auxiliary_testY,\n                maintask_loader,\n                maintask_train_iter,\n                maintask_dev_iter,\n                AUXILIARY_TASK=None,\n                epochs=10,\n                sentiment_learning_rate=0.001,\n                auxiliary_learning_rate=0.0001,\n                BATCH_SIZE=50,\n                number_of_runs=5,\n                random_seeds=[123, 456, 789, 101112, 131415],  # one seed per run\n                DATASET=\"SST\",\n                FINE_GRAINED=\"fine\",\n                aux=\"starsem\"\n                ):\n\n    # Save the model parameters\n    param_file = (dict(vocab.items()),\n                  new_matrix.shape,\n                  tag_to_ix,\n                  num_labels,\n                  task2label2id)\n\n    basedir = os.path.join(\"saved_models\",\n                           \"transfer_learning\",\n                           aux,\n                           \"{0}-{1}\".format(DATASET, FINE_GRAINED),\n                           AUXILIARY_TASK)\n    outfile = os.path.join(basedir,\n                           \"params.pkl\")\n    print(\"Saving model parameters to \" + outfile)\n    os.makedirs(basedir, exist_ok=True)\n\n    with open(outfile, \"wb\") as out:\n        pickle.dump(param_file, out)\n\n    for i, run in enumerate(range(number_of_runs)):\n\n        model = Hierarchical_Model(vocab,\n                                   new_matrix,\n                                   tag_to_ix,\n                                   num_labels,\n                                   task2label2id,\n                                   embedding_dim,\n                                   hidden_dim,\n                                   num_lstm_layers,\n                                   train_embeddings=train_embeddings)\n\n        # Set our optimizers\n        sentiment_params = list(model.word_embeds.parameters()) + \\\n                           list(model.lstm1.parameters()) +\\\n                           list(model.lstm2.parameters()) +\\\n                           list(model.linear.parameters())\n\n        auxiliary_params = list(model.word_embeds.parameters()) + \\\n                           list(model.lstm1.parameters()) +\\\n                           list(model.hidden2tag.parameters()) +\\\n                           [model.transitions]\n\n        sentiment_optimizer = torch.optim.Adam(sentiment_params, lr=sentiment_learning_rate)\n        auxiliary_optimizer = torch.optim.Adam(auxiliary_params, lr=auxiliary_learning_rate)\n\n        print(\"RUN {0}\".format(run + 1))\n        best_dev_acc = 0.0\n        best_dev_f1 = 0.0\n\n        # set random seed for reproducibility\n        np.random.seed(random_seeds[i])\n        torch.manual_seed(random_seeds[i])\n\n        for j, epoch in enumerate(range(epochs)):\n\n            # If AUXILIARY_TASK is None, defaults to single task\n            if AUXILIARY_TASK not in [\"None\", \"none\", 0, None]:\n\n                print(\"epoch {0}: \".format(epoch + 1), end=\"\")\n                for k in tqdm(range(len(auxiliary_trainX))):\n                    # Step 1. Remember that Pytorch accumulates gradients.\n                    # We need to clear them out before each instance\n                    model.zero_grad()\n\n                    # Step 2. Get our inputs ready for the network, that is,\n                    # turn them into Tensors of word indices.\n                    sentence_in = torch.tensor(auxiliary_trainX[k])\n                    targets = torch.tensor(auxiliary_trainY[k][AUXILIARY_TASK])\n\n                    # Step 3. Run our forward pass.\n                    loss = model.neg_log_likelihood(sentence_in, targets)\n\n                    # Step 4. Compute the loss, gradients, and update the parameters by
\n                    # calling optimizer.step()\n                    loss.backward()\n                    auxiliary_optimizer.step()\n\n                preds = model.eval_aux(auxiliary_testX, auxiliary_testY,\n                                       taskname=AUXILIARY_TASK, verbose=False)\n                ys = [i[AUXILIARY_TASK] for i in auxiliary_testY]\n\n                f1 = 0\n                for y, y_hat in zip(ys, preds):\n                    f1 += f1_score(y, y_hat, average=\"micro\")\n                f1 /= len(preds)\n\n                if f1 > best_dev_f1:\n                    best_dev_f1 = f1\n                    print(\"NEW BEST DEV F1: {0:.3f}\".format(f1))\n\n\n                    basedir = os.path.join(\"saved_models\",\n                                           \"transfer_learning\",\n                                           aux,\n                                           \"{0}-{1}\".format(DATASET, FINE_GRAINED),\n                                           AUXILIARY_TASK,\n                                           \"auxiliary_models\",\n                                           \"{0}\".format(run + 1))\n                    outname = \"epochs:{0}-lstm_dim:{1}-lstm_layers:{2}-devf1:{3:.3f}\".format(epoch + 1, model.lstm1.hidden_size, model.lstm1.num_layers, f1)\n                    modelfile = os.path.join(basedir,\n                                             outname)\n                    os.makedirs(basedir, exist_ok=True)\n                    print(\"saving model to {0}\".format(modelfile))\n                    torch.save(model.state_dict(), modelfile)\n\n        # # LOAD THE BEST AUXILIARY MODEL\n        # weight_dir = os.path.join(\"saved_models\",\n        #                           \"transfer_learning\",\n        #                           aux,\n        #                           \"{0}-{1}\".format(DATASET, FINE_GRAINED),\n        #                           AUXILIARY_TASK,\n        #                           \"auxiliary_models\",\n        #                           \"{0}\".format(run + 1))\n        # best_f1, (epochs, lstm_dim, lstm_layers), best_weights =\\\n        #     get_best_run(weight_dir)\n        # print(\"Loading best aux weights from {0}\".format(weight_dir))\n        # model.load_state_dict(torch.load(best_weights))\n\n\n        # for j, epoch in enumerate(range(epochs)):\n\n        #     batch_losses = 0\n        #     num_batches = 0\n        #     model.train()\n\n        #     print(\"epoch {0}\".format(epoch + 1))\n\n        #     for sents, targets in maintask_loader:\n        #         model.zero_grad()\n\n        #         loss = model.pooled_sentiment_loss(sents, targets)\n        #         batch_losses += loss.data\n        #         num_batches += 1\n\n        #         loss.backward()\n        #         sentiment_optimizer.step()\n\n        #     print()\n        #     print(\"loss: {0:.3f}\".format(batch_losses / num_batches))\n        #     model.eval()\n        #     f1, acc, preds, ys = model.eval_sent(maintask_train_iter,\n        #                                          batch_size=BATCH_SIZE)\n        #     f1, acc, preds, ys = model.eval_sent(maintask_dev_iter,\n        #                                          batch_size=BATCH_SIZE)\n\n        #     if acc > best_dev_acc:\n        #         best_dev_acc = acc\n        #         print(\"NEW BEST DEV ACC: {0:.3f}\".format(acc))\n\n\n        #         basedir = os.path.join(\"saved_models\", \"{0}-{1}\".format(DATASET, FINE_GRAINED),\n        #                                AUXILIARY_TASK,\n        #                                \"{0}\".format(run + 1))\n        #         outname = \"epochs:{0}-lstm_dim:{1}-lstm_layers:{2}-devacc:{3:.3f}\".format(epoch + 1, model.lstm1.hidden_size, model.lstm1.num_layers, acc)\n        #         modelfile = os.path.join(basedir,\n        #                                  outname)\n        #         os.makedirs(basedir, exist_ok=True)\n        #         print(\"saving model to {0}\".format(modelfile))\n        #         torch.save(model.state_dict(), modelfile)\n\n\n\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--NUM_LAYERS\", \"-nl\", default=1, type=int)\n    parser.add_argument(\"--HIDDEN_DIM\", \"-hd\", default=100, type=int)\n    parser.add_argument(\"--BATCH_SIZE\", \"-bs\", default=50, type=int)\n    parser.add_argument(\"--EMBEDDING_DIM\", \"-ed\", default=300, type=int)\n    parser.add_argument(\"--TRAIN_EMBEDDINGS\", \"-te\", action=\"store_false\")\n    parser.add_argument(\"--AUXILIARY_TASK\", \"-aux\", default=\"negation_scope\")\n    parser.add_argument(\"--EMBEDDINGS\", \"-emb\",\n                        default=\"../../embeddings/google.txt\")\n    parser.add_argument(\"--DATA_DIR\", \"-dd\",\n                        default=\"../data/datasets/en\")\n    parser.add_argument(\"--DATASET\", \"-data\",\n                        default=\"SST\")\n    parser.add_argument(\"--AUXILIARY_DATASET\", \"-auxdata\",\n                        
default=\"preprocessed/starsem_negation/cdt.conllu\")\n    parser.add_argument(\"--SENTIMENT_LR\", \"-slr\", default=0.001, type=float)\n    parser.add_argument(\"--AUXILIARY_LR\", \"-alr\", default=0.0001, type=float)\n    parser.add_argument(\"--FINE_GRAINED\", \"-fg\",\n                        default=\"fine\",\n                        help=\"Either 'fine' or 'binary' (defaults to 'fine').\")\n\n    args = parser.parse_args()\n    print(args)\n\n    START_TAG = \"<START>\"\n    STOP_TAG = \"<STOP>\"\n\n    if \"starsem\" in args.AUXILIARY_DATASET:\n        aux = \"starsem\"\n    else:\n        aux = \"sfu\"\n\n    # Get embeddings (CHANGE TO GLOVE OR FASTTEXT EMBEDDINGS)\n    embeddings = WordVecs(args.EMBEDDINGS)\n    print(\"loaded embeddings from {0}\".format(args.EMBEDDINGS))\n    w2idx = embeddings._w2idx\n\n    # Create shared vocabulary for tasks\n    vocab = Vocab(train=True)\n\n    # Update with word2idx from pretrained embeddings so we don't lose them,\n    # shifting each index up by one to avoid overwriting the UNK token\n    # at index 0\n    with_unk = {}\n    for word, idx in embeddings._w2idx.items():\n        with_unk[word] = idx + 1\n    vocab.update(with_unk)\n\n    # Import datasets\n    # This will update vocab with words not found in embeddings\n    datadir = os.path.join(args.DATA_DIR, args.DATASET, args.FINE_GRAINED)\n    sst = SSTDataset(vocab, False, datadir)\n\n    maintask_train_iter = sst.get_split(\"train\")\n    maintask_dev_iter = sst.get_split(\"dev\")\n    maintask_test_iter = sst.get_split(\"test\")\n\n    maintask_loader = DataLoader(maintask_train_iter,\n                                 batch_size=args.BATCH_SIZE,\n                                 collate_fn=maintask_train_iter.collate_fn,\n                                 shuffle=True)\n\n    if args.AUXILIARY_TASK in [\"speculation_scope\"]:\n        X, Y, org_X, org_Y, word2id, char2id, task2label2id =\\\n            get_conll_data(os.path.join(args.DATA_DIR, \"preprocessed/SFU/filtered_speculation_scope.conll\"),\n                           [\"speculation_scope\"],\n                           word2id=vocab)\n\n\n    if args.AUXILIARY_TASK in [\"negation_scope\"]:\n        X, Y, org_X, org_Y, word2id, char2id, task2label2id =\\\n            get_conll_data(os.path.join(args.DATA_DIR, args.AUXILIARY_DATASET),\n                           [\"negation_scope\"],\n                           word2id=vocab)\n\n\n    if args.AUXILIARY_TASK in [\"xpos\", \"upos\", \"multiword\", \"supersense\"]:\n        X, Y, org_X, org_Y, word2id, char2id, task2label2id =\\\n            get_conll_data(os.path.join(args.DATA_DIR, \"preprocessed/streusle/train/streusle.ud_train.conllulex\"),\n                           [\"xpos\", \"upos\", \"multiword\", \"supersense\"],\n                           word2id=vocab)\n\n\n    if args.AUXILIARY_TASK not in [\"None\", \"none\", 0, None]:\n        train_n = int(len(X) * .9)\n        tag_to_ix = task2label2id[args.AUXILIARY_TASK]\n        tag_to_ix[START_TAG] = len(tag_to_ix)\n        tag_to_ix[STOP_TAG] = len(tag_to_ix)\n\n        X, char_X = zip(*X)\n\n        auxiliary_trainX = X[:train_n]\n        auxiliary_trainY = Y[:train_n]\n        auxiliary_testX = X[train_n:]\n        auxiliary_testY = Y[train_n:]\n\n    else:\n        # Set all relevant auxiliary task parameters to None\n        tag_to_ix = {\"None\": 0}\n        tag_to_ix[START_TAG] = len(tag_to_ix)\n        tag_to_ix[STOP_TAG] = len(tag_to_ix)\n        task2label2id = None\n\n        auxiliary_trainX = None\n        auxiliary_trainY = None\n        auxiliary_testX = None\n        auxiliary_testY = None\n\n\n    # Get new embedding matrix so that words not included in pretrained embeddings have their own rows\n\n    diff = len(vocab) - embeddings.vocab_length - 1\n    UNK_embedding = np.zeros((1, args.EMBEDDING_DIM))\n    new_embeddings = np.zeros((diff, args.EMBEDDING_DIM))\n    new_matrix = np.concatenate((UNK_embedding, embeddings._matrix, new_embeddings))\n\n
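    # --- Added note, a sketch not present in the original: layout of new_matrix.\n    #     row 0                  -> UNK embedding (all zeros here)\n    #     rows 1..V              -> pretrained embeddings, shifted up by one\n    #     rows V+1..len(vocab)-1 -> zero-initialised rows for words seen only in\n    #                               the task data\n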
\n    train_model(vocab,\n                new_matrix,\n                tag_to_ix,\n                len(sst.labels),\n                task2label2id,\n                args.EMBEDDING_DIM,\n                args.HIDDEN_DIM,\n                args.NUM_LAYERS,\n                args.TRAIN_EMBEDDINGS,\n                auxiliary_trainX,\n                auxiliary_trainY,\n                auxiliary_testX,\n                auxiliary_testY,\n                maintask_loader,\n                maintask_train_iter,\n                maintask_dev_iter,\n                AUXILIARY_TASK=args.AUXILIARY_TASK,\n                epochs=10,\n                sentiment_learning_rate=args.SENTIMENT_LR,\n                auxiliary_learning_rate=args.AUXILIARY_LR,\n                BATCH_SIZE=50,\n                number_of_runs=5,\n                random_seeds=[123, 456, 789, 101112, 131415],\n                DATASET=args.DATASET,\n                FINE_GRAINED=args.FINE_GRAINED,\n                aux=aux\n                )\n\n","sub_path":"models/transfer_learning.py","file_name":"transfer_learning.py","file_ext":"py","file_size_in_byte":15444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"476800419","text":"import xml.etree.ElementTree as ET\nimport xml.dom.minidom as md\n\narbol = ET.parse(\"micatalogo.xml\")\nraiz = arbol.getroot()\n\ndef hazlegible(estruc):\n    cadenaorig = ET.tostring(estruc, encoding=\"unicode\")\n    doc = md.parseString(cadenaorig)\n    return doc.toprettyxml()\n\ndef listado():\n    print(\"Catalogued entries:\", len(raiz))\n    for etiqueta in raiz:\n        print(\"-\" * 10, \"\\nMovie ID:\", etiqueta.attrib[\"id\"])\n        for subetiqueta in etiqueta:\n            print(subetiqueta.attrib, subetiqueta.text)\n    print(\"-\" * 30, \"\\nReadable structure:\\n\", hazlegible(raiz))\n\ndef masdatos():\n    peli = raiz[0]\n    atributos = {\"name\": \"cat\", \"value\": \"comedia\"}\n    tipo = peli.makeelement(\"tipo\", atributos)\n    tipo.text = \"comedia\"\n    peli.append(tipo)\n    atributos = {}\n    reparto = peli.makeelement(\"reparto\", atributos)\n    atributos = {\"name\": \"protagonista\"}\n    actor1 = reparto.makeelement(\"interprete1\", atributos)\n    atributos = {}\n    actor2 = reparto.makeelement(\"interprete2\", atributos)\n    actor3 = reparto.makeelement(\"interprete3\", atributos)\n    actor1.text = \"Charles Chaplin\"\n    actor2.text = \"Paulette Goddard\"\n    actor3.text = \"Jack Oakie\"\n    reparto.append(actor1)\n    reparto.append(actor2)\n    reparto.append(actor3)\n    peli.append(reparto)\n    arbol.write(\"micatalogoAmpliado.xml\")\n\ndef nuevapeli():\n    peli = ET.SubElement(raiz, \"pelicula\")\n    titulo = ET.SubElement(peli, \"titulo\")\n    director = ET.SubElement(peli, \"director\")\n    tipo = ET.SubElement(peli, \"tipo\")\n    reparto = ET.SubElement(peli, \"reparto\")\n    actor1 = ET.SubElement(reparto, \"interprete1\")\n    actor2 = ET.SubElement(reparto, \"interprete2\")\n    actor3 = ET.SubElement(reparto, \"interprete3\")\n\n    peli.set(\"id\", \"12346\")\n    titulo.set(\"name\", \"ben-hur\")\n    titulo.text = \"Ben-Hur\"\n    director.text = \"William Wyler\"\n    tipo.set(\"name\", \"cat\")\n    tipo.set(\"value\", \"aventuras\")\n    tipo.text = \"aventuras, drama\"\n    actor1.set(\"name\", \"protagonista\")\n    actor1.text = \"Charlton Heston\"\n    actor2.text = \"Jack Hawkins\"\n    actor3.text = \"Stephen Boyd\"\n    arbol.write(\"micatalogoAmpliado.xml\")\n\ndef localizar(id):\n    for etiqueta in raiz.iter(\"pelicula\"):\n        if etiqueta.get(\"id\") == id:\n            print(etiqueta.find(\"titulo\").text, \"Movie id\", id)\n            print(\"Director:\", etiqueta.find(\"director\").text)\n            print(\"Type:\", etiqueta.find(\"tipo\").text)\n            for actor in etiqueta.find(\"reparto\"):\n                if actor.get(\"name\") == \"protagonista\":\n                    print(\"Lead:\", actor.text)\n\ndef modificar():\n    for etiqueta in raiz.iter(\"interprete2\"):\n        etiqueta.set(\"name\", \"secundario\")\n    for elemento in raiz:\n        for tipo in elemento.findall(\"tipo\"):\n            if tipo.get(\"value\") == \"comedia\":\n                tipo.set(\"value\", \"humor, comedia\")\n                tipo.text = \"humor, comedia\"\n    arbol.write(\"micatalogoAmpliado.xml\")\n\n
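# --- Added sketch, not part of the original script: ElementTree also supports\n# limited XPath queries, which can replace some of the manual loops above, e.g.:\n#\n#     for titulo in raiz.findall(\"./pelicula/titulo\"):\n#         print(titulo.text)\n#     comedias = raiz.findall(\".//tipo[@value='comedia']\")\n\n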
def eliminar():\n    for etiqueta in raiz.iter(\"titulo\"):\n        etiqueta.attrib.pop(\"name\")\n    for actor in raiz.iter(\"reparto\"):\n        actor.remove(actor.find(\"interprete3\"))\n    arbol.write(\"micatalogoAmpliado.xml\")\n\ndef vaciar():\n    raiz.clear()\n    arbol.write(\"micatalogoAmpliado.xml\")\n\n\nmasdatos()\nnuevapeli()\nmodificar()\neliminar()\nvaciar()\nlistado()\nlocalizar(\"12345\")","sub_path":"Bases de datos/XML/gestionxml.py","file_name":"gestionxml.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"358345796","text":"import pickle\n######## Binary file (even if you open the file, the contents are unreadable)\nf = open(\"test.txt\", \"wb\")\ndata = {1: 'python', 2: 'you need'}\npickle.dump(data, f)\nf.close()\n\nf = open(\"test.txt\", \"rb\")\ndata = pickle.load(f)\nprint(data)\nf.close()\n######## Text file (ASCII-based file; you can open it and read the contents)\nf = open(\"test2.txt\", \"w\")\nf.write(\"{1:'python', 2: 'you need'}\")\nf.close()\n\nf = open(\"test2.txt\", \"r\")\nprint(f.read())\nf.close()","sub_path":"9_Outter_Function/_pickle.py","file_name":"_pickle.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"466695874","text":"\"\"\"\nService wrapper for the ngram BPE model - Gensim implementation.\n\"\"\"\n\nfrom typing import Tuple, Optional  # noqa # pylint: disable=unused-import\nimport logging\nimport os\nimport uuid\nimport requests\nimport json\n\nimport pandas as pd\nfrom multiprocessing import Process\n\nfrom werkzeug.datastructures import FileStorage\nimport wget\n\nfrom tackle.rest_api import wrapper_util\nfrom tropical.models import ngram_analysis_gensim_bpe\n\n\ndef __ngram_task(df: pd.DataFrame, file_format_version: str):\n    analyser = ngram_analysis_gensim_bpe.NGramAnalysisGensimBPE()\n    response_frames = analyser.analyse_dataframe(df, delimiter=b'|')\n    return response_frames\n\n
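\n# --- Added sketch, not in the original service: driving __ngram_task directly. The\n# CSV schema expected by NGramAnalysisGensimBPE.analyse_dataframe is assumed here;\n# the file name is hypothetical.\n#\n#     import pandas as pd\n#     df = pd.read_csv(\"conversations.csv\", na_filter=False)\n#     frames = __ngram_task(df, file_format_version=\"1.0.0\")\n\n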
\ndef __ngram_task_and_callback(df: pd.DataFrame, file_format_version: str,\n                              task_uuid: str, callback: str):\n    print(f\"Process forked; now in child process with ID={os.getpid()}\")\n\n    response_frames = __ngram_task(df, file_format_version)\n    payload = {\n        \"uuid\": task_uuid,\n        \"file_format_version\": file_format_version,\n        \"callback\": callback,\n        \"response_frames\": response_frames\n    }\n    try:\n        response = requests.post(\n            url=callback,\n            data=json.dumps(payload, ensure_ascii=False).encode('utf-8'),\n            timeout=3.0,\n            headers={'content-type': 'application/json'}\n        )\n        response_status_code, response_text = response.status_code, response.text\n    except requests.Timeout as e:\n        logging.error(f\"ngram_bpe_wrapper.__ngram_task_and_callback: requests.Timeout {e}!\")\n        response_status_code, response_text = 400, json.dumps({\"error\": f\"Callback request timeout ({e}).\"})\n    except requests.RequestException as e:\n        logging.error(f\"ngram_bpe_wrapper.__ngram_task_and_callback: requests.RequestException {e}!\")\n        response_status_code, response_text = 500, json.dumps({\"error\": f\"Callback request exception ({e}).\"})\n\n    logging.info(f\"ngram_bpe_wrapper.__ngram_task_and_callback: Completed {task_uuid} with callback={callback}.\")\n    return response_status_code, response_text\n\n\ndef __start_process(task_filename: str,\n                    file_format_version: str,\n                    callback: Optional[str]) -> Tuple[int, wrapper_util.JSONType]:\n    task_uuid = str(uuid.uuid4())\n    logging.info(f\"ngram_bpe_wrapper.start_process: Start {task_uuid} with callback={callback}.\")\n\n    # ToDo: catch read_csv and other exceptions.\n    df = pd.read_csv(task_filename, na_filter=False)\n\n    if callback is None:\n        # Just do it synchronously.\n        response_frames = __ngram_task(df, file_format_version)\n        logging.info(f\"ngram_bpe_wrapper.__start_process: Completed {task_uuid} with callback={callback}.\")\n        response_status = 200\n    else:\n        # Start an async task ...\n        print(f\"Process will be forked; still in parent process with ID={os.getpid()}\")\n        p = Process(target=__ngram_task_and_callback, args=(df, file_format_version, task_uuid, callback))\n        p.start()\n        # p.join()  # Fire and forget for now. Could in future keep track of the processes via the API.\n        print(f\"Child process dispatched without joining; parent continues with ID={os.getpid()}\")
\n\n        response_frames = None  # Response will be sent later to callback URL.\n        response_status = 202\n\n    return response_status, {\n        \"uuid\": task_uuid,\n        \"file_format_version\": file_format_version,\n        \"callback\": callback,\n        \"response_frames\": response_frames\n    }\n\n\n@wrapper_util.lock_decorator\n@wrapper_util.auth_decorator\ndef start_process_url(auth_token: str, caller_name: Optional[str],\n                      file_url: str,\n                      file_format_version: str,\n                      callback: Optional[str]) -> Tuple[int, wrapper_util.JSONType]:\n    filename = wget.detect_filename(file_url)\n\n    if filename.endswith(\".csv\"):\n        task_filename = str(uuid.uuid4()) + \"_\" + filename\n        task_filename = wget.download(file_url, out=task_filename)\n        response = __start_process(task_filename, file_format_version, callback)\n        os.remove(task_filename)\n        return response\n    else:\n        return 400, {\"error\": \"Please provide a .csv file.\"}\n\n\n@wrapper_util.lock_decorator\n@wrapper_util.auth_decorator\ndef start_process_form(auth_token: str, caller_name: Optional[str],\n                       upfile: FileStorage,\n                       file_format_version: str,\n                       callback: Optional[str]) -> Tuple[int, wrapper_util.JSONType]:\n    filename = str(upfile.filename)\n\n    if filename.endswith(\".csv\"):\n        task_filename = str(uuid.uuid4()) + \"_\" + filename\n        upfile.save(task_filename)\n        upfile.close()\n        response = __start_process(task_filename, file_format_version, callback)\n        os.remove(task_filename)\n        return response\n    else:\n        return 400, {\"error\": \"Please upload a .csv file.\"}\n","sub_path":"tropical/rest_api/ngram_bpe_wrapper.py","file_name":"ngram_bpe_wrapper.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"457553534","text":"#!/usr/bin/env python2.7\n\nfrom __future__ import print_function\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nimport os\nimport pprint\nimport utils\nimport sys\nimport time\nimport platform\nimport psutil\nimport locale\nimport pileup as pileup\nimport vcf_writer as vcf_writer\nfrom __init__ import __version__\n\n\nverbose_print = lambda *a, **k: None\nverbose_pprint = lambda *a, **k: None\n\ndef set_logging_verbosity(options_dict):\n    \"\"\"Enable or disable logging.\n\n    Args:\n        options_dict : Dictionary of program arguments; any 'verbose' value\n            greater than 0 enables logging.\n    \"\"\"\n    global verbose_print\n    global verbose_pprint\n    verbose_print = print if options_dict['verbose'] > 0 else lambda *a, **k: None\n    verbose_pprint = pprint.pprint if options_dict['verbose'] > 0 else lambda *a, **k: None\n\n\ndef timestamp():\n    \"\"\"Return a timestamp string.\"\"\"\n    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n\ndef program_name():\n    \"\"\"Return the basename of the python script being executed.\"\"\"\n    return os.path.basename(sys.argv[0])\n\ndef command_line_short():\n    \"\"\"Return the command line string without the full path to the program.\"\"\"\n    return \"%s %s\" % (program_name(), \" \".join(sys.argv[1:]))\n\ndef command_line_long():\n    \"\"\"Return the command line string with the full path to the program.\"\"\"\n    return \" \".join(sys.argv)\n\ndef print_log_header():\n    \"\"\"Print a standardized header for the log with starting conditions.\"\"\"\n    verbose_print(\"# Command           : %s\" % command_line_long())\n    verbose_print(\"# Working Directory : %s\" % os.getcwd())\n    pbs_jobid = os.environ.get(\"PBS_JOBID\")\n    sge_jobid = os.environ.get(\"JOB_ID\")\n    sge_task_id = os.environ.get(\"SGE_TASK_ID\")\n    if 
sge_task_id == \"undefined\":\n        sge_task_id = None\n    if pbs_jobid:\n        verbose_print(\"# Job ID            : %s\" % pbs_jobid)\n    elif sge_jobid and sge_task_id:\n        verbose_print(\"# Job ID            : %s[%s]\" % (sge_jobid, sge_task_id))\n    elif sge_jobid:\n        verbose_print(\"# Job ID            : %s\" % sge_jobid)\n\n    verbose_print(\"# Hostname          : %s\" % platform.node())\n    locale.setlocale(locale.LC_ALL, '')\n    ram_mbytes = psutil.virtual_memory().total / 1024 / 1024\n    ram_str = locale.format(\"%d\", ram_mbytes, grouping=True)\n    verbose_print(\"# RAM               : %s MB\" % ram_str)\n    verbose_print(\"# Python Version    : %s\" % sys.version.replace(\"\\n\", \" \"))\n    verbose_print(\"\")\n\n\ndef print_arguments(options_dict):\n    \"\"\"Print the program options.\n\n    Inputs:\n        options_dict : Dictionary of program arguments\n    \"\"\"\n    verbose_print(\"Options:\")\n    for key in options_dict.keys():\n        verbose_print(\"    %s=%s\" % (key, options_dict[key]))\n\n\ndef create_snp_list(options_dict):\n    \"\"\"Create SNP list file\n\n    Description:\n    Create the SNP list -- the list of positions where variants were found\n    and the corresponding list of samples having a variant at each position. \n    This function expects, or creates '(*)', the following files arranged \n    in the following way:\n            sampleDirectories.txt\n            samples\n                sample_name_one/var.flt.vcf\n                ...\n            snplist.txt (*)\n\n    The files are used as follows:\n        1. The sampleDirectories.txt input file contains a list of the paths to \n           the sample directories.\n        2. The var.flt.vcf variant input files are used to construct the \n           SNP position list.\n        3. The snplist.txt output file contains the union of the SNP positions \n           and sample names extracted from all the var.flt.vcf files.\n\n    The sampleDirectories.txt and var.flt.vcf files are created outside of \n    this function. The package documentation provides an example of creating \n    these files based on the lambda_virus sequence that is used as one test \n    for this package.\n\n    Args:\n        sampleDirsFile: File path (not just file name) of file containing paths \n            to directories containing var.flt.vcf file for each sequence.\n        vcfFileName: File name of the VCF files which must exist in each of the\n            sample directories\n        snpListFile: File path (not just file name) of text format list \n            of SNP positions\n\n    Raises:\n\n    Examples:\n    options_dict = {'sampleDirsFile':'sampleDirectories.txt',\n                    'vcfFileName':'var.flt.vcf',\n                    'snpListFile':'snplist.txt',\n                    }\n    create_snp_list(options_dict)\n    \"\"\"\n    print_log_header()\n    verbose_print(\"# %s %s\" % (timestamp(), command_line_short()))\n    verbose_print(\"# %s version %s\" % (program_name(), __version__))\n    print_arguments(options_dict)\n\n    #==========================================================================\n    # Prep work\n    # Note use of filter on list_of_sample_directories to remove blank lines.\n    #==========================================================================\n    sample_directories_list_filename = options_dict['sampleDirsFile']\n    list_of_sample_directories = [line.rstrip() for line in open(sample_directories_list_filename, \"r\")]\n    list_of_sample_directories = sorted(filter(None, list_of_sample_directories))\n\n    #==========================================================================\n    # Read in all vcf files and process into dict of SNPs passing various\n    # criteria. Do this for each sample. 
Write to file.\n    #==========================================================================\n    snp_list_file_path = options_dict['snpListFile']\n    vcf_file_name = options_dict['vcfFileName']\n    list_of_vcf_files = [os.path.join(dir, vcf_file_name) for dir in list_of_sample_directories]\n\n    if options_dict['forceFlag'] or utils.target_needs_rebuild(list_of_vcf_files, snp_list_file_path):\n        snp_dict = utils.convert_vcf_files_to_snp_dict(list_of_vcf_files)\n        verbose_print('Found %d snp positions across %d sample vcf files.' % (len(snp_dict), len(list_of_vcf_files)))\n        utils.write_list_of_snps(snp_list_file_path, snp_dict)\n        verbose_print(\"\")\n    else:\n        verbose_print(\"SNP list %s has already been freshly built.  Use the -f option to force a rebuild.\" % snp_list_file_path)\n    verbose_print(\"# %s %s finished\" % (timestamp(), program_name()))\n\n\ndef create_snp_pileup(options_dict):\n    \"\"\"Create the SNP pileup file for a sample.\n\n    Description:\n    Create the SNP pileup file for a sample -- the pileup file restricted to \n    only positions where variants were found in any sample.\n    This function expects, or creates '(*)', the following files arranged \n    in the following way:\n            snplist.txt\n            samples\n                sample_name_one/reads.all.pileup\n                sample_name_one/reads.snp.pileup (*)\n\n    The files are used as follows:\n        1. The snplist.txt input file contains the list of SNP positions \n           extracted from the var.flt.vcf file.\n        2. The reads.all.pileup input file is the genome-wide pileup file \n           for this sample.\n        3. The reads.snp.pileup output file is the pileup file for this sample,\n           restricted to only positions where variants were found in any \n           sample.\n\n    The snplist.txt and reads.all.pileup files are created outside of this \n    function. The package documentation provides an example of creating these\n    files based on the lambda_virus sequence that is used as one test for \n    this package.\n\n    Args:\n        snpListFile: File path (not just file name) of text format list \n            of SNP positions across all samples\n        allPileupFile: File path (not just file name) of the whole-genome\n            pileup file for this sample\n        snpPileupFile: File path (not just file name) of the snp pileup file\n\n    Raises:\n\n    Examples:\n    options_dict = {'snpListFile':'snplist.txt',\n                    'allPileupFile':'samples/SRR555888/reads.all.pileup',\n                    'snpPileupFile':'samples/SRR555888/reads.snp.pileup'\n                    }\n    create_snp_pileup(options_dict)\n    \"\"\"\n    print_log_header()\n    verbose_print(\"# %s %s\" % (timestamp(), command_line_short()))\n    verbose_print(\"# %s version %s\" % (program_name(), __version__))\n    print_arguments(options_dict)\n\n    snp_list_file_path = options_dict['snpListFile']\n    all_pileup_file_path = options_dict['allPileupFile']\n    snp_pileup_file_path = options_dict['snpPileupFile']\n\n    source_files = [snp_list_file_path, all_pileup_file_path]\n    if options_dict['forceFlag'] or utils.target_needs_rebuild(source_files, snp_pileup_file_path):\n        # Create a pileup file with a subset of the whole-genome pileup restricted\n        # to locations with SNPs only.\n        snp_list = utils.read_snp_position_list(snp_list_file_path)\n        utils.create_snp_pileup(all_pileup_file_path, snp_pileup_file_path, set(snp_list))\n        verbose_print(\"\")\n    else:\n        verbose_print(\"SNP pileup %s has already been freshly built. 
Use the -f option to force a rebuild.\" % snp_pileup_file_path)\n    verbose_print(\"# %s %s finished\" % (timestamp(), program_name()))\n\n\ndef call_consensus(options_dict):\n    \"\"\"Call the consensus base for a sample\n\n    Call the consensus base for a sample at the positions where SNPs were found\n    in any of the samples.\n    This function expects, or creates '(*)', the following\n    files arranged in the following way:\n            snplist.txt\n            samples\n                sample_name_one/reads.all.pileup\n                sample_name_one/consensus.fasta (*)\n\n    The files are used as follows:\n        1. The snplist.txt input file contains the list of SNP positions \n           extracted from all the var.flt.vcf files combined.\n        2. The reads.all.pileup input file is a pileup at all positions,\n           used to determine the nucleotide base at each SNP position.\n        3. The consensus.fasta output file contains the SNP calls for each \n           sequence, arranged as a fasta file with one sequence per sample.\n\n    The snplist.txt and reads.all.pileup files are created outside of this function.\n    The package documentation provides an example \n    of creating these files based on the lambda_virus sequence that is used \n    as one test for this package.\n\n    Args:\n        forceFlag : boolean \n            flag to force processing even when result file already exists and\n            is newer than inputs\n        snpListFile : str\n            File path (not just file name) of text format list of SNP positions\n        allPileupFile : str\n            Relative or absolute path to the genome-wide pileup file for this \n            sample\n        consensusFile : str\n            Output file. Relative or absolute path to the consensus fasta file\n            for this sample.\n        minBaseQual : int\n            Minimum base quality score to count a read. All other snp filters\n            take effect after the low-quality reads are discarded.\n        minConsFreq : float\n            Consensus frequency. Minimum fraction of high-quality reads\n            supporting the consensus to make a call.\n        minConsStrdDpth : int\n            Consensus strand depth. Minimum number of high-quality reads \n            supporting the consensus which must be present on both the\n            forward and reverse strands to make a call.\n        minConsStrdBias : float\n            Strand bias. Minimum fraction of the high-quality \n            consensus-supporting reads which must be present on both the \n            forward and reverse strands to make a call. The numerator of this\n            fraction is the number of high-quality consensus-supporting reads\n            on one strand. 
The denominator is the total number of high-quality\n            consensus-supporting reads on both strands combined.\n\n    Raises:\n\n    Examples:\n    options_dict = {'snpListFile':'snplist.txt',\n                    'allPileupFile':'reads.all.pileup',\n                    'consensusFile':'consensus.fasta',\n                    'minBaseQual':15,\n                    'minConsFreq':0.6,\n                    'minConsStrdDpth':4,\n                    'minConsStrdBias':0.10\n                    }\n    call_consensus(options_dict)\n    \"\"\"\n    print_log_header()\n    verbose_print(\"# %s %s\" % (timestamp(), command_line_short()))\n    verbose_print(\"# %s version %s\" % (program_name(), __version__))\n    print_arguments(options_dict)\n\n    snp_list_file_path = options_dict['snpListFile']\n    all_pileup_file_path = options_dict['allPileupFile']\n    sample_directory = os.path.dirname(os.path.abspath(all_pileup_file_path))\n    sample_name = os.path.basename(sample_directory)\n    consensus_file_path = options_dict['consensusFile']\n    consensus_file_dir = os.path.dirname(os.path.abspath(consensus_file_path))\n    vcf_file_name = options_dict['vcfFileName']\n    vcf_file_path = os.path.join(consensus_file_dir, vcf_file_name) if vcf_file_name else None\n\n    # Check if the result is already fresh\n    source_files = [snp_list_file_path, all_pileup_file_path]\n    if not options_dict['forceFlag'] and not utils.target_needs_rebuild(source_files, consensus_file_path):\n        verbose_print(\"Consensus call file %s has already been freshly built.  Use the -f option to force a rebuild.\" % consensus_file_path)\n        verbose_print(\"# %s %s finished\" % (timestamp(), program_name()))\n        return\n\n    # Load the list of positions to call\n    snp_list = utils.read_snp_position_list(snp_list_file_path)\n    snplist_length = len(snp_list)\n    verbose_print(\"snp position list length = %d\" % snplist_length)\n\n    # Call consensus.  Write results to file.\n    position_consensus_base_dict = dict()\n\n    caller = pileup.ConsensusCaller(options_dict['minConsFreq'], \n                                    options_dict['minConsStrdDpth'], \n                                    options_dict['minConsStrdBias'])\n    snp_positions = set(snp_list)\n    parse_positions = None if options_dict['vcfAllPos'] else snp_positions\n    pileup_reader = pileup.Reader(all_pileup_file_path, \n                                  options_dict['minBaseQual'], \n                                  parse_positions)\n    if vcf_file_name:\n        writer = vcf_writer.SingleSampleWriter(vcf_file_path)\n        filters = caller.get_filter_descriptions()\n        writer.write_header(sample_name, filters, options_dict['vcfRefName'])\n    for pileup_record in pileup_reader:\n        chrom = pileup_record.chrom\n        pos = pileup_record.position\n        consensus_base, fail_reasons = caller.call_consensus(pileup_record)\n        if (chrom, pos) in snp_positions:\n            if fail_reasons:\n                position_consensus_base_dict[(chrom, pos)] = '-'\n            else:\n                position_consensus_base_dict[(chrom, pos)] = consensus_base\n\n        if vcf_file_name:\n            writer.write_from_pileup(pileup_record, fail_reasons)\n    if vcf_file_name:\n        writer.close()\n\n    verbose_print(\"called consensus positions = %i\" % (len(position_consensus_base_dict)))\n\n    consensus_list = [position_consensus_base_dict.get(key, '-') for key in snp_list]\n    consensus_str = ''.join(consensus_list)\n    snp_seq_record = SeqRecord(Seq(consensus_str), id=sample_name, description=\"\")\n\n    # Write the consensus calls to a fasta file\n    with open(consensus_file_path, \"w\") as fasta_file_object:\n        SeqIO.write([snp_seq_record], fasta_file_object, \"fasta\")\n\n    verbose_print(\"\")\n    verbose_print(\"# %s %s finished\" % (timestamp(), program_name()))\n\n\ndef create_snp_matrix(options_dict):\n    \"\"\"Create SNP matrix\n\n    Description:\n    Create the SNP matrix containing the consensus base for each of the samples \n    at the 
positions where SNPs were found in any of the samples. The matrix \n    contains one row per sample and one column per SNP position. Non-SNP \n    positions are not included in the matrix.\n    This function expects, or creates '(*)', the following\n    files arranged in the following way:\n        sampleDirectories.txt\n        samples\n            sample_name_one/consensus.fasta\n            ...\n        snpma.fasta (*)\n\n    The files are used as follows:\n        1. The sampleDirectories.txt input file contains a list of the paths to \n           the sample directories.\n        2. The consensus.fasta input files are the previously called consensus\n           sequences for each sample, used to construct the SNP matrix fasta file.\n        3. The snpma.fasta output file contains the SNP calls for each \n           sequence, arranged as a multi-fasta file with one sequence per \n           sample.\n\n    The sampleDirectories.txt and consensus.fasta files are created outside of this\n    function. The package documentation provides an example of creating \n    these files based on the lambda_virus sequence that is used as one \n    test for this package.\n\n    Args:\n        sampleDirsFile : str\n            File path (not just file name) of file containing paths \n            to directories containing consensus.fasta file for each sequence.\n        snpListFile : str\n            File path (not just file name) of text format list of SNP positions\n        consFileName : str\n            File name of the previously called consensus fasta files which must\n            exist in each of the sample directories\n        snpmaFile : str\n            File path (not just file name) of the output snp matrix, formatted\n            as a fasta file, with each sequence (all of identical length) \n            corresponding to the SNPs in the correspondingly named sequence.\n\n    Raises:\n\n    Examples:\n    options_dict = {'sampleDirsFile':'sampleDirectories.txt',\n                    'consFileName':'consensus.fasta',\n                    'snpmaFile':'snpma.fasta',\n                    'minConsFreq':0.6,\n                    }\n    create_snp_matrix(options_dict)\n    \"\"\"\n    print_log_header()\n    verbose_print(\"# %s %s\" % (timestamp(), command_line_short()))\n    verbose_print(\"# %s version %s\" % (program_name(), __version__))\n    print_arguments(options_dict)\n\n    #==========================================================================\n    # Prep work\n    # Note use of filter on list_of_sample_directories to remove blank lines.\n    #==========================================================================\n    sample_directories_list_filename = options_dict['sampleDirsFile']\n    list_of_sample_directories = [line.rstrip() for line in open(sample_directories_list_filename, \"r\")]\n    list_of_sample_directories = sorted(filter(None, list_of_sample_directories))\n\n    #==========================================================================\n    # Check if the result is already fresh\n    #==========================================================================\n    snpma_file_path = options_dict['snpmaFile']\n    source_files = []\n    if not options_dict['forceFlag']:\n        for sample_directory in list_of_sample_directories:\n            consensus_file_path = os.path.join(sample_directory, options_dict['consFileName'])\n            source_files.append(consensus_file_path)\n        if not utils.target_needs_rebuild(source_files, snpma_file_path):\n            verbose_print(\"SNP matrix %s has already been freshly built.  Use the -f option to force a rebuild.\" % snpma_file_path)\n            verbose_print(\"# %s %s finished\" % (timestamp(), program_name()))\n            return\n\n    #==========================================================================\n    # Create snp matrix. 
Write results to file.\n    #==========================================================================\n    with open(snpma_file_path, \"w\") as output_file:\n        for sample_directory in list_of_sample_directories:\n            consensus_file_path = os.path.join(sample_directory, options_dict['consFileName'])\n            verbose_print(\"Merging \" + consensus_file_path)\n            with open(consensus_file_path, \"r\") as input_file:\n                for line in input_file:\n                    output_file.write(line)\n\n    verbose_print(\"\")\n    verbose_print(\"# %s %s finished\" % (timestamp(), program_name()))\n\n\n\ndef create_snp_reference_seq(options_dict):\n    \"\"\"Write reference sequence bases at SNP locations to a fasta file.\n\n    Description:\n    Write reference sequence bases at SNP locations to a fasta file. \n    This function expects, or creates '(*)', the following files:\n            reference.fasta\n            snplist.txt\n            referenceSNP.fasta (*)\n\n    The files are used as follows:\n        1. The reference.fasta input file contains the whole-genome reference \n           bases.\n        2. The snplist.txt input file contains the list of SNP positions across\n           all the samples.\n        3. The referenceSNP.fasta output file contains the reference bases at \n           the identified SNP locations.\n\n    The snplist.txt file is created outside of this function. The package \n    documentation provides an example of creating this file based on the\n    lambda_virus sequence that is used as one test for this package.\n\n    Args:\n        referenceFile: File path (not just file name) for reference sequence \n            (in fasta format)\n        snpListFile: File path (not just file name) of text format list of SNP\n            positions\n        snpRefFile: File path (not just file name) for the SNP reference \n            sequence file.\n\n    Raises:\n\n    Examples:\n    options_dict = {'referenceFile':'reference.fasta',\n                    'snpListFile':'snplist.txt',\n                    'snpRefFile':'referenceSNP.fasta'\n                    }\n    create_snp_reference_seq(options_dict)\n    \"\"\"\n    print_log_header()\n    verbose_print(\"# %s %s\" % (timestamp(), command_line_short()))\n    verbose_print(\"# %s version %s\" % (program_name(), __version__))\n    print_arguments(options_dict)\n\n    #==========================================================================\n    # Write reference sequence bases at SNP locations to a fasta file.\n    #==========================================================================\n    reference_file = options_dict['referenceFile']\n    snp_list_file_path = options_dict['snpListFile']\n    snp_ref_seq_path = options_dict['snpRefFile']\n\n    source_files = [reference_file, snp_list_file_path]\n    if options_dict['forceFlag'] or utils.target_needs_rebuild(source_files, snp_ref_seq_path):\n        utils.write_reference_snp_file(reference_file, snp_list_file_path, snp_ref_seq_path)\n        verbose_print(\"\")\n    else:\n        verbose_print(\"SNP reference sequence %s has already been freshly built. 
Use the -f option to force a rebuild.\" % snp_ref_seq_path)\n\n verbose_print(\"# %s %s finished\" % (timestamp(), program_name()))\n\n\n","sub_path":"snppipeline/snppipeline.py","file_name":"snppipeline.py","file_ext":"py","file_size_in_byte":22745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"93552927","text":"#!/usr/bin/env python3\nimport json\nimport os\nimport sys\nimport traceback\nimport urllib.request\n\nimport flask_socketio\nfrom dotenv import load_dotenv\nfrom flask import Flask, request, jsonify\nfrom flask_compress import Compress\nfrom flask_cors import CORS\nfrom oauth2client import client, crypt\n\nfrom opendc.models.user import User\nfrom opendc.util import rest, path_parser, database\nfrom opendc.util.exceptions import AuthorizationTokenError, RequestInitializationError\nfrom opendc.util.json import JSONEncoder\n\nload_dotenv()\n\nTEST_MODE = \"OPENDC_FLASK_TESTING\" in os.environ\n\n# Setup Sentry if DSN is specified\nif 'SENTRY_DSN' in os.environ:\n import sentry_sdk\n from sentry_sdk.integrations.flask import FlaskIntegration\n\n sentry_sdk.init(\n integrations=[FlaskIntegration()],\n traces_sample_rate=0.1\n )\n\n# Set up database if not testing\nif not TEST_MODE:\n database.DB.initialize_database(\n user=os.environ['OPENDC_DB_USERNAME'],\n password=os.environ['OPENDC_DB_PASSWORD'],\n database=os.environ['OPENDC_DB'],\n host=os.environ.get('OPENDC_DB_HOST', 'localhost'))\n\n# Set up the core app\nFLASK_CORE_APP = Flask(__name__)\nFLASK_CORE_APP.testing = TEST_MODE\nFLASK_CORE_APP.config['SECRET_KEY'] = os.environ['OPENDC_FLASK_SECRET']\nFLASK_CORE_APP.json_encoder = JSONEncoder\n\n# Set up CORS support\nCORS(FLASK_CORE_APP)\n\ncompress = Compress()\ncompress.init_app(FLASK_CORE_APP)\n\nSOCKET_IO_CORE = flask_socketio.SocketIO(FLASK_CORE_APP, cors_allowed_origins=\"*\")\n\nAPI_VERSIONS = {'v2'}\n\n\n@FLASK_CORE_APP.route('/tokensignin', methods=['POST'])\ndef sign_in():\n \"\"\"Authenticate a user with Google sign in\"\"\"\n\n try:\n token = request.form['idtoken']\n except KeyError:\n return 'No idtoken provided', 401\n\n try:\n idinfo = client.verify_id_token(token, os.environ['OPENDC_OAUTH_CLIENT_ID'])\n\n if idinfo['aud'] != os.environ['OPENDC_OAUTH_CLIENT_ID']:\n raise crypt.AppIdentityError('Unrecognized client.')\n\n if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:\n raise crypt.AppIdentityError('Wrong issuer.')\n except ValueError:\n url = \"https://www.googleapis.com/oauth2/v3/tokeninfo?id_token={}\".format(token)\n req = urllib.request.Request(url)\n response = urllib.request.urlopen(url=req, timeout=30)\n res = response.read()\n idinfo = json.loads(res)\n except crypt.AppIdentityError as e:\n return 'Did not successfully authenticate'\n\n user = User.from_google_id(idinfo['sub'])\n\n data = {'isNewUser': user.obj is None}\n\n if user.obj is not None:\n data['userId'] = user.get_id()\n\n return jsonify(**data)\n\n\n@FLASK_CORE_APP.route('//', methods=['GET', 'POST', 'PUT', 'DELETE'])\ndef api_call(version, endpoint_path):\n \"\"\"Call an API endpoint directly over HTTP.\"\"\"\n\n # Check whether given version is valid\n if version not in API_VERSIONS:\n return jsonify(error='API version not found'), 404\n\n # Get path and parameters\n (path, path_parameters) = path_parser.parse(version, endpoint_path)\n\n query_parameters = request.args.to_dict()\n for param in query_parameters:\n try:\n query_parameters[param] = int(query_parameters[param])\n except:\n pass\n\n try:\n 
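# try to decode a JSON request body; the except clause below falls back to an empty dict\n        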
body_parameters = json.loads(request.get_data())\n except:\n body_parameters = {}\n\n # Create and call request\n (req, response) = _process_message({\n 'id': 0,\n 'method': request.method,\n 'parameters': {\n 'body': body_parameters,\n 'path': path_parameters,\n 'query': query_parameters\n },\n 'path': path,\n 'token': request.headers.get('auth-token')\n })\n\n print(\n f'HTTP:\\t{req.method} to `/{req.path}` resulted in {response.status[\"code\"]}: {response.status[\"description\"]}')\n sys.stdout.flush()\n\n flask_response = jsonify(json.loads(response.to_JSON()))\n flask_response.status_code = response.status['code']\n return flask_response\n\n\n@SOCKET_IO_CORE.on('request')\ndef receive_message(message):\n \"\"\"\"Receive a SocketIO request\"\"\"\n (req, res) = _process_message(message)\n\n print(f'Socket: {req.method} to `/{req.path}` resulted in {res.status[\"code\"]}: {res.status[\"description\"]}')\n sys.stdout.flush()\n\n flask_socketio.emit('response', res.to_JSON(), json=True)\n\n\ndef _process_message(message):\n \"\"\"Process a request message and return the response.\"\"\"\n\n try:\n req = rest.Request(message)\n res = req.process()\n\n return req, res\n\n except AuthorizationTokenError:\n res = rest.Response(401, 'Authorization error')\n res.id = message['id']\n\n except RequestInitializationError as e:\n res = rest.Response(400, str(e))\n res.id = message['id']\n\n if not 'method' in message:\n message['method'] = 'UNSPECIFIED'\n if not 'path' in message:\n message['path'] = 'UNSPECIFIED'\n\n except Exception:\n res = rest.Response(500, 'Internal server error')\n if 'id' in message:\n res.id = message['id']\n traceback.print_exc()\n\n req = rest.Request()\n req.method = message['method']\n req.path = message['path']\n\n return req, res\n\n\nif __name__ == '__main__':\n print(\"Web server started on 8081\")\n SOCKET_IO_CORE.run(FLASK_CORE_APP, host='0.0.0.0', port=8081, use_reloader=False)\n","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"344758951","text":"'''\nSolution\n1. To count the number of valid islands, perform DFS on those cells containing a 1, for each cell perform DFS\n and keep track of the count if it is a valid island.\n2. While performing DFS on a cell, make sure you don't visit the parent cell again by making the cell to 0.\n3. 
Traverse all directions and perform DFS in each direction if it is a valid cell.\n\nTime Complexity: O(rows x columns) | Space Complexity: O(rows x columns) worst case, for the DFS recursion stack\n\n--- Passed all testcases successfully on Leetcode.\n'''\n\n\nclass Islands_Recursion(object):\n\n    def __init__(self):\n        # initialize directions array\n        self.dirs = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n\n    def __dfs(self, grid, r, c, nRows, nCols):\n\n        # base case -- checking validity\n        if (r < 0 or r >= nRows or c < 0 or c >= nCols or grid[r][c] == '0'):\n            return\n\n        # make sure you don't visit the parent cell again by setting the cell to '0'\n        grid[r][c] = '0'\n\n        # traverse all directions and perform DFS in each direction if it is a valid cell\n        for direction in self.dirs:\n            newRow = r + direction[0]\n            newCol = c + direction[1]\n\n            self.__dfs(grid, newRow, newCol, nRows, nCols)\n\n    def numIslands(self, grid):\n        \"\"\"\n        :type grid: List[List[str]]\n        :rtype: int\n        \"\"\"\n        # edge case check\n        if (grid is None or len(grid) == 0):\n            return 0\n\n        # initialize the count, number of rows and number of columns\n        total = 0\n        nRows = len(grid); nCols = len(grid[0])\n\n        # for each cell perform DFS and keep track of the count if it is a valid island.\n        for r in range(nRows):\n            for c in range(nCols):\n                if (grid[r][c] == '1'):\n                    self.__dfs(grid, r, c, nRows, nCols)\n                    total += 1\n\n        # return the count\n        return total","sub_path":"Islands.py","file_name":"Islands.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"200758615","text":"from botaclan.constants import GOOGLEAPI_CALENDAR_ID\nfrom google.oauth2 import service_account\nfrom googleapiclient.discovery import build, Resource\nfrom typing import List, Dict\nimport botaclan.helpers.lists\nimport copy\nimport datetime\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef create_calendar_client(credentials: service_account.Credentials) -> Resource:\n    return build(\"calendar\", \"v3\", credentials=credentials, cache_discovery=False)\n\n\ndef generate_user_acl_rule(role: str, email: str) -> Dict:\n    return {\n        \"scope\": {\"type\": \"user\", \"value\": email},\n        \"role\": role,\n    }\n\n\ndef remove_generated_acl_fields(rule: Dict) -> Dict:\n    modified_rule = copy.deepcopy(rule)\n    modified_rule.pop(\"kind\", None)\n    modified_rule.pop(\"etag\", None)\n    modified_rule.pop(\"id\", None)\n    return modified_rule\n\n\ndef list_events(\n    credentials: service_account.Credentials, max_results: int = 10\n) -> List[Dict]:\n    cal = create_calendar_client(credentials=credentials)\n    now = datetime.datetime.utcnow().isoformat() + \"Z\"\n    events_result = (\n        cal.events()\n        .list(\n            calendarId=GOOGLEAPI_CALENDAR_ID,\n            timeMin=now,\n            maxResults=max_results,\n            singleEvents=True,\n            orderBy=\"startTime\",\n        )\n        .execute()\n    )\n    events = events_result.get(\"items\", [])\n    return events\n\n\ndef create_event(credentials: service_account.Credentials, event: Dict):\n    cal = create_calendar_client(credentials=credentials)\n    cal.events().insert(calendarId=GOOGLEAPI_CALENDAR_ID, body=event).execute()\n\n\ndef delete_event(credentials: service_account.Credentials, id: str):\n    cal = create_calendar_client(credentials=credentials)\n    cal.events().delete(calendarId=GOOGLEAPI_CALENDAR_ID, eventId=id).execute()\n\n\ndef find_acl_by_rule(\n    credentials: service_account.Credentials, rule: Dict, max_results: int = 100\n):\n    cal = create_calendar_client(credentials=credentials)\n    acl_found = []\n    acl_results = (\n        cal.acl()\n        .list(\n            
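# page through this calendar's ACL, excluding deleted rules; matching happens below\n            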
calendarId=GOOGLEAPI_CALENDAR_ID, showDeleted=False, maxResults=max_results,\n )\n .execute()\n )\n acl = acl_results.get(\"items\", [])\n for found_rule in acl:\n if remove_generated_acl_fields(found_rule) == rule:\n acl_found.append(found_rule)\n return acl_found\n\n\ndef find_event_by_name(credentials: service_account.Credentials, name: str) -> Dict:\n cal = create_calendar_client(credentials=credentials)\n now = datetime.datetime.utcnow().isoformat() + \"Z\"\n events_result = (\n cal.events()\n .list(\n calendarId=GOOGLEAPI_CALENDAR_ID,\n timeMin=now,\n maxResults=1,\n singleEvents=True,\n orderBy=\"startTime\",\n q=name,\n )\n .execute()\n )\n events = events_result.get(\"items\", [])\n return botaclan.helpers.lists.get_first_item(events)\n\n\ndef subscribe_to_calendar(credentials: service_account.Credentials, rule: Dict):\n cal = create_calendar_client(credentials=credentials)\n cal.acl().insert(calendarId=GOOGLEAPI_CALENDAR_ID, body=rule).execute()\n\n\ndef unsubscribe_to_calendar(credentials: service_account.Credentials, rule_id: str):\n cal = create_calendar_client(credentials=credentials)\n cal.acl().delete(calendarId=GOOGLEAPI_CALENDAR_ID, ruleId=rule_id).execute()\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"botaclan/google/google_calendar.py","file_name":"google_calendar.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"384721342","text":"\n\n#calss header\nclass _IMPURE():\n\tdef __init__(self,): \n\t\tself.name = \"IMPURE\"\n\t\tself.definitions = [u'mixed with other substances and therefore lower in quality: ', u'involving sexual thoughts or behaviour that are wrong or not moral: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_impure.py","file_name":"_impure.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"528976961","text":"'''\nMIT 6.00.1x Week 3 Problem set: Hangman\nProblem 3\n'''\ndef getAvailableLetters(lettersGuessed):\n '''\n lettersGuessed: list, what letters have been guessed so far\n returns: string, comprised of letters that represents what letters have not\n yet been guessed.\n '''\n letters = 'abcdefghijklmnopqrstuvwxyz'\n res = list(letters)\n\n for w in lettersGuessed:\n if w in letters:\n res.remove(w)\n\n return ''.join(res)\n\n","sub_path":"ProblemSet3/week3#3.py","file_name":"week3#3.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"440797377","text":"\r\nldiag = []\r\nrdiag = []\r\nilist = []\r\njlist = []\r\npossible = []\r\ntmp = []\r\nflag = 0\r\ncount = 0\r\nout_list = []\r\n\r\ndef issafe(i,j):\r\n if i in ilist:\r\n return False\r\n elif j in jlist:\r\n return False\r\n elif i+j in ldiag:\r\n return False\r\n elif i-j in rdiag:\r\n return False\r\n else:\r\n #print(\"safe\",i,j)\r\n return True\r\n\r\ndef fn(n,i,j):\r\n global possible,out_list\r\n if i == 0:\r\n ilist.append(i)\r\n jlist.append(j)\r\n ldiag.append(i+j)\r\n rdiag.append(i-j)\r\n tmp.append(str(i)+\",\"+str(j))\r\n fn(n,i+1,j)\r\n if j == n and i == n:\r\n ilist.pop()\r\n jlist.pop()\r\n 
ldiag.pop()\r\n rdiag.pop()\r\n tmp.pop()\r\n return False\r\n else:\r\n for k in range(n):\r\n if i == n-1 and issafe(i,k):\r\n ilist.append(i)\r\n jlist.append(k)\r\n ldiag.append(i+k)\r\n rdiag.append(i-k)\r\n tmp.append(str(i)+\",\"+str(k))\r\n out_list.append(list(jlist))\r\n possible.append(list(tmp))\r\n ilist.pop()\r\n jlist.pop()\r\n ldiag.pop()\r\n rdiag.pop()\r\n tmp.pop()\r\n return True\r\n elif i != n-1 and issafe(i,k):\r\n ilist.append(i)\r\n jlist.append(k)\r\n ldiag.append(i+k)\r\n rdiag.append(i-k)\r\n tmp.append(str(i)+\",\"+str(k))\r\n fn(n,i+1,k)\r\n ilist.pop()\r\n jlist.pop()\r\n ldiag.pop()\r\n rdiag.pop()\r\n tmp.pop()\r\n\r\n\r\nfor _ in range(int(input().strip())):\r\n n = int(input().strip()) \r\n for head in range(n):\r\n ilist.clear()\r\n jlist.clear()\r\n ldiag.clear()\r\n rdiag.clear()\r\n tmp.clear()\r\n\r\n fn(n,0,head)\r\n\r\n #print(out_list)\r\n if n == 1:\r\n print(\"[1 ]\")\r\n else:\r\n if len(out_list) == 0:\r\n print(\"-1\")\r\n else:\r\n for i in range(len(out_list)):\r\n print(\"[\",end = '')\r\n for j in range(len(out_list[i])):\r\n print(out_list[i][j]+1,\"\",end= '')\r\n print(\"] \",end = '')\r\n print()\r\n possible.clear()\r\n out_list.clear()\r\n","sub_path":"python/n_queens.py","file_name":"n_queens.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"546304270","text":"from odoo import fields, models, api\nfrom datetime import datetime\n\nclass AddReference(models.TransientModel):\n _name = 'edit.doc.dispatch'\n _description = 'Add Reference'\n\n\n select_template = fields.Many2one('select.template.html')\n template_html = fields.Html('Template')\n doc_dispatch = fields.Many2one('dispatch.document', string='Document Dispatch')\n\n @api.onchange('select_template')\n def get_template(self):\n if self.select_template:\n self.template_html = self.select_template.template\n\n\n def confirm_button(self):\n if self:\n self.doc_dispatch.write({\n 'select_template': self.select_template.id,\n 'template_html': self.template_html,\n })\n form_view = self.env.ref('smart_office.foldermaster_form_view')\n tree_view = self.env.ref('smart_office.foldermaster_tree_view1')\n value = {\n 'domain': str([('id', '=', self.doc_dispatch.folder_id.id)]),\n 'view_type': 'form',\n 'view_mode': 'tree, form',\n 'res_model': 'folder.master',\n 'view_id': False,\n 'views': [(form_view and form_view.id or False, 'form'),\n (tree_view and tree_view.id or False, 'tree')],\n 'type': 'ir.actions.act_window',\n 'res_id': self.doc_dispatch.folder_id.id,\n 'target': 'current',\n 'nodestroy': True\n }\n return value\n","sub_path":"smart_office/wizard/edit_doc_dispatch.py","file_name":"edit_doc_dispatch.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"107195630","text":"#!/usr/local/bin/python\n###################################################\n#PKsummarizer.py\n#Inputs:\n#1)input text file \n#2)uuid\n#3)summary type (for now we'll just use # of lines as a proxy, in future, could \n# be something like: short, long, proportional\n#Outputs:\n#1) summary of input text\n#Attributes to add:\n#1) use seed as input for query based summarization\n# \n#######\n\nimport sys, os \nimport summarize\n\n\n#=================================================\ndef main():\n\tinput_text = str(sys.argv[1])\n\tuuid_path = str(sys.argv[2])\n\tsum_lines = sys.argv[3]\n\tos.chdir(uuid_path)\n\t\n\tss = 
summarize.SimpleSummarizer()\n\twith open(input_text) as tosumfile:\n\t\tinput = tosumfile.read()\n\t\n\tsummarized = ss.summarize(input, sum_lines)\n\t\n\twith open('sum_text.txt', \"w+\") as towritefile:\n\t\ttowritefile.write(summarized)\n\n \n#=================================================\nif __name__ == '__main__':\n    main()","sub_path":"scripts/bin/PKsummarizer.py","file_name":"PKsummarizer.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"449736138","text":"import collections\nimport itertools\n\nrows = 'ABCDEFGHI'\ncols = '123456789'\n\ndef cross(A, B):\n    \"Cross product of elements in A and elements in B.\"\n    return [i+j for i in A for j in B]\ndef diag(A, B):\n    \"Cross product of elements in A and elements in B.\"\n    return [i+j for (i,j) in zip(A, B)]\n\nboxes = cross(rows, cols)\nrow_units = [cross(r, cols) for r in rows]\ncolumn_units = [cross(rows, c) for c in cols]\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\ndiag_units = [diag(rows,cols)] + [diag(rows, cols[::-1])]\nunitlist = row_units + column_units + square_units + diag_units\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)\n\n# board states recorded by assign_value for the optional visualization\nassignments = []\n\n\ndef assign_value(values, box, value):\n    \"\"\"\n    Please use this function to update your values dictionary!\n    Assigns a value to a given box. If it updates the board record it.\n    \"\"\"\n\n    # Don't waste memory appending actions that don't actually change any values\n    if values[box] == value:\n        return values\n\n    values[box] = value\n    if len(value) == 1:\n        assignments.append(values.copy())\n    return values\n\n\ndef naked_twins(values):\n    \"\"\"Eliminate values using the naked twins strategy.\n    Args:\n        values(dict): a dictionary of the form {'box_name': '123456789', ...}\n\n    Returns:\n        the values dictionary with the naked twins eliminated from peers.\n    \"\"\"\n    #values = find_naked_twins(values, column_units) \n    #values = find_naked_twins(values, row_units) \n    #values = find_naked_twins(values, square_units) \n    #values = find_naked_twins(values, diag_units) \n    \n    # Find all instances of naked twins\n    # Eliminate the naked twins as possibilities for their peers\n\n    # this part was adapted from the reviewer's suggestion\n    for unit in unitlist: \n        # Find all boxes with two digits remaining as possibilities \n        pairs = [box for box in unit if len(values[box]) == 2]\n        # pairwise combinations\n        poss_twins = [list(pair) for pair in itertools.combinations(pairs, 2)]\n        # find the naked twins\n        for pair in poss_twins:\n            box1 = pair[0]\n            box2 = pair[1]\n            if values[box1] == values[box2]:\n                for box in unit:\n                    # eliminate the naked twins as possibilities for peers\n                    if box != box1 and box != box2:\n                        for digit in values[box1]:\n                            values[box] = values[box].replace(digit,'')  # was '==', a no-op comparison\n    return values\n\n\ndef grid_values(grid):\n    \"\"\"\n    Convert grid into a dict of {square: char} with '123456789' for empties.\n    Args:\n        grid(string) - A grid in string form.\n    Returns:\n        A grid in dictionary form\n            Keys: The boxes, e.g., 'A1'\n            Values: The value in each box, e.g., '8'. 
If the box has no value, then the value will be '123456789'.\n \"\"\"\n EMPTY = '123456789'\n sudoku_grid = {}\n count = 0\n for row in row_units:\n for pos in row:\n val = grid[count]\n if val == '.':\n sudoku_grid[pos] = EMPTY\n else:\n sudoku_grid[pos] = val\n count += 1\n \n return sudoku_grid\n\ndef remove_val(grid, units, pos, val):\n for lists in units[pos]:\n for position in lists:\n if position == pos:\n continue\n else:\n grid[position] = grid[position].replace(val, '')\n return grid\n \ndef display(values):\n \"\"\"\n Display the values as a 2-D grid.\n Args:\n values(dict): The sudoku in dictionary form\n \"\"\"\n width = 1 + max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n pass\n\ndef eliminate(values):\n for row in row_units:\n for pos in row:\n val = values[pos]\n if len(val) > 1:\n continue\n values = remove_val(values, units, pos, val)\n return values\n \n\ndef only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in values[box]]\n if len(dplaces) == 1:\n values[dplaces[0]] = digit\n return values\n\ndef reduce_puzzle(values):\n stalled = False\n while not stalled:\n # Check how many boxes have a determined value\n solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n\n # Your code here: Use the Eliminate Strategy\n values = eliminate(values)\n # Your code here: Use the Only Choice Strategy\n values = only_choice(values)\n #use naked twin\n values = naked_twins(values)\n # Check how many boxes have a determined value, to compare\n solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n # If no new values were added, stop the loop.\n stalled = solved_values_before == solved_values_after\n # Sanity check, return False if there is a box with zero available values:\n if len([box for box in values.keys() if len(values[box]) == 0]):\n return False\n return values\n\ndef search(values):\n \"Using depth-first search and propagation, create a search tree and solve the sudoku.\"\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n\n if values is False: \n return False\n \n unsolved = [box for box in values.keys() if len(values[box]) > 1] \n if len(unsolved) == 0: \n return values\n\n\n # Choose one of the unfilled squares with the fewest possibilities\n \n possible = []\n for key, val in values.items():\n if len(val) == 1:\n continue\n possible.append([len(val), key])\n minsize, minunit = min(possible)\n\n \n # Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!\n \n for val in values[minunit]:\n values_copy = values.copy()\n values_copy[minunit] = val \n tmp = search(values_copy)\n if tmp:\n return tmp\n return False\n\n\ndef solve(grid):\n \"\"\"\n Find the solution to a Sudoku grid.\n Args:\n grid(string): a string representing a sudoku grid.\n Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n Returns:\n The dictionary representation of the final sudoku grid. 
False if no solution exists.\n    \"\"\"\n    # convert string grid to dictionary grid\n    values = grid_values(grid)\n    solved = search(values)\n    if solved:\n        return solved\n    else:\n        return False\n    \n\nif __name__ == '__main__':\n    # one sample grid is left active so the script can run; the rest are alternatives\n    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n    #diag_sudoku_grid = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'\n    #diag_sudoku_grid = '1.4.9..68956.18.34..84.695151.....868..6...1264..8..97781923645495.6.823.6.854179'\n    #diag_sudoku_grid = '...8.1.........43.5............7.8........1...2..3....6......75..34........2..6..'\n    #diag_sudoku_grid = '2..3..........6.....1...372.......8.....................67.......5...............'\n    \n    values = solve(diag_sudoku_grid)\n\n    try:\n        from visualize import visualize_assignments\n        visualize_assignments(assignments)\n\n    except SystemExit:\n        pass\n    except:\n        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')\n","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"528659154","text":"from sqlalchemy import *\nfrom migrate import *\n\n\nfrom migrate.changeset import schema\npre_meta = MetaData()\npost_meta = MetaData()\nbuilding = Table('building', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('name', String(length=128)),\n    Column('cuid', Integer),\n)\n\ncompany = Table('company', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('name', String(length=128)),\n)\n\nfloor = Table('floor', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('name', String(length=128)),\n    Column('buid', Integer),\n)\n\nlayer = Table('layer', post_meta,\n    
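# mirrors the tables above; fuid presumably links a layer to its parent floor\n    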
Column('id', Integer, primary_key=True, nullable=False),\n Column('name', String(length=128)),\n Column('fuid', Integer),\n)\n\nuser = Table('user', post_meta,\n Column('id', Integer, primary_key=True, nullable=False),\n Column('luid', Integer),\n Column('name', String(length=64)),\n Column('username', String(length=64)),\n Column('email', String(length=120)),\n Column('department', String(length=150)),\n Column('title', String(length=200)),\n Column('skype', String(length=120)),\n Column('seat', Integer),\n Column('role', SmallInteger, default=ColumnDefault(0)),\n)\n\nseat = Table('seat', post_meta,\n Column('id', Integer, primary_key=True, nullable=False),\n Column('name', String(length=128)),\n Column('luid', Integer),\n Column('x_coordinate', Float),\n Column('y_coordinate', Float),\n)\n\n\ndef upgrade(migrate_engine):\n # Upgrade operations go here. Don't create your own engine; bind\n # migrate_engine to your metadata\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['building'].create()\n post_meta.tables['company'].create()\n post_meta.tables['floor'].create()\n post_meta.tables['layer'].create()\n post_meta.tables['user'].columns['luid'].create()\n post_meta.tables['seat'].columns['luid'].create()\n post_meta.tables['seat'].columns['name'].create()\n\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['building'].drop()\n post_meta.tables['company'].drop()\n post_meta.tables['floor'].drop()\n post_meta.tables['layer'].drop()\n post_meta.tables['user'].columns['luid'].drop()\n post_meta.tables['seat'].columns['luid'].drop()\n post_meta.tables['seat'].columns['name'].drop()\n","sub_path":"db_repository/versions/003_migration.py","file_name":"003_migration.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"635264830","text":"#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n\r\n# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements. See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership. The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License. You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied. 
See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n\r\n\r\nr\"\"\"Ticket relations for Apache(TM) Bloodhound\r\n\r\nTicket relations user interface.\r\n\"\"\"\r\n\r\nimport re\r\nimport pkg_resources\r\n\r\nfrom trac.core import Component, implements, TracError\r\nfrom trac.resource import get_resource_url\r\nfrom trac.ticket.model import Ticket\r\nfrom trac.web import IRequestHandler\r\nfrom trac.web.chrome import ITemplateProvider\r\n\r\nfrom bhrelations.api import RelationsSystem\r\n\r\n\r\nclass RelationManagementModule(Component):\r\n implements(IRequestHandler, ITemplateProvider)\r\n\r\n # IRequestHandler methods\r\n def match_request(self, req):\r\n match = re.match(r'/ticket/([0-9]+)/relations/*$', req.path_info)\r\n if not match:\r\n return False\r\n\r\n req.args['id'] = match.group(1)\r\n return True\r\n\r\n def process_request(self, req):\r\n tid = req.args.get('id')\r\n if not tid:\r\n raise TracError('No ticket id provided.')\r\n\r\n req.perm.require('TICKET_VIEW')\r\n ticket = Ticket(self.env, tid)\r\n data = {\r\n 'ticket': ticket,\r\n 'relations': self.get_ticket_relations(ticket),\r\n }\r\n return 'manage.html', data, None\r\n\r\n # ITemplateProvider methods\r\n def get_htdocs_dirs(self):\r\n resource_filename = pkg_resources.resource_filename\r\n return [resource_filename('bhrelations', 'htdocs'), ]\r\n\r\n def get_templates_dirs(self):\r\n resource_filename = pkg_resources.resource_filename\r\n return [resource_filename('bhrelations', 'templates'), ]\r\n\r\n # utility functions\r\n def get_ticket_relations(self, ticket):\r\n grouped_relations = {}\r\n for r in RelationsSystem(self.env).get_relations(ticket):\r\n r['desthref'] = get_resource_url(self.env, r['destination'],\r\n self.env.href)\r\n grouped_relations.setdefault(r['type'], []).append(r)\r\n return grouped_relations\r\n\r\n","sub_path":"bloodhound_relations/bhrelations/web_ui.py","file_name":"web_ui.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"262702442","text":"import pandas as pd\nimport numpy as np\nimport streamlit as st \nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom pickle import dump\nimport pickle \nfrom pickle import load\n\nst.title('Model Deployment: LOAN APPROVAL')\n\nst.sidebar.header('User Input Parameters')\n\ndef user_input_features():\n Education = st.selectbox('Education',(\"1\",\"0\"))\n Married = st.selectbox('Marital Status',(\"0\",\"1\")) \n Credit_History = st.selectbox('Credit_History',(\"0\",\"1\"))\n Property_Area = st.selectbox('Property_Area',(\"0\",\"1\",\"2\"))\n data = {'Married':Married,'Education':Education,'Credit_History':Credit_History,'Property_Area':Property_Area}\n features = pd.DataFrame(data,index = [0])\n return features \n \ndf = user_input_features()\nst.subheader('User Input parameters')\nst.write(df)\n\n\ndata_set = pd.read_csv(\"train.csv\")\ndata_set = data_set.drop(['Loan_ID','Dependents'],axis=1)\ndata = data_set.copy()\n##Filling null values with mode\ndata['Gender'].fillna(data['Gender'].mode()[0], inplace=True)\ndata['Married'].fillna(data['Married'].mode()[0], inplace=True)\ndata['Self_Employed'].fillna(data['Self_Employed'].mode()[0], inplace=True)\ndata['Loan_Amount_Term'].fillna(data['Loan_Amount_Term'].mode()[0], 
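# mode imputation, like the columns above; LoanAmount below uses the mean\n    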
inplace=True)\ndata['Credit_History'].fillna(data['Credit_History'].mode()[0], inplace=True)\ndata['LoanAmount'].fillna(data['LoanAmount'].mean(), inplace=True)\ndata['Loan_Status'] = data['Loan_Status'].map({'Y':1, 'N':0})\n\nlabel_encode = LabelEncoder()\n# Encode labels in column 'Gender'\ndata[\"Gender\"] = label_encode.fit_transform(data[\"Gender\"])\n# Encode labels in column 'Married'\ndata[\"Married\"] = label_encode.fit_transform(data[\"Married\"])\n# Encode labels in column 'Education'\ndata[\"Education\"] = label_encode.fit_transform(data[\"Education\"])\n# Encode labels in column 'Property-area'\ndata[\"Property_Area\"] = label_encode.fit_transform(data[\"Property_Area\"])\n# Encode labels in column 'self-employed'\ndata[\"Self_Employed\"] = label_encode.fit_transform(data[\"Self_Employed\"])\n\nx = np.array(data.iloc[:,[1,2,8,9]])\ny = np.array(data[\"Loan_Status\"])\n\nscaler = StandardScaler()\nX = scaler.fit_transform(x)\n\n\n\nx_train, x_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)\n\n\nd_tree = DecisionTreeClassifier(criterion=\"entropy\" , max_depth=3)\nd_tree.fit(x_train,y_train)\ntree_pred = d_tree.predict(x_test)\n\n\n\n\n\nprediction = d_tree.predict(df)\nprediction_proba = d_tree.predict_proba(df)\n\n\nst.subheader('Predicted Result')\nst.write('Yes' if prediction_proba[0][1] > 0.5 else 'No')\n\nst.subheader('Prediction Probability')\nst.write(prediction_proba)\n","sub_path":"loan.py","file_name":"loan.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"619816981","text":"# %load q05_runs/build.py\n# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n\n# Your Solution\ndef BC_runs(runs):\n x1 = data['innings']\n x2 = x1[0]\n x3 = x2['1st innings']['deliveries']\n runs = 0\n for index, x in enumerate(x3):\n x4 = x3[index]\n for values in x4.values():\n x5 = x4.values()\n if values['batsman'] == 'BB McCullum':\n runs = runs + values['runs']['batsman']\n\n\n # Write your code here\n \n\n return(runs)\n\n\n","sub_path":"q05_runs/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160104355","text":"import random\n\nclass person:\n def __init__(self, name, hp, mp, attack,magic):\n self.name = name\n self.maxhp = hp\n self.hp = hp\n self.maxmp = mp\n self.mp = mp\n self.magic = magic\n self.attack = attack\n self.action = [\"Attack\",\"Magic\"]\n def generate_damage(self):\n attack_h = self.attack + 10\n attack_l = self.attack - 10\n damage = random.randrange(attack_l,attack_h)\n return damage\n def take_damage(self,damage):\n self.hp = (self.hp)-damage\n if self.hp < 0 :\n self.hp = 0\n else:\n self.hp = self.hp\n return self.hp\n\n def reduce_mp(self,cost):\n self.mp = self.mp-cost\n if self.mp < 0:\n self.mp = 0\n else:\n self.mp = self.mp\n return self.mp\n\n def choose_action(self):\n number = 1\n print(self.name.upper(), \":\")\n print(\"\\t ACTION: \")\n for item in self.action:\n print(\"\\t\",number,end=(':'))\n print(item)\n number=number + 1\n # Create new method to choose what magic to use\n def choose_magic(self):\n number = 1\n print(\"\\t Magic: \")\n for magic in self.magic:\n print(\"\\t\", number, end=(':'))\n print(magic.name, \", cost : \",magic.mp_cost)\n number = number + 1\n def get_status(self):\n \"\"\"\n This method will print 
out the current stats of all players and enemies\n        NAME:HP/MaxHP\n              MP/MaxMP\n        \"\"\"\n        print(self.name.upper(), end=(':'))\n        print(self.hp,\"/\",self.maxhp)\n        print(\"\\t\", self.mp,\"/\",self.maxmp)\n","sub_path":"mediummode/persson_class.py","file_name":"persson_class.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"305607977","text":"# encoding: utf-8\n\nimport os\nimport time\nimport json\nfrom functools import wraps\n\nfrom django.conf import settings\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render,redirect\n\nfrom .models import AccessLogFileModel,AccessLog\nfrom django.http import JsonResponse\nfrom django.db.models import Count\n\ndef login_required(func):\n\n    @wraps(func)\n    def wrapper(request, *args, **kwargs):\n        if request.session.get('user') is None:\n            if request.is_ajax():\n                return JsonResponse({'code' : 403, 'result' : []})\n            return redirect('user:login')\n        return func(request, *args, **kwargs)\n\n    return wrapper\n\n@login_required\ndef index(request):\n    files = AccessLogFileModel.objects.filter(status=0).order_by('-created_time')[:10]\n    return render(request,'webanalysis/index.html', {\"files\" : files})\n\n@login_required\ndef upload(request):\n    log = request.FILES.get('log')\n    if log:\n        path = os.path.join(settings.BASE_DIR,'media','uploads', str(time.time()))\n        fhandler = open(path, \"wb\")\n\n        for chunk in log.chunks():\n            fhandler.write(chunk)  # chunks() already yields bytes; 'log.read(chunk)' was a bug\n        fhandler.close()\n\n        obj = AccessLogFileModel(name=log.name, path=path)\n        obj.save()\n\n        path = os.path.join(settings.BASE_DIR,'media','notices', str(time.time()))\n        with open(path,'w') as fhandler:\n            fhandler.write(json.dumps({'id' : obj.id, 'path' : obj.path}))\n\n    return HttpResponse(\"upload\")\n\n@login_required\ndef dist_status_code(request):\n    objs = 
AccessLog.objects.values(\"status_code\").filter(file_id=request.GET.get('id',0)).annotate(codecount=Count(\"status_code\")).order_by('-codecount')\n\n legend = []\n series = []\n for line in objs:\n legend.append(line.get('status_code'))\n series.append({\"name\" : line.get('status_code'), \"value\" : line.get('codecount')})\n\n return JsonResponse({'code' : 200, 'result' : {'legend' : legend, 'series' : series}})\n\n@login_required\ndef trend_visit(request):\n\n time = []\n abc = {}\n access_time= AccessLog.objects.filter(file_id=request.GET.get('id',0)).values(\"access_time\")\n for i in access_time:\n time.append(i.get(\"access_time\").strftime(\"%Y-%m-%d %H:00:00\"))\n\n for j in time:\n if abc.get(j, None) is None:\n abc[j] = 1\n else:\n abc[j] += 1\n\n series = []\n xAxis = []\n for k,v in abc.items():\n xAxis.append(k)\n series.append(v)\n\n return JsonResponse({'code' : 200, 'result' : {'xAxis' : xAxis, 'series' : series}})","sub_path":"item/1/cmdb/webanalysis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"558187616","text":"'''\nProgram Statement:\nReverse a given string keeping its special character at the same place\nInput Format:\nstring\nOutput Format:\nreverse the string\n\n\nimport re\ns=input().strip()\n\nr=re.findall(\"[a-zA-Z]\",s)\nr.reverse()\n\nout=\"\"\nj=0\nfor i in s:\n if(i not in \"qwertyuiopasdfghjklzxcvbnmQWERTAYUIOPSDZFXCGVHBJNKML\"):\n out+=i\n else:\n out+=r[j]\n j+=1\nprint(out.strip())\n\n\n\n'''\n\n\nimport re\nst=input().strip()\nr=re.findall(\"[a-zA-Z]\",st)\nr.reverse()\n#sp=r'[@_!\\#$%^&*()<>?/|}{~:]'\nal='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\nfor i in range(len(st)):\n if(st[i] not in al):\n r.insert(i,st[i])\nprint(''.join(r))\n","sub_path":"DontDisturbSplChar.py","file_name":"DontDisturbSplChar.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"39773353","text":"#!/usr/bin/env python3\n\nimport skimage as si\nfrom skimage import draw\nfrom skimage import io\nimport numpy as np\nimport urllib.request\nimport json\nimport logging\nimport os\nimport sys\nimport re\nfrom shutil import copyfile\nimport configparser\nfrom tqdm import tqdm\n\n#all warnings are about low contrast images\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\t\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\n#set user input variables\npath_to_dataturks_annotation_json_folder = config[\"USER\"][\"jsons\"]\npath_to_masks_folder = config[\"USER\"][\"output\"]\njsons = os.listdir(path_to_dataturks_annotation_json_folder)\npath_to_real_images = config[\"USER\"][\"images\"]\nsize = int(config[\"USER\"][\"size\"])\n\n#set colors based on mask type\ncolors = config[\"COLOR\"]\n\n#enable info logging.\nlogging.getLogger().setLevel(logging.INFO)\n\n#*******************************************************************************\n#Function \t: poly2mask\n#Description: using given coordinates, a mask is drawn, colored based on label, \n#\t\t\t\tand saved\n#Parameters : blobs - list of x,y coordinates for the mask\n#\t\t\t c - current cell number \n#\t\t\t path_to_masks_folder - path to output folder\n#\t\t\t h - height of image\n#\t\t\t w - width of image\n#\t\t\t label - label of mask being drawn\n#Returned \t: None\n#Output\t\t: Image of mask saved to output 
folder\n#*******************************************************************************\n\ndef poly2mask(blobs, num, path_to_masks_folder, h, w, label):\n\tmask = np.zeros((h, w, 3))\n\tif label in colors.keys():\n\t\t\tcolor = list(eval(colors[label]))\n\telse:\n\t\tcolor = list(eval(colors[\"Default\"]))\n\tfor l in blobs:\n\t\tfill_row_coords, fill_col_coords = draw.polygon(l[1], l[0], l[2])\n\t\tmask[fill_row_coords, fill_col_coords] = color\n\tio.imsave(path_to_masks_folder + \"/\" + label + \"_\" + str(num) + \".png\", si.img_as_ubyte(mask))\n\n#*******************************************************************************\n#Function \t: multi_mask\n#Description: make a mask for every individual in a json in SEPARATE files\n#Parameters : train - dictionary of images with information of each individual\n#\t\t\t path_to_masks_folder - path to output folder\n#Returned \t: None\n#Output\t\t: Image of one mask saved to output folder from poly2mask\n#*******************************************************************************\n\ndef multi_masks(train, path_to_masks_folder):\n\tfor image in train:\n\t\th = image.get(\"size\").get(\"height\")\n\t\tw = image.get(\"size\").get(\"width\")\n\t\tc = 1\n\t\t# get the points for each mask in the image\n\t\tfor objects in tqdm(image.get(\"objects\")):\n\t\t\tblobs = []\n\t\t\tlabel = objects.get(\"classTitle\")\n\t\t\tif (config[\"USER\"][\"skip_other\"] == \"True\" and label == \"Other\"):\n\t\t\t\tcontinue\n\t\t\tpoints = objects.get(\"points\").get(\"exterior\")\n\t\t\tx_coord = []\n\t\t\ty_coord = []\n\t\t\tl = []\n\t\t\tfor p in points:\n\t\t\t\tx_coord.append(p[0])\n\t\t\t\ty_coord.append(p[1])\n\t\t\tshape = (h, w)\n\t\t\tl.append(x_coord)\n\t\t\tl.append(y_coord)\n\t\t\tl.append(shape)\n\t\t\tblobs.append(l)\n\t\t\t# create mask for each object \n\t\t\tpoly2mask(blobs, c, path_to_masks_folder, h, w, label)\n\t\t\t\n\t\t\tc += 1\n\n#*******************************************************************************\n#Function \t: add_border_class_rect\n#Description: add a border class around each individual in an image\n#Parameters : l - list of x,y coordinates for the mask\n#\t\t\t mask - array of pixels with object mask filled in\n#\t\t\t size - size of border, total border width = 2*size + 1\n#Returned \t: Mask with border class pixels added\n#Output\t\t: None\n#*******************************************************************************\n\ndef add_border_class_rect(l, mask, size):\n\tborder = np.zeros((mask.shape[0], mask.shape[1]))\n\trr, cc = draw.polygon_perimeter(l[1], l[0], l[2])\n\tborder[rr,cc] = 1\n\tfor i in range(border.shape[0]):\n\t\tfor j in range(border.shape[1]):\n\t\t\tif border[i,j] == 1:\n\t\t\t\trr,cc = draw.rectangle((i-size,j-size), (i+size,j+size), shape = (mask.shape[0],mask.shape[1]))\n\t\t\t\tmask[rr,cc] = list(eval(colors[\"Border\"])) \n\t\t\t\n#*******************************************************************************\n#Function \t: poly2mask_single\n#Description: using given coordinates, a mask is drawn for every individual \n#\t\t\t\tin the file, colored based on label, and saved in one file\n#Parameters : blobs - list of x,y coordinates for the mask\n#\t\t\t c - current cell number \n#\t\t\t path_to_masks_folder - path to output folder\n#\t\t\t h - height of image\n#\t\t\t w - width of image\n#Returned \t: None\n#Output\t\t: Image of all masks in one file saved to output folder\n#*******************************************************************************\n\ndef poly2mask_single(blobs, c, 
path_to_masks_folder, h, w):\n\tmask = np.zeros((h, w,3))\n\tfor label in blobs.keys():\n\t\tprint(label)\n\t\tif label in colors.keys():\n\t\t\tcolor = list(eval(colors[label]))\n\t\telse:\n\t\t\tcolor = list(eval(colors[\"Default\"]))\n\t\tfor l in tqdm(blobs[label]):\n\t\t\tfill_row_coords, fill_col_coords = draw.polygon(l[1], l[0], l[2])\n\t\t\tmask[fill_row_coords, fill_col_coords] = color\n\t\t\tadd_border_class_rect(l, mask, size)\n\tio.imsave(path_to_masks_folder + \"/\" + str(c) + \".png\", si.img_as_ubyte(mask))\n\n#*******************************************************************************\n#Function \t: single_mask\n#Description: make a mask for every individual in a json in ONE file\n#Parameters : train - dictionary of images with information of each individual\n#\t\t\t path_to_masks_folder - path to output folder\n#\t\t\t file_name - name of image file\n#Returned \t: None\n#Output\t\t: Image of all masks saved to output folder from poly2mask_single\n#*******************************************************************************\n\ndef single_mask(train, path_to_masks_folder, file_name):\n\tfor image in train:\n\t\th = image.get(\"size\").get(\"height\")\n\t\tw = image.get(\"size\").get(\"width\")\n\t\tblobs = {}\n\t\t# get the points for each mask in the image\n\t\tfor objects in image.get(\"objects\"):\n\t\t\tlabel = objects.get(\"classTitle\")\n\t\t\tif (config[\"USER\"][\"skip_other\"] == \"True\" and label == \"Other\"):\n\t\t\t\tcontinue\n\t\t\tif label not in blobs.keys():\n\t\t\t\tblobs[label] = []\n\t\t\tpoints = objects.get(\"points\").get(\"exterior\")\n\t\t\tx_coord = []\n\t\t\ty_coord = []\n\t\t\tl = []\n\t\t\tfor p in points:\n\t\t\t\tx_coord.append(p[0])\n\t\t\t\ty_coord.append(p[1])\n\t\t\tshape = (h, w)\n\t\t\tl.append(x_coord)\n\t\t\tl.append(y_coord)\n\t\t\tl.append(shape)\n\t\t\tblobs[label].append(l)\n\t\t# create mask for each object \n\t\tpoly2mask_single(blobs, file_name, path_to_masks_folder, h, w)\n\n#*******************************************************************************\n#Function \t: convert_dataturks_to_masks\n#Description: create folder for masks, read in jsons, and proceed based on user \n#\t\t\t\tinput for masks, single or individual\n#Parameters : path_to_dataturks_annotation_json - path to folder of jsons\n#\t\t\t path_to_masks_folder - path to output folder\n#\t\t\t file_name - name of image file\n#Returned \t: None\n#Output\t\t: Image of masks saved to output folder\n#*******************************************************************************\n\ndef convert_dataturks_to_masks(path_to_dataturks_annotation_json, path_to_masks_folder, file_name):\n\t# make sure everything is setup.\n\tif (not os.path.isdir(path_to_masks_folder)):\n\t\tlogging.exception(\n\t\t\t\"Please specify a valid directory path to write mask files, \" + path_to_masks_folder + \" doesn't exist\")\n\tif (not os.path.exists(path_to_dataturks_annotation_json)):\n\t\tlogging.exception(\n\t\t\t\"Please specify a valid path to dataturks JSON output file, \" + path_to_dataturks_annotation_json + \" doesn't exist\")\n\n\t# create folder for each image\n\tif (not os.path.exists(path_to_masks_folder + \"/\" + \"masks\")):\n\t\tos.mkdir(path_to_masks_folder + \"/\" + \"masks\")\n\t\n\tpath_to_masks_folder = path_to_masks_folder + \"/\" + \"masks\"\n\t\n\t#load in json information\n\tf = open(path_to_dataturks_annotation_json)\n\ttrain_data = f.readlines()\n\ttrain = []\n\tfor line in train_data:\n\t\tdata = json.loads(line)\n\t\ttrain.append(data)\n\t\t\n\tif 
config[\"USER\"][\"mask\"] == \"single\":\n\t\tsingle_mask(train, path_to_masks_folder, file_name)\n\telse:\n\t\tmulti_masks(train, path_to_masks_folder)\n\t\n\tf.close()\n\t\n#*******************************************************************************\n#Description: create folder for raw image and copy current image there, \n#\t\t\t\tmake masks out of jsons\n#Output\t\t: Image of masks saved to output folder\n#*******************************************************************************\n\t\nfor file in jsons:\n\tname = re.search(r'(.*)\\..*\\.json', file)\n\tname = name.group(1)\n\tpath_to_current_masks_folder = path_to_masks_folder + name \n\t# create directories\n\tif (not os.path.exists(path_to_current_masks_folder)):\n\t\tos.mkdir(path_to_current_masks_folder)\n\tif (not os.path.exists(path_to_current_masks_folder + \"/image\")):\n\t\tos.mkdir(path_to_current_masks_folder + \"/image\")\n\t# copy real image to correct folder\n\tcopyfile(path_to_real_images + name + \".jpg\", path_to_current_masks_folder + \"/image/\" + name + \".png\")\n\tprint(name)\n\t# create masks\n\tconvert_dataturks_to_masks(path_to_dataturks_annotation_json_folder + file, path_to_current_masks_folder, name)\t\n","sub_path":"JSON_2_mask.py","file_name":"JSON_2_mask.py","file_ext":"py","file_size_in_byte":9024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"651340906","text":"# Copyright (c) 2015 Nicolas JOUANIN\n#\n# See the file license.txt for copying permission.\nfrom datetime import datetime\nfrom hbmqtt.mqtt.packet import PUBLISH\nfrom hbmqtt.codecs import int_to_bytes_str\n\nimport os\nimport ssl\nimport sys\nimport json\nimport random\nimport asyncio\nimport traceback\nimport threading\nimport importlib\nimport urllib.parse as urlparse\n\nfrom collections import deque\nfrom urllib.request import Request, urlopen\n\n\nDOLLAR_SYS_ROOT = '$SYS/broker/'\nSTAT_BYTES_SENT = 'bytes_sent'\nSTAT_BYTES_RECEIVED = 'bytes_received'\nSTAT_MSG_SENT = 'messages_sent'\nSTAT_MSG_RECEIVED = 'messages_received'\nSTAT_PUBLISH_SENT = 'publish_sent'\nSTAT_PUBLISH_RECEIVED = 'publish_received'\nSTAT_START_TIME = 'start_time'\nSTAT_CLIENTS_MAXIMUM = 'clients_maximum'\nSTAT_CLIENTS_CONNECTED = 'clients_connected'\nSTAT_CLIENTS_DISCONNECTED = 'clients_disconnected'\n\n\nclass BrokerBlockchainPlugin:\n\n endpoints = property(lambda cls: cls._endpoints.keys(), None, None, \"\")\n topics = property(\n lambda cls: cls._blockchain.get('bridged-topics', {}).keys(),\n None, None, \"\"\n )\n config = property(lambda cls: cls._blockchain, None, None, \"\")\n\n def __init__(self, context):\n self.context = context\n self._blockchain = self.context.config.get(\"broker-blockchain\", {})\n self._endpoints = self._blockchain.get(\"endpoints\", {})\n self._ndpt_headers = {\n \"Content-type\": \"application/json\",\n \"nethash\": self._blockchain.get(\"nethash\", \"\")\n }\n # import python-bindings:\n for t, (m, f) in list(\n self._blockchain.get('bridged-topics', {}).items()\n ):\n if m is not None and m not in sys.modules:\n try:\n importlib.import_module(m)\n except Exception as error:\n self.context.logger.debug(\n \"%s python binding not loaded (%r)\",\n m, error\n )\n self._blockchain['bridged-topics'].pop(t)\n\n # send http request to blockchain\n async def http_request(self, endpoint, method=\"GET\", data={}, peer=None, **qs):\n method, path = self._endpoints.get(endpoint, [method, endpoint])\n if method in [\"POST\", \"PUT\"]:\n if isinstance(data, (dict, list, tuple)):\n 
data = json.dumps(data).encode('utf-8')\n            elif isinstance(data, str):\n                # Assume data is a valid json string\n                data = data.encode(\"utf-8\")\n        else:\n            data = None\n        try:\n            req = Request(\n                urlparse.urlparse(\n                    peer if peer else random.choice(self._blockchain[\"peers\"])\n                )._replace(\n                    path=path,\n                    query=\"&\".join([\"%s=%s\" % (k, v) for k, v in qs.items()])\n                ).geturl(),\n                data, self._ndpt_headers\n            )\n            req.add_header(\"User-agent\", \"Mozilla/5.0\")\n            req.get_method = lambda: method\n        except Exception as error:\n            self.context.logger.error(\"%r\\n%s\", error, traceback.format_exc())\n        else:\n            self.context.logger.debug(\n                \"blockchain request prepared: %s %s %s\",\n                method, req.get_full_url(), data\n            )\n            try:\n                ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n                result = json.loads(\n                    urlopen(req, context=ctx, timeout=5).read()\n                )\n            except Exception as error:\n                # log before returning; the original logged after 'return', so it never ran\n                self.context.logger.error(\n                    \"%r\\n%s\", error, traceback.format_exc()\n                )\n                return {\n                    \"status\": 500,\n                    \"error\": \"http request %s to %s failed\" % (method, path)\n                }\n            else:\n                self.context.logger.debug(\"blockchain response: %s\", result)\n                return result\n        return {\"status\": 404, \"error\": \"nothing happened\"}\n\n    async def _genuinize(self, data):\n        truth = False\n        try:\n            data = json.loads(data)\n            for key, path in zip(\n                [\"type\", \"height\"],\n                [\"/api/transactions\", \"/api/blocks\"]\n            ):\n                if key in data:\n                    id_ = data.get(\"id\", False)\n                    qs = \\\n                        {\"id\": id_, key: data[key]} if id_ else \\\n                        {key: data[key]}\n                    data = (await self.http_request(path, **qs)).get(\"data\", [])\n                    truth = any(data)\n                    break\n        except Exception as error:\n            self.context.logger.error(\n                \"%r\\n%s\", error, traceback.format_exc()\n            )\n\n        if not truth:\n            return False\n        else:\n            self.context.logger.debug(\"genuined data: %s\", data)\n            return data[0] if isinstance(data, list) else data\n\n    def _is_bridged_topic(self, topic):\n        return any([\n            topic.startswith(t) for t in self._blockchain.get(\n                'bridged-topics', []\n            )\n        ])\n\n    async def on_broker_message_received(self, *args, **kwargs):\n        message = kwargs[\"message\"]\n        topic = message.topic\n\n        if not self._is_bridged_topic(topic):\n            return False\n\n        data = await self._genuinize(message.data)\n        if not data:\n            return False\n\n        modname, funcname = self._blockchain.get(\"bridged-topics\", {}).get(\n            topic, [None, None]\n        )\n        if modname is not None:\n            func = getattr(sys.modules[modname], funcname, None)\n        elif funcname is not None:\n            func = getattr(self, funcname, None)\n        else:\n            func = None\n\n        if func is not None:\n            self.context.logger.debug(\n                \"broker plugin function '%s' triggered with data=%s\",\n                func.__name__, data\n            )\n            return func(self, data)\n        else:\n            return None\n\n    @staticmethod\n    def dummy(cls, data):\n        cls.context.logger.info(\"dummy function says: %s\", data)\n        return True\n\n\nclass BlockchainApiPlugin(BrokerBlockchainPlugin):\n\n    def __init__(self, context):\n        BrokerBlockchainPlugin.__init__(self, context)\n        thread = threading.Thread(\n            target=os.system,\n            args=(\n                \"%s -p %s\" % (\n                    os.path.join(os.path.dirname(sys.executable), 'listen'),\n                    self._blockchain.get(\"webhook-listener\", {})\n                    .get(\"host\", \"127.0.0.1:5003\").split(\":\")[-1]\n                ),\n            )\n        )\n        thread.setDaemon(True)\n        thread.start()\n\n    def relay_blockchain_response(self, topic, data):\n        # Broadcast updates\n        tasks = deque()\n        tasks.append(\n            asyncio.ensure_future(\n                self.context.broadcast_message(topic, data),\n                loop=self.context.loop\n            )\n        )\n        # Wait until broadcasting tasks end\n        while tasks and tasks[0].done():\n            
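# discard broadcast futures that have already completed\n            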
tasks.popleft()\n\n async def on_broker_message_received(self, *args, **kwargs):\n message = kwargs[\"message\"]\n topic = message.topic.split(\"/\")\n\n method = topic[0]\n if method not in [\"&GET\", \"&POST\", \"&PUT\", \"&DELETE\"]:\n self.context.logger.debug(\"api plugin not triggered\")\n return False\n\n method = method[1:]\n path = \"/\".join(topic[1:])\n try:\n data = json.loads(message.data)\n except Exception:\n data = {}\n\n try:\n if method in [\"POST\", \"PUT\"]:\n if \"webhooks\" in topic:\n # http_request is a coroutine, so it has to be awaited here\n public_ip = (await self.http_request(\n \"/plain\", peer=\"https://ipecho.net\"\n )).get(\"raw\", None)\n if public_ip is not None:\n data[\"target\"] = \"%s/webhook/forward\" % public_ip\n resp = await self.http_request(path, method, data)\n else:\n resp = await self.http_request(path, method, {}, **data)\n except Exception as error:\n # build the message with %-formatting; the original accidentally\n # created a tuple instead of a string here\n msg = \"%r\\n%s\" % (error, traceback.format_exc())\n resp = {\"status\": 500, \"error\": msg}\n self.context.logger.error(msg)\n return None\n\n # webhook request handling:\n if \"webhooks\" in topic and method != \"GET\":\n data = {\n \"token\": resp[\"token\"],\n \"id\": resp[\"id\"],\n \"listener\": \"mqtt://127.0.0.1:%s\" % (\n self.config.get(\"broker-blockchain\", {})\n .get(\"webhook-listener\", {})\n .get(\"host\", \"127.0.0.1:1883\").split(\":\")[-1],\n ),\n \"topic\": \"WEBHOOK/\" + kwargs.get('client_id'),\n \"qos\": 2,\n \"venv\": os.path.dirname(sys.executable)\n }\n await self.http_request(\n \"/webhook/register\", \"POST\", data,\n peer=\"http://%s\" % self._blockchain.get(\"webhook-listener\", {})\n .get(\"host\", \"127.0.0.1:5000\")\n )\n\n self.relay_blockchain_response(\n \"&RESP/\" + kwargs.get('client_id'),\n json.dumps(resp).encode(\"utf-8\")\n )\n\n return True\n\n\nclass BrokerSysPlugin:\n def __init__(self, context):\n self.context = context\n # Broker statistics initialization\n self._stats = dict()\n # handle for the scheduled $SYS broadcast; the attribute name must\n # match the self.sys_handle used elsewhere in this class\n self.sys_handle = None\n\n def _clear_stats(self):\n \"\"\"\n Initializes broker statistics data structures\n \"\"\"\n for stat in (STAT_BYTES_RECEIVED,\n STAT_BYTES_SENT,\n STAT_MSG_RECEIVED,\n STAT_MSG_SENT,\n STAT_CLIENTS_MAXIMUM,\n STAT_CLIENTS_CONNECTED,\n STAT_CLIENTS_DISCONNECTED,\n STAT_PUBLISH_RECEIVED,\n STAT_PUBLISH_SENT):\n self._stats[stat] = 0\n\n @asyncio.coroutine\n def _broadcast_sys_topic(self, topic_basename, data):\n return (yield from self.context.broadcast_message(topic_basename, data))\n\n def schedule_broadcast_sys_topic(self, topic_basename, data):\n return asyncio.ensure_future(\n self._broadcast_sys_topic(DOLLAR_SYS_ROOT + topic_basename, data),\n loop=self.context.loop\n )\n\n @asyncio.coroutine\n def on_broker_pre_start(self, *args, **kwargs):\n self._clear_stats()\n\n @asyncio.coroutine\n def on_broker_post_start(self, *args, **kwargs):\n self._stats[STAT_START_TIME] = datetime.now()\n from hbmqtt.version import get_version\n version = 'HBMQTT version ' + get_version()\n self.context.retain_message(DOLLAR_SYS_ROOT + 'version', version.encode())\n\n # Start $SYS topics management\n try:\n sys_interval = int(self.context.config.get('sys_interval', 0))\n if sys_interval > 0:\n self.context.logger.debug(\"Setup $SYS broadcasting every %d seconds\" % sys_interval)\n self.sys_handle = self.context.loop.call_later(sys_interval, self.broadcast_dollar_sys_topics)\n else:\n self.context.logger.debug(\"$SYS disabled\")\n except KeyError:\n pass\n # 'sys_interval' config parameter not found\n\n @asyncio.coroutine\n def on_broker_pre_stop(self, *args, **kwargs):\n # Stop $SYS topics broadcasting\n if self.sys_handle:\n 
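# NOTE: sys_handle is the asyncio TimerHandle returned by\n # loop.call_later() in on_broker_post_start(); cancelling it stops the\n # periodic $SYS rebroadcast. Hypothetical sketch of the pattern:\n # handle = loop.call_later(60, cb); handle.cancel()\n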
self.sys_handle.cancel()\n\n def broadcast_dollar_sys_topics(self):\n \"\"\"\n Broadcast dynamic $SYS topics updates and reschedule next execution depending on 'sys_interval' config\n parameter.\n \"\"\"\n\n # Update stats\n uptime = datetime.now() - self._stats[STAT_START_TIME]\n client_connected = self._stats[STAT_CLIENTS_CONNECTED]\n client_disconnected = self._stats[STAT_CLIENTS_DISCONNECTED]\n inflight_in = 0\n inflight_out = 0\n messages_stored = 0\n for session in self.context.sessions:\n inflight_in += session.inflight_in_count\n inflight_out += session.inflight_out_count\n messages_stored += session.retained_messages_count\n messages_stored += len(self.context.retained_messages)\n subscriptions_count = 0\n for topic in self.context.subscriptions:\n subscriptions_count += len(self.context.subscriptions[topic])\n\n # Broadcast updates\n tasks = deque()\n tasks.append(self.schedule_broadcast_sys_topic('load/bytes/received', int_to_bytes_str(self._stats[STAT_BYTES_RECEIVED])))\n tasks.append(self.schedule_broadcast_sys_topic('load/bytes/sent', int_to_bytes_str(self._stats[STAT_BYTES_SENT])))\n tasks.append(self.schedule_broadcast_sys_topic('messages/received', int_to_bytes_str(self._stats[STAT_MSG_RECEIVED])))\n tasks.append(self.schedule_broadcast_sys_topic('messages/sent', int_to_bytes_str(self._stats[STAT_MSG_SENT])))\n tasks.append(self.schedule_broadcast_sys_topic('time', str(datetime.now()).encode('utf-8')))\n tasks.append(self.schedule_broadcast_sys_topic('uptime', int_to_bytes_str(int(uptime.total_seconds()))))\n tasks.append(self.schedule_broadcast_sys_topic('uptime/formated', str(uptime).encode('utf-8')))\n tasks.append(self.schedule_broadcast_sys_topic('clients/connected', int_to_bytes_str(client_connected)))\n tasks.append(self.schedule_broadcast_sys_topic('clients/disconnected', int_to_bytes_str(client_disconnected)))\n tasks.append(self.schedule_broadcast_sys_topic('clients/maximum', int_to_bytes_str(self._stats[STAT_CLIENTS_MAXIMUM])))\n tasks.append(self.schedule_broadcast_sys_topic('clients/total', int_to_bytes_str(client_connected + client_disconnected)))\n tasks.append(self.schedule_broadcast_sys_topic('messages/inflight', int_to_bytes_str(inflight_in + inflight_out)))\n tasks.append(self.schedule_broadcast_sys_topic('messages/inflight/in', int_to_bytes_str(inflight_in)))\n tasks.append(self.schedule_broadcast_sys_topic('messages/inflight/out', int_to_bytes_str(inflight_out)))\n tasks.append(self.schedule_broadcast_sys_topic('messages/inflight/stored', int_to_bytes_str(messages_stored)))\n tasks.append(self.schedule_broadcast_sys_topic('messages/publish/received', int_to_bytes_str(self._stats[STAT_PUBLISH_RECEIVED])))\n tasks.append(self.schedule_broadcast_sys_topic('messages/publish/sent', int_to_bytes_str(self._stats[STAT_PUBLISH_SENT])))\n tasks.append(self.schedule_broadcast_sys_topic('messages/retained/count', int_to_bytes_str(len(self.context.retained_messages))))\n tasks.append(self.schedule_broadcast_sys_topic('messages/subscriptions/count', int_to_bytes_str(subscriptions_count)))\n\n # Wait until broadcasting tasks end\n while tasks and tasks[0].done():\n tasks.popleft()\n # Reschedule\n sys_interval = int(self.context.config['sys_interval'])\n self.context.logger.debug(\"Broadcasting $SYS topics\")\n self.sys_handle = self.context.loop.call_later(sys_interval, self.broadcast_dollar_sys_topics)\n\n @asyncio.coroutine\n def on_mqtt_packet_received(self, *args, **kwargs):\n packet = kwargs.get('packet')\n if packet:\n packet_size = packet.bytes_length\n 
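# NOTE: these counters feed the $SYS/broker/load and messages topics that\n # broadcast_dollar_sys_topics() publishes above; PUBLISH frames are also\n # tallied separately from the overall packet counts.\n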
self._stats[STAT_BYTES_RECEIVED] += packet_size\n self._stats[STAT_MSG_RECEIVED] += 1\n if packet.fixed_header.packet_type == PUBLISH:\n self._stats[STAT_PUBLISH_RECEIVED] += 1\n\n @asyncio.coroutine\n def on_mqtt_packet_sent(self, *args, **kwargs):\n packet = kwargs.get('packet')\n if packet:\n packet_size = packet.bytes_length\n self._stats[STAT_BYTES_SENT] += packet_size\n self._stats[STAT_MSG_SENT] += 1\n if packet.fixed_header.packet_type == PUBLISH:\n self._stats[STAT_PUBLISH_SENT] += 1\n\n @asyncio.coroutine\n def on_broker_client_connected(self, *args, **kwargs):\n self._stats[STAT_CLIENTS_CONNECTED] += 1\n self._stats[STAT_CLIENTS_MAXIMUM] = max(self._stats[STAT_CLIENTS_MAXIMUM], self._stats[STAT_CLIENTS_CONNECTED])\n\n @asyncio.coroutine\n def on_broker_client_disconnected(self, *args, **kwargs):\n self._stats[STAT_CLIENTS_CONNECTED] -= 1\n self._stats[STAT_CLIENTS_DISCONNECTED] += 1\n","sub_path":"hbmqtt/plugins/sys/broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":16481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"245086994","text":"import torch.nn as nn\nfrom math import sqrt\nimport torch\nfrom architectures.SegAN.model.GlobalConvolution import GlobalConvolution\n\nchannel_dim = 3\ndim = 64\n\n\nclass Critic(nn.Module):\n def __init__(self):\n super(Critic, self).__init__()\n\n self.conv1 = nn.Sequential(\n GlobalConvolution(channel_dim, dim, (13, 13), 2),\n nn.BatchNorm2d(dim),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n self.conv2 = nn.Sequential(\n GlobalConvolution(dim, dim * 2, (11, 11), 2),\n nn.BatchNorm2d(dim * 2),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n self.conv3 = nn.Sequential(\n GlobalConvolution(dim * 2, dim * 4, (9, 9), 1),\n nn.BatchNorm2d(dim * 4),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n self.conv4 = nn.Sequential(\n GlobalConvolution(dim * 4, dim * 8, (7, 7), 1),\n nn.BatchNorm2d(dim * 8),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv2d(dim * 8, dim * 8, 4, 1, 2, bias=False),\n nn.BatchNorm2d(dim * 8),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv2d(dim * 8, dim * 8, 3, 2, 2, bias=False),\n nn.BatchNorm2d(dim * 8),\n nn.LeakyReLU(0.2, inplace=True)\n )\n\n def forward(self, input):\n batch_size = input.size()[0]\n out1 = self.conv1(input)\n out2 = self.conv2(out1)\n out3 = self.conv3(out2)\n out4 = self.conv4(out3)\n out5 = self.conv5(out4)\n out6 = self.conv6(out5)\n\n # Concatenate output\n output = torch.cat((input.view(batch_size, -1), 1 * out1.view(batch_size, -1),\n 2 * out2.view(batch_size, -1), 2 * out3.view(batch_size, -1),\n 2 * out4.view(batch_size, -1), 2 * out5.view(batch_size, -1),\n 4 * out6.view(batch_size, -1)), 1)\n return output\n","sub_path":"architectures/SegAN/model/Critic.py","file_name":"Critic.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"288078633","text":"#-*-coding:utf-8-*-\nfrom lxml import etree\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport requests\nimport json\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef towrite(dic):\n\tf.writelines('post time: ' + str(dic['post_time']) + '\\n')\n\tf.writelines('post content: ' + unicode(dic['post_content']) + '\\n')\n\tf.writelines('user name: ' + dic['user_name'] + '\\n\\n')\n\n\ndef spider(url):\n\thtml = requests.get(url)\n\tselector = etree.HTML(html.text)\n\tl_post = 
selector.xpath('//div[@class=\"l_post j_l_post l_post_bright \"]')\n\titem = {}\n\tfor each in l_post:\n\t\tdata_field = json.loads(each.xpath('@data-field')[0].replace('"',''))\n\t\tauthor = data_field['author']['user_name']\n\t\td_post_content = each.xpath('div[@class=\"d_post_content_main\"]/div/cc/div[@class=\"d_post_content j_d_post_content clearfix\"]/text()')[0].replace('\\n','').replace('\\r','')\t\t\t# the two @replace don't really do anything\n\t\tcontent_date = data_field['content']['date']\n\t\t\n\t\titem['user_name'] = author\n\t\titem['post_content'] = d_post_content\n\t\titem['post_time'] = content_date\n\t\ttowrite(item)\n\n\nif __name__ == '__main__':\n\t\n\tpool = ThreadPool(6)\n\tf = open('tieba.txt', 'a')\n\tpages = []\n\tfor i in range(1, 5):\n\t\tpage = \"http://tieba.baidu.com/p/3522395718?pn=\" + str(i)\n\t\tpages.append(page)\n\t\n\tresult = pool.map(spider, pages)\n\tpool.close()\n\tpool.join()\n\tf.close()\n","sub_path":"py_0.1/tieba.py","file_name":"tieba.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"543269511","text":"import os\nimport sys\nsys.path.append('../')\nimport numpy as np\nfrom numpy import genfromtxt\nfrom classesDtype import dtype as dt\nimport pdb\n\n\nclass airport:\n def __init__(self):\n self.path2File = {'airports':os.getcwd() + \"\\\\airports.dat\", \n 'airportTaxiTime':os.getcwd() + '\\\\aircraftData\\\\airportTaxiTime.csv',\n 'aircraftDefault':os.getcwd() + '\\\\aircraftData\\\\aircraftDefault.csv',\n 'aircraftEngine':os.getcwd() + '\\\\aircraftData\\\\aircraftEngine.csv',\n 'engineLTO':os.getcwd() + '\\\\aircraftData\\\\engineLTO.csv' \n }\n self.airportsDatSA = []\n self.airportTaxiTimeSA = []\n self.aircraftDefaultSA = []\n self.aircraftEngineSA = []\n self.engineLTOSA = []\n self.read2SA()\n\n def read2SA(self):\n self.airportsDatSA = genfromtxt(self.path2File['airports'], delimiter=',', deletechars = '\"', dtype = dt.dtypeAirp)\n self.airportTaxiTimeSA = genfromtxt(self.path2File['airportTaxiTime'], delimiter=',', dtype = dt.airportTaxiTime)\n self.aircraftDefaultSA = genfromtxt(self.path2File['aircraftDefault'], delimiter=',', dtype = dt.aircraftDefault)\n self.aircraftEngineSA = genfromtxt(self.path2File['aircraftEngine'], delimiter=',', dtype = dt.aircraftEngine)\n self.engineLTOSA = genfromtxt(self.path2File['engineLTO'], delimiter=',', dtype = dt.engineLTO)\n\n \n \n","sub_path":"airportSpaceDist/repositories/readAirportsDat.py","file_name":"readAirportsDat.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"526864890","text":"import numpy as np\nfrom findPolicy import *\nfrom centre import *\nfrom north import *\nfrom east import *\nfrom west import *\nfrom south import *\nfrom probability import *\nstepCost = -10\ngamma = 0.25\ndelta = 0.001\n\ndir = np.array(['C', 'E','W','N','S'])\n\n#state = (position , material , arrows , ready(1) or dormant(0) , health)\n\n#starting state is = (2 , 0 , 0 , 0 , )\n\nu = [[[[[0.0]*5]*2]*4]*3]*5\nu = np.array(u)\nv = [[[[[0.0]*5]*2]*4]*3]*5\nv = np.array(v)\nbestAction = [[[[[9]*5]*2]*4]*3]*5\nbestAction = np.array(bestAction)\ncounter = 1\nwhile(1):\n writeFile('Iteration-'+str(counter)+'\\n')\n counter+=1\n dif = 0.0\n for mat in range(0,3):\n for arrow in range(0,4):\n for state in range(0,2):\n for health in range(1,5):\n v , bestAction = findCentre(mat , arrow , state , health , 
u,v,bestAction)\n v , bestAction = findEast(mat , arrow , state , health , u,v,bestAction)\n v , bestAction = findWest(mat , arrow , state , health , u,v,bestAction)\n v , bestAction = findSouth(mat , arrow , state , health , u,v,bestAction)\n v , bestAction = findNorth(mat , arrow , state , health , u,v,bestAction)\n for i in range(0,5):\n # if i==0 and mat==0 and state==0 and health==1 and arrow==0:\n # print(v[0][0][0][0][1] , u[0][0][0][0][1])\n dif = max(dif,abs(v[i][mat][arrow][state][health]-u[i][mat][arrow][state][health]))\n for mat in range(0,3):\n for arrow in range(0,4):\n for state in range(0,2):\n for health in range(1,5):\n for i in range(0,5):\n u[i][mat][arrow][state][health]=v[i][mat][arrow][state][health]\n if dif < delta:\n break\n# relational DB --> NoSQL non-relational --> Key-Value\r\n# MongoDB: installation\r\n# 1. download the files\r\n# 2. create a folder: mkdir data\r\n# 3. run the command: mongod -dbpath ./data\r\n# or create a start.bat file --> mongod --dbpath ./data\r\n\r\n\r\n# MongoDB visualization\r\n# download MongoVUE --> http://www.mongovue.com/ and install MongoVUE\r\n\r\n# installing pymongo (used to operate MongoDB)\r\n# --> pip install pymongo\r\n# --> import pymongo\r\n\r\nconnect = pymongo.MongoClient() # open a connection; IP and port can be passed in (default: localhost 27017)\r\n\r\ntdb = connect.TestDB # (the database name)\r\npost_info = tdb.test # TestDB and test are both names, not variables\r\npost_info.insert({\"id\":\"qwqwqw\",\"name\":\"lzp\"}) # insert\r\n\r\npost_info.remove({\"id\":\"132323ss53\"}) # remove\r\n\r\n\r\n\r\n","sub_path":"extend/MongoDB/MyDb.py","file_name":"MyDb.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"127440780","text":"\nimport os\n\nclass video_processer():\n\n def __init__(self,video_path =\"/media/lb/学习/最近学习/Django_project/MyVedioSystem/static\"):\n\n self.video_path =video_path\n\n def toH264(self,video_name =\"video.avi\"):\n\n output_name =video_name.replace(\"avi\",\"mp4\")\n\n origin_video = os.path.join(self.video_path, video_name)\n\n convert_video = os.path.join(self.video_path, output_name)\n\n cmdline = \"source deactivate&&ffmpeg -i {} -vcodec h264 {}\".format(origin_video,convert_video)\n #cmdline = \"ffmpeg -i {} {}\".format(origin_video, convert_video)\n\n flag = os.system(cmdline)\n\n if not flag:\n\n print(\"converted successfully!\")\n\n else:\n\n print(\"conversion failed!\")\n\nif __name__ == \"__main__\":\n\n processer =video_processer()\n\n processer.toH264(\"video0.avi\")\n\n\n\n# video_path =\"/media/lb/学习/最近学习/Django_project/MyVedioSystem/static\"\n# video_name =\"video.avi\"\n#\n# origin_video =os.path.join(video_path,video_name)\n#\n# convert_video =os.path.join(video_path,\"optut_test.mp4\")\n#\n# cmdline =\"ffmpeg -i {} -vcodec h264 {}\".format(origin_video,convert_video)\n#\n# flag =os.system(cmdline)\n#\n# if not flag:\n# print(\"convert successfully!\")\n# else:\n# print(\"false!\")\n","sub_path":"static/video_processer.py","file_name":"video_processer.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"100751462","text":"# -*- coding: utf-8 -*-\n# @Author: GXR\n# @CreateTime: 2021-04-01\n# @UpdateTime: 2021-08-10\n\nimport json\nimport threading\nimport time\n\nimport redis\nimport requests\nfrom loguru import logger\n\nimport config\n\nred = redis.Redis(\n host=config.REDIS_HOST,\n port=config.REDIS_PORT,\n db=config.REDIS_DB,\n decode_responses=True,\n)\n\n\n# refresh the pool of usable proxies\ndef proxy_refresh():\n while 1:\n proxy = red.spop(config.REDIS_KEY_PROXY_USEFUL)\n if not proxy:\n break\n proxies = {\"http\": \"http://\" + proxy, \"https\": \"http://\" + proxy}\n
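# NOTE: requests expects a scheme -> proxy-URL mapping; both entries point\n # at the same HTTP proxy here. A hypothetical value, for illustration only:\n # {\"http\": \"http://1.2.3.4:8080\", \"https\": \"http://1.2.3.4:8080\"}\n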
\"https\": \"http://\" + proxy}\n try:\n response = requests.get(\n config.PROXY_CHECK_URL,\n headers=config.HEADERS,\n proxies=proxies,\n timeout=5,\n )\n if response.status_code == 200:\n red.sadd(\n config.REDIS_KEY_PROXY_USEFUL,\n json.dumps(\n {\n \"proxy\": proxy,\n \"time\": time.strftime(\n \"%Y-%m-%d %H:%M:%S\", time.localtime(time.time())\n ),\n }\n ),\n )\n logger.debug(\"刷新代理[%s]\" % proxy)\n except:\n pass\n\n\ndef run_proxy_refresh():\n for i in range(config.THREAD_COUNT_PROXY_REFRESH):\n t = threading.Thread(target=proxy_refresh)\n t.start()\n\n\nif __name__ == \"__main__\":\n run_proxy_refresh()\n","sub_path":"proxy_refresh.py","file_name":"proxy_refresh.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"20931630","text":"from math import*\nfrom numpy import*\nfrom matplotlib.pyplot import *\n\nL = 1\nN = 1\nc = linspace(0,1,1e3)\n\ndef kF(x):\n\tZ = (1-x)+2*x\n\te1 = (3*pi**2)**(1/3.)\n\te2 = (Z*N)**(1/3.)/(N*L)\n\treturn e1*e2\n\nkBZ = ones(len(c))*pi/L\n\nC = kF(c)\n\nfor i in range(len(c)):\n\tif(kBZ[i]-C[i] > 0):\n\t\tprint(i)\n\t\tprint(C[i])\n\t\tbreak\n\nprint(kF(5))\n\nplot(c*100,kF(c)/kBZ)\nplot(c*100,kBZ/kBZ)\nshow()\n#savefig(\"alloy.pdf\")","sub_path":"module3/alloy.py","file_name":"alloy.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"20272642","text":"import time\nimport keys # Eigenes Modul\nfrom datetime import date\nimport sqlite3\nimport urllib.request\nimport json\nfrom jinja2 import Environment, FileSystemLoader\n\nenv = Environment(loader=FileSystemLoader('templates'))\nsistrixKey = keys.sistrix_key\nhosts = [{'host' : 'kfzteile24.de', 'host_table' : 'kfz24'}, {'host':'autoteile24.de', 'host_table':'auto24'}]\ndata = {}\n\ndef create_tables():\n sql1 = '''CREATE TABLE IF NOT EXISTS kfz24 (\n id INTEGER,\n jahr INTEGER,\n woche INTEGER,\n sichtbarkeitsindex REAL,\n brand_clicks INTEGER,\n nobrand_clicks INTEGER,\n brand_impressions INTEGER,\n nobrand_impressions INTEGER,\n brand_ctr REAL,\n nobrand_ctr REAL,\n umsatz REAL,\n conversionrate REAL\n )'''\n cursor.execute(sql1)\n sql2 = '''CREATE TABLE IF NOT EXISTS auto24 (\n id INTEGER,\n jahr INTEGER,\n woche INTEGER,\n sichtbarkeitsindex REAL,\n brand_clicks INTEGER,\n nobrand_clicks INTEGER,\n brand_impressions INTEGER,\n nobrand_impressions INTEGER,\n brand_ctr REAL,\n nobrand_ctr REAL,\n umsatz REAL,\n conversionrate REAL\n )'''\n cursor.execute(sql2)\n\ndef getSi(host):\n try:\n requrl = \"https://api.sistrix.com/domain.sichtbarkeitsindex?api_key=\" + sistrixKey + \"&domain=\" + host + \"&format=json\"\n resp = urllib.request.urlopen(requrl)\n resp = resp.read().decode(\"utf-8\")\n resp = json.loads(resp)\n si = resp['answer'][0]['sichtbarkeitsindex'][0]['value']\n except:\n si = 0\n return si\n\ndef getAnalyticsUmsatz():\n return\n\ndef getAnalyticsConversionrate():\n return\n\ndef prepareData():\n connection.row_factory = sqlite3.Row # This enables column access by name: row['column_name']\n db = connection.cursor()\n sql = ''' SELECT kfz24.woche,\n kfz24.sichtbarkeitsindex + auto24.sichtbarkeitsindex as sum_si,\n kfz24.brand_clicks + auto24.brand_clicks as sum_clicks\n FROM kfz24\n JOIN auto24\n ON kfz24.id = auto24.id\n WHERE kfz24.jahr = 2016'''\n rows = db.execute(sql).fetchall()\n connection.commit()\n connection.close()\n res = json.dumps( [dict(ix) for ix in rows] ) #CREATE JSON\n res = 
return res\n\ndef renderReport(res):\n template = env.get_template(\"template.html\")\n f = open(\"index.html\", \"w\")\n data = template.render(data=res)\n f.write(data)\n f.close()\n\nconnection = sqlite3.connect('data.db')\ncursor = connection.cursor()\ncreate_tables()\nfor host in hosts:\n data['sichtbarkeitsindex'] = getSi(host['host'])\n data['id'] = int(str(date.today().isocalendar()[0]) + str(date.today().isocalendar()[1]))\n data['jahr'] = date.today().isocalendar()[0]\n data['woche'] = date.today().isocalendar()[1]\n data['umsatz'] = 0\n data['conversionrate'] = 0\n print(data)\n sql = 'INSERT INTO ' + host['host_table'] + ' VALUES (:id, :jahr, :woche, :sichtbarkeitsindex, NULL, NULL, NULL, NULL, NULL, NULL, :umsatz, :conversionrate)'\n cursor.execute(sql, data)\n connection.commit()\n\nres = prepareData()\nrenderReport(res)\n","sub_path":"model_2.py","file_name":"model_2.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"451113035","text":"import time\nimport threading\nimport wave # library for handling wav files\n\nimport pyaudio # library used for recording audio\n\nRECORD_SECONDS = 60 # recording length (seconds)\nWAVE_OUTPUT_FILENAME = \"sample.wav\" # filename for the saved audio\niDeviceIndex = 0 # index number of the recording device\n\n# basic settings\nFORMAT = pyaudio.paInt16 # audio format\nCHANNELS = 1 # mono\nRATE = 44100 # sample rate\nCHUNK = 2 ** 11 # number of sample points per buffer\naudio = pyaudio.PyAudio()\n\nstream = audio.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n input_device_index=iDeviceIndex, # index number of the recording device\n frames_per_buffer=CHUNK,\n)\n\nwaveFile = wave.open(WAVE_OUTPUT_FILENAME, \"wb\")\nwaveFile.setnchannels(CHANNELS)\nwaveFile.setsampwidth(audio.get_sample_size(FORMAT))\nwaveFile.setframerate(RATE)\n\nframeslist = []\nlockframeslist = threading.Lock()\nstopflag = False\n\ndef save_2_wav():\n global frameslist\n global lockframeslist\n global stopflag\n global waveFile\n\n while True:\n time.sleep(1)\n sz = len(frameslist)\n if sz <= 0:\n if stopflag == True:\n return\n else:\n continue\n lockframeslist.acquire()\n frames = frameslist.pop(0)\n lockframeslist.release()\n waveFile.writeframes(b\"\".join(frames))\n\ndef record_stream():\n global frameslist\n global lockframeslist\n global stopflag\n global stream\n\n for sec in range(0, 2):\n frames = []\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n\n lockframeslist.acquire()\n frameslist.append(frames)\n lockframeslist.release()\n\n stopflag = True\n\n# -------------- start recording ---------------\nprint(\"recording...\")\n\nthread1 = threading.Thread(target=record_stream)\nthread2 = threading.Thread(target=save_2_wav)\nthread1.start()\nthread2.start()\nthread1.join()\nthread2.join()\nprint(\"finished recording\")\n# -------------- recording finished ---------------\nstream.stop_stream()\nstream.close()\naudio.terminate()\n","sub_path":"recordtest.py","file_name":"recordtest.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"268445364","text":"import re\nimport os\nimport sys\nimport subprocess\nimport shutil\nimport tempfile\nimport hashlib\nimport time\nimport logging\n\nfrom . import controller\n
from anchore import anchore_utils\nfrom anchore.util import scripting\nfrom anchore.util import contexts\n\nclass Analyzer(object):\n _logger = logging.getLogger(__name__)\n\n def __init__(self, anchore_config, imagelist, allimages, force, args=None):\n self._logger.debug(\"analyzer initialization: begin\")\n\n self.config = anchore_config\n self.allimages = allimages\n self.force = force\n self.anchore_datadir = self.config['image_data_store']\n\n self.dockerfile = None\n try:\n self.dockerfile = args['dockerfile']\n except:\n pass\n\n self.skipgates = False\n try:\n self.skipgates = args['skipgates']\n except:\n pass\n\n try:\n if 'isbase' in args and args['isbase']:\n usertype = 'base'\n elif 'anchorebase' in args and args['anchorebase']:\n usertype = 'anchorebase'\n else:\n usertype = None\n except:\n usertype = None\n\n self._logger.debug(\"init input processed, loading input images: \" + str(imagelist))\n \n self.images = anchore_utils.image_context_add(imagelist, allimages, docker_cli=contexts['docker_cli'], dockerfile=self.dockerfile, anchore_datadir=self.anchore_datadir, tmproot=self.config['tmpdir'], anchore_db=contexts['anchore_db'], docker_images=contexts['docker_images'], usertype=usertype, must_load_all=True)\n\n self._logger.debug(\"loaded input images, checking that all input images have been loaded \" + str(self.images))\n\n self.anchoreDB = contexts['anchore_db']\n\n self._logger.debug(\"analyzer initialization: end\")\n\n def get_images(self):\n return(self.images)\n\n def script_is_runnable(self, script):\n suffix_list = ['.py', '.sh']\n match = False\n for s in suffix_list:\n if re.match(\".*\"+re.escape(s)+\"$\", script):\n match = True\n break\n # require both read and execute permission (bitwise OR, not XOR)\n if match and os.access(script, os.R_OK | os.X_OK):\n return(True)\n return(False)\n\n def list_analyzers(self):\n analyzerdir = '/'.join([self.config[\"scripts_dir\"], \"analyzers\"])\n overrides = ['extra_scripts_dir', 'user_scripts_dir']\n\n scripts = {'base':list()}\n for override in overrides:\n scripts[override] = list()\n\n if not os.path.exists(analyzerdir):\n raise Exception(\"No base analyzers found - please check anchore installation for completeness\")\n else:\n for f in os.listdir(analyzerdir):\n script = os.path.join(analyzerdir, f)\n # check the script to make sure it's ready to run\n if self.script_is_runnable(script):\n scripts['base'].append(script)\n\n for override in overrides:\n scripts[override] = list()\n if self.config[override]:\n opath = os.path.join(self.config[override], 'analyzers')\n if os.path.exists(opath):\n for f in os.listdir(opath):\n script = os.path.join(opath, f)\n if self.script_is_runnable(script):\n scripts[override].append(script)\n return(scripts)\n\n def run_analyzers(self, image):\n success = True\n analyzers = self.list_analyzers()\n imagename = image.meta['imagename']\n outputdir = image.anchore_imagedir\n shortid = image.meta['shortId']\n imagedir = None\n\n analyzer_status = self.anchoreDB.load_analyzer_manifest(image.meta['imageId'])\n \n analyzer_config = {}\n analyzer_config_csum = None\n try:\n analyzer_config, analyzer_config_csum = anchore_utils.load_analyzer_config(self.config.config_dir)\n except:\n pass\n\n if 'analyzer_config_csum' in analyzer_status:\n try:\n if analyzer_status['analyzer_config_csum']['csum'] != analyzer_config_csum:\n self._logger.debug(\"anchore analyzer config has been updated, forcing re-analysis\")\n self.force = True\n analyzer_status['analyzer_config_csum']['csum'] = analyzer_config_csum\n except:\n pass\n else:\n script = 
'analyzer_config_csum'\n analyzer_status[script] = {}\n analyzer_status[script]['command'] = \"ANALYZER_CONFIG_META\"\n analyzer_status[script]['returncode'] = 0\n analyzer_status[script]['output'] = \"\"\n analyzer_status[script]['outputdir'] = \"\"\n analyzer_status[script]['atype'] = 'base'\n analyzer_status[script]['csum'] = analyzer_config_csum\n analyzer_status[script]['timestamp'] = time.time()\n analyzer_status[script]['status'] = 'SUCCESS'\n \n\n results = {}\n outputdirs = {}\n torun = list()\n skip = False\n atypes = ['user_scripts_dir', 'extra_scripts_dir', 'base']\n\n for atype in atypes:\n for script in analyzers[atype]:\n try:\n with open(script, 'r') as FH:\n csum = hashlib.md5(FH.read()).hexdigest()\n except:\n csum = \"N/A\"\n\n # decide whether or not to run the analyzer\n dorun = True\n if self.force:\n dorun = True\n elif script in analyzer_status:\n if csum == analyzer_status[script]['csum'] and analyzer_status[script]['returncode'] == 0:\n dorun = False\n\n outputdir = cmdstr = outstr = \"\"\n if dorun:\n if not skip:\n if not imagedir:\n self._logger.info(image.meta['shortId'] + \": analyzing ...\") \n imagedir = image.unpack()\n\n outputdir = tempfile.mkdtemp(dir=imagedir)\n cmdline = ' '.join([imagename, self.config['image_data_store'], outputdir, imagedir])\n cmdstr = script + \" \" + cmdline\n cmd = cmdstr.split()\n try:\n self._logger.debug(\"running analyzer: \" + cmdstr)\n timer = time.time()\n outstr = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n self._logger.debug(\"analyzer time (seconds): \" + str(time.time() - timer))\n rc = 0\n self._logger.debug(\"analyzer status: success\")\n self._logger.debug(\"analyzer exitcode: \" + str(rc))\n self._logger.debug(\"analyzer output: \" + outstr)\n except subprocess.CalledProcessError as err:\n rc = err.returncode\n outstr = err.output\n outstr = outstr.decode('utf8')\n if rc:\n status = 'FAILED'\n skip = True\n success = False\n self._logger.error(\"analyzer status: failed\")\n self._logger.error(\"analyzer exitcode: \" + str(rc))\n self._logger.error(\"analyzer output: \" + outstr)\n else:\n status = 'SUCCESS'\n else:\n # this means that a prior analyzer failed, so we skip the rest\n self._logger.debug(\"skipping analyzer (due to prior analyzer failure): \" + script)\n outstr = \"\"\n rc = 1\n status = 'SKIPPED'\n\n mtype = \"base\"\n if atype == 'user_scripts_dir':\n mtype = 'user'\n elif atype == 'extra_scripts_dir':\n mtype = 'extra'\n\n results[script] = {}\n results[script]['command'] = cmdstr\n results[script]['returncode'] = rc\n results[script]['output'] = outstr\n results[script]['outputdir'] = outputdir\n results[script]['atype'] = atype\n results[script]['csum'] = csum\n results[script]['timestamp'] = time.time()\n results[script]['status'] = status\n\n if os.path.exists(os.path.join(outputdir, 'analyzer_output')):\n for d in os.listdir(os.path.join(outputdir, 'analyzer_output')):\n if os.path.exists(os.path.join(outputdir, 'analyzer_output', d)):\n for dd in os.listdir(os.path.join(outputdir, 'analyzer_output', d)):\n module_name = d\n module_value = dd\n if 'analyzer_outputs' not in results[script]:\n #results[script]['analyzer_outputs'] = {}\n results[script]['analyzer_outputs'] = list()\n\n aoutput = {'module_name':module_name, 'module_value':module_value, 'module_type':mtype}\n if os.path.isdir(os.path.join(outputdir, 'analyzer_output', d, dd)):\n aoutput['data_type'] = 'dir'\n else:\n aoutput['data_type'] = 'file'\n results[script]['analyzer_outputs'].append(aoutput)\n\n 
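# Record this run in the per-image analyzer manifest: the stored md5 csum\n # plus a zero return code is what lets an unchanged analyzer be skipped on\n # the next pass (see the dorun check above).\n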
analyzer_status[script] = {}\n analyzer_status[script].update(results[script])\n else:\n self._logger.debug(\"skipping analyzer (no change in analyzer/config and prior run succeeded): \" + script)\n\n # process and store analyzer outputs\n didsave = False\n for script in list(results.keys()):\n result = results[script]\n if result['status'] == 'SUCCESS':\n mtype = None\n if result['atype'] == 'user_scripts_dir':\n mtype = 'user'\n elif result['atype'] == 'extra_scripts_dir':\n mtype = 'extra'\n\n if os.path.exists(os.path.join(result['outputdir'], 'analyzer_output')):\n for d in os.listdir(os.path.join(result['outputdir'], 'analyzer_output')):\n if os.path.exists(os.path.join(result['outputdir'], 'analyzer_output', d)):\n for dd in os.listdir(os.path.join(result['outputdir'], 'analyzer_output', d)):\n dfile = os.path.join(result['outputdir'], 'analyzer_output', d, dd)\n module_name = d\n module_value = dd\n if os.path.isfile(dfile):\n adata = anchore_utils.read_kvfile_todict(dfile)\n self.anchoreDB.save_analysis_output(image.meta['imageId'], module_name, module_value, adata, module_type=mtype)\n didsave = True\n elif os.path.isdir(dfile):\n self.anchoreDB.save_analysis_output(image.meta['imageId'], module_name, module_value, dfile, module_type=mtype, directory_data=True)\n didsave = True\n\n self.anchoreDB.save_analyzer_manifest(image.meta['imageId'], analyzer_status)\n\n if success:\n self._logger.debug(\"analyzer commands all finished with successful exit codes\")\n\n if didsave:\n self._logger.debug(\"generating analysis report from analyzer outputs and saving\") \n report = self.generate_analysis_report(image)\n self.anchoreDB.save_analysis_report(image.meta['imageId'], report)\n\n self._logger.debug(\"saving image information with updated analysis data\")\n image.save_image()\n\n self._logger.info(image.meta['shortId'] + \": analyzed.\")\n\n\n self._logger.debug(\"running analyzers on image: \" + str(image.meta['imagename']) + \": end\")\n\n return(success)\n\n def generate_analysis_report(self, image):\n # this routine reads the results of image analysis and generates a formatted report\n report = {}\n amanifest = self.anchoreDB.load_analyzer_manifest(image.meta['imageId'])\n for amodule in list(amanifest.keys()):\n if 'analyzer_outputs' in amanifest[amodule]:\n for aoutput in amanifest[amodule]['analyzer_outputs']:\n module_name = aoutput['module_name']\n module_value = aoutput['module_value']\n module_type = aoutput['module_type']\n data_type = aoutput['data_type']\n if module_name not in report:\n report[module_name] = {}\n if module_value not in report[module_name]:\n report[module_name][module_value] = {}\n\n if data_type == 'file':\n adata = self.anchoreDB.load_analysis_output(image.meta['imageId'], module_name, module_value, module_type=module_type)\n else:\n adata = {}\n\n report[module_name][module_value][module_type] = adata\n return(report)\n\n def run(self):\n self._logger.debug(\"main image analysis on images: \" + str(self.images) + \": begin\")\n # analyze image and all of its family members\n success = True\n toanalyze = {}\n comparehash = {}\n linkhash = {}\n\n # calculate all images to be analyzed\n for imageId in self.images:\n coreimage = self.allimages[imageId]\n\n toanalyze[coreimage.meta['imageId']] = coreimage\n\n base = False\n lastimage = coreimage\n for i in coreimage.anchore_familytree:\n image = self.allimages[i]\n toanalyze[image.meta['imageId']] = image\n\n if (image.meta['shortId'] != coreimage.meta['shortId'] and not image.is_intermediate()):\n 
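# Pair each non-intermediate family member with both the target image and\n # the previously visited member, so later comparisons have both the\n # end-image and nearest-ancestor baselines available.\n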
comparehash[coreimage.meta['shortId'] + image.meta['shortId']] = [coreimage, image]\n comparehash[lastimage.meta['shortId'] + image.meta['shortId']] = [lastimage, image]\n if not base and image.is_base():\n base = image\n lastimage = image\n\n if base:\n linkhash[image.meta['imageId']] = base.meta['imageId']\n\n # execute analyzers\n self._logger.debug(\"images to be analyzed: \" + str(list(toanalyze.keys())))\n for imageId in list(toanalyze.keys()):\n image = toanalyze[imageId]\n success = self.run_analyzers(image)\n if not success:\n self._logger.error(\"analyzer failed to run on image \" + str(image.meta['imagename']) + \", skipping the rest\")\n break\n\n if not success:\n self._logger.error(\"analyzers failed to run on one or more images.\")\n return (False)\n\n if not self.skipgates:\n # execute gates\n self._logger.debug(\"running gates post-analysis: begin\")\n for imageId in list(toanalyze.keys()):\n c = controller.Controller(anchore_config=self.config, imagelist=[imageId], allimages=self.allimages).run_gates(refresh=True)\n self._logger.debug(\"running gates post-analysis: end\")\n\n self._logger.debug(\"main image analysis on images: \" + str(self.images) + \": end\")\n return (success)\n","sub_path":"anchore/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":16123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"547934162","text":"#!/usr/bin/python\n\nfrom tkinter import *\nfrom tkinter.ttk import Notebook\nimport tkinter.filedialog as tkfd\nimport tkinter.messagebox\n\n#-----------------------------------Initialization-----------------------------------\n# Create the root of the GUI and its title/dimensions:\nroot = Tk()\nroot.title('PyCon GUI')\nroot.geometry('600x500')\n\n# Setup tabs via a \"notebook\":\nnotebook = Notebook(root)\nnotebook.pack(fill=BOTH, expand=Y, side=TOP)\n\n# Initialize the different tabs, i.e., frames:\nf1 = Frame(notebook)\nf2 = Frame(notebook)\nf3 = Frame(notebook)\n\n#-----------------------------Tab 1: Dropdown and Listbox----------------------------\n# Dropdown Menu first:\nsome_string = StringVar(f1) # Attach variables and widgets to f1, not root\nsome_string.set('drop down menu button')\n\ndef grab_and_assign(event):\n # Callback function for the option menu\n chosen_option = some_string.get()\n label_chosen_variable = Label(f1, text=chosen_option)\n label_chosen_variable.grid(row=1, column=2)\n print(chosen_option)\n\n# Create the drop down menu:\ndrop_menu = OptionMenu(f1, some_string, 'One', 'two', '34', command=grab_and_assign)\ndrop_menu.grid(row=0, column=0)\n\n# Create a label to display the chosen option:\nlabel_left = Label(f1, text='chosen variable = ')\nlabel_left.grid(row=1, column=1)\n\n# Now for the List Box:\ndef get_list(event):\n # Mouse button release callback\n # Read the listbox selection and put the result in an entry box widget\n index = listbox1.curselection()[0]# get selected line index.\n seltext = listbox1.get(index) # get the line's text & assign to a variable.\n enter_1.delete(0,50) # delete previous text in enter_1 otherwise \n # the entries append to each other.\n enter_1.insert(0, seltext) # now display the selected text.\n\n# Create the listbox (note that size is in characters)\nlistbox1 = Listbox(f1, width=50, height=6)\nlistbox1.place(x=60, y=200)\n\n# Fill the listbox with data:\nlistbox1.insert(END, 'a list entry')\nfor item in ['one has begun','two is a shoe','three like a knee','four to the door']:\n listbox1.insert(END, 
item)\n\n# Use entry widget to display/edit selection:\nenter_1 = Entry(f1, width=50, bg='yellow')\nenter_1.insert(0, 'Click on an item in the listbox')\nenter_1.place(x=60, y=310)\n\n# Left mouse click on a list item to display selection:\nlistbox1.bind('<ButtonRelease-1>', get_list)\n\n#----------------------------Tab 2: RadioButtons/Checkboxes--------------------------\n# Radio Buttons first!\n# String to save an option into:\nstring2 = StringVar(f2)\n\n# Now some radio buttons:\nrad_1 = Radiobutton(f2, text='violent', variable=string2, value='action')\nrad_1.place(x=30, y=30)\nrad_2 = Radiobutton(f2, text='love', variable=string2, value='romance')\nrad_2.place(x=30, y=50)\nrad_3 = Radiobutton(f2, text='conflict', variable=string2, value='war')\nrad_3.place(x=30, y=70)\n\ndef callback_radio():\n # Callback function for the radio buttons\n chosen_button = string2.get()\n print(chosen_button)\n\n# A button to activate the choice from the radio button you picked\nrad_activate_button = (Button(f2, text='Hit me', \n command=callback_radio).place(x=200,y=50))\n\n# Now for check boxes!\n# Some variables for storing choices:\n# (IntVar throughout so the on/off values come back as ints consistently)\ncheck_var1 = IntVar(f2)\ncheck_var2 = IntVar(f2)\ncheck_var3 = IntVar(f2)\ncheck_var4 = IntVar(f2)\n\n# The check boxes themselves:\nCk_1 = (Checkbutton(f2, text='Dog', variable=check_var1, onvalue=1, offvalue=0,\n height=1, width=10).place(x=30, y=200))\nCk_2 = (Checkbutton(f2, text='Cat', variable=check_var2, onvalue=1, offvalue=0,\n height=1, width=10).place(x=30, y=230))\nCk_3 = (Checkbutton(f2, text='Rat', variable=check_var3, onvalue=1, offvalue=0,\n height=1, width=10).place(x=30, y=260))\nCk_4 = (Checkbutton(f2, text='Frog', variable=check_var4, onvalue=1, offvalue=0,\n height=1, width=10).place(x=30, y=290))\n\ndef callback_check():\n # Callback for the check boxes:\n checkChoice1 = check_var1.get()\n checkChoice2 = check_var2.get()\n checkChoice3 = check_var3.get()\n checkChoice4 = check_var4.get()\n print(checkChoice1, checkChoice2, checkChoice3, checkChoice4)\n\n# Button to store choices and execute the callback:\ncheckChooseButton = Button(f2, text='Decide your choices!', command=callback_check)\ncheckChooseButton.place(x=200, y=245)\n\n#----------------------------------Tab 3: File Chooser-------------------------------\ndef file_chooser():\n # Function that lets you choose a file and then do something with it.\n file = tkfd.askopenfile(parent=root, mode='rb', title='Choose a file')\n if file is not None:\n data = file.read()\n file.close()\n print('I got %d bytes from this file' % len(data))\n if file is None:\n print(\"You didn't choose a file. Please try again.\")\n\ndef callback_file_chooser():\n # The callback that runs the file_chooser.\n chooseFile = input('Choose a file? Yes or No: ')\n if chooseFile == 'Yes':\n file_chooser()\n elif chooseFile == 'No':\n print(\"Ok, don't choose a file, jerk.\")\n else:\n print(\"Seriously, you don't know how to answer a simple Yes or No?\")\n\n# The button to start the callback function:\nfile_load_button = (Button(f3, text='Touch me and see what I can do!', \n command=callback_file_chooser).place(x=300, y=250))\n\n#--------------------------------------Finish up-------------------------------------\n# Once the tabs are all done, we have to attach them to the notebook. This will allow\n# us to name the different tabs, and give them a 'state.' A normal state means it can\n# be actively used and switched between.\n
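# NOTE: a tab's state can also be changed after creation, e.g. the\n# (hypothetical) calls: notebook.tab(f3, state='disabled') to freeze it and\n# notebook.tab(f3, state='normal') to re-enable it.\n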
# You can, if you wish, freeze a tab so you\n# cannot use it until some interaction unfreezes the tab.\nnotebook.add(f1, text='Dropdown/Listbox', state='normal')\nnotebook.add(f2, text='Radios/Checks', state='normal')\nnotebook.add(f3, text='File Chooser', state='normal')\n\n# Lastly, the main loop:\nroot.mainloop()\n","sub_path":"Programs/PyConGUI.py","file_name":"PyConGUI.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"558263817","text":"from sqlalchemy import testing\nfrom sqlalchemy.orm import mapper\nfrom .test_mutable import Foo\nfrom .test_mutable import (\n    MutableAssociationScalarPickleTest as _MutableAssociationScalarPickleTest,\n)\nfrom .test_mutable import (\n    MutableWithScalarJSONTest as _MutableWithScalarJSONTest,\n)\n\n\nclass MutableIncludeNonPrimaryTest(_MutableWithScalarJSONTest):\n    @classmethod\n    def setup_mappers(cls):\n        foo = cls.tables.foo\n\n        mapper(Foo, foo)\n        with testing.expect_deprecated(\n            \"The mapper.non_primary parameter is deprecated\"\n        ):\n            mapper(\n                Foo, foo, non_primary=True, properties={\"foo_bar\": foo.c.data}\n            )\n\n\nclass MutableAssocIncludeNonPrimaryTest(_MutableAssociationScalarPickleTest):\n    @classmethod\n    def setup_mappers(cls):\n        foo = cls.tables.foo\n\n        mapper(Foo, foo)\n        with testing.expect_deprecated(\n            \"The mapper.non_primary parameter is deprecated\"\n        ):\n            mapper(\n                Foo, foo, non_primary=True, properties={\"foo_bar\": foo.c.data}\n            )\n","sub_path":"test/ext/test_deprecations.py","file_name":"test_deprecations.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"538716626","text":"# coding=utf-8\nfrom django.conf.urls import patterns, include, url\n\nfrom datum.views import ViewData, EditData\n\n\nurlpatterns = patterns('',\n    url(r'^(?P<pk>\\d+)/$',\n        ViewData.as_view(),\n        name='datum_view'),\n    url(r'^edit/$',\n        EditData.as_view(),\n        name='edit_view'),\n)","sub_path":"datum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"352601299","text":"\"\"\"4. Given a list of numbers. Find the elements of the list that have no repetitions.\nBuild a resulting array of the numbers that meet this requirement.\nOutput the elements in the order they appear in the original list.\nA generator must be used to complete this task.\nExample source list: [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11].\nResult: [23, 1, 3, 10, 4, 11]\"\"\"\n\nlist_n = [5, 3, 7, 6, 2, 9, 1, 5, 8, 4, 6, 2, 6]\nprint(\"Original list\", list_n)\n\nfrom collections import Counter\ncounter = Counter(list_n) # count how many times each number occurs\n\nresult_list = [x for x,n in counter.items() if n==1] # keep the numbers that occur exactly once\nprint(\"Non-repeating numbers\", result_list)","sub_path":"task_04.4.py","file_name":"task_04.4.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"630022199","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\nfrom std_msgs.msg import Int32\nfrom scipy.spatial import cKDTree\nimport math\nfrom math import sqrt\n\n\nLOOKAHEAD_WPS = 200 # Number of waypoints to publish\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n\n # Object parameters for inputs\n self.current_pose = None\n self.track_map = None\n self.track_length = None\n self.track_cKDTree = None\n self.traffic_light_waypoint = -1\n # Object parameters for outputs\n self.final_waypoints = Lane()\n # Initialize track index which is closest to the ego vehicle\n self.ego_idx = None\n self.node_creation_time = rospy.Time.now().to_sec()\n # Extracted from the distance function starter code\n self.dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n \n # Set to true to print track co-ordinates to file\n self.do_print_track_map_2_file = False\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n \n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n self.loop()\n\n def loop(self):\n # publish rate is 50Hz\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n # If base_waypoints and current_pose have been received by the waypoint_loader node and simulator,\n # then we have enough info to create final_waypoints\n # Same approach as Q & A Walkthrough videos for this project\n if self.track_map and self.current_pose:\n # If cKDTree has not yet been created, create the cKDTree\n if not self.track_cKDTree:\n self.create_tree() \n \n self.calculate_final_waypoints() \n self.final_waypoints_pub.publish(self.final_waypoints) \n\n rate.sleep()\n\n # Simple function which returns roughly how long the node has been alive.\n # Matches Writing ROS Node - Lesson 5\n def get_alive_time(self):\n return rospy.Time.now().to_sec() - self.node_creation_time\n\n # Print the map x and y track map points to a text file\n def send_map_2_file(self):\n f = open(\"the_track_map_export.txt\", \"w\")\n for idx, waypoint in enumerate(self.track_map.waypoints):\n f.write(''.join((str(waypoint.pose.pose.position.x), ',',str(waypoint.pose.pose.position.y), '\\n')))\n f.close()\n\n # Function to create a cKDTree based on the imported track_map\n def create_tree(self):\n # This function is called within rospy fixed rate while loop.\n # A prerequisite for calling the function is self.track_map != None.\n # Therefore, this function assumes that self.track_map has already been populated\n
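# NOTE: scipy's cKDTree gives O(log n) nearest-neighbour lookups; a\n # hypothetical query: dist, idx = tree.query((x, y)) returns the distance\n # to, and index of, the closest stored (x, y) point.\n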
 # Method below follows same approach as Project Walkthrough video (Part 6: System Integration Lesson)\n track_position_orientation = [[waypoint.pose.pose.position, waypoint.pose.pose.orientation] for waypoint in self.track_map.waypoints]\n track_xy_tuple_array = [(position_orientation[0].x, position_orientation[0].y) for position_orientation in track_position_orientation]\n # cKDTree built using list of (x,y) tuples\n self.track_cKDTree = cKDTree(track_xy_tuple_array)\n\n def pose_cb(self, msg):\n # Set the object pose parameter to the value of the ROS message\n # i.e. each time Simulator publishes a new position / orientation for the vehicle, update our stored values\n self.current_pose = msg\n\n def waypoints_cb(self, waypoints):\n self.track_map = waypoints\n # Number of points in the track map\n self.track_length = len(self.track_map.waypoints)\n if self.do_print_track_map_2_file:\n self.send_map_2_file()\n\n # Function to set the final_waypoints\n def calculate_final_waypoints(self):\n # Delete the previously sent final_waypoints\n self.final_waypoints.waypoints = []\n # Get the closest track point which is ahead of the ego vehicle\n self.get_closest_track_point_ahead()\n # Create a vector for final_waypoints with base speed component\n final_waypoints_base_speed = []\n # This is the list of Track Map indices which correspond to each of the final waypoints\n final_wp_idx_vect = []\n\n # If it is possible to locate ego vehicle on track map, then create a list of final_waypoints\n # Here, extract waypoints from Track Map, but do not correct the velocity yet\n # Method implemented for setting final waypoints borrows heavily from Project Walkthrough videos\n if self.ego_idx: \n for idx in range(LOOKAHEAD_WPS):\n track_idx = (idx + self.ego_idx) % self.track_length \n final_wp_idx_vect.append(track_idx)\n final_waypoints_base_speed.append(self.track_map.waypoints[track_idx])\n\n self.update_waypoint_velocity(final_waypoints_base_speed, final_wp_idx_vect)\n\n # Update the velocity of the final_waypoints, from the values given in the track map.\n def update_waypoint_velocity(self, final_waypoints_base_speed, final_wp_idx_vect):\n if self.traffic_light_waypoint == -1:\n # If traffic light is green, drive at speed specified by Track Map\n self.final_waypoints.waypoints = final_waypoints_base_speed\n else:\n # Method implemented here for reducing speed borrows heavily from Project Walkthrough videos\n stop_idx = max(0, self.traffic_light_waypoint)\n\n for it, waypoint in enumerate(final_waypoints_base_speed):\n W = Waypoint() \n W.pose = waypoint.pose\n v_init = self.get_waypoint_velocity(waypoint)\n wp_idx_on_track = final_wp_idx_vect[it]\n d = self.distance(self.track_map.waypoints, wp_idx_on_track, stop_idx)\n #v = max(0, min(ego_v, ego_v *(d)/d_total)) #MR\n # v = v_init * (d / 100) #MR -> SQRT not used. Linear decrease in velocity w/ distance is assumed. Max Decel is not accounted for.\n v = sqrt(d)\n v = 0 if v < 2 else v\n W.twist.twist.linear.x = min(v , v_init)\n self.final_waypoints.waypoints.append(W)\n \n def get_closest_track_point_ahead(self):\n use_distance_based = True\n # This function borrows its method from the Project Walkthrough video. 
As per the video, closest point in space may not be in front of the vehicle\n # Find the index of the closest point in space, by querying the cKDTree\n (distance_to_closest, closest_idx_in_space) = self.track_cKDTree.query((self.current_pose.pose.position.x, self.current_pose.pose.position.y))\n previous_idx = self.track_length - 1 if closest_idx_in_space == 0 else closest_idx_in_space - 1\n next_idx = 0 if closest_idx_in_space == self.track_length - 1 else closest_idx_in_space + 1\n # Note: Method assumes anticlockwise travel of the vehicle, as this is the direction that the track map vector runs\n closest_waypoint = self.track_map.waypoints[closest_idx_in_space]\n previous_waypoint = self.track_map.waypoints[previous_idx]\n\n # Distance from previous track point to ego vehicle position\n if (use_distance_based):\n d_prev_2_ego = self.dl(previous_waypoint.pose.pose.position, self.current_pose.pose.position)\n d_prev_2_closest = self.dl(previous_waypoint.pose.pose.position , closest_waypoint.pose.pose.position)\n d_closest_2_ego = self.dl(self.current_pose.pose.position , closest_waypoint.pose.pose.position)\n # If the distance from previous point to ego vehicle is greater than the hypotenuse of right angled\n # triangle which would correspond with these lengths, then vehicle is ahead of closest point\n is_ahead = True if d_prev_2_ego > sqrt( d_closest_2_ego**2 + d_prev_2_closest**2) else False\n else:\n # Dot product as demonstrated in Q & A video\n v_closest_2_ego = [self.current_pose.pose.position.x - closest_waypoint.pose.pose.position.x, self.current_pose.pose.position.y - closest_waypoint.pose.pose.position.y]\n v_closest_2_prev = [previous_waypoint.pose.pose.position.x - closest_waypoint.pose.pose.position.x, previous_waypoint.pose.pose.position.y - closest_waypoint.pose.pose.position.y]\n dot = v_closest_2_ego[0] * v_closest_2_prev[0] + v_closest_2_ego[1] * v_closest_2_prev[1]\n is_ahead = (dot < 0)\n \n # Set the waypoint index of the ego vehicle\n self.ego_idx = next_idx if is_ahead else closest_idx_in_space\n \n def traffic_cb(self, msg):\n self.traffic_light_waypoint = msg.data\n\n def obstacle_cb(self, msg):\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n for i in range(wp1, wp2+1):\n dist += self.dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n","sub_path":"ros/src/waypoint_updater/waypoint_updater.py","file_name":"waypoint_updater.py","file_ext":"py","file_size_in_byte":9708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"617332111","text":"import os\nimport gtk\nif os.name == 'nt':\n import winsound\nimport Plugin\nfrom emesenecommon import PATH\ntry: \n import gst\n GSTREAMER = True\nexcept:\n GSTREAMER = False\nclass Sound:\n '''A plugin to play sounds using the available modules on the system'''\n def __init__(self, theme):\n '''class constructor'''\n self.theme = theme\n self.beep = False\n self.command = ''\n self.canPlay = False\n self.canGstreamer = False\n if os.name == \"posix\":\n self.checkAvailability()\n if self.canGstreamer:\n self.player = gst.element_factory_make(\"playbin\", 
\"player\")\n bus = self.player.get_bus()\n bus.enable_sync_message_emission()\n bus.add_signal_watch()\n bus.connect('message', self.gst_on_message)\n else:\n self.canPlay = True\n def gst_on_message(self, bus, message):\n t = message.type\n if t == gst.MESSAGE_EOS:\n self.player.set_state(gst.STATE_NULL)\n def checkAvailability(self):\n if self.beep:\n self.canPlay = True\n elif GSTREAMER:\n self.canPlay = True\n self.canGstreamer = True\n elif self.is_on_path('aplay'):\n self.canPlay = True\n self.command = 'aplay'\n elif self.is_on_path('play'):\n self.canPlay = True\n self.command = 'play'\n def play(self, sound_theme, sound):\n if self.beep:\n gtk.gdk.beep()\n return\n for theme in (sound_theme, 'default'):\n soundPath = PATH.SOUNDS_PATH + os.sep + sound_theme + os.sep + \\\n sound + \".wav\"\n if os.path.exists(soundPath):\n break\n else:\n soundPath = ''\n if not soundPath:\n return\n if os.name == \"nt\":\n winsound.PlaySound(soundPath, \n winsound.SND_FILENAME | winsound.SND_ASYNC)\n elif os.name == \"posix\":\n if self.canGstreamer:\n loc = \"file://\" + soundPath\n self.player.set_property('uri', loc)\n self.player.set_state(gst.STATE_PLAYING) \n else:\n os.popen4(self.command + \" \" + soundPath)\n def getCommand(self):\n return self.command\n def setCommand(self, string):\n self.command = string\n def is_on_path(self, fname):\n for p in os.environ['PATH'].split(os.pathsep):\n if os.path.isfile(os.path.join(p, fname)):\n return True\nclass MainClass(Plugin.Plugin):\n '''Main plugin class'''\n def __init__(self, controller, msn):\n '''Contructor'''\n Plugin.Plugin.__init__(self, controller, msn)\n self.theme = controller.theme\n self.description = _('Play sounds for common events.')\n self.authors = { 'Mark Baas' : 'mark.baas123 at gmail dot com' }\n self.website = 'http://www.emesene.org'\n self.displayName = _('Sound')\n self.name = 'Sound'\n self.sound = Sound(self.theme)\n self.config = controller.config\n self.config.readPluginConfig(self.name)\n self.playOnline = int(self.config.getPluginValue(self.name, \n 'playOnline', '1'))\n self.playMessage = int(self.config.getPluginValue(self.name, \n 'playMessage', '1') )\n self.playNudge = int(self.config.getPluginValue(self.name, \n 'playNudge', '1'))\n self.playInactive = int(self.config.getPluginValue(self.name, \n 'playInactive', '1'))\n self.playSend = int(self.config.getPluginValue(self.name, \n 'playSend', '0'))\n self.disableBusy = int(self.config.getPluginValue(self.name, \n 'disableBusy', '0'))\n self.sound_theme = self.config.getPluginValue(self.name, \n 'theme', 'default')\n self.sound.beep = int(self.config.getPluginValue(self.name, \n 'beep', '0'))\n self.onlineId = None\n self.messageId = None\n self.nudgeId = None\n def start(self):\n '''start the plugin'''\n self.enabled = True\n self.onlineId = self.msn.connect('user-online', self.online)\n self.messageId = self.msn.connect('message-received', self.message)\n self.nudgeId = self.msn.connect('nudge-received', self.nudge)\n self.sendMessageId = self.controller.conversationManager.connect(\n 'send-message', self.send)\n def stop(self): \n '''stop the plugin'''\n self.msn.disconnect(self.onlineId)\n self.msn.disconnect(self.messageId)\n self.msn.disconnect(self.nudgeId)\n self.msn.disconnect(self.sendMessageId)\n self.enabled = False\n def action(self):\n pass \n def check(self):\n '''\n check if everything is OK to start the plugin\n return a tuple whith a boolean and a message\n if OK -> (True, 'some message')\n else -> (False, 'error message')\n '''\n if not 
self.sound.canPlay:\n return (False, _('gstreamer, play and aplay not found.'))\n return (True, 'Ok')\n def online(self, msnp, email, oldStatus):\n self.playOnline = int(self.config.getPluginValue(self.name, \n 'playOnline', '1'))\n self.sound_theme = self.config.getPluginValue(self.name, 'theme', \n 'default')\n if oldStatus == 'FLN' and self.playOnline and self.soundsEnabled():\n self.sound.play(self.sound_theme, 'online')\n def message(self, msnp, email):\n self.playMessage = int(self.config.getPluginValue(self.name, \n 'playMessage', '1') )\n self.sound_theme = self.config.getPluginValue(self.name, 'theme', \n 'default')\n if self.playMessage and self.soundsEnabled():\n result = self.controller.conversationManager\\\n .getOpenConversation(email)\n if self.playInactive and result != None:\n window, conversation = result\n windowFocus = window.is_active()\n tabFocus = (window.conversation == conversation)\n if not (windowFocus and tabFocus):\n self.sound.play(self.sound_theme, 'type')\n else:\n self.sound.play(self.sound_theme, 'type')\n def nudge(self, *args):\n self.playNudge = int(self.config.getPluginValue(self.name, \n 'playNudge', '1'))\n self.sound_theme = self.config.getPluginValue(self.name, \n 'theme', 'default')\n if self.playNudge and self.soundsEnabled():\n self.sound.play(self.sound_theme, 'nudge')\n def send(self, *args):\n self.playSend = int(self.config.getPluginValue(self.name, \n 'playSend', '0'))\n self.sound_theme = self.config.getPluginValue(self.name, \n 'theme', 'default')\n if self.playSend and self.soundsEnabled():\n self.sound.play(self.sound_theme, 'send')\n def soundsEnabled(self):\n '''checks if sounds are enabled'''\n if not self.enabled:\n return False\n if self.disableBusy and self.controller.contacts.get_status() == 'BSY':\n return False\n return True\n def configure(self):\n '''display a configuration dialog'''\n l = []\n themes = os.listdir(PATH.APP_PATH + os.sep + 'sound_themes')\n themes = [x for x in themes if not x.startswith('.')]\n l.append(Plugin.Option('theme', list, _('Theme'), '', \n self.config.getPluginValue(self.name, 'theme', ''), themes))\n l.append(Plugin.Option('playOnline', bool, \n _('Play online sound'), \n _('Play a sound when someone gets online'), \n (self.config.getPluginValue(self.name, 'playOnline', '1') \\\n == '1')))\n l.append(Plugin.Option('playMessage', bool, \n _('Play message sound'), \n _('Play a sound when someone sends you a message'), \n (self.config.getPluginValue(self.name, 'playMessage', '1') \\\n == '1')))\n l.append(Plugin.Option('playNudge', bool, \n _('Play nudge sound'), \n _('Play a sound when someone sends you a nudge'), \n (self.config.getPluginValue(self.name, 'playNudge', '1') \\\n == '1')))\n l.append(Plugin.Option('playSend', bool, \n _('Play sound when you send a message'), \n _('Play sound when you send a message'), \n (self.config.getPluginValue(self.name, 'playSend', '0') \\\n == '1')))\n l.append(Plugin.Option('playInactive', bool, \n _('Only play message sound when window is inactive'), \n _('Play the message sound only when the window is inactive'), \n (self.config.getPluginValue(self.name, 'playInactive', '1') \\\n == '1')))\n l.append(Plugin.Option('disableBusy', bool, \n _('Disable sounds when busy'), \n _('Disable sounds when busy'), \n (self.config.getPluginValue(self.name, 'disableBusy', '1') \\\n == '1')))\n l.append(Plugin.Option('beep', bool, \n _('Use system beep'), \n _('Play the system beep instead of sound files'), \n (self.config.getPluginValue(self.name, 'beep', '0') \\\n == 
'1')))\n response = Plugin.ConfigWindow(_('Config Sound Plugin'), l).run()\n if response != None:\n if response.has_key('playOnline'):\n self.config.setPluginValue(self.name, 'playOnline', \n str(int(response['playOnline'].value)))\n if response.has_key('playMessage'):\n self.config.setPluginValue(self.name, 'playMessage', \n str(int(response['playMessage'].value)))\n if response.has_key('playNudge'):\n self.config.setPluginValue(self.name, 'playNudge', \n str(int(response['playNudge'].value)))\n if response.has_key('playInactive'):\n self.config.setPluginValue(self.name, 'playInactive', \n str(int(response['playInactive'].value)))\n if response.has_key('playSend'):\n self.config.setPluginValue(self.name, 'playSend', \n str(int(response['playSend'].value)))\n if response.has_key('beep'):\n self.config.setPluginValue(self.name, 'beep', \n str(int(response['beep'].value)))\n if response.has_key('theme'):\n self.config.setPluginValue(self.name, 'theme', \n response['theme'].value)\n if response.has_key('disableBusy'):\n self.config.setPluginValue(self.name, 'disableBusy', \n str(int(response['disableBusy'].value)))\n self.playOnline = (self.config.getPluginValue(self.name, \n 'playOnline', '1') == '1')\n self.playMessage = (self.config.getPluginValue(self.name, \n 'playMessage', '1') == '1')\n self.playNudge = (self.config.getPluginValue(self.name, \n 'playNudge', '1') == '1')\n self.playInactive = (self.config.getPluginValue(self.name, \n 'playInactive', '1') == '1')\n self.disableBusy = (self.config.getPluginValue(self.name, \n 'disableBusy', '1') == '1')\n self.sound.beep = int(self.config.getPluginValue(self.name, \n 'beep', '0'))\n return True\n","sub_path":"emesene/rev1286-1505/right-branch-1505/plugins_base/Sound.py","file_name":"Sound.py","file_ext":"py","file_size_in_byte":11461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"268445364","text":"# Attempt to assemble whole plasmids from a metagenomic sample (fasta file)\nimport sys, argparse, time\nimport numpy as np\nimport gzip\n\ndef main():\n parser = argparse.ArgumentParser(description='Assemble a plasmid from a starting sequence and fasta file of reads')\n parser.add_argument(\"start_file\", type=str, help=\"File containing starting sequence(s) for targeted assembly\")\n parser.add_argument(\"read_file\", type=str, help=\"FASTA file of reads to be searched/assembled\")\n parser.add_argument(\"out_file\", type=str, help=\"Base output filename for coverage data and final assembly files\")\n parser.add_argument(\"-o\", \"--overlap\", dest=\"overlap\", default=60, type=int, help=\"Length of match to search for\")\n parser.add_argument(\"-c\", \"--coverage\", dest=\"coverage\", default=1, type=int, help=\"Minimum coverage for adding to assembled sequence\")\n parser.add_argument(\"-p\", \"--paired\", dest=\"paired\", type=str, help=\"Paired read file\")\n args = parser.parse_args()\n\n start_time = time.time()\n\n with open(args.start_file, \"r\") as in_file:\n all_lines = in_file.readlines()\n\n start_seq_f = {}\n start_seq_r = {}\n read_dict = {}\n kmer_dict = {}\n kmer_len = args.overlap\n read_files = [args.read_file]\n NT_dict = {\"A\":\"T\",\"C\":\"G\",\"G\":\"C\",\"T\":\"A\"}\n\n for i in range(0,len(all_lines),2):\n seq = all_lines[i+1].rstrip().upper()\n rev_seq = \"\".join([NT_dict[x] for x in reversed(seq)])\n start_seq_f[all_lines[i].rstrip()] = seq\n start_seq_r[all_lines[i].rstrip()] = rev_seq\n\n if len(seq) < kmer_len:\n print(\"Overlap length changed to %d 
because of minimum start sequence length.\" % (len(seq)))\n kmer_len = len(seq)\n\n if args.paired:\n read_files.append(args.paired)\n\n read_lines = []\n\n for filename in read_files:\n if filename[-3:] == \".gz\":\n with gzip.open(filename, 'rt') as read_file:\n read_lines.extend(read_file.readlines())\n\n else:\n with open(filename, \"r\") as read_file:\n read_lines.extend(read_file.readlines())\n\n if read_lines[0][0] == \"@\":\n n_lines = 4\n elif read_lines[0][0] == \">\":\n n_lines = 2\n else:\n print(\"Read file format not recognized.\")\n exit()\n\n for i in range(1, len(read_lines), n_lines):\n read = read_lines[i].rstrip().upper()\n \n if \"N\" not in read:\n read_dict.setdefault(read, 0)\n read_dict[read] += 1\n rev_read = \"\".join([NT_dict[x] for x in reversed(read)])\n read_dict.setdefault(rev_read, 0)\n read_dict[rev_read] += 1\n\n for read in read_dict.keys():\n for k in range(len(read)-kmer_len):\n kmers = read[k:k+kmer_len] + \" \" + read[k+kmer_len]\n kmer_dict.setdefault(kmers, 0)\n kmer_dict[kmers] += read_dict[read]\n\n read_time = time.time()\n print(\"Dictionary containing %d unique kmers built in %0.2f seconds.\" % (len(kmer_dict.keys()), read_time - start_time))\n\n seq_out = open(args.out_file + \".fa\", \"w\")\n cov_out = open(args.out_file + \".cov\", \"w\")\n\n cov_out.write(\"kmer\\tNT_coverage\\tTotal_coverage\\n\")\n\n final_seq_f = build_seq(start_seq_f, kmer_dict, kmer_len, args.coverage, cov_out)\n final_seq_r = build_seq(start_seq_r, kmer_dict, kmer_len, args.coverage, cov_out)\n\n print(\"Assembly finished in %0.2f seconds.\" % (time.time()-read_time))\n\n for n_seq in final_seq_f.keys():\n rev_seq = \"\".join([NT_dict[x] for x in reversed(final_seq_r[n_seq])])\n seq_out.write(n_seq + \"\\n\")\n seq_out.write(rev_seq[:-kmer_len])\n seq_out.write(final_seq_f[n_seq] + \"\\n\\n\")\n\n seq_out.close()\n\n\n\ndef build_seq(start_seq, kmer_dict, overlap, coverage, coverage_out):\n NTs = [\"A\", \"C\", \"G\", \"T\"]\n final_seq = {}\n \n for seq in start_seq.keys():\n assemble_seq = start_seq[seq]\n curr_cov = coverage\n\n while curr_cov >= coverage:\n curr_cov = 0\n NT_cts = []\n\n for NT in NTs:\n kmer = assemble_seq[-overlap:] + \" \" + NT\n\n try:\n NT_cts.append(kmer_dict[kmer])\n curr_cov += kmer_dict[kmer]\n except:\n NT_cts.append(0)\n\n if max(NT_cts) > 0.5*curr_cov:\n assemble_seq += NTs[np.argmax(NT_cts)]\n coverage_out.write(assemble_seq[-overlap:] + \"\\t\" + str(max(NT_cts)) + \"\\t\" + str(curr_cov) + \"\\n\")\n else:\n coverage_out.write(\"Assembly ended due to divergent sequence.\\n\")\n break\n\n if assemble_seq[:-overlap].find(assemble_seq[-overlap:]) > -1:\n coverage_out.write(\"Assembly ended due to unresolved repeat region.\\n\")\n break\n\n final_seq[seq] = assemble_seq\n\n return final_seq\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/plasmid_read_assembly_3+.py","file_name":"plasmid_read_assembly_3+.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"384845052","text":"# Copyright 2021 Zilliz. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nfrom typing import List, Union\nimport hashlib\n\n\nclass Callstack:\n \"\"\"\n A Callstack object contains the working frames at the moment.\n\n Args:\n ignore (`int`):\n The number of frames to ignore on the top of the callstack.\n \"\"\"\n def __init__(self, ignore: int = 0):\n self.frames = inspect.stack()\n # ignore the frame of Callstack.__init__\n ignore += 1\n if ignore > len(self.frames):\n raise ValueError(f\"ignore = {ignore-1} is out of frame range\")\n del self.frames[0:ignore]\n self.size = len(self.frames)\n\n def num_frames(self) -> int:\n \"\"\"\n Get the number of frames.\n\n Returns:\n (`int`)\n The size of current stack.\n \"\"\"\n return self.size\n\n def find_func(self, func_name: str) -> Union[int, None]:\n \"\"\"\n Given a function name, find the first-matched and outermost frame from current\n stack.\n\n Args:\n func_name (`str`):\n The function name to find.\n\n Returns:\n (`Union[int, None]`)\n If at least one matching frame exits, return the first-matched frame\n index. Else, return None.\n \"\"\"\n for i in range(self.size - 1, -1, -1):\n if self.frames[i].function == func_name:\n return i\n return None\n\n def hash(self, start: int = None, end: int = None, items: List[str] = None) -> str:\n \"\"\"\n Get the hash value of the attributes contained in `items` between index `start`\n and `end` (includes `start`, excludes `end`).\n\n Args:\n start (`int`):\n The index of the start frame.\n end (`int`):\n The index of the end frame.\n items (`List[str]`):\n The items to be hashed. Supported items are\n {filename, lineno, function, code_context, position, lasti}, where\n code_context denotes the current line of code of the context, position\n denotes the frame's index of the callstack, lasti denotes the index of\n last attempted instruction in bytecode.\n\n Returns:\n (`str`)\n The hash value.\n\n Raises:\n (`IndexError`)\n If the args [`start`, `end`) is out of the frame range or `end` less\n than `start`.\n (`ValueError`)\n If an item in `items` is not supported, i.e. 
not one of\n {filename, lineno, function, code_context, position, lasti}.\n \"\"\"\n start = start or 0\n end = end or self.size\n\n if end > self.size or end <= 0 or start >= self.size or start < 0:\n raise IndexError(f\"index range [{start}, {end}) out of frame range\" f\"[0, {self.size})\")\n if start >= end:\n raise IndexError(f\"end = {end} is less than or equal to start = {start}\")\n\n full_item = {\"filename\", \"lineno\", \"function\", \"code_context\", \"position\", \"lasti\"}\n if not set(items).issubset(set(full_item)):\n invalid_item = set(items) - (set(items) & full_item)\n raise ValueError(f\"{invalid_item} not supported\")\n\n md5 = hashlib.md5()\n for i, frame in enumerate(self.frames[start:end]):\n frame_dict = frame._asdict()\n frame_dict[\"position\"] = i + start\n frame_dict[\"lasti\"] = frame_dict[\"frame\"].f_lasti\n frame_dict[\"code_context\"] = (\"\".join(frame_dict[\"code_context\"]) if frame_dict[\"code_context\"] else \"\")\n for item in items:\n md5.update(str(frame_dict[item]).encode(\"utf-8\"))\n return md5.hexdigest()\n","sub_path":"towhee/dag/utils/callstack.py","file_name":"callstack.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"593210782","text":"#coding=utf8\nimport json, sys, os\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nall_ships = json.loads(open('all_ships_info.json','r').read())\nfor ship_info in all_ships:\n\tif ship_info == None:\n\t\tcontinue\n\tif not '改' in ship_info['api_name']:\n\t\tprint('api_id: ' + str(ship_info['api_id']))\n\t\tprint('api_name: ' + ship_info['api_name'])\n\t\tos.system(\"cat ./kancolle_nginx.conf | sed s/ID_TO_REPLACE/%d/ > /usr/local/etc/nginx/servers/test.conf\" % ship_info['api_id'])\n\t\tos.system(\"nginx -s reload\")\n\t\traw_input()\n","sub_path":"change_nginx.py","file_name":"change_nginx.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"367780619","text":"def run():\n line = input()\n direct = {'A':(0, -1),'D':(0, 1),'W':(-1, 0),'S':(1, 0)}\n line = line.split(';')\n loc = [0, 0]\n for w in line:\n if 2<= len(w) <= 3 and w[0] in direct and w[1:].isdigit():\n d = int(w[1:])\n print(w)\n p = w[0]\n dx, dy = d*direct[p][0], d*direct[p][1]\n loc = [loc[0]+dx, loc[1]+dy]\n print('{},{}'.format(loc[0], loc[1]))\n return \n\nif __name__ == \"__main__\":\n run()","sub_path":"华为笔试/Huawei-坐标移动.py","file_name":"Huawei-坐标移动.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"12972839","text":"#coding=utf-8\n#author=godpgf\n\nimport abc\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom six import with_metaclass, string_types\nimport os\n\nfrom .data_source import LocalDataSource\nfrom .bar import *\nfrom .data_reader import date2long\n\n\nclass DataProxy(with_metaclass(abc.ABCMeta)):\n @abc.abstractmethod\n def get_bar(self, order_book_id, dt):\n \"\"\"得到从dt时间开始的股票数据\n\n :param str order_book_id:\n :param datetime.datetime dt:\n :returns: bar object\n :rtype: BarObject\n\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_dividends_by_book_date(self, order_book_id, date):\n \"\"\"得到股票分红信息\n\n :param str order_book_id:\n :param datetime.datetime date:\n :returns: dividend\n :rtype: pd.Series\n\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def history(self, order_book_id, dt, 
bar_count, frequency, field):\n \"\"\"得到从dt开始的bar_count条历史数据\n\n :param str order_book_id:\n :param datetime dt:\n :param int bar_count:\n :param str frequency: '1d' or '1m'\n :param str field: \"open\", \"close\", \"high\", \"low\", \"volume\", \"last\", \"total_turnover\"\n :returns:\n :rtype: pandas.DataFrame\n\n \"\"\"\n raise NotImplementedError\n\n def last(self, order_book_id, dt, bar_count, frequency, field):\n \"\"\"get history data, will not fill empty data\n\n :param str order_book_id:\n :param datetime dt:\n :param int bar_count:\n :param str frequency: '1d' or '1m'\n :param str field: \"open\", \"close\", \"high\", \"low\", \"volume\", \"last\", \"total_turnover\"\n :returns:\n :rtype: pandas.DataFrame\n\n \"\"\"\n raise NotImplementedError\n\n\nclass LocalDataProxy(DataProxy):\n \"\"\"初始化股票信息读取代理\n :param str cache_path:缓存地址\n :param is_offline:是否离线\n \"\"\"\n def __init__(self, cache_path=None, is_offline=False, min_date=\"1995-04-24\"):\n self._cache_path = cache_path\n self._is_offline = False if cache_path is None else is_offline\n # 缓存对齐后的bar数据\n self._cache_alignment = {}\n # 缓存原始bar数据\n self._cache_source = {}\n self._trading_days = {}\n self._data_source = LocalDataSource()\n self.min_date = min_date\n self.trading_calender = None\n self.load_market()\n\n def load_market(self, is_real_time=False):\n self.trading_calender = None\n market_data = self.get_all_data('sh000001', is_real_time)\n if market_data is None:\n return\n market_data = market_data[np.where(market_data['volume'] > 0)]\n self._data_source.init_trading_dates(market_data['date'])\n trading_calendar = self.get_trading_dates(self.min_date, datetime.date.today())\n trading_calender_int = np.array(\n [int(t.strftime(\"%Y%m%d\")) for t in trading_calendar], dtype=\"= min_date_int)]\n bars = self._fill_all_bars(bars)\n self._cache_alignment[order_book_id] = bars\n\n return bars\n\n @classmethod\n def merge_data(cls, bars, days=5):\n reversed_bar = bars[::-1]\n date = []\n open = []\n high = []\n low = []\n close = []\n price = []\n volume = []\n turnover = []\n\n for i in range(0, len(reversed_bar) - days, days):\n date.append(reversed_bar[\"date\"][i])\n open.append(reversed_bar[\"open\"][i + days - 1])\n h = reversed_bar[\"high\"][i]\n l = reversed_bar[\"low\"][i]\n v = reversed_bar[\"volume\"][i]\n t = reversed_bar[\"turnover\"]\n for j in range(1, days):\n h = max(h, reversed_bar[\"high\"][i + j])\n l = min(l, reversed_bar[\"low\"][i + j])\n v += reversed_bar[\"volume\"][i + j]\n t += reversed_bar[\"turnover\"][i + j]\n high.append(h)\n low.append(l)\n close.append(reversed_bar[\"close\"][i])\n price.append(reversed_bar[\"price\"][i])\n volume.append(v)\n turnover.append(t)\n data = np.array([date, open, high, low, close,\n price, volume, turnover]).T\n data = [tuple(d.tolist()) for d in data]\n\n stocktype = np.dtype([\n ('date', 'uint64'), ('open', 'float32'),\n ('high', 'float32'), ('low', 'float32'),\n ('close', 'float32'), ('price', 'float32'),\n ('volume', 'uint64'), ('turnover', 'float32')\n ])\n return np.array(data, dtype=stocktype)[::-1]\n\n def get_trading_days(self, order_book_id):\n try:\n days = self._trading_days[order_book_id]\n return days\n except KeyError:\n self.get_all_data(order_book_id)\n return self.get_trading_days(order_book_id)\n\n def get_table(self, order_book_id):\n bars = self.get_all_data(order_book_id).copy()\n def int2date(date):\n from .data_reader import _2str\n year = int(date / 10000)\n month = int((date - year * 10000) / 100)\n day = int(date - year * 10000 - month * 
100)\n return '%s-%s-%s'%('%d'%year,_2str(month),_2str(day))\n date_col = bars[\"date\"]\n index = [pd.Timestamp(int2date(date)) for date in date_col]\n data = [[bars[\"open\"][i],bars[\"high\"][i],bars[\"low\"][i],bars[\"close\"][i],bars[\"price\"][i],bars['volume'][i], bars['turnover'][i]] for i in range(len(index))]\n return pd.DataFrame(np.array(data),index,columns=[\"Open\",\"High\",\"Low\",\"Close\",\"Price\",'Volume','Turnover'])\n\n def get_bar(self, order_book_id, dt):\n bars = self.get_all_data(order_book_id)\n\n if isinstance(dt, string_types):\n dt = pd.Timestamp(dt)\n\n dt = convert_date_to_int(dt)\n return BarObject(bars[bars[\"date\"].searchsorted(dt)])\n\n def history(self, order_book_id, dt, bar_count, frequency, field):\n if frequency == '1m':\n raise RuntimeError('Minute bar not supported yet!')\n\n bars = self.get_all_data(order_book_id)\n\n dt = convert_date_to_int(dt)\n\n i = bars[\"date\"].searchsorted(dt)\n if i == len(bars[\"date\"]) or bars[\"date\"][i] != dt:\n i -= 1\n left = i - bar_count + 1 if i >= bar_count else 0\n bars = bars[left:i + 1]\n\n series = pd.Series(bars[field], index=[convert_int_to_date(t) for t in bars[\"date\"]])\n\n return series\n\n def last(self, order_book_id, dt, bar_count, frequency, field):\n if frequency == '1m':\n raise RuntimeError('Minute bar not supported yet!')\n\n try:\n bars = self._origin_cache[order_book_id]\n except KeyError:\n bars = self._data_source.get_all_bars(order_book_id)\n bars = bars[bars[\"volume\"] > 0]\n self._origin_cache[order_book_id] = bars\n\n dt = convert_date_to_int(dt)\n\n i = bars[\"date\"].searchsorted(dt)\n left = i - bar_count + 1 if i >= bar_count else 0\n hist = bars[left:i + 1][field]\n\n return hist\n\n def get_dividends_by_book_date(self, order_book_id, date):\n #暂时不考虑股息和分红\n return None\n\n def get_trading_dates(self, start_date, end_date):\n return self._data_source.get_trading_dates(start_date, str(end_date))\n\n def _fill_all_bars(self, bars):\n if self.trading_calender is None:\n return bars\n trading_calender_int = self.trading_calender\n\n # prepend\n start_index = trading_calender_int.searchsorted(bars[0][\"date\"])\n prepend_date = trading_calender_int[:start_index]\n prepend_bars = np.zeros(len(prepend_date), dtype=bars.dtype)\n dates = prepend_bars[\"date\"]\n dates[:] = prepend_date\n prepend_bars[\"open\"].fill(bars[0][\"open\"])\n prepend_bars[\"close\"].fill(bars[0][\"open\"])\n prepend_bars[\"high\"].fill(bars[0][\"open\"])\n prepend_bars[\"low\"].fill(bars[0][\"open\"])\n prepend_bars[\"price\"].fill(bars[0][\"open\"])\n\n # midpend\n last_index = trading_calender_int.searchsorted(bars[-1][\"date\"])\n midpend_date = trading_calender_int[start_index: last_index + 1]\n\n midpend_bars = np.zeros(len(midpend_date), dtype=bars.dtype)\n bars_index = bars[\"date\"].searchsorted(midpend_date[0])\n for i in range(len(midpend_bars)):\n if bars[bars_index][\"date\"] == midpend_date[i]:\n midpend_bars[i] = bars[bars_index]\n bars_index += 1\n else:\n data = (midpend_date[i], bars[bars_index - 1][\"close\"], bars[bars_index - 1][\"close\"], bars[bars_index - 1][\"close\"], bars[bars_index - 1][\"close\"], bars[bars_index - 1][\"close\"], 0, 0)\n midpend_bars[i] = data\n\n # append\n append_date = trading_calender_int[last_index + 1:]\n append_bars = np.zeros(len(append_date), dtype=bars.dtype)\n dates = append_bars[\"date\"]\n dates[:] = append_date\n append_bars[\"open\"].fill(bars[-1][\"close\"])\n append_bars[\"close\"].fill(bars[-1][\"close\"])\n 
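# NOTE: the appended calendar rows flatline at the last known close with zero volume and turnover, so the padding never fabricates price movement.\n 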
append_bars[\"high\"].fill(bars[-1][\"close\"])\n append_bars[\"low\"].fill(bars[-1][\"close\"])\n append_bars[\"price\"].fill(bars[-1][\"close\"])\n\n # fill bars\n new_bars = np.concatenate([prepend_bars, midpend_bars, append_bars])\n return new_bars\n","sub_path":"stdb/data_accessor.py","file_name":"data_accessor.py","file_ext":"py","file_size_in_byte":13486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"617424882","text":"from menu import *\nimport sys\nfrom Vehicle import *\n\n\nif sys.version_info[0] == 2:\n\tinput = raw_input\n\n\nclass ParkingLot:\n\n def __init__(self):\n self.size = 0\n self.slot_no = 0\n self.total_parked_slots = 0\n\n\n def create_Parking_Lot(self, size):\n self.slots = [0] * size\n self.size = size\n return self.size\n\n\n def get_nearest_slot(self):\n for slot in range(self.size):\n if self.slots[slot] == 0:\n return slot\n\n\n def park_car(self,registration_no, color):\n if self.total_parked_slots < self.size: \n slot_no = self.get_nearest_slot()\n self.slots[slot_no] = Car(registration_no, color)\n self.slot_no = self.slot_no+1\n self.total_parked_slots = self.total_parked_slots + 1\n return slot_no + 1\n else:\n return -1\n\n\n def no_of_empty_slots(self):\n return (self.size - self.total_parked_slots)\n\n\n def leave(self,slot_no):\n\n if self.total_parked_slots > 0 and self.slots[slot_no-1] != 0:\n self.slots[slot_no - 1] = 0\n self.total_parked_slots = self.total_parked_slots - 1\n return \"Left\"\n else:\n return \"Already Empty\"\n\n\n def view_parked_cars(self):\n print(\"Slot No.\\tRegistration No.\\tColour\")\n for i in range(self.size):\n if self.slots[i] != 0:\n print(f\"{i+1} \\t\\t {self.slots[i].reg_no} \\t\\t {self.slots[i].color}\")\n else:\n print(f\"{i+1} \\t\\t ((EMPTY))\\t\\t((EMPTY))\")\n\n\n def get_registration_no_by_color(self,color):\n\n registration_nos = []\n for i in self.slots:\n\n if i == 0:\n continue\n\n if i.color.lower() == color.lower():\n registration_nos.append(i.reg_no)\n\n return registration_nos\n\n\n def slot_numbers_by_reg_no(self,registration_no):\n \n for i in range(self.size):\n if self.slots[i].reg_no == registration_no:\n return i+1\n else:\n continue\n return -1\n \n\n def slot_numbers_by_color(self,color):\n \n slot_numbers = []\n\n for i in range(self.size):\n\n if self.slots[i] == 0:\n continue\n if self.slots[i].color.lower() == color.lower():\n slot_numbers.append(str(i+1))\n return slot_numbers\n\n\n def Execute(self,line):\n\n if line.startswith('create_parking_lot'):\n n = int(line.split(\" \")[1])\n name = \" \".join(line.split(\" \")[2:])\n res = self.create_Parking_Lot(n)\n print(f\"Parking lot {name} created with capacity of {n} vehicles\")\n\n elif line.startswith(\"remaining_slots\"):\n print(f\"There are {self.no_of_empty_slots()} empty slots available\")\n\n elif line.startswith(\"nearest_empty_slot\"):\n print(f\"Nearest available slot is {self.get_nearest_slot()+1}\")\n\n elif line.startswith(\"park\"):\n registration_no = line.split(\" \")[1]\n color = line.split(\" \")[2]\n slot = self.park_car(registration_no,color)\n if slot == -1:\n print(\"Parking lot is full, can't accomodate more vehicles... 
SORRY!\")\n else:\n print(f\"Please park your car in allocated slot : {slot}\")\n\n elif line.startswith(\"view_parked_cars\"):\n self.view_parked_cars()\n\n elif line.startswith(\"registration_numbers_for_cars_with_colour\"):\n color = line.split(\" \")[1]\n registration_nos = self.get_registration_no_by_color(color)\n print(', '.join(registration_nos))\n\n elif line.startswith(\"slot_numbers_for_cars_with_colour\"):\n color = line.split(\" \")[1]\n slot_numbers = self.slot_numbers_by_color(color)\n print(', '.join(slot_numbers))\n\n elif line.startswith(\"leave\"):\n leave_slot = int(line.split(\" \")[1])\n status = self.leave(leave_slot)\n\n if status == \"Left\":\n print(f\"Vacated slot number {leave_slot}\")\n else:\n print(f\"The slot number {leave_slot} is already Vacant\")\n\n elif line.startswith(\"slot_number_for_registration_number\"):\n registration_no = line.split(\" \")[1]\n slot_no = self.slot_numbers_by_reg_no(registration_no)\n if slot_no == -1:\n print(\"No such car is Parked here\")\n else:\n print(f\"The car with registration no. {registration_no} is parked in slot {slot_no}\")\n \n elif line.startswith(\"exit\"):\n print(\"Thank you, for using our Parking station, Visit Again!!!\")\n exit(0)\n\n elif line.startswith(\"menu\"):\n Menu()","sub_path":"Projects/Parkinglot/Parking lot submitted/Parkinglot.py","file_name":"Parkinglot.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"136912856","text":"import os\n\nimport numpy as np\n# import matplotlib.pyplot as plt\nfrom sklearn.ensemble import IsolationForest\n\n\n# required Format: csv file\n# first row is description, others are data\ndef load_data(filename: str):\n if not os.path.exists(filename):\n print(filename, \"Not Exists!\")\n return None\n elif not os.path.isfile(filename):\n print(filename, \"Is Not File!\")\n return None\n try:\n data = np.loadtxt(filename, delimiter=\",\", skiprows=0)\n except Exception as e:\n print(\"Error When Read \", filename, \":\", e)\n return None\n else:\n return data\n\n\ndef init_forest(data: np.array):\n if data is None:\n return None\n i_forest = IsolationForest()\n i_forest.fit(data)\n return i_forest\n\n\ndef predict(isolation_forest: IsolationForest, data: np.array):\n if isolation_forest is None or data is None:\n return None\n return isolation_forest.predict(data)\n\n\nif __name__ == \"__main__\":\n path = r\"/Users/autumnsun/IdeaProjects/sklearn_test/test.csv\"\n my_data = load_data(path)\n my_forest = init_forest(my_data)\n print(predict(my_forest, my_data))\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"401951386","text":"# coding: utf-8\n\"\"\"Modulo para visualizar topicos.\"\"\"\nimport os\nimport time\n\nimport gensim\nimport pyLDAvis\nimport pyLDAvis.gensim\n\n\ndef prepare_visdata(fd, n, lb, dirin):\n \"\"\"\n Prepara data para visualizacion de topicos\n\n :param fd: str (tipo de freqdist)\n :param n: int (numero de topicos)\n :param lb: str (label de tipo de sentimiento)\n :param dirin: str (directorio de entrada)\n\n :return: PreparedData (segun pyLDAvis)\n \"\"\"\n dictf = os.path.join(dirin, 'dict-{f}-{s}.dict'.format(f=fd, s=lb))\n dictionary = gensim.corpora.Dictionary.load(dictf)\n print('dict-{f}-{s}.dict : '.format(f=fd, s=lb), dictionary)\n\n bowmm = os.path.join(dirin, 'bow-{f}-{s}.mm'.format(f=fd, s=lb))\n 
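# gensim's MmCorpus streams the serialized bag-of-words matrix from disk on demand, so the corpus never has to fit in memory at once.\n 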
bowcorpus = gensim.corpora.MmCorpus(bowmm)\n print('bow-{f}-{s}.mm : '.format(f=fd, s=lb), bowcorpus)\n\n ldaf = os.path.join(dirin, 'model-{f}-{n}-{s}.lda'.format(f=fd, n=n, s=lb))\n ldamodel = gensim.models.LdaModel.load(ldaf)\n print('model-{f}-{n}-{s}.lda : '.format(f=fd, n=n, s=lb), ldamodel)\n\n data = pyLDAvis.gensim.prepare(ldamodel, bowcorpus, dictionary)\n\n return data\n\n\ndef main():\n \"\"\"Unificar en main para poder ejecutar despues desde otro script.\"\"\"\n time_start = time.time()\n dir_curdir = os.path.abspath('.')\n dir_in = os.path.join(dir_curdir, 'topicmodels')\n dir_out = os.path.join(dir_curdir, 'visualtopics')\n os.makedirs(dir_out, exist_ok=True)\n labels = ['mejora', 'deterioro']\n\n for fd in ['unifd', 'bifd', 'trifd']:\n for n in (10, 25):\n for lb in labels:\n data = prepare_visdata(fd, n, lb, dir_in)\n html = os.path.join(dir_out,\n '{f}-{n}-{s}.html'.format(f=fd, n=n, s=lb))\n\n pyLDAvis.save_html(data, html)\n\n time_end = time.time()\n secs = time_end - time_start\n print('Total: {m:.2f} minutos'.format(m=secs / 60))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"rss/visualtopics.py","file_name":"visualtopics.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"266092845","text":"from rest_framework import serializers\n\nfrom drone_categories.models import DroneCategory\n\nclass DroneCategorySerializer(serializers.ModelSerializer):\n drones = serializers.SlugRelatedField(\n many=True,\n read_only=True,\n slug_field='name'\n )\n class Meta:\n model = DroneCategory\n fields = (\n 'pk', 'name', 'drones'\n )\n","sub_path":"backend/drone_categories/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"557186077","text":"n=input()\nn=int(n)\nls=input().split(\" \")\nprint(ls)\n#ls=[int(ls[i]) for i in range(n)]\ncount=0\nfor x in ls:\n if x=='0':\n count=count+1\nif count==0:\n print(-1)","sub_path":"Code/CodeRecords/2835/60648/239001.py","file_name":"239001.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"91669266","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\nimport Chapter4.DataSolver.datasolver as da\r\n\r\n# get the data from the json file\r\nlist_train = da.getInput()\r\nx_train = np.array(list_train[0:1100])\r\nx_test = np.array(list_train[1100:len(list_train)+1])\r\n\r\nlist_test = da.getOutput()\r\ny_train = np.array(list_test[0:1100])\r\ny_test = np.array(list_test[1100:len(list_test)+1])\r\n\r\nlist = da.getsizeIn()\r\nlength_input=list[1]\r\nsize_input=list[0]\r\nlength_output=da.getsizeOut()\r\nlength_output=length_output[0]\r\ndic=da.getdic()\r\n# Build the mlp model\r\ndim=length_input*size_input\r\nx_train=x_train.reshape(x_train.shape[0],dim)\r\nx_test=x_test.reshape(x_test.shape[0],dim)\r\n\r\ndef clearzeros(listA):\r\n list=[]\r\n for i in range(0,listA.shape[0]):\r\n list2=[]\r\n for j in listA[i]:\r\n if j != 0:\r\n list2.append(j)\r\n list.append(list2)\r\n return list\r\nx_train=clearzeros(x_train)\r\nx_test=clearzeros(x_test)\r\nx_train=pad_sequences(x_train,dtype=float, maxlen=dim)/len(da.dic)\r\nx_test=pad_sequences(x_test,dtype=float, 
maxlen=dim)/len(da.dic)\r\n\r\ny_train=pad_sequences(y_train, maxlen=length_output*3)/len(da.dic)\r\ny_test=pad_sequences(y_test,maxlen=length_output*3)/len(da.dic)\r\n\r\nnum_features = len(dic)\r\nembedding_dimension = 100\r\n\r\nfilter_sizes=[3,4,5]\r\ndef convolution():\r\n inn = layers.Input(shape=(dim, embedding_dimension, 1))\r\n cnns = []\r\n for size in filter_sizes:\r\n conv = layers.Conv2D(filters=length_output, kernel_size=(size, embedding_dimension),\r\n strides=1, padding='valid', activation='relu')(inn)\r\n pool = layers.MaxPool2D(pool_size=(dim-size+1, 1), padding='valid')(conv)\r\n cnns.append(pool)\r\n outt = layers.concatenate(cnns)\r\n\r\n model = keras.Model(inputs=inn, outputs=outt)\r\n return model\r\n\r\ndef cnn_mulfilter():\r\n model = keras.Sequential([\r\n layers.Embedding(input_dim=num_features, output_dim=embedding_dimension,\r\n input_length=dim),\r\n layers.Reshape((dim, embedding_dimension, 1)),\r\n convolution(),\r\n layers.Flatten(),\r\n layers.Dense(length_output*3, activation='relu'),\r\n layers.Dropout(0.2),\r\n layers.Dense(length_output*3, activation='sigmoid')\r\n\r\n ])\r\n model.compile(optimizer=keras.optimizers.Adam(),\r\n loss=keras.losses.BinaryCrossentropy(),\r\n metrics=['accuracy'])\r\n return model\r\n\r\nmodel = cnn_mulfilter()\r\nmodel.summary()\r\n\r\nhistory = model.fit(x_train, y_train, batch_size=64, epochs=5, validation_split=0.1)\r\nmodel.evaluate(x_test , y_test)\r\nplt.plot(history.history['accuracy'])\r\nplt.plot(history.history['val_accuracy'])\r\nplt.legend(['training', 'valiation'], loc='upper left')\r\nplt.show()\r\n\r\n# test the ingredient : test_dish=['Sugar','Fish','Vinegar','Prickly Pepper','Soy Sauce','Chili','Tomato']\r\ntest_dish=['白糖','鲫鱼','醋','生抽','辣椒','西红柿']\r\nprint(test_dish)\r\nlist=[]\r\nfor i in test_dish:\r\n list.append(da.myencode(dic,i))\r\ntest_dish=list\r\ntest_dish=pad_sequences(test_dish,dtype=float, maxlen=dim)/len(dic)\r\npredictions = model.predict(test_dish)*len(da.dic)\r\npredictions=np.around(predictions,decimals=0)\r\nlist2=predictions.tolist()\r\n# print(list2[0])\r\n# print(type(list2[0][0]))\r\nlist3=[]\r\nfor number in list2[0]:\r\n if number>0 and number0 and number', value)\n if isinstance(value,dict):\n for keyb, valueb in value.items():\n if keyb == 'children':\n if len(valueb) > 0:\n url = valueb[0]['url']\n dateadd = valueb[0]['date_added']\n dateaddconv = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=int(dateadd))\n name = valueb[0]['name']\n typed = valueb[0]['type']\n flag = 1\n if keyb == 'name' and flag == 1:\n flag = 0\n parent = valueb\n data_list.append((url, dateaddconv, name, parent, typed))\n\n report.write_artifact_data_table(data_headers, data_list, file_found)\n report.end_artifact_report()\n #else:\n # logfunc('No Chrome Login Data available')\n \n \n return\n\n","sub_path":"scripts/artifacts/chromeBookmarks.py","file_name":"chromeBookmarks.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"172027742","text":"# Create your views here.\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.template import RequestContext, loader\nfrom django.utils import timezone\nfrom subprocess import check_output\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.contrib.auth import 
authenticate\nimport json\n\nfrom .models import *\nfrom .forms import *\nfrom .puzzle import *\nfrom .redis import *\n\ndef is_admin(request):\n if request.user.is_authenticated():\n if request.user.username in settings.ADMIN_ACCTS:\n return True\n return False\n\n# All static file requests are routed through here with file_path resembling:\n# huntserver/puzzles/001.pdf or admin/js/somefile.js etc...\ndef protected_static(request, file_path):\n allowed = False\n levels = file_path.split(\"/\")\n # The only files we really have to protect are in huntserver/puzzles/*\n if(len(levels) > 2 and levels[0] == \"huntserver\" and levels[1] == \"puzzles\"):\n if request.user.is_authenticated():\n puzzle_id = levels[2][0:3]\n puzzle = get_object_or_404(Puzzle, puzzle_id=puzzle_id)\n team = Team.objects.get(login_info=request.user);\n # Only allowed access to the image if the puzzle is unlocked\n # TODO: add condition for hunt is over.\n if puzzle in team.unlocked.all():\n allowed = True\n # At the moment, if it's not a puzzle file, it's allowed\n else:\n allowed = True\n\n if allowed:\n response = HttpResponse()\n url = '/static/' + file_path\n # let nginx determine the correct content type \n response['Content-Type']=\"\"\n # This is what lets django access the normally restricted /static/\n response['X-Accel-Redirect'] = url\n return response\n \n return HttpResponseNotFound('
Page not found
\n elif(request.POST.get(\"existing\")):\n form = RegistrationForm(request.POST)\n if form.is_valid():\n team = curr_hunt.team_set.get(team_name=form.cleaned_data[\"team_name\"])\n # Make sure there is room on the team\n if(len(team.person_set.all()) < team.hunt.team_size):\n p = Person.objects.create(first_name = form.cleaned_data['first_name'], \n last_name = form.cleaned_data['last_name'], \n email = form.cleaned_data['email'], \n phone = form.cleaned_data['phone'], \n comments = \"Dietary Restrictions: \" + form.cleaned_data['dietary_issues'], team = team)\n return HttpResponse('success')\n else:\n return HttpResponse('fail')\n else:\n # Standard rendering of registration page\n form = RegistrationForm()\n teams = curr_hunt.team_set.all().exclude(team_name=\"Admin\").order_by('pk')\n return render(request, \"registration.html\", {'form': form, 'teams': teams})\n\n@login_required\ndef hunt(request, hunt_num):\n hunt = get_object_or_404(Hunt, hunt_number=hunt_num)\n team = Team.objects.get(login_info=request.user)\n \n # Admins get all access, wrong teams/early lookers get an error page\n # real teams get appropriate puzzles, and puzzles from past hunts are public\n if(is_admin(request)):\n puzzle_list = hunt.puzzle_set.all()\n # Hunt has not yet started\n elif(hunt.is_locked):\n return render(request, 'not_released.html', {'reason': \"locked\"})\n # Hunt has started\n elif(hunt.is_open):\n # see if the team does not belong to the hunt being accessed\n if(team.hunt != hunt):\n return render(request, 'not_released.html', {'reason': \"team\"})\n else:\n puzzle_list = team.unlocked.filter(hunt=hunt)\n # Hunt is over\n elif(hunt.is_public):\n puzzle_list = hunt.puzzle_set.all()\n # How did you get here?\n else:\n return render(request, 'access_error.html')\n \n puzzles = sorted(puzzle_list, key=lambda p: p.puzzle_number)\n\n context = {'puzzles': puzzles, 'team': team}\n \n # Each hunt should have a main template named hunt#.html (ex: hunt3.html)\n return render(request, 'hunt' + str(hunt_num) + '.html', context)\n\n\n@login_required\ndef index(request):\n return hunt(request, settings.CURRENT_HUNT_NUM)\n\n\n@login_required\ndef puzzle(request, puzzle_id):\n puzzle = get_object_or_404(Puzzle, puzzle_id__iexact=puzzle_id)\n team = Team.objects.get(login_info=request.user);\n\n # Create submission object and then rely on puzzle.py->respond_to_submission\n # for automatic responses.\n if request.method == 'POST':\n form = AnswerForm(request.POST)\n if form.is_valid():\n user_answer = form.cleaned_data['answer']\n s = Submission.objects.create(submission_text = user_answer, \n puzzle = puzzle, submission_time = timezone.now(), team = team)\n respond_to_submission(s)\n\n return redirect('huntserver:puzzle', puzzle_id=puzzle_id)\n\n else:\n curr_hunt = Hunt.objects.get(hunt_number=settings.CURRENT_HUNT_NUM)\n # Only allowed access if the hunt is public or if unlocked by team\n if(puzzle.hunt.is_public or puzzle in team.unlocked.all()):\n submissions = puzzle.submission_set.filter(team=team).order_by('pk')\n form = AnswerForm()\n # Directory for puzzle PNGs\n # TODO: what do we do if this doesn't exist\n directory = \"/home/hunt/puzzlehunt_server/static/huntserver/puzzles\"\n file_str = directory + \"/\" + puzzle.puzzle_id + \".pdf\"\n # Ideally this should be done some other way to reduce command calls\n print(\"pdfinfo \" + file_str + \" | grep Pages | awk '{print $2}'\")\n pages = int(check_output(\"pdfinfo \" + file_str + \" | grep Pages | awk '{print $2}'\", shell=True))\n context = {'form': 
form, 'pages': range(pages), 'puzzle': puzzle, \n 'submission_list': submissions}\n return render(request, 'puzzle.html', context)\n else:\n return render(request, 'access_error.html')\n\n\n@login_required\ndef queue(request):\n if(not is_admin(request)):\n return render(request, 'access_error.html')\n\n # Process admin responses to submissions\n if request.method == 'POST':\n form = SubmissionForm(request.POST)\n if form.is_valid():\n response = form.cleaned_data['response']\n s = Submission.objects.get(pk=form.cleaned_data['sub_id'])\n s.response_text = response\n s.save()\n # Update relevant parties\n send_submission_update(s)\n\n return redirect('huntserver:queue')\n \n else: \n hunt = Hunt.objects.get(hunt_number=settings.CURRENT_HUNT_NUM)\n submissions = Submission.objects.filter(puzzle__hunt=hunt).order_by('pk')\n form = SubmissionForm()\n context = {'form': form, 'submission_list': submissions}\n return render(request, 'queue.html', context)\n\n\n@login_required\ndef progress(request):\n if(not is_admin(request)):\n return render(request, 'access_error.html')\n\n # Admin unlocking a puzzle\n if request.method == 'POST':\n form = UnlockForm(request.POST)\n if form.is_valid():\n t = Team.objects.get(pk=form.cleaned_data['team_id'])\n p = Puzzle.objects.get(puzzle_id=form.cleaned_data['puzzle_id'])\n Unlock.objects.create(team=t, puzzle=p, time=timezone.now())\n send_status_update(p, t, \"unlock\")\n t.save()\n return redirect('huntserver:progress')\n\n else:\n curr_hunt = Hunt.objects.get(hunt_number=settings.CURRENT_HUNT_NUM)\n teams = curr_hunt.team_set.all().order_by('team_name')\n puzzles = curr_hunt.puzzle_set.all().order_by('puzzle_number')\n # An array of solves, organized by team then by puzzle\n # This array is essentially the grid on the progress page\n # The structure is messy, it was built part by part as features were added\n sol_array = []\n for team in teams:\n # Basic team information for row headers\n # The last element ('cells') is an array of the row's data\n sol_array.append({'team':team, 'num':len(team.solved.all()), 'cells':[]})\n # What goes in each cell (item in \"cells\") is based on puzzle status\n for puzzle in puzzles:\n # Solved => solve object and puzzle id\n if(puzzle in team.solved.all()):\n sol_array[-1]['cells'].append([team.solve_set.filter(puzzle=puzzle)[0], puzzle.puzzle_id])\n # Unlocked => Identify as unlocked, puzzle id, and unlock time\n elif(puzzle in team.unlocked.all()): \n unlock_time = team.unlock_set.filter(puzzle=puzzle)[0].time\n sol_array[-1]['cells'].append([\"unlocked\", puzzle.puzzle_id, unlock_time])\n # Locked => Identify as locked and puzzle id\n else:\n sol_array[-1]['cells'].append([\"locked\", puzzle.puzzle_id])\n context = {'puzzle_list':puzzles, 'team_list':teams, 'sol_array':sol_array}\n return render(request, 'progress.html', context)\n\n@login_required\ndef charts(request):\n if(not is_admin(request)):\n return render(request, 'access_error.html')\n\n curr_hunt = Hunt.objects.get(hunt_number=settings.CURRENT_HUNT_NUM)\n puzzles = curr_hunt.puzzle_set.all().order_by('puzzle_number')\n #submissions = Submission.objects.filter(puzzle__hunt=curr_hunt).all().order_by('submission_time')\n #solves = Solve.objects.filter(puzzle__curr_hunt).all().order_by(\"submission__submission_time\")\n teams = curr_hunt.team_set.all().order_by(\"team_name\")\n puzzle_info_dicts = []\n for puzzle in puzzles:\n puzzle_info_dicts.append({\n \"name\": puzzle.puzzle_name,\n \"locked\": curr_hunt.team_set.count()-puzzle.unlocked_for.count(),\n 
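# together with the locked count above, the next two entries partition every team for this puzzle: not yet unlocked, unlocked but unsolved, and solved.\n 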
\"unlocked\": puzzle.unlocked_for.count() - puzzle.solved_for.count(),\n \"solved\": puzzle.solved_for.count()\n })\n# submission_dicts = []\n# for submission in submissions:\n# print(submission.submission_text)\n# solve_dicts = []\n# for team in teams:\n# team_solves = team.solve_set.all().order_by(\"submission__submission_time\")\n# for solve in team_solves:\n \n context = {'data1_list':puzzle_info_dicts}\n return render(request, 'charts.html', context)\n\n@login_required\ndef chat(request):\n if request.method == 'POST':\n if(request.POST.get('team_pk') != \"\"):\n m = Message.objects.create(time=timezone.now(), text=request.POST.get('message'),\n is_response=(request.POST.get('is_response')==\"true\"),\n team=Team.objects.get(pk=request.POST.get('team_pk')))\n send_chat_message(m)\n return redirect('huntserver:chat')\n else:\n team = Team.objects.get(login_info=request.user)\n messages = Message.objects.filter(team=team).order_by('time')\n message_list = []\n for message in messages:\n message_list.append({'time': message.time, 'text':message.text,\n 'team':message.team, 'is_response': message.is_response})\n return render(request, 'chat.html', {'messages': message_list, 'team':team})\n\n@login_required\ndef unlockables(request):\n team = Team.objects.get(login_info=request.user)\n unlockables = Unlockable.objects.filter(puzzle__in=team.solved.all())\n return render(request, 'unlockables.html', {'unlockables': unlockables})\n\n@login_required\ndef admin_chat(request):\n if(not is_admin(request)):\n return render(request, 'access_error.html')\n\n curr_hunt = Hunt.objects.get(hunt_number=settings.CURRENT_HUNT_NUM)\n messages = Message.objects.filter(team__hunt=curr_hunt).order_by('team', 'time')\n message_list = []\n for message in messages:\n message_list.append({'time': message.time, 'text':message.text,\n 'team':{'pk': message.team.pk, 'name': message.team.team_name},\n 'is_response': message.is_response})\n return render(request, 'staff_chat.html', {'messages': message_list})\n\n# Not actually a page, just various control functions\n@login_required\ndef control(request):\n if(not is_admin(request)):\n return render(request, 'access_error.html')\n \n curr_hunt = Hunt.objects.get(hunt_number=settings.CURRENT_HUNT_NUM)\n teams = curr_hunt.team_set.all().order_by('team_name')\n if request.GET.get('initial'):\n for team in teams:\n unlock_puzzles(team)\n return redirect('huntserver:progress')\n elif request.GET.get('reset'):\n for team in teams:\n team.unlocked.clear()\n team.unlock_set.all().delete()\n team.solved.clear()\n team.solve_set.all().delete()\n team.submission_set.all().delete()\n return redirect('huntserver:progress')\n elif request.GET.get('getpuzzles'):\n download_puzzles(Hunt.objects.get(hunt_number=settings.CURRENT_HUNT_NUM))\n return redirect('huntserver:progress')\n else:\n return render(request, 'access_error.html')\n\n#TODO: fix\n@login_required\ndef public_stats(request):\n newest_hunt = 1\n return hunt(request, newest_hunt)\n\ndef emails(request):\n if(not is_admin(request)):\n return render(request, 'access_error.html')\n \n people = Person.objects.filter(team__hunt__hunt_number=settings.CURRENT_HUNT_NUM)\n emails = []\n for person in people:\n emails.append(person.email)\n return HttpResponse(\", \".join(emails))\n","sub_path":"huntserver/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"379269596","text":"#coding:utf-8\nimport numpy as 
np\nimport tensorflow as tf\nimport random\nfrom PIL import Image\nfrom captcha.image import ImageCaptcha # pip install captcha\nimport matplotlib.pyplot as plt\n\ndataset = ['0','1','2','3','4','5','6','7','8','9',\\\n\t'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',\\\n\t'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n\nIMAGE_HEIGHT = 60\nIMAGE_WIDTH = 160\ndef create_captcha(size = 4):\n captcha_text = []\n for i in range(size):\n \tc = random.choice(dataset)\n \tcaptcha_text.append(c)\n captcha_text = ''.join(captcha_text)\n\n image = ImageCaptcha()\n captcha = image.generate(captcha_text)\n captcha_image = Image.open(captcha)\n captcha_image = np.array(captcha_image.convert('L'),'f')\n return captcha_image,captcha_text\n\ndef parse_image(images):\n plt.matshow(images,cmap = plt.get_cmap('gray'))\n plt.savefig(\"image.png\")\n\ndef vec_pos(c):\n ascii = ord(c)\n if ascii >= 0x61:\n return ascii - 0x61 + 36\n elif ascii >= 0x41:\n return ascii - 0x41 + 10\n else:\n return ascii - 0x30\n\ndef str2vec(str,size = 4):\n vector = np.zeros(len(dataset)*size)\n for i, c in enumerate(str):\n idx = i * len(dataset) + vec_pos(c)\n vector[idx] = 1\n return vector\n\ndef vec2text(vec):\n char_pos = vec.nonzero()[0]\n text=[]\n for i, c in enumerate(char_pos):\n char_idx = c % len(dataset)\n if char_idx < 10:\n char_code = char_idx + ord('0')\n elif char_idx <36:\n char_code = char_idx - 10 + ord('A')\n elif char_idx < 62:\n char_code = char_idx- 36 + ord('a')\n else:\n raise ValueError('error')\n text.append(chr(char_code))\n return \"\".join(text)\n\ndef create_batch(batch_size):\n batch_x = np.zeros([batch_size, IMAGE_HEIGHT*IMAGE_WIDTH])\n batch_y = np.zeros([batch_size, len(dataset)*4])\n\n def create_text_and_image():\n while True:\n image,text = create_captcha()\n if image.shape == (60,160):\n return text,image\n \n for i in range(batch_size):\n text,image = create_text_and_image()\n batch_x[i,:] = image.flatten() / 255 # (image.flatten()-128)/128 mean为0\n batch_y[i,:] = str2vec(text)\n return batch_x,batch_y\n \nif __name__ == '__main__':\n create_batch(1)","sub_path":"captcha_creater.py","file_name":"captcha_creater.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"312091023","text":"from dominate.tags import *\nimport pandas as pd\nimport json\nimport os\n\n\nclass CSS:\n def __init__(self, name: str):\n print(os.getcwd())\n with open(\"./lists/css/{filename}.json\".format(filename=name)) as json_file:\n stylesDict = json.load(json_file)\n self.styles = stylesDict[\"style\"]\n self.fonts = stylesDict[\"fonts\"]\n\n def __apply_add(self, class_name):\n return class_name + \" { \" + self.styles[class_name] + \" }\"\n\n def add_css(self):\n return \"\\n\".join(map(self.__apply_add, self.styles.keys()))\n\n def __apply_fonts(self, font_link):\n return link(rel=\"stylesheet\", href=font_link)\n\n def add_fonts(self):\n return [self.__apply_fonts(font) for font in self.fonts]\n\n\nclass ListOptions:\n def __init__(self, style=\"test\"):\n self.style = style\n\n\ndef js_list(list_name: str, data: pd.DataFrame, options: ListOptions):\n \"\"\"\n Python wrapper to generate a complete listjs list for displaying a pandas df.\n \"\"\"\n css = CSS(options.style)\n\n container = div(cls=\"container\", id=list_name)\n\n with container:\n css.add_fonts()\n style(css.add_css())\n br()\n 
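# inside dominate's context manager, bare tag calls (style, br, ...) attach themselves to the enclosing container in document order\n 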
search_div()\n br()\n filter_div()\n br()\n make_sort_table(data)\n\n td_setup = \"\".join(td(cls=field).render() for field in data.columns)\n\n options = make_options(fields=str(list(data.columns)), table_setup=td_setup)\n\n records = str(data.to_dict(\"records\"))\n\n text_parser = make_text_parser()\n text_filter = make_filter()\n js_code = f\"\"\"var options ={options};\n var values ={records};\n var userList = new List('{list_name}', options, values);\n {text_parser}\n {text_filter};\"\"\"\n\n return {\"html\": container.render(), \"js\": js_code}\n\n\ndef make_sort_table(data):\n sort_table = table(cls=\"table\")\n with sort_table:\n make_table_header(data)\n tbody(cls=\"list\")\n return sort_table\n\n\ndef filter_div():\n cont = div(cls=\"container\")\n with cont:\n i(cls=\"fas fa-search\")\n input(\n cls=\"input-bar form-control\",\n placeholder=\"Filter\",\n onkeyup=\"filterFunction()\",\n )\n return cont\n\n\ndef make_table_header(data):\n return thead().add(th(field, cls=\"sort\", data_sort=field) for field in data.columns)\n\n\ndef braces_wrap(base_string):\n return \"{\" + base_string + \"}\"\n\n\ndef make_options(fields, table_setup):\n vals = f\"valueNames: {fields},\\n\"\n items = f\"item: '{table_setup}'\"\n return braces_wrap(vals + items)\n\n\ndef search_div():\n cont = div(cls=\"container\")\n with cont:\n i(cls=\"fas fa-search\")\n input(cls=\"input-bar search form-control\", placeholder=\"Search\")\n\n return cont\n\n\ndef make_text_parser():\n return \"\"\"function textParser(filterString){\n if (filterString.indexOf('>') != -1){\n filter = filterString.split('>');\n return [filter[0],'>',filter[1]];\n }\n if(filterString.indexOf('<') != -1){\n filter = filterString.split('<');\n return [filter[0],'<',filter[1]];\n }\n return filterString\n };\"\"\"\n\n\ndef make_filter():\n return \"\"\"function filterFunction() {\n var input, filter, ul, li, a, i, txtValue;\n input = document.getElementsByTagName(\"input\");\n filter = input[1].value.toUpperCase();\n filterProps = textParser(filter);\n table = document.getElementsByTagName(\"table\")[0];\n tbody = table.getElementsByTagName(\"tbody\");\n tr = tbody[0].getElementsByTagName(\"tr\");\n if(filter == ''){\n for (i = 0; i < tr.length; i++){\n tr[i].style.display = \"\";\n }\n }\n else{\n for (i = 0; i < tr.length; i++) {\n filterCategory = tr[i].getElementsByClassName(filterProps[0].trim().toLowerCase())[0];\n txtValue = filterCategory.textContent || filterCategory.innerText;\n if(filterProps[1] == '>'){\n \tparsedFloat = parseFloat(filterProps[2].trim());\n if(parsedFloat){\n if (parseFloat(txtValue) > parsedFloat) {\n \ttr[i].style.display = \"\";\n \t} else {\n \ttr[i].style.display = \"none\";\n \t}\n }\n else{\n if (txtValue.toUpperCase() > (filterProps[2].trim().toUpperCase())) {\n \ttr[i].style.display = \"\";\n \t} else {\n \ttr[i].style.display = \"none\";\n \t}\n }\n }\n else if(filterProps[1] == '<'){\n parsedFloat = parseFloat(filterProps[2].trim());\n if(parsedFloat){\n if (parseFloat(txtValue) < parsedFloat) {\n \ttr[i].style.display = \"\";\n \t} else {\n \ttr[i].style.display = \"none\";\n \t}\n }\n else{\n if (txtValue.toUpperCase() < (filterProps[2].trim().toUpperCase())) {\n \ttr[i].style.display = \"\";\n \t} else {\n \ttr[i].style.display = \"none\";\n \t}\n }\n }\n }\n }\n };\"\"\"\n","sub_path":"slytherinterns/lists/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
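A minimal usage sketch for the js_list wrapper in the record above. The DataFrame contents, the "roster" list name, and the output filename are illustrative assumptions; a matching ./lists/css/test.json style file must exist, and the rendered page still needs the List.js library itself loaded.

import pandas as pd
# js_list and ListOptions come from the lists module in the record above
# hypothetical data; any flat DataFrame with scalar cells works
df = pd.DataFrame({"name": ["Ada", "Linus"], "score": [99.5, 87.0]})
bundle = js_list("roster", df, ListOptions(style="test"))
# bundle["html"] is the container markup, bundle["js"] the List.js bootstrap code
with open("roster.html", "w") as fh:
    fh.write(bundle["html"] + "<script>" + bundle["js"] + "</script>")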
+{"seq_id":"618725176","text":"\"\"\"Reads 1000s per brick tables and merges them\n\nmpi4py cannot 'gather' lists of fits_tables, so each rank must write out\nits own fits_table, then a later serial job must be used to merge the\ntables from each rank\n\"\"\"\n\nimport numpy as np\nimport os\nimport sys\nfrom glob import glob\nimport pandas as pd\nfrom collections import defaultdict\n\n\nfrom obiwan.runmanager.derived_tables import TargetSelection\n# try:\nfrom astrometry.util.fits import fits_table, merge_tables\n# except ImportError:\n# pass\n\nDATASETS=['dr3','dr5']\n\ndef dir_for_mpi(derived_dir):\n return os.path.join(derived_dir,'merged_tmp')\n\ndef dir_for_serial(derived_dir):\n return os.path.join(derived_dir,'merged')\n\nclass MergeTable(object):\n \"\"\"Base class for merging the MPI rank derived filed tables\"\"\"\n def __init__(self,derived_dir,savefn,**kwargs):\n self.derived_dir= derived_dir\n self.savefn=savefn\n\n def run(self,bricks_to_merge):\n Tlist=[]\n for brick in bricks_to_merge:\n fn= self.table_fn(brick)\n if os.path.exists(fn):\n tab= fits_table(self.table_fn(brick))\n Tlist.append(tab)\n else:\n print('Skipping brick %s b/c randoms table doesnt exist' % fn)\n T= merge_tables(Tlist,columns='fillzero')\n T.writeto(self.savefn)\n print('Wrote %s' % self.savefn)\n\n def table_fn(self,brick):\n return os.path.join(self.derived_dir,brick[:3],brick,\n 'name.fits')\n\nclass RandomsTable(MergeTable):\n \"\"\"Merges the randoms tables (psql db, input, tractor measurements)\n \"\"\"\n def __init__(self,derived_dir,savefn):\n super().__init__(derived_dir,savefn)\n\n def table_fn(self,brick):\n return os.path.join(self.derived_dir,brick[:3],brick,\n 'randoms.fits')\n\n\nclass SummaryTable(MergeTable):\n \"\"\"In addition to merging over brick tables, compute avg quantities per brick\n\n derived table \"randoms.fits\" must exist. Joins the brick summary\n quantities from a data release with a similar set from the\n randoms.fits table. 
Each brick's table has one\n row and all tables get merged to make the eatmap plots\n \"\"\"\n def __init__(self,derived_dir,savefn):\n \"\"\"\n Args:\n rank: mpi rank\n \"\"\"\n super().__init__(derived_dir,savefn)\n\n def table_fn(self,brick):\n return os.path.join(self.derived_dir,brick[:3],brick,\n 'randoms.fits')\n\n def run(self,bricks_to_merge):\n d=defaultdict(list)\n for brick in bricks_to_merge:\n if os.path.exists( self.table_fn(brick)):\n d['brickname'].append(brick)\n self.add_obiwan_summary(d,self.table_fn(brick),\n prefix='tractor_')\n else:\n print('Skipping brick %s b/c randoms table doesnt exist')\n # Save\n T=fits_table()\n T.set('brickname', np.array(d['brickname']).astype(np.string_))\n for key in ['n_inj','n_inj_elg_ngc','n_inj_elg_sgc',\n 'n_rec',\n 'n_inj_elg_trac_elg_ngc','n_inj_elg_trac_elg_sgc',\n 'n_inj_elg_trac_elg_ngc_allmask',\n 'n_inj_elg_trac_elg_sgc_allmask']:\n T.set(key, np.array(d[key]).astype(np.int32))\n T.set('brick_area', np.array(d['brick_area']).astype(np.float32))\n for b in 'grz':\n T.set('galdepth_'+b, np.array(d['galdepth_'+b]).astype(np.float32))\n self.write_table(T,self.savefn)\n\n def add_obiwan_summary(self,summary_dict,randoms_fn,prefix=''):\n try:\n T = fits_table(randoms_fn)\n except OSError:\n raise OSError('could not open %s' % randoms_fn)\n\n TS= TargetSelection()\n kw= dict(ra=T.ra,dec=T.dec,\n gmag=T.psql_g, rmag=T.psql_r, zmag=T.psql_z)\n true_elg_ngc= TS._eboss_elg('ngc',**kw)\n true_elg_sgc= TS._eboss_elg('sgc',**kw)\n\n kw= dict(prefix='tractor_',anymask=True)\n tractor_elg_ngc= TS.elg_by_measurement(T,'eboss_ngc',**kw)\n tractor_elg_sgc= TS.elg_by_measurement(T,'eboss_sgc',**kw)\n kw.update(anymask=False) # uses allmask\n tractor_elg_ngc_allmask= TS.elg_by_measurement(T,'eboss_ngc',**kw)\n tractor_elg_sgc_allmask= TS.elg_by_measurement(T,'eboss_sgc',**kw)\n\n # Truth\n summary_dict['n_inj'].append( len(T))\n summary_dict['n_inj_elg_ngc'].append( len(T[true_elg_ngc]) )\n summary_dict['n_inj_elg_sgc'].append( len(T[true_elg_sgc]) )\n # Measured\n isRec= T.obiwan_mask == 1\n summary_dict['n_rec'].append( len(T[isRec]) )\n summary_dict['n_inj_elg_trac_elg_ngc'].append( len(T[(true_elg_ngc) & (isRec) & (tractor_elg_ngc)]) )\n summary_dict['n_inj_elg_trac_elg_sgc'].append( len(T[(true_elg_sgc) & (isRec) & (tractor_elg_sgc)]) )\n summary_dict['n_inj_elg_trac_elg_ngc_allmask'].append( len(T[(true_elg_ngc) & (isRec) & (tractor_elg_ngc_allmask)]) )\n summary_dict['n_inj_elg_trac_elg_sgc_allmask'].append( len(T[(true_elg_sgc) & (isRec) & (tractor_elg_sgc_allmask)]) )\n\n # FIXME: depends on the brick\n summary_dict['brick_area'].append( 0.25**2 )\n\n for band in 'grz':\n keep= np.isfinite(T.get(prefix+'galdepth_'+band))\n depth= np.median(T.get(prefix+'galdepth_'+band)[keep])\n summary_dict['galdepth_'+band].append( depth)\n\n def write_table(self,tab,fn):\n if not os.path.exists(fn):\n tab.writeto(fn)\n print('Wrote %s' % fn)\n\n\ndef main_mpi(doWhat=None,bricks=[],nproc=1,\n derived_dir=None):\n \"\"\"\n Args:\n nproc: > 1 for mpi4py\n bricks: list of bricks\n \"\"\"\n if nproc > 1:\n from mpi4py.MPI import COMM_WORLD as comm\n bricks= np.array_split(bricks, comm.size)[comm.rank]\n else:\n class MyComm(object):\n def __init__(self):\n self.rank=0\n self.size=1\n comm= MyComm()\n\n tmpDir= dir_for_mpi(derived_dir)\n try:\n os.makedirs(tmpDir)\n except OSError:\n pass\n\n if doWhat == 'randoms':\n savefn= os.path.join(tmpDir,'randoms_rank%d.fits' % comm.rank)\n tabMerger= RandomsTable(derived_dir,savefn)\n elif doWhat == 'summary':\n 
savefn= os.path.join(tmpDir,'summary_rank%d.fits' % comm.rank)\n tabMerger= SummaryTable(derived_dir,savefn)\n tab= tabMerger.run(bricks)\n\ndef fits_table_cols(randoms_fn):\n columns= ['unique_id','ra','dec','obiwan_mask',\n 'tractor_anymask_g','tractor_anymask_r','tractor_anymask_z',\n 'tractor_flux_g','tractor_flux_r','tractor_flux_z',\n 'tractor_mw_transmission_g','tractor_mw_transmission_r','tractor_mw_transmission_z',\n 'tractor_psfdepth_g','tractor_psfdepth_r','tractor_psfdepth_z']\n T= fits_table(randoms_fn, columns=columns)\n T.set('brickname',(pd.Series(T.unique_id).str.split('_')\n .str[1].values\n .astype(str)))\n return T\n\ndef main_serial(doWhat=None,derived_dir=None,\n randoms_subset=False):\n \"\"\"merges the rank tables that are stored in merge_tmp/\"\"\"\n saveDir= dir_for_serial(derived_dir)\n try:\n os.makedirs(saveDir)\n except OSError:\n pass\n\n if doWhat == 'randoms':\n wild= \"randoms_rank*.fits\"\n if randoms_subset:\n outfn= os.path.join(saveDir,'randoms_subset.fits')\n else:\n outfn= os.path.join(saveDir,'randoms.fits')\n elif doWhat == 'summary':\n wild= \"summary_rank*.fits\"\n outfn= os.path.join(saveDir,\"summary.fits\")\n\n if os.path.exists(outfn):\n print('Merged table already exists %s' % outfn)\n return\n\n search=os.path.join(dir_for_mpi(derived_dir),\n wild)\n tab_fns= glob(search)\n if len(tab_fns) == 0:\n raise ValueError('found nothing with search: %s' % search)\n tabs=[]\n for fn in tab_fns:\n if randoms_subset:\n T= fits_table_cols(fn)\n else:\n T= fits_table(fn)\n tabs.append(T)\n print('Merging %d tables' % len(tabs))\n tab= merge_tables(tabs,columns='fillzero')\n tab.writeto(outfn)\n print('Wrote %s' % outfn)\n print('has %d rows' % len(tab))\n\ndef randoms_subset_count_rsdirs_per_brick(rand_subset_fn):\n \"\"\"for Hui, count number of rsdirs per brick to get which of the randoms subsets bricks are done/\"\"\"\n a=fits_table(rand_subset_fn)\n df=pd.DataFrame(dict(brick=pd.Series(a.unique_id).str.split(\"_\").str[1],\n rsdir=pd.Series(a.unique_id).str.split(\"_\").str[2]))\n num_rsdirs= df.groupby(['brick']).agg(lambda x: len(set(x)))\n num_rsdirs= num_rsdirs.reset_index().sort_values(by='rsdir',ascending=False)\n fn= rand_subset_fn.replace('.fits','_count_rsdirs_per_brick.csv')\n num_rsdirs.to_csv(fn,index=False)\n print('Wrote %s' % fn)\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('--doWhat', type=str, choices=['randoms','summary'],required=True)\n parser.add_argument('--derived_dir', type=str, required=True)\n parser.add_argument('--nproc', type=int, default=1, help='set to > 1 to run mpi4py')\n parser.add_argument('--bricks_fn', type=str, default=None,\n help='specify a fn listing bricks to run, or a single default brick will be ran')\n parser.add_argument('--merge_rank_tables', action=\"store_true\", default=False,help=\"set to merge the rank tables in the merge_tmp/ dir\")\n parser.add_argument('--randoms_subset', action=\"store_true\", default=False,help=\"make a merged table that is a subset of the randoms columns\")\n parser.add_argument('--count_rsdirs_per_brick', action=\"store_true\", default=False,help=\"read existing randoms_subset table\")\n args = parser.parse_args()\n\n if args.merge_rank_tables:\n kwargs= vars(args)\n for key in ['merge_rank_tables','nproc','bricks_fn',\n 'count_rsdirs_per_brick']:\n del kwargs[key]\n main_serial(**kwargs)\n sys.exit(0)\n\n if args.count_rsdirs_per_brick:\n fn= 
os.path.join(args.derived_dir,'merged','randoms_subset.fits')\n #fn= '/Users/kaylan1/Downloads/obiwan_plots/randoms_subset_10k.fits'\n randoms_subset_count_rsdirs_per_brick(fn)\n sys.exit(0)\n\n # Bricks to run\n if args.bricks_fn is None:\n bricks= ['1266p292']\n else:\n bricks= np.loadtxt(args.bricks_fn,dtype=str)\n\n kwargs= vars(args)\n for dropCol in ['bricks_fn','merge_rank_tables',\n 'randoms_subset','count_rsdirs_per_brick']:\n del kwargs[dropCol]\n kwargs.update(bricks=bricks)\n\n main_mpi(**kwargs)\n","sub_path":"py/obiwan/runmanager/merge_tables.py","file_name":"merge_tables.py","file_ext":"py","file_size_in_byte":10847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"178814667","text":"\nimport collections\nimport contextlib\nimport logging\nimport os\n\nimport numpy as np\nimport tensorboardX\nimport torch\n\n\n\n\nclass Logger(logging.Logger):\n\n def __init__(self, name=None, rank=None, logdir=None, level=None, fmt=None):\n name = name or 'AgentLogger'\n level = level or logging.INFO\n\n fmt = fmt or '%(asctime)s :: %(levelname)s :: %(message)s'\n if rank is not None:\n fmt = f'[{rank}] {fmt}'\n\n super().__init__(name, level)\n\n sh = logging.StreamHandler()\n sh.setLevel(logging.WARNING if rank else logging.DEBUG)\n sh.setFormatter(logging.Formatter(fmt))\n self.addHandler(sh)\n\n if logdir:\n if rank is None:\n logpath = os.path.join(logdir, 'log.txt')\n else:\n logpath = os.path.join(logdir, f'log_{rank}.txt')\n fh = logging.FileHandler(logpath)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(logging.Formatter(fmt))\n self.addHandler(fh)\n self.tb_writer = tensorboardX.SummaryWriter(logdir)\n else:\n self.tb_writer = None\n\n def epoch_logs(self, epoch):\n return EpochLogs(epoch, printf=self.info, tb_writer=self.tb_writer)\n\n\n\nclass EpochLogs:\n\n def __init__(self, epoch, printf=None, tb_writer=None):\n self.epoch = epoch\n self.printf = printf or print\n self.tb_writer = tb_writer\n self.scalars = collections.OrderedDict()\n self.videos = collections.OrderedDict()\n self.prefixes = []\n\n def push_prefix(self, *prefixes):\n for prefix in prefixes:\n self.prefixes.append(prefix)\n\n def pop_prefix(self, n=1):\n for _ in range(n):\n self.prefixes.pop()\n\n @contextlib.contextmanager\n def prefix(self, *prefixes):\n try:\n self.push_prefix(*prefixes)\n yield\n finally:\n self.pop_prefix(len(prefixes))\n\n def add_videos(self, name, videos, **video_kwargs):\n for video in videos:\n self.add_video(name, video, **video_kwargs)\n\n def add_video(self, name, video, **video_kwargs):\n if self.prefixes:\n name = f'{\"/\".join(self.prefixes)}/{name}'\n video = Video(name, video, **video_kwargs)\n if name in self.videos:\n self.videos[name].add(video)\n else:\n self.videos[name] = video\n\n def add_scalar(self, name, value, **scalar_kwargs):\n if self.prefixes:\n name = f'{\"/\".join(self.prefixes)}/{name}'\n scalar = EpochScalar(name, value, **scalar_kwargs)\n if not name in self.scalars:\n self.scalars[name] = scalar\n else:\n self.scalars[name].add(scalar)\n\n def add_scalar_dict(self, dict, prefix=None, **scalar_kwargs):\n try:\n if prefix:\n self.push_prefix(prefix)\n for k, v in dict.items():\n self.add_scalar(k, v, **scalar_kwargs)\n finally:\n if prefix:\n self.pop_prefix(1)\n\n\n def dump(self, step=None, debug=False, ffmt='%.4f'):\n # Extract scalars\n names = []\n values = []\n for name, scalar in self.scalars.items():\n if not debug and scalar.debug:\n continue\n for agg, value in scalar:\n names.append(str(name 
if agg is None else f'{name}:{agg}'))\n                values.append(value)\n        # Cast to string\n        value_strings = []\n        for value in values:\n            if isinstance(value, float):\n                value_strings.append(ffmt % value)\n            else:\n                value_strings.append(str(value))\n        # Compute dimensions\n        max_length_name = max(len(n) for n in names)\n        max_length_value = max(len(vs) for vs in value_strings)\n        width = max_length_name + max_length_value + 3\n        # Generate string\n        string = [\n            f'Epoch {self.epoch}',\n            '-' * max_length_name + ' ' + '-' * max_length_value\n        ]\n        for name, value_string in zip(names, value_strings):\n            space = width - len(name) - len(value_string)\n            string.append(f'{name}{\" \" * space}{value_string}')\n        string.append('=' * width)\n        # Print to wherever printf sends its output\n        self.printf('\\n'.join(string))\n        # If available, write scalars to tensorboard too\n        if self.tb_writer:\n            # Write scalars\n            tb_step = self.epoch if step is None else step\n            for name, value in zip(names, values):\n                self.tb_writer.add_scalar(name, value, tb_step)\n            # Write videos\n            for name, video in self.videos.items():\n                self.tb_writer.add_video(\n                    name, video.tb_frames, tb_step, fps=video.fps\n                )\n            self.tb_writer.flush()\n\n\n    def __repr__(self):\n        return f'<EpochLogs epoch={self.epoch}>'\n\n\nclass Video:\n\n    def __init__(self, name, frames, fps=10):\n        # Accept a single clip or a list/tuple of clips, each of shape (T, H, W, C)\n        frames = list(frames) if isinstance(frames, (list, tuple)) else [frames]\n        frames = [np.asarray(fs) for fs in frames]\n        if not all(len(fs.shape) == 4 for fs in frames):\n            raise ValueError('Video shape must be (T, H, W, C)')\n        if not all(fs.shape == frames[0].shape for fs in frames):\n            raise ValueError('All video frames must have the same dimensions')\n        self.name = name\n        self.frames = frames\n        self.fps = fps\n\n    @property\n    def dims(self):\n        return self.frames[0].shape\n\n    def add(self, other):\n        if not self == other:\n            raise ValueError(f'Incompatible video types: {self} != {other}')\n        self.frames += other.frames\n\n    @property\n    def tb_frames(self):\n        # Stack clips into one (N, T, H, W, C) array, then transpose to the\n        # (N, T, C, H, W) layout that tensorboardX's add_video expects\n        return np.stack(self.frames).transpose(0, 1, 4, 2, 3)\n\n    def __eq__(self, other):\n        return (\n            isinstance(other, Video) and\n            self.name == other.name and\n            self.dims == other.dims and\n            self.fps == other.fps\n        )\n\n    def __len__(self):\n        return len(self.frames)\n\n    def __repr__(self):\n        return f'